Coverage for tsfpga/vivado/tcl.py: 95%
218 statements
« prev ^ index » next — coverage.py v7.5.1, created at 2024-05-06 20:51 +0000
1# --------------------------------------------------------------------------------------------------
2# Copyright (c) Lukas Vik. All rights reserved.
3#
4# This file is part of the tsfpga project, a project platform for modern FPGA development.
5# https://tsfpga.com
6# https://github.com/tsfpga/tsfpga
7# --------------------------------------------------------------------------------------------------
9# Standard libraries
10from pathlib import Path
11from typing import TYPE_CHECKING, Any, Iterable, Optional
13# First party libraries
14from tsfpga.hdl_file import HdlFile
15from tsfpga.system_utils import create_file
17# Local folder libraries
18from .common import to_tcl_path
19from .generics import get_vivado_tcl_generic_value
21if TYPE_CHECKING:
22 # First party libraries
23 from tsfpga.build_step_tcl_hook import BuildStepTclHook
24 from tsfpga.constraint import Constraint
25 from tsfpga.module_list import ModuleList
# Number of available Vivado implementation strategies.
# Used by 'VivadoTcl.build' in "implementation explore" mode to divide the requested
# thread count into one thread per parallel strategy job.
NUM_VIVADO_STRATEGIES = 33
class VivadoTcl:
    """
    Class with methods for translating a set of sources into Vivado TCL.
    """

    def __init__(
        self,
        name: str,
    ) -> None:
        """
        Arguments:
            name: Name of the Vivado project that shall be created/built.
        """
        # Used for the 'create_project' call and the hardware platform (.xsa) file name.
        self.name = name

    # pylint: disable=too-many-arguments
    def create(
        self,
        project_folder: Path,
        modules: "ModuleList",
        part: str,
        top: str,
        run_index: int,
        generics: Optional[dict[str, str]] = None,
        constraints: Optional[list["Constraint"]] = None,
        tcl_sources: Optional[list[Path]] = None,
        build_step_hooks: Optional[list["BuildStepTclHook"]] = None,
        ip_cache_path: Optional[Path] = None,
        disable_io_buffers: bool = True,
        # Add no sources other than IP cores
        ip_cores_only: bool = False,
        # Will be passed on to module functions. Enables parameterization of e.g. IP cores.
        other_arguments: Optional[dict[str, Any]] = None,
    ) -> str:
        """
        Return TCL that creates a Vivado project and adds sources, generics, constraints,
        build step hooks, IP cores and project settings.

        Arguments:
            project_folder: The Vivado project will be created in this folder.
            modules: Source files, IP cores and scoped constraints are fetched from these.
            part: Vivado part (device) identification string, passed to ``create_project -part``.
            top: Name of the top level, set on the current fileset.
            run_index: The run ``synth_<run_index>`` is selected as the current run.
            generics: Top level generic values, set on the current fileset.
            constraints: Global (non-module-scoped) constraints.
            tcl_sources: Extra TCL files that are sourced early in the script.
            build_step_hooks: TCL hooks to attach to the Vivado build steps.
            ip_cache_path: If given, the Vivado IP cache is enabled at this location.
            disable_io_buffers: When True, ``-no_iobuf`` is added to the synthesis options.
            ip_cores_only: When True, HDL sources/generics/constraints/hooks are skipped
                and only TCL sources and IP cores are added.
            other_arguments: Forwarded to the module functions that produce sources,
                IP cores and constraints.

        Return:
            The complete project creation TCL script as a string.
        """
        generics = {} if generics is None else generics
        other_arguments = {} if other_arguments is None else other_arguments

        tcl = f"create_project {self.name} {{{to_tcl_path(project_folder)}}} -part {part}\n"
        tcl += "set_property target_language VHDL [current_project]\n"

        if ip_cache_path is not None:
            tcl += f"config_ip_cache -use_cache_location {{{to_tcl_path(ip_cache_path)}}}\n"
            tcl += "\n"

        tcl += self._add_tcl_sources(tcl_sources)
        tcl += "\n"

        if not ip_cores_only:
            tcl += self._add_module_source_files(modules=modules, other_arguments=other_arguments)
            tcl += "\n"
            tcl += self._add_generics(generics)
            tcl += "\n"
            tcl += self._add_constraints(
                self._iterate_constraints(
                    modules=modules, constraints=constraints, other_arguments=other_arguments
                )
            )
            tcl += "\n"
            tcl += self._add_build_step_hooks(build_step_hooks, project_folder)
            tcl += "\n"

        tcl += self._add_ip_cores(modules=modules, other_arguments=other_arguments)
        tcl += "\n"
        tcl += self._add_project_settings()
        tcl += "\n"
        tcl += f"current_run [get_runs synth_{run_index}]\n"
        tcl += "\n"
        tcl += f"set_property top {top} [current_fileset]\n"
        tcl += "reorder_files -auto -disable_unused\n"
        tcl += "\n"

        if disable_io_buffers:
            tcl += (
                "set_property -name {STEPS.SYNTH_DESIGN.ARGS.MORE OPTIONS} "
                f"-value -no_iobuf -objects [get_runs synth_{run_index}]"
            )
            tcl += "\n"

        tcl += "exit\n"
        return tcl

    def _add_module_source_files(
        self, modules: "ModuleList", other_arguments: dict[str, Any]
    ) -> str:
        """
        Return TCL that reads the synthesis source files of all modules.
        Files are grouped per module and per type (VHDL / Verilog / SystemVerilog)
        so that one 'read_*' call covers each group.
        """
        tcl = ""
        for module in modules:
            vhdl_files = []
            verilog_files = []
            system_verilog_files = []

            for hdl_file in module.get_synthesis_files(**other_arguments):
                if hdl_file.type == HdlFile.Type.VHDL:
                    vhdl_files.append(hdl_file.path)
                elif hdl_file.type in [HdlFile.Type.VERILOG_SOURCE, HdlFile.Type.VERILOG_HEADER]:
                    verilog_files.append(hdl_file.path)
                elif hdl_file.type in [
                    HdlFile.Type.SYSTEMVERILOG_SOURCE,
                    HdlFile.Type.SYSTEMVERILOG_HEADER,
                ]:
                    system_verilog_files.append(hdl_file.path)
                else:
                    raise NotImplementedError(f"Can not handle file: {hdl_file}")
                    # Encrypted source files (.vp?), etc, I do not know how
                    # to handle, since I have no use case for it at the moment.

            if vhdl_files:
                files_string = self._to_file_list(vhdl_files)
                # VHDL files are read into the module's library, in VHDL-2008 mode.
                tcl += f"read_vhdl -library {module.library_name} -vhdl2008 {files_string}\n"

            if verilog_files:
                files_string = self._to_file_list(verilog_files)
                tcl += f"read_verilog {files_string}\n"

            if system_verilog_files:
                files_string = self._to_file_list(system_verilog_files)
                tcl += f"read_verilog -sv {files_string}\n"

        return tcl

    @staticmethod
    def _to_file_list(file_paths: list[Path]) -> str:
        """
        Return a TCL snippet for a file list, with each file enclosed in curly braces.
        E.g. "{file1}" or "{{file1} {file2} {file3}}"
        """
        if len(file_paths) == 1:
            files_string = to_tcl_path(file_paths[0])
        else:
            files_string = " ".join([f"{{{to_tcl_path(file_path)}}}" for file_path in file_paths])

        return f"{{{files_string}}}"

    @staticmethod
    def _add_tcl_sources(tcl_sources: Optional[list[Path]]) -> str:
        """
        Return TCL that sources each of the given TCL files.
        Returns an empty string if no files are given.
        """
        if tcl_sources is None:
            return ""

        tcl = ""
        for tcl_source_file in tcl_sources:
            tcl += f"source -notrace {{{to_tcl_path(tcl_source_file)}}}\n"
        return tcl

    @staticmethod
    def _add_ip_cores(modules: "ModuleList", other_arguments: dict[str, Any]) -> str:
        """
        Return TCL that creates all IP cores of all modules.
        Each IP core script is sourced inside its own TCL proc, so that variables set
        for one core (including any 'ip_core_file.variables') stay local to that core.
        """
        tcl = ""
        for module in modules:
            for ip_core_file in module.get_ip_core_files(**other_arguments):
                create_function_name = f"create_ip_core_{ip_core_file.name}"
                tcl += f"proc {create_function_name} {{}} {{\n"

                if ip_core_file.variables:
                    # Set the requested variables before sourcing the IP core script,
                    # so the script can read them.
                    for key, value in ip_core_file.variables.items():
                        tcl += f'  set {key} "{value}"\n'

                tcl += f"""\
  source -notrace {{{to_tcl_path(ip_core_file.path)}}}
}}
{create_function_name}
"""

        return tcl

    def _add_build_step_hooks(
        self, build_step_hooks: Optional[list["BuildStepTclHook"]], project_folder: Path
    ) -> str:
        """
        Return TCL that attaches the given hook scripts to their Vivado build steps
        on all matching runs. Returns an empty string if no hooks are given.
        """
        if build_step_hooks is None:
            return ""

        # There can be many hooks for the same step. Reorganize them into a dict.
        hook_steps: dict[str, list["BuildStepTclHook"]] = {}
        for build_step_hook in build_step_hooks:
            if build_step_hook.hook_step in hook_steps:
                hook_steps[build_step_hook.hook_step].append(build_step_hook)
            else:
                hook_steps[build_step_hook.hook_step] = [build_step_hook]

        tcl = ""
        for step, hooks in hook_steps.items():
            # Vivado will only accept one TCL script as hook for each step. So if we want
            # to add more we have to create a new TCL file, that sources the other files,
            # and add that as the hook to Vivado.
            if len(hooks) == 1:
                tcl_file = hooks[0].tcl_file
            else:
                tcl_file = project_folder / ("hook_" + step.replace(".", "_") + ".tcl")
                source_hooks_tcl = "".join(
                    [f"source {{{to_tcl_path(hook.tcl_file)}}}\n" for hook in hooks]
                )
                create_file(tcl_file, source_hooks_tcl)

            # Add to fileset to enable archive and other project based functionality
            tcl += f"add_files -fileset utils_1 -norecurse {{{to_tcl_path(tcl_file)}}}\n"

            # Build step hook can only be applied to a run (e.g. impl_1), not on a project basis
            run_wildcard = "synth_*" if hooks[0].step_is_synth else "impl_*"
            tcl_block = f"set_property {step} {{{to_tcl_path(tcl_file)}}} ${{run}}"
            tcl += self._tcl_for_each_run(run_wildcard, tcl_block)

        return tcl

    def _add_project_settings(self) -> str:
        """
        Return TCL that applies static project settings (thread count, VHDL asserts,
        binary bitstream) to all synthesis and implementation runs.
        """
        tcl = ""

        # Default value for when opening project in GUI.
        # Will be overwritten if using build() function.
        tcl += "set_param general.maxThreads 7\n"

        # Enable VHDL assert statements to be evaluated. A severity level of failure will
        # stop the synthesis and produce an error.
        tcl_block = "set_property STEPS.SYNTH_DESIGN.ARGS.ASSERT true ${run}"
        tcl += self._tcl_for_each_run("synth_*", tcl_block)

        # Enable binary bitstream as well
        tcl_block = "set_property STEPS.WRITE_BITSTREAM.ARGS.BIN_FILE true ${run}"
        tcl += self._tcl_for_each_run("impl_*", tcl_block)

        return tcl

    @staticmethod
    def _tcl_for_each_run(run_wildcard: str, tcl_block: str) -> str:
        """
        Apply TCL block for each defined run. Use ${run} for run variable in TCL.
        """
        tcl = ""
        tcl += f"foreach run [get_runs {run_wildcard}] {{\n"
        tcl += tcl_block + "\n"
        tcl += "}\n"
        return tcl

    @staticmethod
    def _add_generics(generics: Optional[dict[str, Any]]) -> str:
        """
        Generics are set according to this weird format:
        https://www.xilinx.com/support/answers/52217.html

        Returns an empty string if no generics are given.
        """
        if not generics:
            return ""

        generic_list = []
        for name, value in generics.items():
            # Format the value so Vivado TCL interprets it with the correct type.
            value_tcl_formatted = get_vivado_tcl_generic_value(value=value)
            generic_list.append(f"{name}={value_tcl_formatted}")

        generics_string = " ".join(generic_list)
        return f"set_property generic {{{generics_string}}} [current_fileset]\n"

    @staticmethod
    def _iterate_constraints(
        modules: "ModuleList",
        constraints: Optional[list["Constraint"]],
        other_arguments: dict[str, Any],
    ) -> Iterable["Constraint"]:
        """
        Yield the scoped constraints of each module, followed by the given global
        constraints (if any).
        """
        for module in modules:
            yield from module.get_scoped_constraints(**other_arguments)

        if constraints is not None:
            yield from constraints

    @staticmethod
    def _add_constraints(constraints: Iterable["Constraint"]) -> str:
        """
        Return TCL that reads each constraint file and sets its processing order
        and 'used_in' properties.
        """
        tcl = ""
        for constraint in constraints:
            constraint_file = to_tcl_path(constraint.file)

            ref_flags = "" if constraint.ref is None else (f"-ref {constraint.ref} ")
            # Non-.xdc files (e.g. .tcl constraints) must be read as unmanaged.
            managed_flags = "" if constraint_file.endswith("xdc") else "-unmanaged "
            tcl += f"read_xdc {ref_flags}{managed_flags}{{{constraint_file}}}\n"

            get_file = f"[get_files {{{constraint_file}}}]"
            tcl += f"set_property PROCESSING_ORDER {constraint.processing_order} {get_file}\n"

            # Restrict the constraint to one flow when requested.
            if constraint.used_in == "impl":
                tcl += f"set_property used_in_synthesis false {get_file}\n"
            elif constraint.used_in == "synth":
                tcl += f"set_property used_in_implementation false {get_file}\n"

        return tcl

    def build(
        self,
        project_file: Path,
        output_path: Path,
        num_threads: int,
        run_index: int,
        generics: Optional[dict[str, Any]] = None,
        synth_only: bool = False,
        from_impl: bool = False,
        impl_explore: bool = False,
        analyze_synthesis_timing: bool = True,
    ) -> str:
        """
        Return TCL that opens the project and runs synthesis and/or implementation,
        writes the hardware platform file, and exits.

        Arguments:
            project_file: The Vivado project file to open.
            output_path: The hardware platform (.xsa) file is written here.
            num_threads: Maximum number of threads for the build.
            run_index: Selects the runs ``synth_<run_index>`` and ``impl_<run_index>``.
            generics: Top level generic values, set before building.
            synth_only: When True, only synthesis is run.
            from_impl: When True, synthesis is skipped and only implementation is run.
            impl_explore: When True, multiple implementation strategies are launched
                in parallel and the build succeeds if any meets timing.
            analyze_synthesis_timing: When True, the synthesized design is opened and
                checked for e.g. unhandled clock crossings.

        Return:
            The complete build TCL script as a string.
        """
        if impl_explore:
            # For implementation explore, threads are divided to one each per job.
            # Number of jobs in parallel are the number of threads specified for build.
            # Clamp max threads between 1 and 32, which are allowed by Vivado 2018.3+.
            num_threads_general = min(max(1, num_threads // NUM_VIVADO_STRATEGIES), 32)
        else:
            # Max value in Vivado 2018.3+. set_param will give an error if higher number.
            num_threads_general = min(num_threads, 32)

        # Synthesis accepts a lower maximum thread count than the general setting.
        num_threads_synth = min(num_threads, 8)

        tcl = f"open_project {to_tcl_path(project_file)}\n"
        tcl += f"set_param general.maxThreads {num_threads_general}\n"
        tcl += f"set_param synth.maxThreads {num_threads_synth}\n"
        tcl += "\n"
        tcl += self._add_generics(generics)
        tcl += "\n"

        if not from_impl:
            synth_run = f"synth_{run_index}"

            tcl += self._synthesis(synth_run, num_threads, analyze_synthesis_timing)
            tcl += "\n"

        if not synth_only:
            impl_run = f"impl_{run_index}"

            if impl_explore:
                tcl += self._run_multiple(num_jobs=num_threads)
            else:
                tcl += self._run(impl_run, num_threads, to_step="write_bitstream")
            tcl += "\n"

            tcl += self._write_hw_platform(output_path)
            tcl += "\n"

        tcl += "exit\n"

        return tcl

    def _synthesis(self, run: str, num_threads: int, analyze_synthesis_timing: bool) -> str:
        """
        Return TCL that launches the synthesis run and, when requested, opens the
        synthesized design to run SSN/utilization reports and abort the build on
        unhandled clock crossings.
        """
        tcl = self._run(run, num_threads)
        if analyze_synthesis_timing:
            # For synthesis flow we perform the timing checks by opening the design. It would have
            # been more efficient to use a post-synthesis hook (since the design would already be
            # open), if that mechanism had worked. It seems to be very bugged. So we add the
            # checkers to the build script.
            # For implementation, we use a pre-bitstream build hook which seems to work decently.
            tcl += """
open_run ${run}
set run_directory [get_property DIRECTORY [get_runs ${run}]]

# Generate report on simultaneous switching noise (SSN) for the design.
# It seems safe to do this after synthesis; inspecting the reports in a test build after both
# synthesis and implementation shows that the results are identical.
# Will generate a "Designutils 20-923" message if noise margins are not met.
# If the user would like this to fail the build, this message severity shall be raised to ERROR.
# At the moment we do not know how stable this mechanism is, so we do not fail the build
# per default.
# The call is very fast (< 1s) so it is fine to run always, even though not everyone will use it.
set current_part [get_property PART [current_project]]
set part_supports_ssn [get_parts ${current_part} -filter {ssn_report == 1}]
if {${part_supports_ssn} != ""} {
  set output_file [file join ${run_directory} "report_ssn.html"]
  report_ssn -phase -format html -file ${output_file}
}

# This call is duplicated in report_utilization.tcl for implementation.
set output_file [file join ${run_directory} "hierarchical_utilization.rpt"]
report_utilization -hierarchical -hierarchical_depth 4 -file ${output_file}


# After synthesis we check for unhandled clock crossings and abort the build based on the result.
# Other timing checks, e.g. setup/hold/pulse width violations, are not reliable after synthesis,
# and should not abort the build. These need to be checked after implementation.
"""

            tcl += """
# This code is duplicated in check_timing.tcl for implementation.
if {[regexp {\\(unsafe\\)} [report_clock_interaction -delay_type min_max -return_string]]} {
  puts "ERROR: Unhandled clock crossing in ${run} run. See clock_interaction.rpt and \
timing_summary.rpt in ${run_directory}."

  set output_file [file join ${run_directory} "clock_interaction.rpt"]
  report_clock_interaction -delay_type min_max -file ${output_file}

  set output_file [file join ${run_directory} "timing_summary.rpt"]
  report_timing_summary -file ${output_file}

  exit 1
}
"""
        return tcl

    @staticmethod
    def _run(run: str, num_threads: int, to_step: Optional[str] = None) -> str:
        """
        Return TCL that resets and launches the given run, waits for it to finish,
        and exits with a non-zero code if the run did not reach 100% progress.

        Arguments:
            run: Name of the run, e.g. "synth_1".
            num_threads: Number of parallel jobs for 'launch_runs'.
            to_step: If given, the run is only executed up to this step.
        """
        to_step = "" if to_step is None else f" -to_step {to_step}"

        tcl = f"""
set run {run}
reset_run ${{run}}
launch_runs ${{run}} -jobs {num_threads}{to_step}
"""

        tcl += """
wait_on_run ${run}

if {[get_property PROGRESS [get_runs ${run}]] != "100%"} {
  puts "ERROR: Run ${run} failed."
  exit 1
}
"""
        return tcl

    def _run_multiple(self, num_jobs: int = 4, base_name: str = "impl_explore_") -> str:
        """
        Return TCL that launches all "implementation explore" runs in parallel and
        succeeds if at least one of them meets timing. Exits with a non-zero code
        if none did.
        """
        # Currently, this creates .tcl that waits for all active runs to complete

        tcl = "set build_succeeded 0\n"
        tcl += f"reset_runs [get_runs {base_name}*]\n"
        tcl += f"launch_runs -jobs {num_jobs} [get_runs {base_name}*] -to_step write_bitstream\n"
        tcl += "\n"

        tcl += f"wait_on_runs -quiet -exit_condition ANY_ONE_MET_TIMING [get_runs {base_name}*]\n"
        tcl += "\n"

        # Once one run met timing, runs that have not started yet are not needed.
        tcl += 'reset_runs [get_runs -filter {STATUS == "Queued..."}]\n'

        # Wait on runs that are still going, since Vivado can't kill runs in progress reliably.
        # Killing runs in progress causes a zombie process which will lock up VUnit's Process class.
        tcl += f'wait_on_runs -quiet [get_runs -filter {{STATUS != "Not started"}} {base_name}*]\n'
        tcl += "\n"

        tcl_block = """
  set build_succeeded 1
  puts "Run $run met timing"
"""
        tcl += self._tcl_for_each_run(
            f'-filter {{PROGRESS == "100%"}} {base_name}*', tcl_block=tcl_block
        )

        tcl += """
if {${build_succeeded} eq 0} {
  puts "No build met timing, exiting."
  exit 1
}
"""

        return tcl

    def _write_hw_platform(self, output_path: Path) -> str:
        """
        TCL command to create a Xilinx support archive (.xsa) file, for use as a
        hardware platform.
        Used to be known as a "hdf" or "hwdef" file.

        This is mainly used for Zynq devices to generate code to set up the PS at boot.
        There is also code generated for each MicroBlaze that is present in the design.
        If there is neither a block design nor a MicroBlaze available, the .xsa will be empty apart
        from some info about which part is used, etc.

        The '-quiet' flag is used since there was a Vivado bug observed in this very
        specific scenario:
        * Vivado 2022.1
        * UltraScale+ non-Zynq device (i.e. no block design)
        * Design contains MicroBlaze
        * Design contains ILA
        In this case the 'write_hw_platform' call would fail.
        This bug might be present in other Vivado versions and in other scenarios as well.
        Since this is a very fringe scenario, and it is unlikely that anyone would ever need the
        .xsa file specifically from the ILA build, we allow the command to fail quietly.
        """
        xsa_file = to_tcl_path(output_path / f"{self.name}.xsa")

        tcl = f"""
puts "Creating hardware platform {xsa_file}..."
write_hw_platform -fixed -force -quiet -include_bit {{{xsa_file}}}
"""

        return tcl