Coverage for tsfpga/vivado/tcl.py: 94% (204 statements)
# --------------------------------------------------------------------------------------------------
# Copyright (c) Lukas Vik. All rights reserved.
#
# This file is part of the tsfpga project, a project platform for modern FPGA development.
# https://tsfpga.com
# https://github.com/tsfpga/tsfpga
# --------------------------------------------------------------------------------------------------

from __future__ import annotations

from typing import TYPE_CHECKING, Any

from tsfpga.hdl_file import HdlFile
from tsfpga.system_utils import create_file

from .common import to_tcl_path
from .generics import get_vivado_tcl_generic_value

if TYPE_CHECKING:
    from collections.abc import Iterable
    from pathlib import Path

    from tsfpga.build_step_tcl_hook import BuildStepTclHook
    from tsfpga.constraint import Constraint
    from tsfpga.module_list import ModuleList


# Number of available Vivado implementation strategies
NUM_VIVADO_STRATEGIES = 33


class VivadoTcl:
    """
    Class with methods for translating a set of sources into Vivado TCL.
    """

    def __init__(
        self,
        name: str,
    ) -> None:
        self.name = name

    def create(  # noqa: PLR0913
        self,
        project_folder: Path,
        modules: ModuleList,
        part: str,
        top: str,
        run_index: int,
        generics: dict[str, str] | None = None,
        constraints: list[Constraint] | None = None,
        tcl_sources: list[Path] | None = None,
        build_step_hooks: list[BuildStepTclHook] | None = None,
        ip_cache_path: Path | None = None,
        disable_io_buffers: bool = True,
        # Add no sources other than IP cores
        ip_cores_only: bool = False,
        # Will be passed on to module functions. Enables parameterization of e.g. IP cores.
        other_arguments: dict[str, Any] | None = None,
    ) -> str:
        generics = {} if generics is None else generics
        other_arguments = {} if other_arguments is None else other_arguments

        tcl = f"""\
create_project -part "{part}" "{self.name}" {{{to_tcl_path(project_folder)}}}
set_property "target_language" "VHDL" [current_project]

"""
        if ip_cache_path is not None:
            tcl += f"config_ip_cache -use_cache_location {{{to_tcl_path(ip_cache_path)}}}\n\n"

        if not ip_cores_only:
            tcl += self._add_module_source_files(modules=modules, other_arguments=other_arguments)
            tcl += self._add_tcl_sources(tcl_sources)
            tcl += self._add_generics(generics)

            constraints = list(
                self._iterate_constraints(
                    modules=modules, constraints=constraints, other_arguments=other_arguments
                )
            )
            tcl += self._add_constraints(constraints=constraints)
            tcl += self._add_build_step_hooks(build_step_hooks, project_folder)

        tcl += self._add_ip_cores(modules=modules, other_arguments=other_arguments)
        tcl += self._add_project_settings()

        tcl += f"""
# ------------------------------------------------------------------------------
current_run [get_runs "synth_{run_index}"]
set_property "top" "{top}" [current_fileset]
reorder_files -auto -disable_unused

"""
        if disable_io_buffers:
            tcl += f"""\
set_property -name "STEPS.SYNTH_DESIGN.ARGS.MORE OPTIONS" \
-value "-no_iobuf" -objects [get_runs "synth_{run_index}"]

"""
        tcl += """
# ------------------------------------------------------------------------------
exit
"""
        return tcl
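    # Added note, not part of the original source: as a rough illustration, a hypothetical call
    # such as
    #   VivadoTcl(name="my_project").create(
    #       project_folder=Path("project"),
    #       modules=modules,
    #       part="xc7z020clg400-1",
    #       top="my_top",
    #       run_index=1,
    #   )
    # returns a TCL string that begins approximately with
    #   create_project -part "xc7z020clg400-1" "my_project" {/abs/path/to/project}
    #   set_property "target_language" "VHDL" [current_project]
    # followed by the source file, generic, constraint, hook and settings sections assembled above.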

    def _add_module_source_files(self, modules: ModuleList, other_arguments: dict[str, Any]) -> str:
        if len(modules) == 0:
            return ""

        tcl = """
# ------------------------------------------------------------------------------
"""
        for module in modules:
            vhdl_files = []
            verilog_files = []
            system_verilog_files = []

            for hdl_file in module.get_synthesis_files(**other_arguments):
                if hdl_file.type == HdlFile.Type.VHDL:
                    vhdl_files.append(hdl_file.path)
                elif hdl_file.type in [HdlFile.Type.VERILOG_SOURCE, HdlFile.Type.VERILOG_HEADER]:
                    verilog_files.append(hdl_file.path)
                elif hdl_file.type in [
                    HdlFile.Type.SYSTEMVERILOG_SOURCE,
                    HdlFile.Type.SYSTEMVERILOG_HEADER,
                ]:
                    system_verilog_files.append(hdl_file.path)
                else:
                    raise NotImplementedError(f"Can not handle file: {hdl_file}")
                    # Encrypted source files (.vp?), etc, are not handled, since there is
                    # no use case for them at the moment.

            if vhdl_files:
                files_string = self._to_file_list(vhdl_files)
                tcl += f'read_vhdl -library "{module.library_name}" -vhdl2008 {files_string}\n'

            if verilog_files:
                files_string = self._to_file_list(verilog_files)
                tcl += f"read_verilog {files_string}\n"

            if system_verilog_files:
                files_string = self._to_file_list(system_verilog_files)
                tcl += f"read_verilog -sv {files_string}\n"

        return f"{tcl}\n"

    @staticmethod
    def _to_file_list(file_paths: list[Path]) -> str:
        """
        Return a TCL snippet for a file list, with each file enclosed in curly braces.
        E.g. "{file1}" or "{{file1} {file2} {file3}}"
        """
        if len(file_paths) == 1:
            files_string = to_tcl_path(file_paths[0])
        else:
            files_string = " ".join([f"{{{to_tcl_path(file_path)}}}" for file_path in file_paths])

        return f"{{{files_string}}}"
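    # Added note, not part of the original source: as an assumed example (paths shown as-is for
    # brevity; to_tcl_path() normalizes them for TCL),
    #   _to_file_list([Path("a.vhd")]) returns "{a.vhd}"
    #   _to_file_list([Path("a.vhd"), Path("b.vhd")]) returns "{{a.vhd} {b.vhd}}"
    # so the result can be passed as a single argument to read_vhdl/read_verilog.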

    @staticmethod
    def _add_tcl_sources(tcl_sources: list[Path] | None) -> str:
        if tcl_sources is None or len(tcl_sources) == 0:
            return ""

        tcl = """
# ------------------------------------------------------------------------------
"""
        for tcl_source_file in tcl_sources:
            tcl += f"source -notrace {{{to_tcl_path(tcl_source_file)}}}\n"

        return f"{tcl}\n"

    @staticmethod
    def _add_ip_cores(modules: ModuleList, other_arguments: dict[str, Any]) -> str:
        tcl = ""
        for module in modules:
            for ip_core_file in module.get_ip_core_files(**other_arguments):
                create_function_name = f"create_ip_core_{ip_core_file.name}"
                tcl += f"proc {create_function_name} {{}} {{\n"

                if ip_core_file.variables:
                    for key, value in ip_core_file.variables.items():
                        tcl += f'  set {key} "{value}"\n'

                tcl += f"""\
  source -notrace {{{to_tcl_path(ip_core_file.path)}}}
}}
{create_function_name}

"""
        if tcl == "":
            return ""

        return f"""
# ------------------------------------------------------------------------------
{tcl}\
"""
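    # Added note, not part of the original source: with hypothetical values, an IP core file
    # named "fifo" with variables={"depth": 512} renders roughly as
    #   proc create_ip_core_fifo {} {
    #     set depth "512"
    #     source -notrace {/path/to/fifo.tcl}
    #   }
    #   create_ip_core_fifo
    # i.e. each core is created by sourcing its TCL file inside a dedicated proc, which keeps
    # the "set" variables local to that core.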

    def _add_build_step_hooks(
        self, build_step_hooks: list[BuildStepTclHook] | None, project_folder: Path
    ) -> str:
        if build_step_hooks is None or len(build_step_hooks) == 0:
            return ""

        # There can be many hooks for the same step. Reorganize them into a dict.
        hook_steps: dict[str, list[BuildStepTclHook]] = {}
        for build_step_hook in build_step_hooks:
            if build_step_hook.hook_step in hook_steps:
                hook_steps[build_step_hook.hook_step].append(build_step_hook)
            else:
                hook_steps[build_step_hook.hook_step] = [build_step_hook]

        tcl = """
# ------------------------------------------------------------------------------
"""
        for step, hooks in hook_steps.items():
            # Vivado will only accept one TCL script as hook for each step. So if we want to add
            # more than one, we have to create a new TCL file that sources the other files, and
            # add that file as the hook to Vivado.
            if len(hooks) == 1:
                tcl_file = hooks[0].tcl_file
            else:
                tcl_file = project_folder / ("hook_" + step.replace(".", "_") + ".tcl")
                source_hooks_tcl = "".join(
                    [f"source {{{to_tcl_path(hook.tcl_file)}}}\n" for hook in hooks]
                )
                create_file(tcl_file, source_hooks_tcl)

            # Add to fileset to enable archive and other project-based functionality
            tcl += f'add_files -fileset "utils_1" -norecurse {{{to_tcl_path(tcl_file)}}}\n'

            # Build step hooks can only be applied to a run (e.g. impl_1), not on a project basis
            run_wildcard = '"synth_*"' if hooks[0].step_is_synth else '"impl_*"'
            tcl_block = f'set_property "{step}" {{{to_tcl_path(tcl_file)}}} ${{run}}'
            tcl += self._tcl_for_each_run(run_wildcard=run_wildcard, tcl_block=tcl_block)

        return f"{tcl}\n"
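    # Added note, not part of the original source: with a hypothetical step name, two hooks
    # registered for "STEPS.WRITE_BITSTREAM.TCL.PRE" are merged into a generated wrapper file
    # "hook_STEPS_WRITE_BITSTREAM_TCL_PRE.tcl" that contains
    #   source {/path/to/hook_a.tcl}
    #   source {/path/to/hook_b.tcl}
    # and that wrapper is then set as the hook property on each matching run, roughly:
    #   foreach run [get_runs "impl_*"] {
    #     set_property "STEPS.WRITE_BITSTREAM.TCL.PRE" {/path/to/hook_....tcl} ${run}
    #   }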

    def _add_project_settings(self) -> str:
        tcl = """
# ------------------------------------------------------------------------------
"""
        # Default value for when opening project in GUI.
        # Will be overwritten if using build() function.
        tcl += 'set_param "general.maxThreads" 7\n'

        # Enable VHDL assert statements to be evaluated. A severity level of failure will
        # stop the synthesis and produce an error.
        tcl_block = 'set_property "STEPS.SYNTH_DESIGN.ARGS.ASSERT" true ${run}'
        tcl += self._tcl_for_each_run(run_wildcard='"synth_*"', tcl_block=tcl_block)

        # Enable binary bitstream as well
        tcl_block = 'set_property "STEPS.WRITE_BITSTREAM.ARGS.BIN_FILE" true ${run}'
        tcl += self._tcl_for_each_run(run_wildcard='"impl_*"', tcl_block=tcl_block)

        return f"{tcl}\n"

    @staticmethod
    def _tcl_for_each_run(run_wildcard: str, tcl_block: str) -> str:
        """
        Apply TCL block for each defined run. Use ${run} for run variable in TCL.
        """
        # Apply indentation for all lines within the block.
        tcl_block = tcl_block.replace("\n", "\n  ")

        return f"""\
foreach run [get_runs {run_wildcard}] {{
  {tcl_block}
}}
"""
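    # Added note, not part of the original source: as an illustration with an assumed property
    # name, _tcl_for_each_run(run_wildcard='"synth_*"', tcl_block='set_property "FOO" true ${run}')
    # returns
    #   foreach run [get_runs "synth_*"] {
    #     set_property "FOO" true ${run}
    #   }
    # and multi-line blocks are indented by the replace() call above so that every line sits
    # inside the loop body.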

    @staticmethod
    def _add_generics(generics: dict[str, Any] | None) -> str:
        """
        Generics are set according to this weird format:
        https://www.xilinx.com/support/answers/52217.html
        """
        if not generics:
            return ""

        generic_list = []
        for name, value in generics.items():
            value_tcl_formatted = get_vivado_tcl_generic_value(value=value)
            generic_list.append(f"{name}={value_tcl_formatted}")

        generics_string = " ".join(generic_list)
        return f"""
# ------------------------------------------------------------------------------
set_property "generic" {{{generics_string}}} [current_fileset]

"""
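    # Added note, not part of the original source: with a hypothetical generic, e.g.
    # generics={"data_width": 24}, the generated line is roughly
    #   set_property "generic" {data_width=24} [current_fileset]
    # where the exact formatting of each value is delegated to get_vivado_tcl_generic_value().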

    @staticmethod
    def _iterate_constraints(
        modules: ModuleList,
        constraints: list[Constraint] | None,
        other_arguments: dict[str, Any],
    ) -> Iterable[Constraint]:
        for module in modules:
            yield from module.get_scoped_constraints(**other_arguments)

        if constraints is not None:
            yield from constraints

    @staticmethod
    def _add_constraints(constraints: list[Constraint]) -> str:
        if len(constraints) == 0:
            return ""

        tcl = """
# ------------------------------------------------------------------------------
"""
        for constraint in constraints:
            constraint_file = to_tcl_path(constraint.file)

            ref_flags = "" if constraint.ref is None else (f'-ref "{constraint.ref}" ')
            managed_flags = "" if constraint_file.endswith("xdc") else "-unmanaged "
            tcl += f"read_xdc {ref_flags}{managed_flags}{{{constraint_file}}}\n"

            get_file = f"[get_files {{{constraint_file}}}]"
            tcl += (
                'set_property "PROCESSING_ORDER" '
                f'"{constraint.processing_order.upper()}" {get_file}\n'
            )

            if constraint.used_in == "impl":
                tcl += f'set_property "USED_IN_SYNTHESIS" false {get_file}\n'
            elif constraint.used_in == "synth":
                tcl += f'set_property "USED_IN_IMPLEMENTATION" false {get_file}\n'

        return f"{tcl}\n"

    def build(  # noqa: PLR0913
        self,
        project_file: Path,
        output_path: Path,
        num_threads: int,
        run_index: int,
        generics: dict[str, Any] | None = None,
        synth_only: bool = False,
        from_impl: bool = False,
        impl_explore: bool = False,
        analyze_synthesis_timing: bool = True,
    ) -> str:
        if impl_explore:
            # For implementation explore, the threads are divided so that each parallel job gets
            # one. The number of parallel jobs equals the number of threads specified for the
            # build. Clamp to between 1 and 32, which is the range allowed by Vivado 2018.3+.
            num_threads_general = min(max(1, num_threads // NUM_VIVADO_STRATEGIES), 32)
        else:
            # Maximum value in Vivado 2018.3+. set_param will give an error if a higher number
            # is used.
            num_threads_general = min(num_threads, 32)

        num_threads_synth = min(num_threads, 8)
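        # Added note, not part of the original source, with illustrative numbers: if
        # impl_explore is set and num_threads=64, then num_threads_general becomes
        # min(max(1, 64 // 33), 32) = 1, i.e. one general thread per parallel strategy run,
        # while num_threads_synth is capped at min(64, 8) = 8.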

        tcl = f"open_project {{{to_tcl_path(project_file)}}}\n"
        tcl += f'set_param "general.maxThreads" {num_threads_general}\n'
        tcl += f'set_param "synth.maxThreads" {num_threads_synth}\n\n'
        tcl += self._add_generics(generics)

        if not from_impl:
            synth_run = f"synth_{run_index}"

            tcl += self._synthesis(synth_run, num_threads, analyze_synthesis_timing)

        if not synth_only:
            impl_run = f"impl_{run_index}"

            if impl_explore:
                tcl += self._run_multiple(num_jobs=num_threads)
            else:
                tcl += self._run(impl_run, num_threads, to_step="write_bitstream")

            tcl += self._write_hw_platform(output_path)

        tcl += """
# ------------------------------------------------------------------------------
exit
"""
        return tcl

    def _synthesis(self, run: str, num_threads: int, analyze_synthesis_timing: bool) -> str:
        tcl = self._run(run, num_threads)
        if not analyze_synthesis_timing:
            return tcl

        # For the synthesis flow we perform the timing checks by opening the design. It would
        # have been more efficient to use a post-synthesis hook (since the design would already
        # be open), if that mechanism had worked. It seems to be very bugged, so we add the
        # checkers to the build script instead.
        # For implementation, we use a pre-bitstream build hook which seems to work decently.
        #
        # Timing checks such as setup/hold/pulse width violations are not reliable after
        # synthesis, so they should not abort the build the way the checks below do.
        # They need to be checked after implementation instead.
        tcl += """
# ------------------------------------------------------------------------------
open_run ${run}
set run_directory [get_property "DIRECTORY" ${run}]
set should_exit 0


# ------------------------------------------------------------------------------
# Generate report on simultaneous switching noise (SSN) for the design.
# It seems safe to do this after synthesis; inspecting the reports in a test build after both
# synthesis and implementation shows that the results are identical.
# Will generate a "Designutils 20-923" message if noise margins are not met.
# If the user would like this to fail the build, this message severity should be raised to ERROR.
# At the moment we do not know how stable this mechanism is, so we do not fail the build
# by default.
# The call is very fast (< 1 s), so it is fine to always run it, even though not everyone will
# use it.
set current_part [get_property "PART" [current_project]]
set part_supports_ssn [get_parts ${current_part} -filter {ssn_report == 1}]
if {${part_supports_ssn} != ""} {
  set output_file [file join ${run_directory} "report_ssn.html"]
  report_ssn -phase -format html -file ${output_file}
}


# ------------------------------------------------------------------------------
# This call is duplicated in 'report_utilization.tcl' for implementation.
set output_file [file join ${run_directory} "hierarchical_utilization.rpt"]
report_utilization -hierarchical -hierarchical_depth 4 -file ${output_file}


# ------------------------------------------------------------------------------
# This code is duplicated in 'check_timing.tcl' for implementation.
set clock_interaction_report [
  report_clock_interaction -delay_type "min_max" -no_header -return_string
]
if {[string first "(unsafe)" ${clock_interaction_report}] != -1} {
  puts "ERROR: Unhandled clock crossing in ${run} run. See 'clock_interaction.rpt' and \
'timing_summary.rpt' in ${run_directory}."

  set output_file [file join ${run_directory} "clock_interaction.rpt"]
  report_clock_interaction -delay_type min_max -file ${output_file}

  set output_file [file join ${run_directory} "timing_summary.rpt"]
  report_timing_summary -file ${output_file}

  set should_exit 1
}


# ------------------------------------------------------------------------------
# This code is duplicated in 'check_cdc.tcl' for implementation.
# Check that there are no critical CDC rule violations in the design.
# List of CDC rules: https://docs.amd.com/r/en-US/ug906-vivado-design-analysis/CDC-Rules-Precedence
# If this makes your build fail on a false positive, you can waive the rule using the
# 'create_waiver' command in a (scoped) constraint file.
# Rules can be disabled in general (not recommended), or for specific paths using the '-from'
# and '-to' flags (recommended).
set cdc_report [report_cdc -return_string -no_header -details -severity "Critical"]
if {[string first "Critical" ${cdc_report}] != -1} {
  set output_file [file join ${run_directory} "cdc.rpt"]
  puts "ERROR: Critical CDC rule violation in ${run} run. See ${output_file}."

  report_cdc -details -file ${output_file}

  set should_exit 1
}


# ------------------------------------------------------------------------------
if {${should_exit} eq 1} {
  exit 1
}

"""
        return tcl

    @staticmethod
    def _run(run: str, num_threads: int, to_step: str | None = None) -> str:
        to_step = "" if to_step is None else f' -to_step "{to_step}"'

        tcl = f"""
# ------------------------------------------------------------------------------
set run [get_runs "{run}"]
reset_run ${{run}}
launch_runs ${{run}} -jobs {num_threads}{to_step}
"""

        tcl += """
wait_on_run ${run}

if {[get_property "PROGRESS" ${run}] != "100%"} {
  puts "ERROR: Run ${run} failed."
  exit 1
}

"""
        return tcl
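    # Added note, not part of the original source: as a rough illustration,
    # _run("impl_1", num_threads=8, to_step="write_bitstream") produces
    #   set run [get_runs "impl_1"]
    #   reset_run ${run}
    #   launch_runs ${run} -jobs 8 -to_step "write_bitstream"
    #   wait_on_run ${run}
    # followed by a check that exits with code 1 if the run's PROGRESS property is not "100%".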

    def _run_multiple(self, num_jobs: int = 4, base_name: str = "impl_explore_") -> str:
        """
        Currently, this creates a .tcl that waits for all active runs to complete.
        """
        tcl = "\nset build_succeeded 0\n"
        tcl += f'reset_runs [get_runs "{base_name}*"]\n'
        tcl += (
            f'launch_runs -jobs {num_jobs} [get_runs "{base_name}*"] -to_step "write_bitstream"\n'
        )
        tcl += "\n"

        tcl += f'wait_on_runs -quiet -exit_condition ANY_ONE_MET_TIMING [get_runs "{base_name}*"]\n'
        tcl += "\n"

        tcl += 'reset_runs [get_runs -filter {STATUS == "Queued..."}]\n'

        # Wait on runs that are still going, since Vivado can't kill runs in progress reliably.
        # Killing runs in progress causes a zombie process which will lock up VUnit's Process class.
        tcl += (
            f'wait_on_runs -quiet [get_runs -filter {{STATUS != "Not started"}} "{base_name}*"]\n'
        )
        tcl += "\n"

        tcl_block = """\
set build_succeeded 1
puts "Run ${run} met timing"\
"""
        tcl += self._tcl_for_each_run(
            run_wildcard=f'-filter {{PROGRESS == "100%"}} "{base_name}*"', tcl_block=tcl_block
        )

        tcl += """
if {${build_succeeded} eq 0} {
  puts "No build met timing, exiting."
  exit 1
}

"""

        return tcl
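    # Added note, not part of the original source: in summary, the generated script launches all
    # "impl_explore_*" runs (given the default base_name), returns as soon as any one of them
    # meets timing (ANY_ONE_MET_TIMING), resets the runs that are still queued, waits for the
    # runs already in progress, and finally exits with code 1 unless at least one run completed
    # with 100% progress, i.e. met timing.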

    def _write_hw_platform(self, output_path: Path) -> str:
        """
        TCL command to create a Xilinx support archive (.xsa) file, for use as a
        hardware platform.
        Used to be known as a "hdf" or "hwdef" file.

        This is mainly used for Zynq devices to generate code to set up the PS at boot.
        There is also code generated for each MicroBlaze that is present in the design.
        If there is neither a block design nor a MicroBlaze available, the .xsa will be empty apart
        from some info about which part is used, etc.

        The '-quiet' flag is used since there was a Vivado bug observed in this very
        specific scenario:
        * Vivado 2022.1
        * UltraScale+ non-Zynq device (i.e. no block design)
        * Design contains MicroBlaze
        * Design contains ILA
        In this case the 'write_hw_platform' call would fail.
        This bug might be present in other Vivado versions and in other scenarios as well.
        Since this is a very fringe scenario, and it is unlikely that anyone would ever need the
        .xsa file specifically from the ILA build, we allow the command to fail quietly.
        """
        xsa_file = to_tcl_path(output_path / f"{self.name}.xsa")

        return f"""
# ------------------------------------------------------------------------------
puts "Creating hardware platform {xsa_file}..."
write_hw_platform -fixed -force -quiet -include_bit {{{xsa_file}}}

"""