Coverage for tsfpga/vivado/tcl.py: 97%
189 statements
coverage.py v7.10.6, created at 2025-09-16 20:51 +0000
# --------------------------------------------------------------------------------------------------
# Copyright (c) Lukas Vik. All rights reserved.
#
# This file is part of the tsfpga project, a project platform for modern FPGA development.
# https://tsfpga.com
# https://github.com/tsfpga/tsfpga
# --------------------------------------------------------------------------------------------------

from __future__ import annotations

from typing import TYPE_CHECKING, Any

from tsfpga.hdl_file import HdlFile

from .common import to_tcl_path
from .generics import BitVectorGenericValue, StringGenericValue, get_vivado_tcl_generic_value

if TYPE_CHECKING:
    from collections.abc import Iterable
    from pathlib import Path

    from tsfpga.build_step_tcl_hook import BuildStepTclHook
    from tsfpga.constraint import Constraint
    from tsfpga.module_list import ModuleList


# Number of available Vivado implementation strategies
NUM_VIVADO_STRATEGIES = 33


class VivadoTcl:
    """
    Class with methods for translating a set of sources into Vivado TCL.
    """

    def __init__(
        self,
        name: str,
    ) -> None:
        self.name = name

    def create(  # noqa: PLR0913
        self,
        project_folder: Path,
        modules: ModuleList,
        part: str,
        top: str,
        run_index: int,
        generics: dict[str, bool | float | StringGenericValue | BitVectorGenericValue]
        | None = None,
        constraints: list[Constraint] | None = None,
        tcl_sources: list[Path] | None = None,
        build_step_hooks: dict[str, tuple[Path, list[BuildStepTclHook]]] | None = None,
        ip_cache_path: Path | None = None,
        disable_io_buffers: bool = True,
        # Add no sources other than IP cores
        ip_cores_only: bool = False,
        # Will be passed on to module functions. Enables parameterization of e.g. IP cores.
        other_arguments: dict[str, Any] | None = None,
    ) -> str:
        generics = {} if generics is None else generics
        other_arguments = {} if other_arguments is None else other_arguments

        tcl = f"""\
create_project -part "{part}" "{self.name}" {{{to_tcl_path(project_folder)}}}
set_property "target_language" "VHDL" [current_project]

"""
        if ip_cache_path is not None:
            tcl += f"config_ip_cache -use_cache_location {{{to_tcl_path(ip_cache_path)}}}\n\n"

        if not ip_cores_only:
            tcl += self._add_module_source_files(modules=modules, other_arguments=other_arguments)
            tcl += self._add_tcl_sources(tcl_sources)
            tcl += self._add_generics(generics=generics)

            constraints = list(
                self._iterate_constraints(
                    modules=modules, constraints=constraints, other_arguments=other_arguments
                )
            )
            tcl += self._add_constraints(constraints=constraints)
            tcl += self._add_build_step_hooks(build_step_hooks=build_step_hooks)

        tcl += self._add_ip_cores(modules=modules, other_arguments=other_arguments)
        tcl += self._add_project_settings()

        tcl += f"""
# ------------------------------------------------------------------------------
current_run [get_runs "synth_{run_index}"]
set_property "top" "{top}" [current_fileset]
reorder_files -auto -disable_unused

"""
        if disable_io_buffers:
            tcl += f"""\
set_property -name "STEPS.SYNTH_DESIGN.ARGS.MORE OPTIONS" \
-value "-no_iobuf" -objects [get_runs "synth_{run_index}"]

"""
        tcl += """
# ------------------------------------------------------------------------------
exit
"""
        return tcl
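
    # Illustrative usage sketch (not part of the original file; the part name, module list and
    # paths below are hypothetical). The string returned by create() is typically written to disk
    # and passed to Vivado in batch mode:
    #
    #   tcl = VivadoTcl(name="my_project").create(
    #       project_folder=Path("generated/my_project/project"),
    #       modules=modules,
    #       part="xc7z020clg400-1",
    #       top="my_top",
    #       run_index=1,
    #   )
    #   Path("create_vivado_project.tcl").write_text(tcl)
    #   # vivado -mode batch -source create_vivado_project.tcl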

    def _add_module_source_files(self, modules: ModuleList, other_arguments: dict[str, Any]) -> str:
        if len(modules) == 0:
            return ""

        tcl = """
# ------------------------------------------------------------------------------
"""
        for module in modules:
            vhdl_files = []
            verilog_files = []
            system_verilog_files = []

            for hdl_file in module.get_synthesis_files(**other_arguments):
                if hdl_file.type == HdlFile.Type.VHDL:
                    vhdl_files.append(hdl_file.path)
                elif hdl_file.type in [HdlFile.Type.VERILOG_SOURCE, HdlFile.Type.VERILOG_HEADER]:
                    verilog_files.append(hdl_file.path)
                elif hdl_file.type in [
                    HdlFile.Type.SYSTEMVERILOG_SOURCE,
                    HdlFile.Type.SYSTEMVERILOG_HEADER,
                ]:
                    system_verilog_files.append(hdl_file.path)
                else:
                    raise NotImplementedError(f"Can not handle file: {hdl_file}")
                    # Encrypted source files (.vp?), etc, I do not know how
                    # to handle, since I have no use case for it at the moment.

            if vhdl_files:
                files_string = self._to_file_list(vhdl_files)
                tcl += f'read_vhdl -library "{module.library_name}" -vhdl2008 {files_string}\n'

            if verilog_files:
                files_string = self._to_file_list(verilog_files)
                tcl += f"read_verilog {files_string}\n"

            if system_verilog_files:
                files_string = self._to_file_list(system_verilog_files)
                tcl += f"read_verilog -sv {files_string}\n"

        return f"{tcl}\n"

    @staticmethod
    def _to_file_list(file_paths: list[Path]) -> str:
        """
        Return a TCL snippet for a file list, with each file enclosed in curly braces.
        E.g. "{file1}" or "{{file1} {file2} {file3}}"
        """
        if len(file_paths) == 1:
            files_string = to_tcl_path(file_paths[0])
        else:
            files_string = " ".join([f"{{{to_tcl_path(file_path)}}}" for file_path in file_paths])

        return f"{{{files_string}}}"

    @staticmethod
    def _add_tcl_sources(tcl_sources: list[Path] | None) -> str:
        if tcl_sources is None or len(tcl_sources) == 0:
            return ""

        tcl = """
# ------------------------------------------------------------------------------
"""
        for tcl_source_file in tcl_sources:
            tcl += f"source -notrace {{{to_tcl_path(tcl_source_file)}}}\n"

        return f"{tcl}\n"

    @staticmethod
    def _add_ip_cores(modules: ModuleList, other_arguments: dict[str, Any]) -> str:
        tcl = ""
        for module in modules:
            for ip_core_file in module.get_ip_core_files(**other_arguments):
                create_function_name = f"create_ip_core_{ip_core_file.name}"
                tcl += f"proc {create_function_name} {{}} {{\n"

                if ip_core_file.variables:
                    for key, value in ip_core_file.variables.items():
                        tcl += f'  set {key} "{value}"\n'

                tcl += f"""\
  source -notrace {{{to_tcl_path(ip_core_file.path)}}}
}}
{create_function_name}

"""
        if tcl == "":
            return ""

        return f"""
# ------------------------------------------------------------------------------
{tcl}\
"""

    def _add_build_step_hooks(
        self, build_step_hooks: dict[str, tuple[Path, list[BuildStepTclHook]]] | None
    ) -> str:
        if not build_step_hooks:
            return ""

        tcl = """
# ------------------------------------------------------------------------------
"""
        for step_name, (tcl_file, hooks) in build_step_hooks.items():
            # Add to file set to enable archive and other project-based functionality
            tcl += f'add_files -fileset "utils_1" -norecurse {{{to_tcl_path(tcl_file)}}}\n'

            # Build step hook can only be applied to a run (e.g. impl_1), not on a project basis
            run_wildcard = '"synth_*"' if hooks[0].step_is_synth else '"impl_*"'
            tcl_block = f'set_property "{step_name}" {{{to_tcl_path(tcl_file)}}} ${{run}}'
            tcl += self._tcl_for_each_run(run_wildcard=run_wildcard, tcl_block=tcl_block)

        return f"{tcl}\n"
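
    # For reference (illustration only): a hypothetical entry
    # {"STEPS.WRITE_BITSTREAM.TCL.PRE": (hook_file, hooks)} where the hooks are implementation
    # hooks would result in roughly
    #
    #   add_files -fileset "utils_1" -norecurse {/path/to/hooks.tcl}
    #   foreach run [get_runs "impl_*"] {
    #     set_property "STEPS.WRITE_BITSTREAM.TCL.PRE" {/path/to/hooks.tcl} ${run}
    #   }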

    def _add_project_settings(self) -> str:
        tcl = """
# ------------------------------------------------------------------------------
"""
        # Default value for when opening the project in the GUI.
        # Will be overwritten if using the build() function.
        tcl += 'set_param "general.maxThreads" 7\n'

        # Enable VHDL assert statements to be evaluated. A severity level of "failure" will
        # stop the synthesis and produce an error.
        tcl_block = 'set_property "STEPS.SYNTH_DESIGN.ARGS.ASSERT" true ${run}'
        tcl += self._tcl_for_each_run(run_wildcard='"synth_*"', tcl_block=tcl_block)

        # Enable binary bitstream as well
        tcl_block = 'set_property "STEPS.WRITE_BITSTREAM.ARGS.BIN_FILE" true ${run}'
        tcl += self._tcl_for_each_run(run_wildcard='"impl_*"', tcl_block=tcl_block)

        return f"{tcl}\n"

    @staticmethod
    def _tcl_for_each_run(run_wildcard: str, tcl_block: str) -> str:
        """
        Apply a TCL block for each defined run. Use ${run} for the run variable in the TCL block.
        """
        # Apply indentation for all lines within the block.
        tcl_block = tcl_block.replace("\n", "\n  ")

        return f"""\
foreach run [get_runs {run_wildcard}] {{
  {tcl_block}
}}
"""

    @staticmethod
    def _add_generics(
        generics: dict[
            str,
            bool | float | StringGenericValue | BitVectorGenericValue,
        ]
        | None,
    ) -> str:
        """
        Generics are set according to this weird format:
        https://www.xilinx.com/support/answers/52217.html
        """
        if not generics:
            return ""

        generic_list = []
        for name, value in generics.items():
            value_tcl_formatted = get_vivado_tcl_generic_value(value=value)
            generic_list.append(f"{name}={value_tcl_formatted}")

        generics_string = " ".join(generic_list)
        return f"""
# ------------------------------------------------------------------------------
set_property "generic" {{{generics_string}}} [current_fileset]

"""

    @staticmethod
    def _iterate_constraints(
        modules: ModuleList,
        constraints: list[Constraint] | None,
        other_arguments: dict[str, Any],
    ) -> Iterable[Constraint]:
        for module in modules:
            yield from module.get_scoped_constraints(**other_arguments)

        if constraints is not None:
            yield from constraints

    @staticmethod
    def _add_constraints(constraints: list[Constraint]) -> str:
        if len(constraints) == 0:
            return ""

        tcl = """
# ------------------------------------------------------------------------------
"""
        for constraint in constraints:
            constraint_file = to_tcl_path(constraint.file)

            ref_flags = "" if constraint.ref is None else (f'-ref "{constraint.ref}" ')
            managed_flags = "" if constraint_file.endswith("xdc") else "-unmanaged "
            tcl += f"read_xdc {ref_flags}{managed_flags}{{{constraint_file}}}\n"

            get_file = f"[get_files {{{constraint_file}}}]"
            tcl += (
                'set_property "PROCESSING_ORDER" '
                f'"{constraint.processing_order.upper()}" {get_file}\n'
            )

            if not constraint.used_in_synthesis:
                tcl += f'set_property "USED_IN_SYNTHESIS" false {get_file}\n'
            if not constraint.used_in_implementation:
                tcl += f'set_property "USED_IN_IMPLEMENTATION" false {get_file}\n'

        return f"{tcl}\n"

    def build(  # noqa: PLR0913
        self,
        project_file: Path,
        output_path: Path | None,
        num_threads: int,
        run_index: int,
        generics: dict[str, bool | float | StringGenericValue | BitVectorGenericValue]
        | None = None,
        synth_only: bool = False,
        from_impl: bool = False,
        impl_explore: bool = False,
        open_and_analyze_synthesized_design: bool = True,
    ) -> str:
        if impl_explore:
            # For implementation explore, the threads are divided out with one per job, i.e. the
            # number of parallel jobs is the number of threads specified for the build.
            # Clamp the general thread count to between 1 and 32, the range allowed by
            # Vivado 2018.3+.
            num_threads_general = min(max(1, num_threads // NUM_VIVADO_STRATEGIES), 32)
        else:
            # Maximum value in Vivado 2018.3+. set_param will give an error if a higher number
            # is used.
            num_threads_general = min(num_threads, 32)

        num_threads_synth = min(num_threads, 8)
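
        # Worked example (illustration only): with num_threads=64, an impl_explore build gives
        # num_threads_general = min(max(1, 64 // 33), 32) = 1 thread per job, whereas a regular
        # build uses min(64, 32) = 32 general threads and min(64, 8) = 8 synthesis threads.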

        tcl = f"open_project {{{to_tcl_path(project_file)}}}\n"
        tcl += f'set_param "general.maxThreads" {num_threads_general}\n'
        tcl += f'set_param "synth.maxThreads" {num_threads_synth}\n\n'
        tcl += self._add_generics(generics=generics)

        if not from_impl:
            synth_run = f"synth_{run_index}"

            tcl += self._synthesis(
                run=synth_run,
                num_threads=num_threads,
                open_and_analyze=open_and_analyze_synthesized_design,
            )

        if not synth_only:
            impl_run = f"impl_{run_index}"

            if impl_explore:
                tcl += self._run_multiple(num_jobs=num_threads)
            else:
                tcl += self._run(impl_run, num_threads, to_step="write_bitstream")

            if output_path is None:
                raise ValueError("Output path must be set for implementation builds.")
            tcl += self._write_hw_platform(output_path)

        tcl += """
# ------------------------------------------------------------------------------
exit
"""
        return tcl

    def _synthesis(self, run: str, num_threads: int, open_and_analyze: bool) -> str:
        tcl = self._run(run=run, num_threads=num_threads)
        if not open_and_analyze:
            return tcl

        # It would have been more efficient to use post-synthesis hooks (since the design would
        # already be open), IF that mechanism had worked.
        # It seems to be very bugged.
        # So we add all these checks to the build script.
        # For the implementation step, we use a pre-bitstream build hook which seems to work.
        #
        # Timing checks such as setup/hold/pulse width violations are not reliable after synthesis
        # and should not abort the build.
        # These need to be checked after implementation.
        # Checks like CDC or unhandled clock crossings, however, are reliable after synthesis,
        # and hence we abort the build below if such issues are found.
        tcl += """
# ------------------------------------------------------------------------------
open_run ${run}
set run_directory [get_property "DIRECTORY" ${run}]
set should_exit 0


# ------------------------------------------------------------------------------
# Generate report on simultaneous switching noise (SSN) for the design.
# It seems safe to do this after synthesis; inspecting the reports in a test build after both
# synthesis and implementation shows that the results are identical.
# Will generate a "Designutils 20-923" message if noise margins are not met.
# If the user would like this to fail the build, this message severity shall be raised to ERROR.
# At the moment we do not know how stable this mechanism is, so we do not fail the build
# per default.
# The call is very fast (< 1s) so it is fine to run always, even though not everyone will use it.
set current_part [get_property "PART" [current_project]]
set part_supports_ssn [get_parts ${current_part} -filter {ssn_report == 1}]
if {${part_supports_ssn} != ""} {
  set output_file [file join ${run_directory} "report_ssn.html"]
  report_ssn -phase -format html -file ${output_file}
}


# ------------------------------------------------------------------------------
# This code is duplicated in 'check_timing.tcl' for implementation.
set clock_interaction_report [
  report_clock_interaction -delay_type "min_max" -no_header -return_string
]
if {[string first "(unsafe)" ${clock_interaction_report}] != -1} {
  puts "ERROR: Unhandled clock crossing in ${run} run. See 'clock_interaction.rpt' and \
'timing_summary.rpt' in ${run_directory}."

  set output_file [file join ${run_directory} "clock_interaction.rpt"]
  report_clock_interaction -delay_type min_max -file ${output_file}

  set output_file [file join ${run_directory} "timing_summary.rpt"]
  report_timing_summary -file ${output_file}

  set should_exit 1
}


# ------------------------------------------------------------------------------
# This code is duplicated in 'check_cdc.tcl' for implementation.
# Check that there are no critical CDC rule violations in the design.
# List of CDC rules: https://docs.amd.com/r/en-US/ug906-vivado-design-analysis/CDC-Rules-Precedence
# If this makes your build fail on a false positive, you can waive the rule using the
# 'create_waiver' command in a (scoped) constraint file.
# Rules can be disabled in general (not recommended), or for specific paths using the '-from'
# and '-to' flags (recommended).
set cdc_report [report_cdc -return_string -no_header -details -severity "Critical"]
if {[string first "Critical" ${cdc_report}] != -1} {
  set output_file [file join ${run_directory} "cdc.rpt"]
  puts "ERROR: Critical CDC rule violation in ${run} run. See ${output_file}."

  report_cdc -details -file ${output_file}

  set should_exit 1
}


# ------------------------------------------------------------------------------
# The below reports are used heavily by netlist builds, but do not really have a use case for
# full builds.
# The calls are very fast though (< 1s even on a decently sized design) so it is fine to run always.

# This call is duplicated in 'report_logic_level_distribution.tcl'.
set output_file [file join ${run_directory} "logic_level_distribution.rpt"]
report_design_analysis -logic_level_distribution -file ${output_file}

# This call is duplicated in 'report_utilization.tcl' for implementation.
set output_file [file join ${run_directory} "hierarchical_utilization.rpt"]
report_utilization -hierarchical -hierarchical_depth 4 -file ${output_file}

set output_file [file join ${run_directory} "timing.rpt"]
report_timing -setup -no_header -file ${output_file}


# ------------------------------------------------------------------------------
if {${should_exit} eq 1} {
  exit 1
}

"""
        return tcl

    @staticmethod
    def _run(run: str, num_threads: int, to_step: str | None = None) -> str:
        to_step = "" if to_step is None else f' -to_step "{to_step}"'

        tcl = f"""
# ------------------------------------------------------------------------------
set run [get_runs "{run}"]
reset_run ${{run}}
launch_runs ${{run}} -jobs {num_threads}{to_step}
"""

        tcl += """
wait_on_run ${run}

if {[get_property "PROGRESS" ${run}] != "100%"} {
  puts "ERROR: Run ${run} failed."
  exit 1
}

"""
        return tcl
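
    # For reference (illustration only), _run(run="impl_1", num_threads=8,
    # to_step="write_bitstream") produces TCL along these lines:
    #
    #   set run [get_runs "impl_1"]
    #   reset_run ${run}
    #   launch_runs ${run} -jobs 8 -to_step "write_bitstream"
    #
    #   wait_on_run ${run}
    #
    #   if {[get_property "PROGRESS" ${run}] != "100%"} {
    #     puts "ERROR: Run ${run} failed."
    #     exit 1
    #   }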

    def _run_multiple(self, num_jobs: int = 4, base_name: str = "impl_explore_") -> str:
        """
        Currently, this creates a .tcl that waits for all active runs to complete.
        """
        tcl = "\nset build_succeeded 0\n"
        tcl += f'reset_runs [get_runs "{base_name}*"]\n'
        tcl += (
            f'launch_runs -jobs {num_jobs} [get_runs "{base_name}*"] -to_step "write_bitstream"\n'
        )
        tcl += "\n"

        tcl += f'wait_on_runs -quiet -exit_condition ANY_ONE_MET_TIMING [get_runs "{base_name}*"]\n'
        tcl += "\n"

        tcl += 'reset_runs [get_runs -filter {STATUS == "Queued..."}]\n'

        # Wait on runs that are still going, since Vivado can't kill runs in progress reliably.
        # Killing runs in progress causes a zombie process which will lock up VUnit's Process class.
        tcl += (
            f'wait_on_runs -quiet [get_runs -filter {{STATUS != "Not started"}} "{base_name}*"]\n'
        )
        tcl += "\n"

        tcl_block = """\
set build_succeeded 1
puts "Run ${run} met timing"\
"""
        tcl += self._tcl_for_each_run(
            run_wildcard=f'-filter {{PROGRESS == "100%"}} "{base_name}*"', tcl_block=tcl_block
        )

        tcl += """
if {${build_succeeded} eq 0} {
  puts "No build met timing, exiting."
  exit 1
}

"""

        return tcl

    def _write_hw_platform(self, output_path: Path) -> str:
        """
        TCL command to create a Xilinx support archive (.xsa) file, for use as a
        hardware platform.
        Used to be known as a "hdf" or "hwdef" file.

        This is mainly used for Zynq devices to generate code to set up the PS at boot.
        There is also code generated for each MicroBlaze that is present in the design.
        If there is neither a block design nor a MicroBlaze available, the .xsa will be empty apart
        from some info about which part is used, etc.

        The '-quiet' flag is used since there was a Vivado bug observed in this very
        specific scenario:
        * Vivado 2022.1
        * UltraScale+ non-Zynq device (i.e. no block design)
        * Design contains MicroBlaze
        * Design contains ILA
        In this case the 'write_hw_platform' call would fail.
        This bug might be present in other Vivado versions and in other scenarios as well.
        Since this is a very fringe scenario, and it is unlikely that anyone would ever need the
        .xsa file specifically from the ILA build, we allow the command to fail quietly.
        """
        xsa_file = to_tcl_path(output_path / f"{self.name}.xsa")

        return f"""
# ------------------------------------------------------------------------------
puts "Creating hardware platform {xsa_file}..."
write_hw_platform -fixed -force -quiet -include_bit {{{xsa_file}}}

"""