Coverage for tsfpga/vivado/tcl.py: 95%

203 statements  

« prev     ^ index     » next       coverage.py v7.6.9, created at 2024-12-20 20:52 +0000

1# -------------------------------------------------------------------------------------------------- 

2# Copyright (c) Lukas Vik. All rights reserved. 

3# 

4# This file is part of the tsfpga project, a project platform for modern FPGA development. 

5# https://tsfpga.com 

6# https://github.com/tsfpga/tsfpga 

7# -------------------------------------------------------------------------------------------------- 

8 

9# Standard libraries 

10from pathlib import Path 

11from typing import TYPE_CHECKING, Any, Iterable, Optional 

12 

13# First party libraries 

14from tsfpga.hdl_file import HdlFile 

15from tsfpga.system_utils import create_file 

16 

17# Local folder libraries 

18from .common import to_tcl_path 

19from .generics import get_vivado_tcl_generic_value 

20 

21if TYPE_CHECKING: 

22 # First party libraries 

23 from tsfpga.build_step_tcl_hook import BuildStepTclHook 

24 from tsfpga.constraint import Constraint 

25 from tsfpga.module_list import ModuleList 

26 

27 

# Number of available Vivado implementation strategies.
# Used by 'build()' to divide threads between parallel jobs in implementation explore mode.
NUM_VIVADO_STRATEGIES = 33

30 

31 

class VivadoTcl:
    """
    Class with methods for translating a set of sources into Vivado TCL
    """

    def __init__(
        self,
        name: str,
    ) -> None:
        """
        Arguments:
            name: Name of the Vivado project, used e.g. for the ``create_project`` call
                and the hardware platform (.xsa) file name.
        """
        self.name = name

42 

    # pylint: disable=too-many-arguments
    def create(
        self,
        project_folder: Path,
        modules: "ModuleList",
        part: str,
        top: str,
        run_index: int,
        generics: Optional[dict[str, Any]] = None,
        constraints: Optional[list["Constraint"]] = None,
        tcl_sources: Optional[list[Path]] = None,
        build_step_hooks: Optional[list["BuildStepTclHook"]] = None,
        ip_cache_path: Optional[Path] = None,
        disable_io_buffers: bool = True,
        # Add no sources other than IP cores
        ip_cores_only: bool = False,
        # Will be passed on to module functions. Enables parameterization of e.g. IP cores.
        other_arguments: Optional[dict[str, Any]] = None,
    ) -> str:
        """
        Return TCL that creates a Vivado project and loads all sources, constraints,
        IP cores, build step hooks and project settings into it.

        Arguments:
            project_folder: Folder where the Vivado project shall be placed.
            modules: Source files, constraints, IP cores, etc. are taken from these modules.
            part: Vivado part (device) identifier.
            top: Name of the top-level entity/module.
            run_index: Which synthesis run ("synth_<index>") shall be made the current run.
            generics: Top-level generic values, applied via 'set_property "generic"'.
            constraints: Additional constraint files, appended after module scoped constraints.
            tcl_sources: Extra TCL files that will be sourced into the project.
            build_step_hooks: TCL hooks to attach to synthesis/implementation steps.
            ip_cache_path: If given, enables the Vivado IP cache at this location.
            disable_io_buffers: If True, passes '-no_iobuf' to synthesis.
            ip_cores_only: If True, only IP cores (no HDL sources, constraints, ...) are added.
            other_arguments: Passed on to module functions, e.g. for IP core parameterization.

        Return:
            The project creation TCL as a string.
        """
        generics = {} if generics is None else generics
        other_arguments = {} if other_arguments is None else other_arguments

        tcl = f"""\
create_project -part "{part}" "{self.name}" {{{to_tcl_path(project_folder)}}}
set_property "target_language" "VHDL" [current_project]

"""
        if ip_cache_path is not None:
            tcl += f"config_ip_cache -use_cache_location {{{to_tcl_path(ip_cache_path)}}}\n\n"

        if not ip_cores_only:
            tcl += self._add_module_source_files(modules=modules, other_arguments=other_arguments)
            tcl += self._add_tcl_sources(tcl_sources)
            tcl += self._add_generics(generics)

            # Module scoped constraints come first, then the explicitly supplied ones.
            constraints = list(
                self._iterate_constraints(
                    modules=modules, constraints=constraints, other_arguments=other_arguments
                )
            )
            tcl += self._add_constraints(constraints=constraints)
            tcl += self._add_build_step_hooks(build_step_hooks, project_folder)

        tcl += self._add_ip_cores(modules=modules, other_arguments=other_arguments)
        tcl += self._add_project_settings()

        tcl += f"""
# ------------------------------------------------------------------------------
current_run [get_runs "synth_{run_index}"]
set_property "top" "{top}" [current_fileset]
reorder_files -auto -disable_unused

"""
        if disable_io_buffers:
            tcl += f"""\
set_property -name "STEPS.SYNTH_DESIGN.ARGS.MORE OPTIONS" \
-value "-no_iobuf" -objects [get_runs "synth_{run_index}"]

"""
        tcl += """
# ------------------------------------------------------------------------------
exit
"""
        return tcl

107 

108 def _add_module_source_files( 

109 self, modules: "ModuleList", other_arguments: dict[str, Any] 

110 ) -> str: 

111 if len(modules) == 0: 

112 return "" 

113 

114 tcl = """ 

115# ------------------------------------------------------------------------------ 

116""" 

117 for module in modules: 

118 vhdl_files = [] 

119 verilog_files = [] 

120 system_verilog_files = [] 

121 

122 for hdl_file in module.get_synthesis_files(**other_arguments): 

123 if hdl_file.type == HdlFile.Type.VHDL: 

124 vhdl_files.append(hdl_file.path) 

125 elif hdl_file.type in [HdlFile.Type.VERILOG_SOURCE, HdlFile.Type.VERILOG_HEADER]: 

126 verilog_files.append(hdl_file.path) 

127 elif hdl_file.type in [ 

128 HdlFile.Type.SYSTEMVERILOG_SOURCE, 

129 HdlFile.Type.SYSTEMVERILOG_HEADER, 

130 ]: 

131 system_verilog_files.append(hdl_file.path) 

132 else: 

133 raise NotImplementedError(f"Can not handle file: {hdl_file}") 

134 # Encrypted source files (.vp?), etc, I do not know how 

135 # to handle, since I have no use case for it at the moment. 

136 

137 if vhdl_files: 

138 files_string = self._to_file_list(vhdl_files) 

139 tcl += f'read_vhdl -library "{module.library_name}" -vhdl2008 {files_string}\n' 

140 

141 if verilog_files: 

142 files_string = self._to_file_list(verilog_files) 

143 tcl += f"read_verilog {files_string}\n" 

144 

145 if system_verilog_files: 

146 files_string = self._to_file_list(system_verilog_files) 

147 tcl += f"read_verilog -sv {files_string}\n" 

148 

149 return f"{tcl}\n" 

150 

151 @staticmethod 

152 def _to_file_list(file_paths: list[Path]) -> str: 

153 """ 

154 Return a TCL snippet for a file list, with each file enclosed in curly braces. 

155 E.g. "{file1}" or "{{file1} {file2} {file3}}" 

156 """ 

157 if len(file_paths) == 1: 

158 files_string = to_tcl_path(file_paths[0]) 

159 else: 

160 files_string = " ".join([f"{ {to_tcl_path(file_path)}} " for file_path in file_paths]) 

161 

162 return f"{ {files_string}} " 

163 

164 @staticmethod 

165 def _add_tcl_sources(tcl_sources: Optional[list[Path]]) -> str: 

166 if tcl_sources is None or len(tcl_sources) == 0: 

167 return "" 

168 

169 tcl = """ 

170# ------------------------------------------------------------------------------ 

171""" 

172 for tcl_source_file in tcl_sources: 

173 tcl += f"source -notrace { {to_tcl_path(tcl_source_file)}} \n" 

174 

175 return f"{tcl}\n" 

176 

    @staticmethod
    def _add_ip_cores(modules: "ModuleList", other_arguments: dict[str, Any]) -> str:
        """
        Return TCL that creates the IP cores of all the given modules.
        Each IP core creation script is sourced inside its own TCL proc, which is then
        called immediately. Any 'ip_core_file.variables' are set inside the proc before
        sourcing, presumably to keep them scoped to that one IP core — confirm against
        the IP core TCL files' expectations.
        """
        tcl = ""
        for module in modules:
            for ip_core_file in module.get_ip_core_files(**other_arguments):
                create_function_name = f"create_ip_core_{ip_core_file.name}"
                tcl += f"proc {create_function_name} {{}} {{\n"

                if ip_core_file.variables:
                    for key, value in ip_core_file.variables.items():
                        tcl += f'  set {key} "{value}"\n'

                tcl += f"""\
  source -notrace {{{to_tcl_path(ip_core_file.path)}}}
}}
{create_function_name}

"""
        # No IP cores found in any module => add nothing (not even the separator).
        if tcl == "":
            return ""

        return f"""
# ------------------------------------------------------------------------------
{tcl}\
"""

202 

203 def _add_build_step_hooks( 

204 self, build_step_hooks: Optional[list["BuildStepTclHook"]], project_folder: Path 

205 ) -> str: 

206 if build_step_hooks is None or len(build_step_hooks) == 0: 

207 return "" 

208 

209 # There can be many hooks for the same step. Reorganize them into a dict. 

210 hook_steps: dict[str, list["BuildStepTclHook"]] = {} 

211 for build_step_hook in build_step_hooks: 

212 if build_step_hook.hook_step in hook_steps: 

213 hook_steps[build_step_hook.hook_step].append(build_step_hook) 

214 else: 

215 hook_steps[build_step_hook.hook_step] = [build_step_hook] 

216 

217 tcl = """ 

218# ------------------------------------------------------------------------------ 

219""" 

220 for step, hooks in hook_steps.items(): 

221 # Vivado will only accept one TCL script as hook for each step. So if we want 

222 # to add more we have to create a new TCL file, that sources the other files, 

223 # and add that as the hook to Vivado. 

224 if len(hooks) == 1: 

225 tcl_file = hooks[0].tcl_file 

226 else: 

227 tcl_file = project_folder / ("hook_" + step.replace(".", "_") + ".tcl") 

228 source_hooks_tcl = "".join( 

229 [f"source { {to_tcl_path(hook.tcl_file)}} \n" for hook in hooks] 

230 ) 

231 create_file(tcl_file, source_hooks_tcl) 

232 

233 # Add to fileset to enable archive and other project based functionality 

234 tcl += f'add_files -fileset "utils_1" -norecurse { {to_tcl_path(tcl_file)}} \n' 

235 

236 # Build step hook can only be applied to a run (e.g. impl_1), not on a project basis 

237 run_wildcard = '"synth_*"' if hooks[0].step_is_synth else '"impl_*"' 

238 tcl_block = f'set_property "{step}" { {to_tcl_path(tcl_file)}} ${ run} ' 

239 tcl += self._tcl_for_each_run(run_wildcard=run_wildcard, tcl_block=tcl_block) 

240 

241 return f"{tcl}\n" 

242 

243 def _add_project_settings(self) -> str: 

244 tcl = """ 

245# ------------------------------------------------------------------------------ 

246""" 

247 

248 # Default value for when opening project in GUI. 

249 # Will be overwritten if using build() function. 

250 tcl += 'set_param "general.maxThreads" 7\n' 

251 

252 # Enable VHDL assert statements to be evaluated. A severity level of failure will 

253 # stop the synthesis and produce an error. 

254 tcl_block = 'set_property "STEPS.SYNTH_DESIGN.ARGS.ASSERT" true ${run}' 

255 tcl += self._tcl_for_each_run(run_wildcard='"synth_*"', tcl_block=tcl_block) 

256 

257 # Enable binary bitstream as well 

258 tcl_block = 'set_property "STEPS.WRITE_BITSTREAM.ARGS.BIN_FILE" true ${run}' 

259 tcl += self._tcl_for_each_run(run_wildcard='"impl_*"', tcl_block=tcl_block) 

260 

261 return f"{tcl}\n" 

262 

    @staticmethod
    def _tcl_for_each_run(run_wildcard: str, tcl_block: str) -> str:
        """
        Apply TCL block for each defined run. Use ${run} for run variable in TCL.

        Arguments:
            run_wildcard: Argument(s) to 'get_runs' that select the runs, e.g. '"synth_*"'.
            tcl_block: TCL code (possibly multi-line) to execute per run.
        """
        # Apply indentation for all lines within the block.
        tcl_block = tcl_block.replace("\n", "\n  ")

        return f"""\
foreach run [get_runs {run_wildcard}] {{
  {tcl_block}
}}
"""

276 

277 @staticmethod 

278 def _add_generics(generics: Optional[dict[str, Any]]) -> str: 

279 """ 

280 Generics are set according to this weird format: 

281 https://www.xilinx.com/support/answers/52217.html 

282 """ 

283 if not generics: 

284 return "" 

285 

286 generic_list = [] 

287 for name, value in generics.items(): 

288 value_tcl_formatted = get_vivado_tcl_generic_value(value=value) 

289 generic_list.append(f"{name}={value_tcl_formatted}") 

290 

291 generics_string = " ".join(generic_list) 

292 return f""" 

293# ------------------------------------------------------------------------------ 

294set_property "generic" { {generics_string}} [current_fileset] 

295 

296""" 

297 

298 @staticmethod 

299 def _iterate_constraints( 

300 modules: "ModuleList", 

301 constraints: Optional[list["Constraint"]], 

302 other_arguments: dict[str, Any], 

303 ) -> Iterable["Constraint"]: 

304 for module in modules: 

305 yield from module.get_scoped_constraints(**other_arguments) 

306 

307 if constraints is not None: 

308 yield from constraints 

309 

    @staticmethod
    def _add_constraints(constraints: list["Constraint"]) -> str:
        """
        Return TCL that reads the given constraint files into the project, and applies
        each constraint's processing order and synthesis/implementation usage settings.
        """
        if len(constraints) == 0:
            return ""

        tcl = """
# ------------------------------------------------------------------------------
"""
        for constraint in constraints:
            constraint_file = to_tcl_path(constraint.file)

            ref_flags = "" if constraint.ref is None else (f'-ref "{constraint.ref}" ')
            # Non-.xdc files (e.g. .tcl constraints) must be read as unmanaged.
            managed_flags = "" if constraint_file.endswith("xdc") else "-unmanaged "
            tcl += f"read_xdc {ref_flags}{managed_flags}{{{constraint_file}}}\n"

            get_file = f"[get_files {{{constraint_file}}}]"
            tcl += (
                'set_property "PROCESSING_ORDER" '
                f'"{constraint.processing_order.upper()}" {get_file}\n'
            )

            # Restrict the constraint to one build stage if requested.
            if constraint.used_in == "impl":
                tcl += f'set_property "USED_IN_SYNTHESIS" false {get_file}\n'
            elif constraint.used_in == "synth":
                tcl += f'set_property "USED_IN_IMPLEMENTATION" false {get_file}\n'

        return f"{tcl}\n"

337 

    def build(
        self,
        project_file: Path,
        output_path: Path,
        num_threads: int,
        run_index: int,
        generics: Optional[dict[str, Any]] = None,
        synth_only: bool = False,
        from_impl: bool = False,
        impl_explore: bool = False,
        analyze_synthesis_timing: bool = True,
    ) -> str:
        """
        Return TCL that opens the project and runs synthesis and (optionally)
        implementation, bitstream generation and hardware platform creation.

        Arguments:
            project_file: The Vivado project file to open.
            output_path: Where the hardware platform (.xsa) shall be placed.
            num_threads: Maximum number of parallel threads/jobs to use.
            run_index: Which "synth_<index>"/"impl_<index>" runs to launch.
            generics: Top-level generic values to apply before building.
            synth_only: If True, stop after synthesis.
            from_impl: If True, skip synthesis and start from implementation.
            impl_explore: If True, launch all implementation explore runs in parallel.
            analyze_synthesis_timing: If True, add post-synthesis design checks.
        """
        if impl_explore:
            # For implementation explore, threads are divided to one each per job.
            # Number of jobs in parallel are the number of threads specified for build.
            # Clamp max threads between 1 and 32, which are allowed by Vivado 2018.3+.
            num_threads_general = min(max(1, num_threads // NUM_VIVADO_STRATEGIES), 32)
        else:
            # Max value in Vivado 2018.3+. set_param will give an error if higher number.
            num_threads_general = min(num_threads, 32)

        num_threads_synth = min(num_threads, 8)

        tcl = f"open_project {{{to_tcl_path(project_file)}}}\n"
        tcl += f'set_param "general.maxThreads" {num_threads_general}\n'
        tcl += f'set_param "synth.maxThreads" {num_threads_synth}\n\n'
        tcl += self._add_generics(generics)

        if not from_impl:
            synth_run = f"synth_{run_index}"

            tcl += self._synthesis(synth_run, num_threads, analyze_synthesis_timing)

        if not synth_only:
            impl_run = f"impl_{run_index}"

            if impl_explore:
                tcl += self._run_multiple(num_jobs=num_threads)
            else:
                tcl += self._run(impl_run, num_threads, to_step="write_bitstream")

            tcl += self._write_hw_platform(output_path)

        tcl += """
# ------------------------------------------------------------------------------
exit
"""
        return tcl

386 

    def _synthesis(self, run: str, num_threads: int, analyze_synthesis_timing: bool) -> str:
        """
        Return TCL that launches the given synthesis run and, if requested, opens the
        synthesized design and performs SSN, utilization, clock interaction and CDC checks.
        Clock interaction or CDC violations make the generated TCL exit with code 1.
        """
        tcl = self._run(run, num_threads)
        if not analyze_synthesis_timing:
            return tcl

        # For synthesis flow we perform the timing checks by opening the design. It would have
        # been more efficient to use a post-synthesis hook (since the design would already be
        # open), if that mechanism had worked. It seems to be very bugged. So we add the
        # checkers to the build script.
        # For implementation, we use a pre-bitstream build hook which seems to work decently.
        #
        # Timing checks such as setup/hold/pulse width violations, are not reliable after synthesis,
        # and should not abort the build as we do below.
        # These need to be checked after implementation.
        tcl += """
# ------------------------------------------------------------------------------
open_run ${run}
set run_directory [get_property "DIRECTORY" ${run}]
set should_exit 0


# ------------------------------------------------------------------------------
# Generate report on simultaneous switching noise (SSN) for the design.
# It seems safe to do this after synthesis; inspecting the reports in a test build after both
# synthesis and implementation shows that the results are identical.
# Will generate a "Designutils 20-923" message if noise margins are not met.
# If the user would like this to fail the build, this message severity shall be raised to ERROR.
# At the moment we do not know how stable this mechanism is, so we do not fail the build
# per default.
# The call is very fast (< 1s) so it is fine to run always, even though not everyone will use it.
set current_part [get_property "PART" [current_project]]
set part_supports_ssn [get_parts ${current_part} -filter {ssn_report == 1}]
if {${part_supports_ssn} != ""} {
  set output_file [file join ${run_directory} "report_ssn.html"]
  report_ssn -phase -format html -file ${output_file}
}


# ------------------------------------------------------------------------------
# This call is duplicated in 'report_utilization.tcl' for implementation.
set output_file [file join ${run_directory} "hierarchical_utilization.rpt"]
report_utilization -hierarchical -hierarchical_depth 4 -file ${output_file}


# ------------------------------------------------------------------------------
# This code is duplicated in 'check_timing.tcl' for implementation.
set clock_interaction_report [
  report_clock_interaction -delay_type "min_max" -no_header -return_string
]
if {[string first "(unsafe)" ${clock_interaction_report}] != -1} {
  puts "ERROR: Unhandled clock crossing in ${run} run. See 'clock_interaction.rpt' and \
'timing_summary.rpt' in ${run_directory}."

  set output_file [file join ${run_directory} "clock_interaction.rpt"]
  report_clock_interaction -delay_type min_max -file ${output_file}

  set output_file [file join ${run_directory} "timing_summary.rpt"]
  report_timing_summary -file ${output_file}

  set should_exit 1
}


# ------------------------------------------------------------------------------
# This code is duplicated in 'check_cdc.tcl' for implementation.
# Check that there are no critical CDC rule violations in the design.
# List of CDC rules: https://docs.amd.com/r/en-US/ug906-vivado-design-analysis/CDC-Rules-Precedence
# If this makes your build fail on a false positive, you can waive the rule using the
# 'create_waiver' command in a (scoped) constraint file.
# Rules can be disable in general (not recommended), or for specific paths using the '-from'
# and '-to' flags (recommended).
set cdc_report [report_cdc -return_string -no_header -details -severity "Critical"]
if {[string first "Critical" ${cdc_report}] != -1} {
  set output_file [file join ${run_directory} "cdc.rpt"]
  puts "ERROR: Critical CDC rule violation in ${run} run. See ${output_file}."

  report_cdc -details -file ${output_file}

  set should_exit 1
}


# ------------------------------------------------------------------------------
if {${should_exit} eq 1} {
  exit 1
}

"""
        return tcl

476 

    @staticmethod
    def _run(run: str, num_threads: int, to_step: Optional[str] = None) -> str:
        """
        Return TCL that resets, launches and waits for the given run.
        The generated TCL exits with code 1 if the run does not reach 100% progress.

        Arguments:
            run: Name of the run, e.g. "synth_1".
            num_threads: Number of parallel jobs for 'launch_runs'.
            to_step: If given, run only up to this step (e.g. "write_bitstream").
        """
        to_step = "" if to_step is None else f' -to_step "{to_step}"'

        tcl = f"""
# ------------------------------------------------------------------------------
set run [get_runs "{run}"]
reset_run ${{run}}
launch_runs ${{run}} -jobs {num_threads}{to_step}
"""

        tcl += """
wait_on_run ${run}

if {[get_property "PROGRESS" ${run}] != "100%"} {
  puts "ERROR: Run ${run} failed."
  exit 1
}

"""
        return tcl

498 

    def _run_multiple(self, num_jobs: int = 4, base_name: str = "impl_explore_") -> str:
        """
        Currently, this creates a .tcl that waits for all active runs to complete.

        Launches all "impl_explore_*" runs in parallel and exits with code 1 if no run
        met timing.
        """
        tcl = "\nset build_succeeded 0\n"
        tcl += f'reset_runs [get_runs "{base_name}*"]\n'
        tcl += (
            f'launch_runs -jobs {num_jobs} [get_runs "{base_name}*"] -to_step "write_bitstream"\n'
        )
        tcl += "\n"

        # Return as soon as any one run meets timing.
        tcl += f'wait_on_runs -quiet -exit_condition ANY_ONE_MET_TIMING [get_runs "{base_name}*"]\n'
        tcl += "\n"

        tcl += 'reset_runs [get_runs -filter {STATUS == "Queued..."}]\n'

        # Wait on runs that are still going, since Vivado can't kill runs in progress reliably.
        # Killing runs in progress causes a zombie process which will lock up VUnit's Process class.
        tcl += (
            f'wait_on_runs -quiet [get_runs -filter {{STATUS != "Not started"}} "{base_name}*"]\n'
        )
        tcl += "\n"

        tcl_block = """\
set build_succeeded 1
puts "Run ${run} met timing"\
"""
        tcl += self._tcl_for_each_run(
            run_wildcard=f'-filter {{PROGRESS == "100%"}} "{base_name}*"', tcl_block=tcl_block
        )

        tcl += """
if {${build_succeeded} eq 0} {
  puts "No build met timing, exiting."
  exit 1
}

"""

        return tcl

539 

    def _write_hw_platform(self, output_path: Path) -> str:
        """
        TCL command to create a Xilinx support archive (.xsa) file, for use as a
        hardware platform.
        Used to be known as a "hdf" or "hwdef" file.

        This is mainly used for Zynq devices to generate code to set up the PS at boot.
        There is also code generated for each MicroBlaze that is present in the design.
        If there is neither a block design nor a MicroBlaze available, the .xsa will be empty apart
        from some info about which part is used, etc.

        The '-quiet' flag is used since there was a Vivado bug observed in this very
        specific scenario:
        * Vivado 2022.1
        * UltraScale+ non-Zynq device (i.e. no block design)
        * Design contains MicroBlaze
        * Design contains ILA
        In this case the 'write_hw_platform' call would fail.
        This bug might be present in other Vivado versions and in other scenarios as well.
        Since this is a very fringe scenario, and it is unlikely that anyone would ever need the
        .xsa file specifically from the ILA build, we allow the command to fail quietly.

        Arguments:
            output_path: Folder where the "<project name>.xsa" file shall be placed.
        """
        xsa_file = to_tcl_path(output_path / f"{self.name}.xsa")

        tcl = f"""
# ------------------------------------------------------------------------------
puts "Creating hardware platform {xsa_file}..."
write_hw_platform -fixed -force -quiet -include_bit {{{xsa_file}}}

"""

        return tcl