Merge pull request #1497 from firesim/ntnu-integration

Bare Xilinx U250/U280 shell support
Abraham Gonzalez, 2023-05-09 13:12:30 -07:00, committed by GitHub
commit 6a69e3912f
123 changed files with 5193 additions and 853 deletions

View File

@ -11,13 +11,15 @@ def run_docs_generated_components_check():
with cd(manager_fsim_dir), prefix('source sourceme-f1-manager.sh'):
with prefix("cd deploy"):
run("cat config_runtime.yaml")
run("cat ../docs/Running-Simulations-Tutorial/DOCS_EXAMPLE_config_runtime.yaml")
run("diff config_runtime.yaml ../docs/Running-Simulations-Tutorial/DOCS_EXAMPLE_config_runtime.yaml")
path = 'docs/Getting-Started-Guides/AWS-EC2-F1-Tutorial/Running-Simulations-Tutorial/DOCS_EXAMPLE_config_runtime.yaml'
run(f"cat ../{path}")
run(f"diff config_runtime.yaml ../{path}")
run("firesim --help")
run("cat ../docs/Advanced-Usage/Manager/HELP_OUTPUT")
path = "docs/Advanced-Usage/Manager/HELP_OUTPUT"
run(f"cat ../{path}")
run("firesim --help &> TEMP_HELP_OUTPUT")
run("cat TEMP_HELP_OUTPUT")
run("diff TEMP_HELP_OUTPUT ../docs/Advanced-Usage/Manager/HELP_OUTPUT")
run(f"diff TEMP_HELP_OUTPUT ../{path}")
if __name__ == "__main__":
set_fabric_firesim_pem()

View File

@ -21,8 +21,9 @@ def run_docs_generated_components_check():
run("firesim managerinit --platform vitis")
with prefix("cd deploy"):
run("cat config_runtime.yaml")
run("cat ../docs/Running-OnPrem-Simulations-Tutorial/DOCS_EXAMPLE_config_runtime.yaml")
run("diff config_runtime.yaml ../docs/Running-OnPrem-Simulations-Tutorial/DOCS_EXAMPLE_config_runtime.yaml")
path = "docs/Getting-Started-Guides/On-Premises-FPGA-Tutorial/Running-Simulations/DOCS_EXAMPLE_config_runtime.yaml"
run(f"cat ../{path}")
run(f"diff config_runtime.yaml ../{path}")
if __name__ == "__main__":
execute(run_docs_generated_components_check, hosts=["localhost"])

View File

@ -394,28 +394,24 @@ jobs:
name: documentation-check
needs: change-filters
runs-on: ubuntu-20.04
container:
image: firesim/firesim-ci:v1.3
options: --entrypoint /bin/bash
env:
JVM_MEMORY: 3500M # Default JVM maximum heap limit
defaults:
run:
shell: bash -el {0}
steps:
- uses: actions/checkout@v3
- uses: ./.github/actions/job-start
id: job-start
- name: Check that documentation builds with no warnings/errors
if: steps.job-start.outputs.run_result != 'success'
- uses: conda-incubator/setup-miniconda@v2
with:
environment-file: conda-reqs/docs.yaml
miniforge-version: latest
- name: Check that documentation builds with selective warnings/errors
run: |
sudo yum update -y
sudo yum install -y python3-pip make
sudo pip3 install -r docs/requirements.txt
make -C docs html
! grep -v "ERROR: Undefined substitution referenced" warnings.txt
- name: Show error log and dump objects.inv from sphinx if failed
if: ${{ steps.job-start.outputs.run_result != 'success' && failure() }}
if: ${{ failure() }}
run: |
python3 -m sphinx.ext.intersphinx docs/_build/html/objects.inv
cat /tmp/sphinx-err*.log
- uses: ./.github/actions/job-end
cpp-lint:
name: cpp-lint

3
.gitignore vendored
View File

@ -10,3 +10,6 @@ build
scala-doc-env.sh
*.swp
/.conda-env
.metals
vivado*.log
vivado*.jou

View File

@ -3,9 +3,7 @@ version: 2
build:
os: ubuntu-20.04
tools:
python: "3.6"
formats: all
python: "mambaforge-4.10"
sphinx:
configuration: docs/conf.py
@ -13,6 +11,5 @@ sphinx:
submodules:
exclude: all
python:
install:
- requirements: docs/requirements.txt
conda:
environment: conda-reqs/docs.yaml

24
conda-reqs/docs.yaml Normal file
View File

@ -0,0 +1,24 @@
channels:
- conda-forge
- ucb-bar
- nodefaults
dependencies:
# https://conda-forge.org/feedstock-outputs/
# filterable list of all conda-forge packages
# https://conda-forge.org/#contribute
# instructions on adding a recipe
# https://docs.conda.io/projects/conda/en/latest/user-guide/concepts/pkg-specs.html#package-match-specifications
# documentation on package_spec syntax for constraining versions
- python>=3
- sphinx
- Pygments
- sphinx-autobuild
- sphinx_rtd_theme<1.2.0,>=1.0.0
- requests
- sphinx-tabs
- sphinx-copybutton
- pip
- pip:
- Sphinx-Substitution-Extensions
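
The same environment file can be used for local docs builds outside of CI. A minimal sketch, assuming conda (or mamba) is on PATH and the commands run from the repository root; the environment name firesim-docs is arbitrary:

import subprocess

def build_docs_locally(env_name: str = "firesim-docs") -> None:
    """Create a docs environment from conda-reqs/docs.yaml and build the Sphinx HTML docs."""
    # create the environment from the pinned requirements (ignore failure if it already exists)
    subprocess.run(["conda", "env", "create", "-f", "conda-reqs/docs.yaml", "-n", env_name], check=False)
    # run the same target the CI job invokes, inside that environment
    subprocess.run(["conda", "run", "-n", env_name, "make", "-C", "docs", "html"], check=True)

if __name__ == "__main__":
    build_docs_locally()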

View File

@ -104,14 +104,14 @@ def share_agfi_in_all_regions(agfi_id, useridlist):
afi_id = get_afi_for_agfi(agfi_id, region)
share_afi_with_users(afi_id, region, useridlist)
def firesim_tags_to_description(build_quadruplet, deploy_quadruplet, build_triplet, deploy_triplet, commit):
def firesim_tags_to_description(build_quintuplet, deploy_quintuplet, build_triplet, deploy_triplet, commit):
""" Serialize the tags we want to set for storage in the AGFI description """
# note that the serialized rep still includes "triplets" for compat
return """firesim-buildquadruplet:{},firesim-deployquadruplet:{},firesim-buildtriplet:{},firesim-deploytriplet:{},firesim-commit:{}""".format(build_quadruplet,deploy_quadruplet,build_triplet,deploy_triplet,commit)
# note: the serialized rep still includes "triplets" for future manager versions to be compatible with old agfis
return f"""firesim-buildquintuplet:{build_quintuplet},firesim-deployquintuplet:{deploy_quintuplet},firesim-buildtriplet:{build_triplet},firesim-deploytriplet:{deploy_triplet},firesim-commit:{commit}"""
def firesim_description_to_tags(description):
""" Deserialize the tags we want to read from the AGFI description string.
Return dictionary of keys/vals [buildtriplet, deploytriplet, commit]. """
Return dictionary of keys/vals [{build,deploy}quintuplet, {build,deploy}triplet, commit]. """
returndict = dict()
desc_split = description.split(",")
for keypair in desc_split:
@ -135,20 +135,19 @@ def get_firesim_tagval_for_agfi(agfi_id, tagkey):
afi_id = get_afi_for_agfi(agfi_id)
return get_firesim_tagval_for_afi(afi_id, tagkey)
def get_firesim_deploy_quadruplet_for_agfi(agfi_id):
""" Given an agfi_id, return the deploy_quadruplet. """
quad = get_firesim_tagval_for_agfi(agfi_id, 'firesim-deployquadruplet')
if quad is None:
def get_firesim_deploy_quintuplet_for_agfi(agfi_id):
""" Given an agfi_id, return the deploy_quintuplet. """
quin = get_firesim_tagval_for_agfi(agfi_id, 'firesim-deployquintuplet')
if quin is None:
# for old AGFIs that use the old "triplet" key
quad = get_firesim_tagval_for_agfi(agfi_id, 'firesim-deploytriplet')
if len(quad.split("-")) == 3:
quin = get_firesim_tagval_for_agfi(agfi_id, 'firesim-deploytriplet')
if len(quin.split("-")) == 3:
# handle old AGFIs that only have triplet value:
return 'firesim-' + quad
return quad
return 'f1-firesim-' + quin
return quin
## Note that there are no set_firesim_tagval functions, because applying tags is
## done at create-fpga-image time
if __name__ == '__main__':
pass
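
For reference, a minimal sketch of the description round-trip these helpers implement. The quintuplet and commit values below are illustrative, and the parsing mirrors the comma/colon split used by firesim_description_to_tags:

def example_agfi_description_roundtrip() -> None:
    # serialize, following the key:value,key:value shape of firesim_tags_to_description
    build_quin = "f1-firesim-FireSim-FireSimRocketConfig-BaseF1Config"   # illustrative quintuplet
    deploy_quin = build_quin
    build_tri = "-".join(build_quin.split("-")[2:])                      # triplet kept for compatibility
    deploy_tri = build_tri
    commit = "6a69e3912f"
    desc = (f"firesim-buildquintuplet:{build_quin},"
            f"firesim-deployquintuplet:{deploy_quin},"
            f"firesim-buildtriplet:{build_tri},"
            f"firesim-deploytriplet:{deploy_tri},"
            f"firesim-commit:{commit}")

    # deserialize: split on ',' then on the first ':'
    tags = dict(kv.split(":", 1) for kv in desc.split(","))
    assert tags["firesim-deployquintuplet"] == deploy_quin
    # old AGFIs that only carry a triplet are upgraded by prefixing 'f1-firesim-', as above
    assert "f1-firesim-" + tags["firesim-deploytriplet"] == deploy_quin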

View File

@ -0,0 +1,16 @@
# Build-time bitbuilder design configuration for the FireSim Simulation Manager
# See https://docs.fires.im/en/stable/Advanced-Usage/Manager/Manager-Configuration-Files.html for documentation of all of these params.
###########
# Schema:
###########
# # Class name of the bit builder type.
# # This can be determined from `deploy/buildtools/bitbuilder.py`.
# bit_builder_type: <TYPE NAME>
# args:
# # Bitbuilder arguments that are passed to the `BitBuilder`
# # object. Determined by looking at `_parse_args` function of class.
# <K/V pairs of args>
bit_builder_type: XilinxAlveoU250BitBuilder
args: null

View File

@ -0,0 +1,16 @@
# Build-time bitbuilder design configuration for the FireSim Simulation Manager
# See https://docs.fires.im/en/stable/Advanced-Usage/Manager/Manager-Configuration-Files.html for documentation of all of these params.
###########
# Schema:
###########
# # Class name of the bit builder type.
# # This can be determined from `deploy/buildtools/bitbuilder.py`.
# bit_builder_type: <TYPE NAME>
# args:
# # Bitbuilder arguments that are passed to the `BitBuilder`
# # object. Determined by looking at `_parse_args` function of class.
# <K/V pairs of args>
bit_builder_type: XilinxAlveoU280BitBuilder
args: null
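
A short sketch of how a recipe like the two above is consumed: the YAML maps directly onto a bit builder class name plus its constructor args. The parsing below is illustrative; the manager's own loading code lives under deploy/buildtools/.

import yaml

# the recipe shown above, parsed the way a manager-side loader would read it (sketch)
recipe = yaml.safe_load("""
bit_builder_type: XilinxAlveoU280BitBuilder
args: null
""")

assert recipe["bit_builder_type"] == "XilinxAlveoU280BitBuilder"
assert recipe["args"] is None   # YAML null becomes Python None; passed through to the BitBuilder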

View File

@ -56,15 +56,32 @@ class BitBuilder(metaclass=abc.ABCMeta):
"""Any setup needed before `replace_rtl`, `build_driver`, and `build_bitstream` is run."""
raise NotImplementedError
@abc.abstractmethod
def replace_rtl(self) -> None:
"""Generate Verilog from build config. Should run on the manager host."""
raise NotImplementedError
rootLogger.info(f"Building Verilog for {self.build_config.get_chisel_quintuplet()}")
with InfoStreamLogger('stdout'), \
prefix(f'cd {get_deploy_dir()}/../'), \
prefix(f'export RISCV={os.getenv("RISCV", "")}'), \
prefix(f'export PATH={os.getenv("PATH", "")}'), \
prefix(f'export LD_LIBRARY_PATH={os.getenv("LD_LIBRARY_PATH", "")}'), \
prefix('source sourceme-f1-manager.sh --skip-ssh-setup'), \
InfoStreamLogger('stdout'), \
prefix('cd sim/'):
run(self.build_config.make_recipe("replace-rtl"))
@abc.abstractmethod
def build_driver(self) -> None:
"""Build FireSim FPGA driver from build config."""
raise NotImplementedError
"""Build FireSim FPGA driver from build config. Should run on the manager host."""
rootLogger.info(f"Building FPGA driver for {self.build_config.get_chisel_quintuplet()}")
with InfoStreamLogger('stdout'), \
prefix(f'cd {get_deploy_dir()}/../'), \
prefix(f'export RISCV={os.getenv("RISCV", "")}'), \
prefix(f'export PATH={os.getenv("PATH", "")}'), \
prefix(f'export LD_LIBRARY_PATH={os.getenv("LD_LIBRARY_PATH", "")}'), \
prefix('source sourceme-f1-manager.sh --skip-ssh-setup'), \
prefix('cd sim/'):
run(self.build_config.make_recipe("driver"))
@abc.abstractmethod
def build_bitstream(self, bypass: bool = False) -> bool:
@ -79,6 +96,34 @@ class BitBuilder(metaclass=abc.ABCMeta):
"""
raise NotImplementedError
def get_metadata_string(self) -> str:
"""Standardized metadata format used across different FPGA platforms
"""
# construct the "tags" we store in the metadata description
tag_build_quintuplet = self.build_config.get_chisel_quintuplet()
tag_deploy_quintuplet = self.build_config.get_effective_deploy_quintuplet()
tag_build_triplet = self.build_config.get_chisel_triplet()
tag_deploy_triplet = self.build_config.get_effective_deploy_triplet()
# the asserts are left over from when we tried to do this with tags
# - technically I don't know how long these descriptions are allowed to be,
# but it's at least 2048 chars, so I'll leave these here for now as sanity
# checks.
assert len(tag_build_quintuplet) <= 255, "ERR: does not support tags longer than 256 chars for build_quintuplet"
assert len(tag_deploy_quintuplet) <= 255, "ERR: does not support tags longer than 256 chars for deploy_quintuplet"
assert len(tag_build_triplet) <= 255, "ERR: does not support tags longer than 256 chars for build_triplet"
assert len(tag_deploy_triplet) <= 255, "ERR: does not support tags longer than 256 chars for deploy_triplet"
is_dirty_str = local("if [[ $(git status --porcelain) ]]; then echo '-dirty'; fi", capture=True)
hash = local("git rev-parse HEAD", capture=True)
tag_fsimcommit = hash + is_dirty_str
assert len(tag_fsimcommit) <= 255, "ERR: aws does not support tags longer than 256 chars for fsimcommit"
# construct the serialized description from these tags.
return firesim_tags_to_description(tag_build_quintuplet, tag_deploy_quintuplet, tag_build_triplet, tag_deploy_triplet, tag_fsimcommit)
class F1BitBuilder(BitBuilder):
"""Bit builder class that builds a AWS EC2 F1 AGFI (bitstream) from the build config.
@ -109,42 +154,17 @@ class F1BitBuilder(BitBuilder):
# check to see email notifications can be subscribed
get_snsname_arn()
def replace_rtl(self) -> None:
rootLogger.info(f"Building Verilog for {self.build_config.get_chisel_quadruplet()}")
with InfoStreamLogger('stdout'), \
prefix(f'cd {get_deploy_dir()}/../'), \
prefix(f'export RISCV={os.getenv("RISCV", "")}'), \
prefix(f'export PATH={os.getenv("PATH", "")}'), \
prefix(f'export LD_LIBRARY_PATH={os.getenv("LD_LIBRARY_PATH", "")}'), \
prefix('source sourceme-f1-manager.sh --skip-ssh-setup'), \
InfoStreamLogger('stdout'), \
prefix('cd sim/'):
run(self.build_config.make_recipe("PLATFORM=f1 replace-rtl"))
def build_driver(self) -> None:
rootLogger.info(f"Building FPGA driver for {self.build_config.get_chisel_quadruplet()}")
with InfoStreamLogger('stdout'), \
prefix(f'cd {get_deploy_dir()}/../'), \
prefix(f'export RISCV={os.getenv("RISCV", "")}'), \
prefix(f'export PATH={os.getenv("PATH", "")}'), \
prefix(f'export LD_LIBRARY_PATH={os.getenv("LD_LIBRARY_PATH", "")}'), \
prefix('source sourceme-f1-manager.sh --skip-ssh-setup'), \
prefix('cd sim/'):
run(self.build_config.make_recipe("PLATFORM=f1 driver"))
def cl_dir_setup(self, chisel_triplet: str, dest_build_dir: str) -> str:
def cl_dir_setup(self, chisel_quintuplet: str, dest_build_dir: str) -> str:
"""Setup CL_DIR on build host.
Args:
chisel_triplet: Build config chisel triplet used to uniquely identify build dir.
chisel_quintuplet: Build config chisel quintuplet used to uniquely identify build dir.
dest_build_dir: Destination base directory to use.
Returns:
Path to CL_DIR directory (that is setup) or `None` if invalid.
"""
fpga_build_postfix = f"hdk/cl/developer_designs/cl_{chisel_triplet}"
fpga_build_postfix = f"hdk/cl/developer_designs/cl_{chisel_quintuplet}"
# local paths
local_awsfpga_dir = f"{get_deploy_dir()}/../platforms/f1/aws-fpga"
@ -185,8 +205,10 @@ class F1BitBuilder(BitBuilder):
Returns:
Boolean indicating if the build passed or failed.
"""
build_farm = self.build_config.build_config_file.build_farm
if bypass:
self.build_config.build_config_file.build_farm.release_build_host(self.build_config)
build_farm.release_build_host(self.build_config)
return True
# The default error-handling procedure. Send an email and teardown instance
@ -195,25 +217,23 @@ class F1BitBuilder(BitBuilder):
message_title = "FireSim FPGA Build Failed"
message_body = "Your FPGA build failed for quadruplet: " + self.build_config.get_chisel_quadruplet()
message_body = "Your FPGA build failed for quintuplet: " + self.build_config.get_chisel_quintuplet()
send_firesim_notification(message_title, message_body)
rootLogger.info(message_title)
rootLogger.info(message_body)
self.build_config.build_config_file.build_farm.release_build_host(self.build_config)
build_farm.release_build_host(self.build_config)
rootLogger.info("Building AWS F1 AGFI from Verilog")
local_deploy_dir = get_deploy_dir()
fpga_build_postfix = f"hdk/cl/developer_designs/cl_{self.build_config.get_chisel_triplet()}"
fpga_build_postfix = f"hdk/cl/developer_designs/cl_{self.build_config.get_chisel_quintuplet()}"
local_results_dir = f"{local_deploy_dir}/results-build/{self.build_config.get_build_dir_name()}"
build_farm = self.build_config.build_config_file.build_farm
# 'cl_dir' holds the eventual directory in which vivado will run.
cl_dir = self.cl_dir_setup(self.build_config.get_chisel_triplet(), build_farm.get_build_host(self.build_config).dest_build_dir)
cl_dir = self.cl_dir_setup(self.build_config.get_chisel_quintuplet(), build_farm.get_build_host(self.build_config).dest_build_dir)
vivado_result = 0
@ -251,7 +271,7 @@ class F1BitBuilder(BitBuilder):
on_build_failure()
return False
self.build_config.build_config_file.build_farm.release_build_host(self.build_config)
build_farm.release_build_host(self.build_config)
return True
@ -272,35 +292,13 @@ class F1BitBuilder(BitBuilder):
s3bucket = self.s3_bucketname
afiname = self.build_config.name
# construct the "tags" we store in the AGFI description
tag_build_quadruplet = self.build_config.get_chisel_quadruplet()
tag_deploy_quadruplet = self.build_config.get_effective_deploy_quadruplet()
tag_build_triplet = self.build_config.get_chisel_triplet()
tag_deploy_triplet = self.build_config.get_effective_deploy_triplet()
# the asserts are left over from when we tried to do this with tags
# - technically I don't know how long these descriptions are allowed to be,
# but it's at least 2048 chars, so I'll leave these here for now as sanity
# checks.
assert len(tag_build_quadruplet) <= 255, "ERR: aws does not support tags longer than 256 chars for build_quadruplet"
assert len(tag_deploy_quadruplet) <= 255, "ERR: aws does not support tags longer than 256 chars for deploy_quadruplet"
assert len(tag_build_triplet) <= 255, "ERR: aws does not support tags longer than 256 chars for build_triplet"
assert len(tag_deploy_triplet) <= 255, "ERR: aws does not support tags longer than 256 chars for deploy_triplet"
is_dirty_str = local("if [[ $(git status --porcelain) ]]; then echo '-dirty'; fi", capture=True)
hash = local("git rev-parse HEAD", capture=True)
tag_fsimcommit = hash + is_dirty_str
assert len(tag_fsimcommit) <= 255, "ERR: aws does not support tags longer than 256 chars for fsimcommit"
# construct the serialized description from these tags.
description = firesim_tags_to_description(tag_build_quadruplet, tag_deploy_quadruplet, tag_build_triplet, tag_deploy_triplet, tag_fsimcommit)
description = self.get_metadata_string()
# if we're unlucky, multiple vivado builds may launch at the same time. so we
# append the build node IP + a random string to diff them in s3
global_append = "-" + str(env.host_string) + "-" + ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(10)) + ".tar"
with lcd(f"{local_results_dir}/cl_{self.build_config.get_chisel_triplet()}/build/checkpoints/to_aws/"):
with lcd(f"{local_results_dir}/cl_{self.build_config.get_chisel_quintuplet()}/build/checkpoints/to_aws/"):
files = local('ls *.tar', capture=True)
rootLogger.debug(files)
rootLogger.debug(files.stderr)
@ -337,7 +335,7 @@ class F1BitBuilder(BitBuilder):
message_title = "FireSim FPGA Build Completed"
agfi_entry = afiname + ":\n"
agfi_entry += " agfi: " + agfi + "\n"
agfi_entry += " deploy_quadruplet_override: null\n"
agfi_entry += " deploy_quintuplet_override: null\n"
agfi_entry += " custom_runtime_config: null\n"
message_body = "Your AGFI has been created!\nAdd\n\n" + agfi_entry + "\nto your config_hwdb.yaml to use this hardware configuration."
@ -383,41 +381,17 @@ class VitisBitBuilder(BitBuilder):
def setup(self) -> None:
return
def replace_rtl(self):
rootLogger.info(f"Building Verilog for {self.build_config.get_chisel_quadruplet()}")
with InfoStreamLogger('stdout'), \
prefix(f'cd {get_deploy_dir()}/../'), \
prefix(f'export RISCV={os.getenv("RISCV", "")}'), \
prefix(f'export PATH={os.getenv("PATH", "")}'), \
prefix(f'export LD_LIBRARY_PATH={os.getenv("LD_LIBRARY_PATH", "")}'), \
prefix('source sourceme-f1-manager.sh --skip-ssh-setup'), \
prefix('cd sim/'):
run(self.build_config.make_recipe("PLATFORM=vitis replace-rtl"))
def build_driver(self):
rootLogger.info("Building FPGA driver for {}".format(str(self.build_config.get_chisel_quadruplet())))
with InfoStreamLogger('stdout'), \
prefix(f'cd {get_deploy_dir()}/../'), \
prefix(f'export RISCV={os.getenv("RISCV", "")}'), \
prefix(f'export PATH={os.getenv("PATH", "")}'), \
prefix(f'export LD_LIBRARY_PATH={os.getenv("LD_LIBRARY_PATH", "")}'), \
prefix('source sourceme-f1-manager.sh --skip-ssh-setup'), \
prefix('cd sim/'):
run(self.build_config.make_recipe("PLATFORM=vitis driver"))
def cl_dir_setup(self, chisel_triplet: str, dest_build_dir: str) -> str:
def cl_dir_setup(self, chisel_quintuplet: str, dest_build_dir: str) -> str:
"""Setup CL_DIR on build host.
Args:
chisel_triplet: Build config chisel triplet used to uniquely identify build dir.
chisel_quintuplet: Build config chisel quintuplet used to uniquely identify build dir.
dest_build_dir: Destination base directory to use.
Returns:
Path to CL_DIR directory (that is setup) or `None` if invalid.
"""
fpga_build_postfix = f"cl_{chisel_triplet}"
fpga_build_postfix = f"cl_{chisel_quintuplet}"
# local paths
local_vitis_dir = f"{get_deploy_dir()}/../platforms/vitis"
@ -457,8 +431,10 @@ class VitisBitBuilder(BitBuilder):
Returns:
Boolean indicating if the build passed or failed.
"""
build_farm = self.build_config.build_config_file.build_farm
if bypass:
self.build_config.build_config_file.build_farm.release_build_host(self.build_config)
build_farm.release_build_host(self.build_config)
return True
# The default error-handling procedure. Send an email and teardown instance
@ -467,26 +443,23 @@ class VitisBitBuilder(BitBuilder):
message_title = "FireSim Vitis FPGA Build Failed"
message_body = "Your FPGA build failed for quadruplet: " + self.build_config.get_chisel_quadruplet()
message_body = "Your FPGA build failed for quintuplet: " + self.build_config.get_chisel_quintuplet()
rootLogger.info(message_title)
rootLogger.info(message_body)
self.build_config.build_config_file.build_farm.release_build_host(self.build_config)
build_farm.release_build_host(self.build_config)
rootLogger.info("Building Vitis Bitstream from Verilog")
local_deploy_dir = get_deploy_dir()
fpga_build_postfix = f"cl_{self.build_config.get_chisel_triplet()}"
fpga_build_postfix = f"cl_{self.build_config.get_chisel_quintuplet()}"
local_results_dir = f"{local_deploy_dir}/results-build/{self.build_config.get_build_dir_name()}"
build_farm = self.build_config.build_config_file.build_farm
# 'cl_dir' holds the eventual directory in which vivado will run.
cl_dir = self.cl_dir_setup(self.build_config.get_chisel_triplet(), build_farm.get_build_host(self.build_config).dest_build_dir)
cl_dir = self.cl_dir_setup(self.build_config.get_chisel_quintuplet(), build_farm.get_build_host(self.build_config).dest_build_dir)
vitis_result = 0
# TODO: Put script within Vitis area
# copy script to the cl_dir and execute
rsync_cap = rsync_project(
local_dir=f"{local_deploy_dir}/../platforms/vitis/build-bitstream.sh",
@ -517,13 +490,12 @@ class VitisBitBuilder(BitBuilder):
return False
hwdb_entry_name = self.build_config.name
xclbin_path = cl_dir + f"/bitstream/build_dir.{self.device}/firesim.xclbin"
results_build_dir = """{}/""".format(local_results_dir)
local_cl_dir = f"{local_results_dir}/{fpga_build_postfix}"
xclbin_path = "file://" + local_cl_dir + f"/bitstream/build_dir.{self.device}/firesim.xclbin"
hwdb_entry = hwdb_entry_name + ":\n"
hwdb_entry += " xclbin: " + xclbin_path + "\n"
hwdb_entry += f" deploy_quadruplet_override: {self.build_config.get_chisel_quadruplet()}\n"
hwdb_entry += f" deploy_quintuplet_override: {self.build_config.get_chisel_quintuplet()}\n"
hwdb_entry += " custom_runtime_config: null\n"
message_title = "FireSim FPGA Build Completed"
@ -547,6 +519,187 @@ class VitisBitBuilder(BitBuilder):
rootLogger.info(f"Build complete! Vitis bitstream ready. See {os.path.join(hwdb_entry_file_location,hwdb_entry_name)}.")
self.build_config.build_config_file.build_farm.release_build_host(self.build_config)
build_farm.release_build_host(self.build_config)
return True
class XilinxAlveoBitBuilder(BitBuilder):
"""Bit builder class that builds a Xilinx Alveo bitstream from the build config."""
BOARD_NAME: Optional[str]
def __init__(self, build_config: BuildConfig, args: Dict[str, Any]) -> None:
super().__init__(build_config, args)
self.BOARD_NAME = None
def setup(self) -> None:
return
def cl_dir_setup(self, chisel_quintuplet: str, dest_build_dir: str) -> str:
"""Setup CL_DIR on build host.
Args:
chisel_quintuplet: Build config chisel quintuplet used to uniquely identify build dir.
dest_build_dir: Destination base directory to use.
Returns:
Path to CL_DIR directory (that is setup) or `None` if invalid.
"""
fpga_build_postfix = f"cl_{chisel_quintuplet}"
# local paths
local_alveo_dir = f"{get_deploy_dir()}/../platforms/{self.build_config.PLATFORM}"
dest_alveo_dir = f"{dest_build_dir}/platforms/{self.build_config.PLATFORM}"
# copy alveo files to the build instance.
# do the rsync, but ignore any checkpoints that might exist on this machine
# (in case builds were run locally)
# extra_opts -L resolves symlinks
run(f'mkdir -p {dest_alveo_dir}')
rsync_cap = rsync_project(
local_dir=local_alveo_dir,
remote_dir=dest_alveo_dir,
ssh_opts="-o StrictHostKeyChecking=no",
exclude="cl_*",
extra_opts="-L", capture=True)
rootLogger.debug(rsync_cap)
rootLogger.debug(rsync_cap.stderr)
rsync_cap = rsync_project(
local_dir=f"{local_alveo_dir}/{fpga_build_postfix}/",
remote_dir=f'{dest_alveo_dir}/{fpga_build_postfix}',
ssh_opts="-o StrictHostKeyChecking=no",
extra_opts="-L", capture=True)
rootLogger.debug(rsync_cap)
rootLogger.debug(rsync_cap.stderr)
return f"{dest_alveo_dir}/{fpga_build_postfix}"
def build_bitstream(self, bypass: bool = False) -> bool:
""" Run Vivado to generate an bit file. Then terminate the instance at the end.
Args:
bypass: If true, immediately return and terminate build host. Used for testing purposes.
Returns:
Boolean indicating if the build passed or failed.
"""
build_farm = self.build_config.build_config_file.build_farm
if bypass:
build_farm.release_build_host(self.build_config)
return True
# The default error-handling procedure. Send an email and teardown instance
def on_build_failure():
""" Terminate build host and notify user that build failed """
message_title = f"FireSim Xilinx Alveo {self.build_config.PLATFORM} FPGA Build Failed"
message_body = "Your FPGA build failed for quintuplet: " + self.build_config.get_chisel_quintuplet()
rootLogger.info(message_title)
rootLogger.info(message_body)
build_farm.release_build_host(self.build_config)
rootLogger.info(f"Building Xilinx Alveo {self.build_config.PLATFORM} Bitstream from Verilog")
local_deploy_dir = get_deploy_dir()
fpga_build_postfix = f"cl_{self.build_config.get_chisel_quintuplet()}"
local_results_dir = f"{local_deploy_dir}/results-build/{self.build_config.get_build_dir_name()}"
# 'cl_dir' holds the eventual directory in which vivado will run.
cl_dir = self.cl_dir_setup(self.build_config.get_chisel_quintuplet(), build_farm.get_build_host(self.build_config).dest_build_dir)
alveo_result = 0
# copy script to the cl_dir and execute
rsync_cap = rsync_project(
local_dir=f"{local_deploy_dir}/../platforms/{self.build_config.PLATFORM}/build-bitstream.sh",
remote_dir=f"{cl_dir}/",
ssh_opts="-o StrictHostKeyChecking=no",
extra_opts="-L", capture=True)
rootLogger.debug(rsync_cap)
rootLogger.debug(rsync_cap.stderr)
fpga_frequency = self.build_config.get_frequency()
build_strategy = self.build_config.get_strategy().name
with InfoStreamLogger('stdout'), settings(warn_only=True):
alveo_result = run(f"{cl_dir}/build-bitstream.sh --cl_dir {cl_dir} --frequency {fpga_frequency} --strategy {build_strategy} --board {self.BOARD_NAME}").return_code
# put build results in the result-build area
rsync_cap = rsync_project(
local_dir=f"{local_results_dir}/",
remote_dir=cl_dir,
ssh_opts="-o StrictHostKeyChecking=no", upload=False, extra_opts="-l",
capture=True)
rootLogger.debug(rsync_cap)
rootLogger.debug(rsync_cap.stderr)
if alveo_result != 0:
on_build_failure()
return False
# make hwdb entry from locally stored results
hwdb_entry_name = self.build_config.name
local_cl_dir = f"{local_results_dir}/{fpga_build_postfix}"
bit_path = f"{local_cl_dir}/vivado_proj/firesim.bit"
tar_staging_path = f"{local_cl_dir}/{self.build_config.PLATFORM}"
tar_name = "firesim.tar.gz"
# store files into staging dir
local(f"rm -rf {tar_staging_path}")
local(f"mkdir -p {tar_staging_path}")
# store bitfile
local(f"cp {bit_path} {tar_staging_path}")
# store metadata string
local(f"""echo '{self.get_metadata_string()}' >> {tar_staging_path}/metadata""")
# form tar.gz
with prefix(f"cd {local_cl_dir}"):
local(f"tar zcvf {tar_name} {self.build_config.PLATFORM}/")
hwdb_entry = hwdb_entry_name + ":\n"
hwdb_entry += f" bitstream_tar: file://{local_cl_dir}/{tar_name}\n"
hwdb_entry += f" deploy_quintuplet_override: null\n"
hwdb_entry += " custom_runtime_config: null\n"
message_title = "FireSim FPGA Build Completed"
message_body = f"Your bitstream has been created!\nAdd\n\n{hwdb_entry}\nto your config_hwdb.yaml to use this hardware configuration."
rootLogger.info(message_title)
rootLogger.info(message_body)
# for convenience when generating a bunch of images. you can just
# cat all the files in this directory after your builds finish to get
# all the entries to copy into config_hwdb.yaml
hwdb_entry_file_location = f"{local_deploy_dir}/built-hwdb-entries/"
local("mkdir -p " + hwdb_entry_file_location)
with open(hwdb_entry_file_location + "/" + hwdb_entry_name, "w") as outputfile:
outputfile.write(hwdb_entry)
if self.build_config.post_build_hook:
localcap = local(f"{self.build_config.post_build_hook} {local_results_dir}", capture=True)
rootLogger.debug("[localhost] " + str(localcap))
rootLogger.debug("[localhost] " + str(localcap.stderr))
rootLogger.info(f"Build complete! Xilinx Alveo {self.build_config.PLATFORM} bitstream ready. See {os.path.join(hwdb_entry_file_location,hwdb_entry_name)}.")
build_farm.release_build_host(self.build_config)
return True
class XilinxAlveoU280BitBuilder(XilinxAlveoBitBuilder):
def __init__(self, build_config: BuildConfig, args: Dict[str, Any]) -> None:
super().__init__(build_config, args)
self.BOARD_NAME = "au280"
class XilinxAlveoU250BitBuilder(XilinxAlveoBitBuilder):
def __init__(self, build_config: BuildConfig, args: Dict[str, Any]) -> None:
super().__init__(build_config, args)
self.BOARD_NAME = "au250"

View File

@ -52,7 +52,7 @@ class BuildConfig:
TARGET_PROJECT: Target project to build.
DESIGN: Design to build.
TARGET_CONFIG: Target config to build.
deploy_quadruplet: Deploy quadruplet override.
deploy_quintuplet: Deploy quintuplet override.
launch_time: Launch time of the manager.
PLATFORM_CONFIG: Platform config to build.
fpga_frequency: Frequency for the FPGA build.
@ -65,7 +65,7 @@ class BuildConfig:
TARGET_PROJECT: str
DESIGN: str
TARGET_CONFIG: str
deploy_quadruplet: Optional[str]
deploy_quintuplet: Optional[str]
frequency: float
strategy: BuildStrategy
launch_time: str
@ -88,24 +88,25 @@ class BuildConfig:
self.name = name
self.build_config_file = build_config_file
# default provided for old build recipes that don't specify TARGET_PROJECT
# default provided for old build recipes that don't specify TARGET_PROJECT, PLATFORM
self.PLATFORM = recipe_config_dict.get('PLATFORM', 'f1')
self.TARGET_PROJECT = recipe_config_dict.get('TARGET_PROJECT', 'firesim')
self.DESIGN = recipe_config_dict['DESIGN']
self.TARGET_CONFIG = recipe_config_dict['TARGET_CONFIG']
if 'deploy_triplet' in recipe_config_dict.keys() and 'deploy_quadruplet' in recipe_config_dict.keys():
rootLogger.error("Cannot have both deploy_quadruplet and deploy_triplet in build config. Define only deploy_quadruplet.")
if 'deploy_triplet' in recipe_config_dict.keys() and 'deploy_quintuplet' in recipe_config_dict.keys():
rootLogger.error("Cannot have both 'deploy_quintuplet' and 'deploy_triplet' in build config. Define only 'deploy_quintuplet'.")
sys.exit(1)
elif 'deploy_triplet' in recipe_config_dict.keys():
rootLogger.warning("Please rename your 'deploy_triplet' key in your build config to 'deploy_quadruplet'. Support for 'deploy_triplet' will be removed in the future.")
rootLogger.warning("Please rename your 'deploy_triplet' key in your build config to 'deploy_quintuplet'. Support for 'deploy_triplet' will be removed in the future.")
self.deploy_quadruplet = recipe_config_dict.get('deploy_quadruplet')
if self.deploy_quadruplet is None:
self.deploy_quintuplet = recipe_config_dict.get('deploy_quintuplet')
if self.deploy_quintuplet is None:
# temporarily support backwards compat
self.deploy_quadruplet = recipe_config_dict.get('deploy_triplet')
self.deploy_quintuplet = recipe_config_dict.get('deploy_triplet')
if self.deploy_quadruplet is not None and len(self.deploy_quadruplet.split("-")) == 3:
self.deploy_quadruplet = 'firesim-' + self.deploy_quadruplet
if self.deploy_quintuplet is not None and len(self.deploy_quintuplet.split("-")) == 3:
self.deploy_quintuplet = 'f1-firesim-' + self.deploy_quintuplet
self.launch_time = launch_time
# run platform specific options
@ -150,14 +151,6 @@ class BuildConfig:
"""
return f"{self.DESIGN}-{self.TARGET_CONFIG}-{self.PLATFORM_CONFIG}"
def get_chisel_quadruplet(self) -> str:
"""Get the unique build-specific '-' deliminated quadruplet.
Returns:
Chisel quadruplet
"""
return f"{self.TARGET_PROJECT}-{self.DESIGN}-{self.TARGET_CONFIG}-{self.PLATFORM_CONFIG}"
def get_effective_deploy_triplet(self) -> str:
"""Get the effective deploy triplet, i.e. the triplet version of
get_effective_deploy_quadruplet().
@ -165,18 +158,26 @@ class BuildConfig:
Returns:
Effective deploy triplet
"""
return "-".join(self.get_effective_deploy_quadruplet().split("-")[1:])
return "-".join(self.get_effective_deploy_quintuplet().split("-")[2:])
def get_effective_deploy_quadruplet(self) -> str:
"""Get the effective deploy triplet, i.e. the value specified in
deploy_quadruplet if specified, otherwise just get_chisel_quadruplet().
def get_chisel_quintuplet(self) -> str:
"""Get the unique build-specific '-' deliminated quintuplet.
Returns:
Effective deploy quadruplet
Chisel quintuplet
"""
if self.deploy_quadruplet:
return self.deploy_quadruplet
return self.get_chisel_quadruplet()
return f"{self.PLATFORM}-{self.TARGET_PROJECT}-{self.DESIGN}-{self.TARGET_CONFIG}-{self.PLATFORM_CONFIG}"
def get_effective_deploy_quintuplet(self) -> str:
"""Get the effective deploy quintuplet, i.e. the value specified in
deploy_quintuplet if specified, otherwise just get_chisel_quintuplet().
Returns:
Effective deploy quintuplet
"""
if self.deploy_quintuplet:
return self.deploy_quintuplet
return self.get_chisel_quintuplet()
def get_frequency(self) -> float:
"""Get the desired fpga frequency.
@ -211,12 +212,10 @@ class BuildConfig:
Returns:
Fully specified make command.
"""
return f"""make TARGET_PROJECT={self.TARGET_PROJECT} DESIGN={self.DESIGN} TARGET_CONFIG={self.TARGET_CONFIG} PLATFORM_CONFIG={self.PLATFORM_CONFIG} {recipe}"""
return f"""make PLATFORM={self.PLATFORM} TARGET_PROJECT={self.TARGET_PROJECT} DESIGN={self.DESIGN} TARGET_CONFIG={self.TARGET_CONFIG} PLATFORM_CONFIG={self.PLATFORM_CONFIG} {recipe}"""
def __repr__(self) -> str:
return f"< {type(self)}(name={self.name!r}, build_config_file={self.build_config_file!r}) @{id(self)} >"
def __str__(self) -> str:
return pprint.pformat(vars(self), width=1, indent=10)
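
For concreteness, a small sketch of the quintuplet naming introduced here and of the backwards-compatibility upgrade of legacy triplets; all field values are illustrative:

# quintuplet = PLATFORM-TARGET_PROJECT-DESIGN-TARGET_CONFIG-PLATFORM_CONFIG (get_chisel_quintuplet)
fields = ("xilinx_alveo_u250", "firesim", "FireSim", "FireSimRocketConfig", "BaseXilinxAlveoConfig")
quintuplet = "-".join(fields)

# the effective deploy triplet drops the first two fields, matching split("-")[2:] above
assert "-".join(quintuplet.split("-")[2:]) == "FireSim-FireSimRocketConfig-BaseXilinxAlveoConfig"

# a legacy three-part deploy_triplet is upgraded by prefixing the old default platform/project
legacy = "FireSim-FireSimRocketConfig-BaseF1Config"
assert len(("f1-firesim-" + legacy).split("-")) == 5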

View File

@ -184,7 +184,7 @@ def managerinit(args: argparse.Namespace):
"managerinit replace start",
"managerinit replace end",
bf_recipe_lines)
elif args.platform == 'vitis':
elif args.platform == 'vitis' or args.platform == 'xilinx_alveo_u250' or args.platform == 'xilinx_alveo_u280':
runfarm_default_file = "run-farm-recipes/externally_provisioned.yaml"
with open(runfarm_default_file, "r") as f:
rf_recipe_lines = f.readlines()
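
A condensed sketch of the platform dispatch this hunk extends; only the mapping visible in the diff is reproduced, and other platforms keep their own default recipes in branches not shown here:

EXTERNALLY_PROVISIONED_PLATFORMS = {"vitis", "xilinx_alveo_u250", "xilinx_alveo_u280"}

def default_runfarm_recipe(platform: str) -> str:
    """Pick the default run-farm recipe for `firesim managerinit --platform <name>` (sketch)."""
    if platform in EXTERNALLY_PROVISIONED_PLATFORMS:
        return "run-farm-recipes/externally_provisioned.yaml"
    raise NotImplementedError(f"default recipe for {platform} is chosen elsewhere in managerinit")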

View File

@ -272,7 +272,7 @@ class FireSimServerNode(FireSimNode):
str(self.server_hardware_config))
return msg
def get_sim_start_command(self, slotno: int, sudo: bool) -> str:
def get_sim_start_command(self, slotno: int, sudo: bool, fpga_physical_selection: Optional[str]) -> str:
""" get the command to run a simulation. assumes it will be
called in a directory where its required_files are already located.
"""
@ -310,7 +310,9 @@ class FireSimServerNode(FireSimNode):
self.hostdebug_config,
self.synthprint_config,
sudo,
self.plusarg_passthrough)
fpga_physical_selection,
self.plusarg_passthrough,
"")
return runcommand
@ -363,10 +365,10 @@ class FireSimServerNode(FireSimNode):
return script_path
def write_sim_start_script(self, slotno: int, sudo: bool) -> str:
def write_sim_start_script(self, slotno: int, sudo: bool, fpga_physical_selection: Optional[str]) -> str:
""" Write sim-run.sh script to local job results dir and return its
path. """
start_cmd = self.get_sim_start_command(slotno, sudo)
start_cmd = self.get_sim_start_command(slotno, sudo, fpga_physical_selection)
sim_start_script_local_path = self.write_script("sim-run.sh", start_cmd)
return sim_start_script_local_path
@ -510,7 +512,7 @@ class FireSimServerNode(FireSimNode):
if hwcfg.driver_tar is not None:
return None
return (str(self.get_resolved_server_hardware_config().local_tarball_path(InstanceDeployManager.get_driver_tar_filename())), InstanceDeployManager.get_driver_tar_filename())
return (str(hwcfg.local_tarball_path(hwcfg.get_driver_tar_filename())), hwcfg.get_driver_tar_filename())
@ -634,7 +636,7 @@ class FireSimSuperNodeServerNode(FireSimServerNode):
num_siblings = self.supernode_get_num_siblings_plus_one()
return [self.get_rootfs_name()] + [self.supernode_get_sibling(x).get_rootfs_name() for x in range(1, num_siblings)]
def get_sim_start_command(self, slotno: int, sudo: bool) -> str:
def get_sim_start_command(self, slotno: int, sudo: bool, fpga_physical_selection: Optional[str]) -> str:
""" get the command to run a simulation. assumes it will be
called in a directory where its required_files are already located."""
@ -681,7 +683,9 @@ class FireSimSuperNodeServerNode(FireSimServerNode):
self.hostdebug_config,
self.synthprint_config,
sudo,
self.plusarg_passthrough)
fpga_physical_selection,
self.plusarg_passthrough,
"")
return runcommand

View File

@ -316,8 +316,6 @@ class FireSimTopologyWithPasses:
RuntimeHWConfig object then keep it the same.
2) If a node's hardware config is none, give it the default
hardware config.
3) In either case, call get_deployquadruplet_for_config() once to
make the API call and cache the result for the deployquadruplet.
"""
servers = self.firesimtopol.get_dfs_order_servers()
@ -331,7 +329,6 @@ class FireSimTopologyWithPasses:
hw_cfg = runtimehwconfig_lookup_fn(self.defaulthwconfig)
elif isinstance(hw_cfg, str):
hw_cfg = runtimehwconfig_lookup_fn(hw_cfg)
hw_cfg.get_deployquadruplet_for_config()
server.set_server_hardware_config(hw_cfg)
def pass_apply_default_params(self) -> None:
@ -406,7 +403,7 @@ class FireSimTopologyWithPasses:
continue # skip building or tarballing if we have a prebuilt one
resolved_cfg.build_sim_driver()
resolved_cfg.build_sim_tarball(server.get_tarball_files_paths(), InstanceDeployManager.get_driver_tar_filename())
resolved_cfg.build_sim_tarball(server.get_tarball_files_paths(), resolved_cfg.get_driver_tar_filename())
servers = self.firesimtopol.get_dfs_order_servers()
execute(build_drivers_helper, servers, hosts=['localhost'])
@ -419,19 +416,18 @@ class FireSimTopologyWithPasses:
for switch in switches:
switch.build_switch_sim_binary()
def pass_fetch_URI_resolve_runtime_cfg(self, dir: str) -> None:
"""Locally download URIs, and use any URI-contained metadata to resolve runtime config values"""
servers = self.firesimtopol.get_dfs_order_servers()
for server in servers:
resolved_cfg = server.get_resolved_server_hardware_config()
resolved_cfg.fetch_all_URI(dir)
resolved_cfg.resolve_hwcfg_values(dir)
def infrasetup_passes(self, use_mock_instances_for_testing: bool) -> None:
""" extra passes needed to do infrasetup """
self.run_farm.post_launch_binding(use_mock_instances_for_testing)
self.pass_build_required_drivers()
self.pass_build_required_switches()
def serial_fetch_URI(run_farm: RunFarm, dir: str) -> None:
my_node = run_farm.lookup_by_host(env.host_string)
assert my_node is not None
assert my_node.instance_deploy_manager is not None
my_node.instance_deploy_manager.fetch_all_URI(dir)
@parallel
def infrasetup_node_wrapper(run_farm: RunFarm, dir: str) -> None:
my_node = run_farm.lookup_by_host(env.host_string)
@ -442,15 +438,23 @@ class FireSimTopologyWithPasses:
all_run_farm_ips = [x.get_host() for x in self.run_farm.get_all_bound_host_nodes()]
execute(instance_liveness, hosts=all_run_farm_ips)
# Both steps occur within the context of a tempdir.
# Steps occur within the context of a tempdir.
# This allows URIs to survive until after deploy, and cleanup upon error
with TemporaryDirectory() as uridir:
execute(serial_fetch_URI, self.run_farm, hosts=all_run_farm_ips, dir=uridir)
execute(infrasetup_node_wrapper, self.run_farm, hosts=all_run_farm_ips, dir=uridir)
self.pass_fetch_URI_resolve_runtime_cfg(uridir)
self.pass_build_required_drivers()
self.pass_build_required_switches()
execute(infrasetup_node_wrapper, self.run_farm, uridir, hosts=all_run_farm_ips)
def build_driver_passes(self) -> None:
""" Only run passes to build drivers. """
self.pass_build_required_drivers()
# Steps occur within the context of a tempdir.
# This allows URIs to survive until after deploy, and cleanup upon error
with TemporaryDirectory() as uridir:
self.pass_fetch_URI_resolve_runtime_cfg(uridir)
self.pass_build_required_drivers()
def boot_simulation_passes(self, use_mock_instances_for_testing: bool, skip_instance_binding: bool = False) -> None:
""" Passes that setup for boot and boot the simulation.
@ -471,6 +475,11 @@ class FireSimTopologyWithPasses:
assert my_node.instance_deploy_manager is not None
my_node.instance_deploy_manager.start_switches_instance()
# Steps occur within the context of a tempdir.
# This allows URIs to survive until after deploy, and cleanup upon error
with TemporaryDirectory() as uridir:
self.pass_fetch_URI_resolve_runtime_cfg(uridir)
all_run_farm_ips = [x.get_host() for x in self.run_farm.get_all_bound_host_nodes()]
execute(instance_liveness, hosts=all_run_farm_ips)
execute(boot_switch_wrapper, self.run_farm, hosts=all_run_farm_ips)
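
The infrasetup, build-driver, and boot paths above now share one pattern: any pass that needs URI-backed artifacts runs inside a TemporaryDirectory, so downloads are cached for the lifetime of the operation and cleaned up even on error. A stripped-down sketch; the callables are placeholders for the passes shown above:

from tempfile import TemporaryDirectory
from typing import Callable, Sequence

def run_with_uri_cache(fetch_and_resolve: Callable[[str], None],
                       passes: Sequence[Callable[[], None]]) -> None:
    # the directory (and the URIs cached inside it) is removed even if a pass raises
    with TemporaryDirectory() as uridir:
        fetch_and_resolve(uridir)   # e.g. pass_fetch_URI_resolve_runtime_cfg(uridir)
        for p in passes:            # e.g. pass_build_required_drivers, infrasetup_node_wrapper
            p()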

View File

@ -8,17 +8,10 @@ import abc
import json
from fabric.api import prefix, local, run, env, cd, warn_only, put, settings, hide # type: ignore
from fabric.contrib.project import rsync_project # type: ignore
import time
from os.path import join as pjoin
from os.path import basename, expanduser
from os import PathLike, fspath
import os
from fsspec.core import url_to_fs # type: ignore
from pathlib import Path
import hashlib
from util.streamlogger import StreamLogger
from util.io import downloadURI
from awstools.awstools import terminate_instances, get_instance_ids_for_instances
from runtools.utils import has_sudo
@ -29,9 +22,6 @@ if TYPE_CHECKING:
rootLogger = logging.getLogger()
# from https://github.com/pandas-dev/pandas/blob/96b036cbcf7db5d3ba875aac28c4f6a678214bfb/pandas/io/common.py#L73
_RFC_3986_PATTERN = re.compile(r"^[A-Za-z][A-Za-z0-9+\-+.]*://")
class NBDTracker:
"""Track allocation of NBD devices on an instance. Used for mounting
qcow2 images."""
@ -61,110 +51,6 @@ class NBDTracker:
return self.allocated_dict[imagename]
class URIContainer:
""" A class which contains the details for downloading a single URI. """
"""a property name on RuntimeHWConfig"""
hwcfg_prop: str
""" the final filename inside sim_slot_x, this is a filename, not a path"""
destination_name: str
def __init__(self, hwcfg_prop: str, destination_name: str):
self.hwcfg_prop = hwcfg_prop
self.destination_name = destination_name
# this filename will be used when pre-downloading
@classmethod
def hashed_name(cls, uri) -> str:
m = hashlib.sha256()
m.update(bytes(uri, 'utf-8'))
return m.hexdigest()
def _resolve_vanilla_path(self, hwcfg) -> Optional[str]:
""" Allows fallback to a vanilla path. Relative paths are resolved realtive to firesim/deploy/.
This will convert a vanilla path to a URI, or return None."""
uri: Optional[str] = getattr(hwcfg, self.hwcfg_prop)
# do nothing if there isn't a URI
if uri is None:
return None
# if already a URI, exit early returning unmodified string
is_uri = re.match(_RFC_3986_PATTERN, uri)
if is_uri:
return uri
# expanduser() is required to get ~ home directory expansion working
# relative paths are relative to firesim/deploy
expanded = Path(expanduser(uri))
try:
# strict=True will throw if the file doesn't exist
resolved = expanded.resolve(strict=True)
except FileNotFoundError as e:
raise Exception(f"{self.hwcfg_prop} file fallback at path '{uri}' or '{expanded}' was not found")
return f"file://{resolved}"
def _choose_path(self, local_dir: str, hwcfg) -> Optional[Tuple[str, str]]:
""" Return a deterministic path, given a parent folder and a RuntimeHWConfig object. The URI
as generated from hwcfg is also returned. """
uri: Optional[str] = self._resolve_vanilla_path(hwcfg)
# do nothing if there isn't a URI
if uri is None:
return None
# choose a repeatable, path based on the hash of the URI
destination = pjoin(local_dir, self.hashed_name(uri))
return (uri, destination)
def local_pre_download(self, local_dir: str, hwcfg) -> Optional[Tuple[str, str]]:
""" Cached download of the URI contained in this class to a user-specified
destination folder. The destination name is a SHA256 hash of the URI.
If the file exists this will NOT overwrite. """
# resolve the URI and the path '/{dir}/{hash}' we should download to
both = self._choose_path(local_dir, hwcfg)
# do nothing if there isn't a URI
if both is None:
return None
(uri, destination) = both
# When it exists, return the same information, but skip the download
if Path(destination).exists():
rootLogger.debug(f"Skipping download of uri: '{uri}'")
return (uri, destination)
try:
downloadURI(uri, destination)
except FileNotFoundError as e:
raise Exception(f"{self.hwcfg_prop} path '{uri}' was not found")
# return, this is not passed to rsync
return (uri, destination)
def get_rsync_path(self, local_dir: str, hwcfg) -> Optional[Tuple[str, str]]:
""" Does not download. Returns the rsync path required to send an already downloaded
URI to the runhost. """
# resolve the URI and the path '/{dir}/{hash}' we should download to
both = self._choose_path(local_dir, hwcfg)
# do nothing if there isn't a URI
if both is None:
return None
(uri, destination) = both
# because the local file has a nonsense name (the hash)
# we are required to specify the destination name to rsync
return (destination, self.destination_name)
class InstanceDeployManager(metaclass=abc.ABCMeta):
"""Class used to represent different "run platforms" and how to start/stop and setup simulations.
@ -173,8 +59,6 @@ class InstanceDeployManager(metaclass=abc.ABCMeta):
"""
parent_node: Inst
nbd_tracker: Optional[NBDTracker]
""" A list of URIContainer objects, one for each URI that is able to be specified """
uri_list: list[URIContainer]
def __init__(self, parent_node: Inst) -> None:
"""
@ -187,9 +71,6 @@ class InstanceDeployManager(metaclass=abc.ABCMeta):
# subclass if your system supports the NBD kernel module.
self.nbd_tracker = None
self.uri_list = list()
self.uri_list.append(URIContainer('driver_tar', self.get_driver_tar_filename()))
@abc.abstractmethod
def infrasetup_instance(self, uridir: str) -> None:
"""Run platform specific implementation of how to setup simulations.
@ -285,29 +166,6 @@ class InstanceDeployManager(metaclass=abc.ABCMeta):
return remote_sim_dir
def fetch_all_URI(self, dir: str) -> None:
""" Downloads all URI. Local filenames use a hash which will be re-calculated later. Duplicate downloads
are skipped via an exists() check on the filesystem. """
if not self.instance_assigned_simulations():
return
for slotno in range(len(self.parent_node.sim_slots)):
hwcfg = self.parent_node.sim_slots[slotno].get_resolved_server_hardware_config()
for container in self.uri_list:
container.local_pre_download(dir, hwcfg)
def get_local_uri_paths(self, slotno: int, dir: str) -> list[Tuple[str, str]]:
""" Get all paths of local URIs that were previously downloaded. """
hwcfg = self.parent_node.sim_slots[slotno].get_resolved_server_hardware_config()
ret = list()
for container in self.uri_list:
maybe_file = container.get_rsync_path(dir, hwcfg)
if maybe_file is not None:
ret.append(maybe_file)
return ret
def copy_sim_slot_infrastructure(self, slotno: int, uridir: str) -> None:
""" copy all the simulation infrastructure to the remote node. """
if self.instance_assigned_simulations():
@ -323,7 +181,8 @@ class InstanceDeployManager(metaclass=abc.ABCMeta):
files_to_copy = serv.get_required_files_local_paths()
# Append required URI paths to the end of this list
files_to_copy.extend(self.get_local_uri_paths(slotno, uridir))
hwcfg = serv.get_resolved_server_hardware_config()
files_to_copy.extend(hwcfg.get_local_uri_paths(uridir))
for local_path, remote_path in files_to_copy:
# -z --inplace
@ -340,11 +199,13 @@ class InstanceDeployManager(metaclass=abc.ABCMeta):
assert slotno < len(self.parent_node.sim_slots)
serv = self.parent_node.sim_slots[slotno]
hwcfg = serv.get_resolved_server_hardware_config()
remote_sim_dir = self.get_remote_sim_dir_for_slot(slotno)
options = "-xf"
with cd(remote_sim_dir):
run(f"tar {options} {self.get_driver_tar_filename()}")
run(f"tar {options} {hwcfg.get_driver_tar_filename()}")
def copy_switch_slot_infrastructure(self, switchslot: int) -> None:
""" copy all the switch infrastructure to the remote node. """
@ -383,7 +244,7 @@ class InstanceDeployManager(metaclass=abc.ABCMeta):
# make the local job results dir for this sim slot
server.mkdir_and_prep_local_job_results_dir()
sim_start_script_local_path = server.write_sim_start_script(slotno, (self.sim_command_requires_sudo() and has_sudo()))
sim_start_script_local_path = server.write_sim_start_script(slotno, (self.sim_command_requires_sudo() and has_sudo()), None)
put(sim_start_script_local_path, remote_sim_dir)
with cd(remote_sim_dir):
@ -597,11 +458,6 @@ class InstanceDeployManager(metaclass=abc.ABCMeta):
assert False
@classmethod
def get_driver_tar_filename(cls) -> str:
""" Get the name of the tarball inside the sim_slot_X directory on the run host. """
return "driver-bundle.tar.gz"
def remote_kmsg(message: str) -> None:
""" This will let you write whatever is passed as message into the kernel
log of the remote machine. Useful for figuring what the manager is doing
@ -814,17 +670,9 @@ class VitisInstanceDeployManager(InstanceDeployManager):
""" This sim does not require sudo. """
return False
@classmethod
def get_xclbin_filename(cls) -> str:
""" Get the name of the xclbin inside the sim_slot_X directory on the run host. """
return "bitstream.xclbin"
def __init__(self, parent_node: Inst) -> None:
super().__init__(parent_node)
# Vitis runs add the additional handling of the xclbin URI
self.uri_list.append(URIContainer('xclbin', self.get_xclbin_filename()))
def clear_fpgas(self) -> None:
if self.instance_assigned_simulations():
self.instance_logger("""Clearing all FPGA Slots.""")
@ -872,3 +720,134 @@ class VitisInstanceDeployManager(InstanceDeployManager):
def terminate_instance(self) -> None:
""" VitisInstanceDeployManager machines cannot be terminated. """
return
class XilinxAlveoInstanceDeployManager(InstanceDeployManager):
""" This class manages a Xilinx Alveo-enabled instance """
PLATFORM_NAME: Optional[str]
BOARD_NAME: Optional[str]
@classmethod
def sim_command_requires_sudo(cls) -> bool:
""" This sim does requires sudo. """
return True
def __init__(self, parent_node: Inst) -> None:
super().__init__(parent_node)
self.PLATFORM_NAME = None
self.BOARD_NAME = None
def unload_xdma(self) -> None:
if self.instance_assigned_simulations():
self.instance_logger("Unloading XDMA Driver Kernel Module.")
with warn_only():
remote_kmsg("removing_xdma_start")
run('sudo rmmod xdma')
remote_kmsg("removing_xdma_end")
def load_xdma(self) -> None:
""" load the xdma kernel module. """
if self.instance_assigned_simulations():
# unload first
self.unload_xdma()
# load xdma
self.instance_logger("Loading XDMA Driver Kernel Module.")
# must be installed to this path on sim. machine
run(f"sudo insmod /lib/modules/$(uname -r)/extra/xdma.ko poll_mode=1", shell=True)
def flash_fpgas(self) -> None:
if self.instance_assigned_simulations():
self.instance_logger("""Flash all FPGA Slots.""")
for slotno, firesimservernode in enumerate(self.parent_node.sim_slots):
serv = self.parent_node.sim_slots[slotno]
hwcfg = serv.get_resolved_server_hardware_config()
bitstream_tar = hwcfg.get_bitstream_tar_filename()
remote_sim_dir = self.get_remote_sim_dir_for_slot(slotno)
bitstream_tar_unpack_dir = f"{remote_sim_dir}/{self.PLATFORM_NAME}"
bit = f"{remote_sim_dir}/{self.PLATFORM_NAME}/firesim.bit"
# at this point the tar file is in the sim slot
run(f"rm -rf {bitstream_tar_unpack_dir}")
run(f"tar xvf {remote_sim_dir}/{bitstream_tar} -C {remote_sim_dir}")
self.instance_logger(f"""Copying FPGA flashing scripts for {slotno}""")
rsync_cap = rsync_project(
local_dir=f'../platforms/{self.PLATFORM_NAME}/scripts',
remote_dir=remote_sim_dir,
ssh_opts="-o StrictHostKeyChecking=no",
extra_opts="-L -p",
capture=True)
rootLogger.debug(rsync_cap)
rootLogger.debug(rsync_cap.stderr)
self.instance_logger(f"""Determine BDF for {slotno}""")
collect = run('lspci | grep -i serial.*xilinx')
bdfs = [ "0000:" + i[:7] for i in collect.splitlines() if len(i.strip()) >= 0 ]
bdf = bdfs[slotno]
self.instance_logger(f"""Flashing FPGA Slot: {slotno} with bit: {bit}""")
run(f"""EXTENDED_DEVICE_BDF1={bdf} {remote_sim_dir}/scripts/program_fpga.sh {bit} {self.BOARD_NAME}""")
def infrasetup_instance(self, uridir: str) -> None:
""" Handle infrastructure setup for this platform. """
metasim_enabled = self.parent_node.metasimulation_enabled
if self.instance_assigned_simulations():
# This is a sim-host node.
# copy sim infrastructure
for slotno in range(len(self.parent_node.sim_slots)):
self.copy_sim_slot_infrastructure(slotno, uridir)
self.extract_driver_tarball(slotno)
if not metasim_enabled:
# load xdma driver
self.load_xdma()
# flash fpgas
self.flash_fpgas()
if self.instance_assigned_switches():
# all nodes could have a switch
for slotno in range(len(self.parent_node.switch_slots)):
self.copy_switch_slot_infrastructure(slotno)
def terminate_instance(self) -> None:
""" XilinxAlveoInstanceDeployManager machines cannot be terminated. """
return
def start_sim_slot(self, slotno: int) -> None:
""" start a simulation. (same as the default except that you have a mapping from slotno to a specific BDF)"""
if self.instance_assigned_simulations():
self.instance_logger(f"""Starting {self.sim_type_message} simulation for slot: {slotno}.""")
remote_home_dir = self.parent_node.sim_dir
remote_sim_dir = """{}/sim_slot_{}/""".format(remote_home_dir, slotno)
assert slotno < len(self.parent_node.sim_slots), f"{slotno} can not index into sim_slots {len(self.parent_node.sim_slots)} on {self.parent_node.host}"
server = self.parent_node.sim_slots[slotno]
self.instance_logger(f"""Determine BDF for {slotno}""")
collect = run('lspci | grep -i serial.*xilinx')
bdfs = [ i[:2] for i in collect.splitlines() if len(i.strip()) >= 0 ]
bdf = bdfs[slotno]
# make the local job results dir for this sim slot
server.mkdir_and_prep_local_job_results_dir()
sim_start_script_local_path = server.write_sim_start_script(slotno, (self.sim_command_requires_sudo() and has_sudo()), bdf)
put(sim_start_script_local_path, remote_sim_dir)
with cd(remote_sim_dir):
run("chmod +x sim-run.sh")
run("./sim-run.sh")
class XilinxAlveoU250InstanceDeployManager(XilinxAlveoInstanceDeployManager):
def __init__(self, parent_node: Inst) -> None:
super().__init__(parent_node)
self.PLATFORM_NAME = "xilinx_alveo_u250"
self.BOARD_NAME = "au250"
class XilinxAlveoU280InstanceDeployManager(XilinxAlveoInstanceDeployManager):
def __init__(self, parent_node: Inst) -> None:
super().__init__(parent_node)
self.PLATFORM_NAME = "xilinx_alveo_u280"
self.BOARD_NAME = "au280"

View File

@ -3,8 +3,9 @@ simulation tasks. """
from __future__ import annotations
import re
from datetime import timedelta
from time import strftime, gmtime, time
from time import strftime, gmtime
import pprint
import logging
import yaml
@ -14,12 +15,15 @@ from fabric.operations import _stdoutString # type: ignore
from fabric.api import prefix, settings, local, run # type: ignore
from fabric.contrib.project import rsync_project # type: ignore
from os.path import join as pjoin
from os.path import basename, expanduser
from pathlib import Path
from uuid import uuid1
from tempfile import TemporaryDirectory
import hashlib
import json
from awstools.awstools import aws_resource_names
from awstools.afitools import get_firesim_deploy_quadruplet_for_agfi
from awstools.afitools import get_firesim_deploy_quintuplet_for_agfi, firesim_description_to_tags
from runtools.firesim_topology_with_passes import FireSimTopologyWithPasses
from runtools.run_farm_deploy_managers import VitisInstanceDeployManager
from runtools.workload import WorkloadConfig
@ -29,6 +33,7 @@ from util.inheritors import inheritors
from util.deepmerge import deep_merge
from util.streamlogger import InfoStreamLogger
from buildtools.bitbuilder import get_deploy_dir
from util.io import downloadURI
from typing import Optional, Dict, Any, List, Sequence, Tuple, TYPE_CHECKING
import argparse # this is not within an if TYPE_CHECKING: scope so the `register_task` in FireSim can evaluate its annotation
@ -41,123 +46,278 @@ CUSTOM_RUNTIMECONFS_BASE = "../sim/custom-runtime-configs/"
rootLogger = logging.getLogger()
# from https://github.com/pandas-dev/pandas/blob/96b036cbcf7db5d3ba875aac28c4f6a678214bfb/pandas/io/common.py#L73
_RFC_3986_PATTERN = re.compile(r"^[A-Za-z][A-Za-z0-9+\-+.]*://")
class URIContainer:
""" A class which contains the details for downloading a single URI. """
"""a property name on RuntimeHWConfig"""
hwcfg_prop: str
""" the final filename inside sim_slot_x, this is a filename, not a path"""
destination_name: str
def __init__(self, hwcfg_prop: str, destination_name: str):
self.hwcfg_prop = hwcfg_prop
self.destination_name = destination_name
# this filename will be used when pre-downloading
@classmethod
def hashed_name(cls, uri: str) -> str:
m = hashlib.sha256()
m.update(bytes(uri, 'utf-8'))
return m.hexdigest()
def _resolve_vanilla_path(self, hwcfg: RuntimeHWConfig) -> Optional[str]:
""" Allows fallback to a vanilla path. Relative paths are resolved relative to firesim/deploy/.
This will convert a vanilla path to a URI, or return None."""
uri: Optional[str] = getattr(hwcfg, self.hwcfg_prop)
# do nothing if there isn't a URI
if uri is None:
return None
# if already a URI, exit early returning unmodified string
is_uri = re.match(_RFC_3986_PATTERN, uri)
if is_uri:
return uri
# expanduser() is required to get ~ home directory expansion working
# relative paths are relative to firesim/deploy
expanded = Path(expanduser(uri))
try:
# strict=True will throw if the file doesn't exist
resolved = expanded.resolve(strict=True)
except FileNotFoundError as e:
raise Exception(f"{self.hwcfg_prop} file fallback at path '{uri}' or '{expanded}' was not found")
return f"file://{resolved}"
def _choose_path(self, local_dir: str, hwcfg: RuntimeHWConfig) -> Optional[Tuple[str, str]]:
""" Return a deterministic path, given a parent folder and a RuntimeHWConfig object. The URI
as generated from hwcfg is also returned. """
uri: Optional[str] = self._resolve_vanilla_path(hwcfg)
# do nothing if there isn't a URI
if uri is None:
return None
# choose a repeatable path based on the hash of the URI
destination = pjoin(local_dir, self.hashed_name(uri))
return (uri, destination)
def local_pre_download(self, local_dir: str, hwcfg: RuntimeHWConfig) -> Optional[Tuple[str, str]]:
""" Cached download of the URI contained in this class to a user-specified
destination folder. The destination name is a SHA256 hash of the URI.
If the file exists this will NOT overwrite. """
# resolve the URI and the path '/{dir}/{hash}' we should download to
both = self._choose_path(local_dir, hwcfg)
# do nothing if there isn't a URI
if both is None:
return None
(uri, destination) = both
# When it exists, return the same information, but skip the download
if Path(destination).exists():
rootLogger.debug(f"Skipping download of uri: '{uri}'")
return (uri, destination)
try:
downloadURI(uri, destination)
except FileNotFoundError as e:
raise Exception(f"{self.hwcfg_prop} path '{uri}' was not found")
# return, this is not passed to rsync
return (uri, destination)
def get_rsync_path(self, local_dir: str, hwcfg: RuntimeHWConfig) -> Optional[Tuple[str, str]]:
""" Does not download. Returns the rsync path required to send an already downloaded
URI to the runhost. """
# resolve the URI and the path '/{dir}/{hash}' we should download to
both = self._choose_path(local_dir, hwcfg)
# do nothing if there isn't a URI
if both is None:
return None
(uri, destination) = both
# because the local file has a nonsense name (the hash)
# we are required to specify the destination name to rsync
return (destination, self.destination_name)
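# Illustrative sketch only, not part of the change above: URIContainer names its
# cached downloads after the SHA-256 digest of the URI, so a re-run of the manager
# can skip any fetch whose digest already exists on disk. The URI below is made up.
import hashlib
from pathlib import Path

def cached_download_path(local_dir: str, uri: str) -> Path:
    """Mirror of URIContainer.hashed_name(): a deterministic cache path for a URI."""
    digest = hashlib.sha256(uri.encode("utf-8")).hexdigest()
    return Path(local_dir) / digest

cache_file = cached_download_path("/tmp/firesim-uris", "https://example.com/firesim.tar.gz")
# local_pre_download() skips the download whenever cache_file.exists() is already True.
print(cache_file)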
class RuntimeHWConfig:
""" A pythonic version of the entires in config_hwdb.yaml """
name: str
platform: str
platform: Optional[str]
# TODO: should be abstracted out between platforms with a URI
agfi: Optional[str]
"""User-specified, URI path to xclbin"""
xclbin: Optional[str]
"""User-specified, URI path to bitstream tar file"""
bitstream_tar: Optional[str]
deploy_quadruplet: Optional[str]
deploy_quintuplet: Optional[str]
customruntimeconfig: str
# note whether we've built a copy of the simulation driver for this hwconf
driver_built: bool
tarball_built: bool
additional_required_files: List[Tuple[str, str]]
driver_name_prefix: str
driver_name_suffix: str
local_driver_base_dir: str
driver_build_target: str
driver_type_message: str
"""User-specified, URI path to driver tarball"""
driver_tar: Optional[str]
# Members that are initlized here also need to be initilized in
""" A list of URIContainer objects, one for each URI that is able to be specified """
uri_list: list[URIContainer]
# Members that are initialized here also need to be initialized in
# RuntimeBuildRecipeConfig.__init__
def __init__(self, name: str, hwconfig_dict: Dict[str, Any]) -> None:
self.name = name
if 'agfi' in hwconfig_dict and 'xclbin' in hwconfig_dict:
raise Exception(f"Unable to have agfi and xclbin in HWDB entry {name}.")
if sum(['agfi' in hwconfig_dict, 'xclbin' in hwconfig_dict, 'bitstream_tar' in hwconfig_dict]) > 1:
raise Exception(f"Must only have 'agfi' or 'xclbin' or 'bitstream_tar' HWDB entry {name}.")
self.agfi = hwconfig_dict.get('agfi')
self.xclbin = hwconfig_dict.get('xclbin')
self.bitstream_tar = hwconfig_dict.get('bitstream_tar')
self.driver_tar = hwconfig_dict.get('driver_tar')
self.platform = None
self.driver_built = False
self.tarball_built = False
self.additional_required_files = []
self.driver_name_prefix = ""
self.driver_type_message = "FPGA software"
self.local_driver_base_dir = LOCAL_DRIVERS_BASE
self.uri_list = []
if self.agfi is not None:
self.platform = "f1"
else:
elif self.xclbin is not None:
self.platform = "vitis"
self.uri_list.append(URIContainer('xclbin', self.get_xclbin_filename()))
else:
self.uri_list.append(URIContainer('bitstream_tar', self.get_bitstream_tar_filename()))
self.driver_name_prefix = ""
self.driver_name_suffix = "-" + self.platform
self.local_driver_base_dir = LOCAL_DRIVERS_BASE
self.driver_type_message = "FPGA software"
self.driver_build_target = self.platform
if 'deploy_triplet_override' in hwconfig_dict.keys() and 'deploy_quadruplet_override' in hwconfig_dict.keys():
rootLogger.error("Cannot have both deploy_quadruplet_override and deploy_triplet_override in hwdb entry. Define only deploy_quadruplet_override.")
if 'deploy_triplet_override' in hwconfig_dict.keys() and 'deploy_quintuplet_override' in hwconfig_dict.keys():
rootLogger.error("Cannot have both 'deploy_quintuplet_override' and 'deploy_triplet_override' in hwdb entry. Define only 'deploy_quintuplet_override'.")
sys.exit(1)
elif 'deploy_triplet_override' in hwconfig_dict.keys():
rootLogger.warning("Please rename your 'deploy_triplet_override' key in your hwdb entry to 'deploy_quadruplet_override'. Support for 'deploy_triplet_override' will be removed in the future.")
rootLogger.warning("Please rename your 'deploy_triplet_override' key in your hwdb entry to 'deploy_quintuplet_override'. Support for 'deploy_triplet_override' will be removed in the future.")
hwconfig_override_build_quadruplet = hwconfig_dict.get('deploy_quadruplet_override')
if hwconfig_override_build_quadruplet is None:
hwconfig_override_build_quintuplet = hwconfig_dict.get('deploy_quintuplet_override')
if hwconfig_override_build_quintuplet is None:
# temporary backwards compat for old key
hwconfig_override_build_quadruplet = hwconfig_dict.get('deploy_triplet_override')
hwconfig_override_build_quintuplet = hwconfig_dict.get('deploy_triplet_override')
if hwconfig_override_build_quadruplet is not None and len(hwconfig_override_build_quadruplet.split("-")) == 3:
# convert old build_triplet into buildquadruplet
hwconfig_override_build_quadruplet = 'firesim-' + hwconfig_override_build_quadruplet
if hwconfig_override_build_quintuplet is not None and len(hwconfig_override_build_quintuplet.split("-")) == 3:
# convert old build_triplet into buildquintuplet
hwconfig_override_build_quintuplet = 'f1-firesim-' + hwconfig_override_build_quintuplet
self.deploy_quadruplet = hwconfig_override_build_quadruplet
if self.deploy_quadruplet is not None and self.platform != "vitis":
rootLogger.warning("{} is overriding a deploy quadruplet in your config_hwdb.yaml file. Make sure you understand why!".format(name))
self.deploy_quintuplet = hwconfig_override_build_quintuplet
if self.deploy_quintuplet is not None and self.platform != "vitis":
rootLogger.warning(f"{name} is overriding a deploy quintuplet in your config_hwdb.yaml file. Make sure you understand why!")
# TODO: obtain deploy_quadruplet from tag in xclbin
if self.deploy_quadruplet is None and self.platform == "vitis":
raise Exception(f"Must set the deploy_quadruplet_override for Vitis bitstreams")
# TODO: obtain deploy_quintuplet from tag in xclbin
if self.deploy_quintuplet is None and self.platform == "vitis":
raise Exception(f"Must set the deploy_quintuplet_override for Vitis bitstreams.")
self.customruntimeconfig = hwconfig_dict['custom_runtime_config']
# note whether we've built a copy of the simulation driver for this hwconf
self.driver_built = False
self.tarball_built = False
self.additional_required_files = []
self.uri_list.append(URIContainer('driver_tar', self.get_driver_tar_filename()))
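# Illustrative sketch only, not part of the change above: the three mutually
# exclusive bitstream handles in a HWDB entry decide the platform, mirroring the
# branches in __init__. The entries below are made up.
def platform_for_entry(hwconfig_dict: dict) -> str:
    if 'agfi' in hwconfig_dict:
        return 'f1'
    if 'xclbin' in hwconfig_dict:
        return 'vitis'
    # bitstream_tar entries leave the platform unset until resolve_hwcfg_values()
    # reads it back out of the tarball's metadata.
    return 'deferred until metadata is read'

print(platform_for_entry({'agfi': 'agfi-0123456789abcdef0'}))
print(platform_for_entry({'xclbin': 'https://example.com/design.xclbin'}))
print(platform_for_entry({'bitstream_tar': 'file:///tmp/firesim.tar.gz'}))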
def get_deploytriplet_for_config(self) -> str:
""" Get the deploytriplet for this configuration. """
quad = self.get_deployquadruplet_for_config()
return "-".join(quad.split("-")[1:])
quin = self.get_deployquintuplet_for_config()
return "-".join(quin.split("-")[2:])
def get_deployquadruplet_for_config(self) -> str:
""" Get the deployquadruplet for this configuration. This memoizes the request
@classmethod
def get_driver_tar_filename(cls) -> str:
""" Get the name of the tarball inside the sim_slot_X directory on the run host. """
return "driver-bundle.tar.gz"
@classmethod
def get_xclbin_filename(cls) -> str:
""" Get the name of the xclbin inside the sim_slot_X directory on the run host. """
return "bitstream.xclbin"
@classmethod
def get_bitstream_tar_filename(cls) -> str:
""" Get the name of the bit tar file inside the sim_slot_X directory on the run host. """
return "firesim.tar.gz"
def get_platform(self) -> str:
assert self.platform is not None
return self.platform
def get_driver_name_suffix(self) -> str:
return "-" + self.get_platform()
def get_driver_build_target(self) -> str:
return self.get_platform()
def set_platform(self, platform: str) -> None:
assert self.platform is None, f"platform is already set to {self.platform}"
self.platform = platform
def set_deploy_quintuplet(self, deploy_quintuplet: str) -> None:
assert self.deploy_quintuplet is None, f"deploy_quintuplet is already set to {self.deploy_quintuplet}"
self.deploy_quintuplet = deploy_quintuplet
def get_deployquintuplet_for_config(self) -> str:
""" Get the deployquintuplet for this configuration. This memoizes the request
to the AWS AGFI API."""
if self.deploy_quadruplet is not None:
return self.deploy_quadruplet
rootLogger.debug("Setting deploytriplet by querying the AGFI's description.")
self.deploy_quadruplet = get_firesim_deploy_quadruplet_for_agfi(self.agfi)
return self.deploy_quadruplet
if self.deploy_quintuplet is not None:
return self.deploy_quintuplet
if self.get_platform() == "f1":
rootLogger.debug("Setting deployquintuplet by querying the AGFI's description.")
self.deploy_quintuplet = get_firesim_deploy_quintuplet_for_agfi(self.agfi)
elif self.get_platform() == "vitis":
assert False, "Must have the deploy_quintuplet_override defined"
else:
assert False, "Unable to obtain deploy_quintuplet"
return self.deploy_quintuplet
def get_design_name(self) -> str:
""" Returns the name used to prefix MIDAS-emitted files. (The DESIGN make var) """
return self.get_deployquadruplet_for_config().split("-")[1]
return self.get_deployquintuplet_for_config().split("-")[2]
def get_local_driver_binaryname(self) -> str:
""" Get the name of the driver binary. """
return self.driver_name_prefix + self.get_design_name() + self.driver_name_suffix
return self.driver_name_prefix + self.get_design_name() + self.get_driver_name_suffix()
def get_local_driver_dir(self) -> str:
""" Get the relative local directory that contains the driver used to
run this sim. """
return self.local_driver_base_dir + "/" + self.platform + "/" + self.get_deploytriplet_for_config() + "/"
return self.local_driver_base_dir + "/" + self.get_platform() + "/" + self.get_deployquintuplet_for_config() + "/"
def get_local_driver_path(self) -> str:
""" return relative local path of the driver used to run this sim. """
return self.get_local_driver_dir() + self.get_local_driver_binaryname()
def local_triplet_path(self) -> Path:
""" return the local path of the triplet folder. the tarball that is created goes inside this folder """
triplet = self.get_deploytriplet_for_config()
return Path(get_deploy_dir()) / '../sim/output' / self.platform / triplet
def local_quintuplet_path(self) -> Path:
""" return the local path of the quintuplet folder. the tarball that is created goes inside this folder """
quintuplet = self.get_deployquintuplet_for_config()
return Path(get_deploy_dir()) / '../sim/output' / self.get_platform() / quintuplet
def local_tarball_path(self, name: str) -> Path:
""" return the local path of the tarball """
triplet = self.get_deploytriplet_for_config()
return self.local_triplet_path() / name
return self.local_quintuplet_path() / name
def get_local_runtimeconf_binaryname(self) -> str:
""" Get the name of the runtimeconf file. """
@ -169,8 +329,8 @@ class RuntimeHWConfig:
""" return relative local path of the runtime conf used to run this sim. """
if self.customruntimeconfig is None:
return None
my_deploytriplet = self.get_deploytriplet_for_config()
drivers_software_base = LOCAL_DRIVERS_GENERATED_SRC + "/" + self.platform + "/" + my_deploytriplet + "/"
quintuplet = self.get_deployquintuplet_for_config()
drivers_software_base = LOCAL_DRIVERS_GENERATED_SRC + "/" + self.get_platform() + "/" + quintuplet + "/"
return CUSTOM_RUNTIMECONFS_BASE + self.customruntimeconfig
def get_additional_required_sim_files(self) -> List[Tuple[str, str]]:
@ -192,8 +352,9 @@ class RuntimeHWConfig:
hostdebug_config: HostDebugConfig,
synthprint_config: SynthPrintConfig,
sudo: bool,
extra_plusargs: str = "",
extra_args: str = "") -> str:
fpga_physical_selection: Optional[str],
extra_plusargs: str,
extra_args: str) -> str:
""" return the command used to boot the simulation. this has to have
some external params passed to it, because not everything is contained
in a runtimehwconfig. TODO: maybe runtimehwconfig should be renamed to
@ -240,11 +401,14 @@ class RuntimeHWConfig:
dwarf_file_name = "+dwarf-file-name=" + all_bootbinaries[0] + "-dwarf"
screen_name = "fsim{}".format(slotid)
run_device_placement = "+slotid={}".format(slotid)
if fpga_physical_selection is None:
fpga_physical_selection = str(slotid)
run_device_placement = "+slotid={}".format(fpga_physical_selection)
if self.platform == "vitis":
assert self.xclbin is not None
vitis_bit = f"+binary_file={VitisInstanceDeployManager.get_xclbin_filename()}"
vitis_bit = f"+binary_file={self.get_xclbin_filename()}"
else:
vitis_bit = ""
@ -286,20 +450,69 @@ class RuntimeHWConfig:
rootLogger.info(f"""You can also re-run '{cmd}' in the '{dir}' directory to debug this error.""")
sys.exit(1)
def fetch_all_URI(self, dir: str) -> None:
""" Downloads all URI. Local filenames use a hash which will be re-calculated later. Duplicate downloads
are skipped via an exists() check on the filesystem. """
for container in self.uri_list:
container.local_pre_download(dir, self)
def get_local_uri_paths(self, dir: str) -> list[Tuple[str, str]]:
""" Get all paths of local URIs that were previously downloaded. """
ret = list()
for container in self.uri_list:
maybe_file = container.get_rsync_path(dir, self)
if maybe_file is not None:
ret.append(maybe_file)
return ret
def resolve_hwcfg_values(self, dir: str) -> None:
# must be done after fetch_all_URIs
# based on the platform, read the URI, fill out values
if self.platform == "f1" or self.platform == "vitis":
return
else: # bitstream_tar platforms
for container in self.uri_list:
both = container._choose_path(dir, self)
# do nothing if there isn't a URI
if both is None:
uri = self.bitstream_tar
destination = self.bitstream_tar
else:
(uri, destination) = both
if uri == self.bitstream_tar and uri is not None:
# unpack destination value
temp_dir = f"{dir}/{URIContainer.hashed_name(uri)}-dir"
local(f"mkdir -p {temp_dir}")
local(f"tar xvf {destination} -C {temp_dir}")
# read string from metadata
cap = local(f"cat {temp_dir}/*/metadata", capture=True)
metadata = firesim_description_to_tags(cap)
self.set_platform(metadata['firesim-deployquintuplet'].split("-")[0])
self.set_deploy_quintuplet(metadata['firesim-deployquintuplet'])
break
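# Illustrative sketch only, not part of the change above: for bitstream_tar entries
# the platform is recovered from the leading field of the deploy quintuplet stored
# in the tarball's metadata. The quintuplet below is a made-up example following
# the PLATFORM-TARGET_PROJECT-DESIGN-TARGET_CONFIG-PLATFORM_CONFIG layout.
example_quintuplet = "xilinx_alveo_u250-firesim-FireSim-FireSimRocketConfig-BaseXilinxAlveoConfig"
platform, target_project, design, target_config, platform_config = example_quintuplet.split("-")
assert platform == "xilinx_alveo_u250"
print(design, target_config, platform_config)  # FireSim FireSimRocketConfig BaseXilinxAlveoConfig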
def build_sim_driver(self) -> None:
""" Build driver for running simulation """
if self.driver_built:
# we already built the driver at some point
return
# TODO there is a duplicate of this in runtools
quadruplet_pieces = self.get_deployquadruplet_for_config().split("-")
quintuplet_pieces = self.get_deployquintuplet_for_config().split("-")
target_project = quadruplet_pieces[0]
design = quadruplet_pieces[1]
target_config = quadruplet_pieces[2]
platform_config = quadruplet_pieces[3]
platform = quintuplet_pieces[0]
target_project = quintuplet_pieces[1]
design = quintuplet_pieces[2]
target_config = quintuplet_pieces[3]
platform_config = quintuplet_pieces[4]
rootLogger.info(f"Building {self.driver_type_message} driver for {str(self.get_deployquadruplet_for_config())}")
rootLogger.info(f"Building {self.driver_type_message} driver for {str(self.get_deployquintuplet_for_config())}")
with InfoStreamLogger('stdout'), prefix(f'cd {get_deploy_dir()}/../'), \
prefix(f'export RISCV={os.getenv("RISCV", "")}'), \
@ -307,7 +520,7 @@ class RuntimeHWConfig:
prefix(f'export LD_LIBRARY_PATH={os.getenv("LD_LIBRARY_PATH", "")}'), \
prefix('source sourceme-f1-manager.sh --skip-ssh-setup'), \
prefix('cd sim/'):
driverbuildcommand = f"make TARGET_PROJECT={target_project} DESIGN={design} TARGET_CONFIG={target_config} PLATFORM_CONFIG={platform_config} PLATFORM={self.platform} {self.driver_build_target}"
driverbuildcommand = f"make PLATFORM={self.get_platform()} TARGET_PROJECT={target_project} DESIGN={design} TARGET_CONFIG={target_config} PLATFORM_CONFIG={platform_config} {self.get_driver_build_target()}"
buildresult = run(driverbuildcommand)
self.handle_failure(buildresult, 'driver build', 'firesim/sim', driverbuildcommand)
@ -342,10 +555,10 @@ class RuntimeHWConfig:
self.handle_failure(results, 'local rsync', get_deploy_dir(), cmd)
# This must be taken outside of a cd context
cmd = f"mkdir -p {self.local_triplet_path()}"
cmd = f"mkdir -p {self.local_quintuplet_path()}"
results = run(cmd)
self.handle_failure(results, 'local mkdir', builddir, cmd)
absolute_tarball_path = self.local_triplet_path() / tarball_name
absolute_tarball_path = self.local_quintuplet_path() / tarball_name
with InfoStreamLogger('stdout'), prefix(f'cd {builddir}'):
findcmd = 'find . -mindepth 1 -maxdepth 1 -printf "%P\n"'
@ -363,7 +576,7 @@ class RuntimeHWConfig:
self.tarball_built = True
def __str__(self) -> str:
return """RuntimeHWConfig: {}\nDeployQuadruplet: {}\nAGFI: {}\nXCLBIN: {}\nCustomRuntimeConf: {}""".format(self.name, self.deploy_quadruplet, self.agfi, self.xclbin, str(self.customruntimeconfig))
return """RuntimeHWConfig: {}\nDeployQuintuplet: {}\nAGFI: {}\nXCLBIN: {}\nCustomRuntimeConf: {}""".format(self.name, self.deploy_quintuplet, self.agfi, self.xclbin, str(self.customruntimeconfig))
@ -379,27 +592,27 @@ class RuntimeBuildRecipeConfig(RuntimeHWConfig):
self.agfi = None
self.xclbin = None
self.bitstream_tar = None
self.driver_tar = None
self.tarball_built = False
self.deploy_quadruplet = build_recipe_dict.get('TARGET_PROJECT', 'firesim') + "-" + build_recipe_dict['DESIGN'] + "-" + build_recipe_dict['TARGET_CONFIG'] + "-" + build_recipe_dict['PLATFORM_CONFIG']
self.uri_list = []
self.deploy_quintuplet = build_recipe_dict.get('PLATFORM', 'f1') + "-" + build_recipe_dict.get('TARGET_PROJECT', 'firesim') + "-" + build_recipe_dict['DESIGN'] + "-" + build_recipe_dict['TARGET_CONFIG'] + "-" + build_recipe_dict['PLATFORM_CONFIG']
self.customruntimeconfig = build_recipe_dict['metasim_customruntimeconfig']
# note whether we've built a copy of the simulation driver for this hwconf
self.driver_built = False
self.metasim_host_simulator = default_metasim_host_sim
# currently only f1 metasims supported
self.platform = "f1"
self.driver_name_prefix = ""
self.driver_name_suffix = ""
if self.metasim_host_simulator in ["verilator", "verilator-debug"]:
self.driver_name_prefix = "V"
if self.metasim_host_simulator in ['verilator-debug', 'vcs-debug']:
self.driver_name_suffix = "-debug"
self.local_driver_base_dir = LOCAL_DRIVERS_GENERATED_SRC
self.driver_build_target = self.metasim_host_simulator
self.driver_type_message = "Metasim"
self.metasimulation_only_plusargs = metasimulation_only_plusargs
@ -410,6 +623,15 @@ class RuntimeBuildRecipeConfig(RuntimeHWConfig):
if self.metasim_host_simulator in ["vcs", "vcs-debug"]:
self.additional_required_files.append((self.get_local_driver_path() + ".daidir", ""))
def get_driver_name_suffix(self) -> str:
driver_name_suffix = ""
if self.metasim_host_simulator in ['verilator-debug', 'vcs-debug']:
driver_name_suffix = "-debug"
return driver_name_suffix
def get_driver_build_target(self) -> str:
return self.metasim_host_simulator
def get_boot_simulation_command(self,
slotid: int,
all_macs: Sequence[MacAddress],
@ -424,8 +646,9 @@ class RuntimeBuildRecipeConfig(RuntimeHWConfig):
hostdebug_config: HostDebugConfig,
synthprint_config: SynthPrintConfig,
sudo: bool,
extra_plusargs: str = "",
extra_args: str = "") -> str:
fpga_physical_selection: Optional[str],
extra_plusargs: str,
extra_args: str) -> str:
""" return the command used to boot the meta simulation. """
full_extra_plusargs = " " + self.metasimulation_only_plusargs + " " + extra_plusargs
if self.metasim_host_simulator in ['vcs', 'vcs-debug']:
@ -448,6 +671,7 @@ class RuntimeBuildRecipeConfig(RuntimeHWConfig):
hostdebug_config,
synthprint_config,
sudo,
fpga_physical_selection,
full_extra_plusargs,
full_extra_args)
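# Illustrative sketch only, not part of the change above: how the chosen metasim
# host simulator shapes the driver binary name, mirroring the "V" prefix logic in
# __init__ and get_driver_name_suffix() above. "FireSim" is a stand-in design name.
def metasim_driver_binaryname(design: str, host_sim: str) -> str:
    prefix = "V" if host_sim in ("verilator", "verilator-debug") else ""
    suffix = "-debug" if host_sim in ("verilator-debug", "vcs-debug") else ""
    return prefix + design + suffix

print(metasim_driver_binaryname("FireSim", "verilator-debug"))  # VFireSim-debug
print(metasim_driver_binaryname("FireSim", "vcs"))              # FireSim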

View File

@ -43,6 +43,10 @@ builds_to_run:
# - vitis_firesim_rocket_singlecore_no_nic
# - vitis_firesim_gemmini_rocket_singlecore_no_nic
# Configs for xilinx alveo u250/u280
# - alveo_u250_firesim_rocket_singlecore_no_nic
# - alveo_u280_firesim_rocket_singlecore_no_nic
agfis_to_share:
- firesim_rocket_quadcore_nic_l2_llc4mb_ddr3
- firesim_rocket_quadcore_no_nic_l2_llc4mb_ddr3
@ -62,4 +66,3 @@ share_with_accounts:
somebodysname: 123456789012
# To share publicly:
# public: public

View File

@ -12,7 +12,7 @@
# DESIGN: <>
# TARGET_CONFIG: <>
# PLATFORM_CONFIG: Config
# deploy_quadruplet: null
# deploy_quintuplet: null
# # NOTE: these platform_config_args are for F1 only
# # they should be set to null if using another platform
# platform_config_args:
@ -30,11 +30,12 @@
# Quad-core, Rocket-based recipes
# REQUIRED FOR TUTORIALS
firesim_rocket_quadcore_nic_l2_llc4mb_ddr3:
PLATFORM: f1
TARGET_PROJECT: firesim
DESIGN: FireSim
TARGET_CONFIG: WithNIC_DDR3FRFCFSLLC4MB_WithDefaultFireSimBridges_WithFireSimHighPerfConfigTweaks_chipyard.QuadRocketConfig
PLATFORM_CONFIG: WithAutoILA_BaseF1Config
deploy_quadruplet: null
deploy_quintuplet: null
platform_config_args:
fpga_frequency: 90
build_strategy: TIMING
@ -45,11 +46,12 @@ firesim_rocket_quadcore_nic_l2_llc4mb_ddr3:
# NB: This has a faster host-clock frequency than the NIC-based design, because
# its uncore runs at half rate relative to the tile.
firesim_rocket_quadcore_no_nic_l2_llc4mb_ddr3:
PLATFORM: f1
TARGET_PROJECT: firesim
DESIGN: FireSim
TARGET_CONFIG: DDR3FRFCFSLLC4MB_WithDefaultFireSimBridges_WithFireSimTestChipConfigTweaks_chipyard.QuadRocketConfig
PLATFORM_CONFIG: WithAutoILA_BaseF1Config
deploy_quadruplet: null
deploy_quintuplet: null
platform_config_args:
fpga_frequency: 140
build_strategy: TIMING
@ -60,11 +62,12 @@ firesim_rocket_quadcore_no_nic_l2_llc4mb_ddr3:
# Single-core, BOOM-based recipes
# REQUIRED FOR TUTORIALS
firesim_boom_singlecore_nic_l2_llc4mb_ddr3:
PLATFORM: f1
TARGET_PROJECT: firesim
DESIGN: FireSim
TARGET_CONFIG: WithNIC_DDR3FRFCFSLLC4MB_WithDefaultFireSimBridges_WithFireSimHighPerfConfigTweaks_chipyard.LargeBoomConfig
PLATFORM_CONFIG: WithAutoILA_BaseF1Config
deploy_quadruplet: null
deploy_quintuplet: null
platform_config_args:
fpga_frequency: 65
build_strategy: TIMING
@ -75,11 +78,12 @@ firesim_boom_singlecore_nic_l2_llc4mb_ddr3:
# NB: This has a faster host-clock frequency than the NIC-based design, because
# its uncore runs at half rate relative to the tile.
firesim_boom_singlecore_no_nic_l2_llc4mb_ddr3:
PLATFORM: f1
TARGET_PROJECT: firesim
DESIGN: FireSim
TARGET_CONFIG: DDR3FRFCFSLLC4MB_WithDefaultFireSimBridges_WithFireSimTestChipConfigTweaks_chipyard.LargeBoomConfig
PLATFORM_CONFIG: WithAutoILA_BaseF1Config
deploy_quadruplet: null
deploy_quintuplet: null
platform_config_args:
fpga_frequency: 65
build_strategy: TIMING
@ -89,11 +93,12 @@ firesim_boom_singlecore_no_nic_l2_llc4mb_ddr3:
# Single-core, CVA6-based recipes
firesim_cva6_singlecore_no_nic_l2_llc4mb_ddr3:
PLATFORM: f1
TARGET_PROJECT: firesim
DESIGN: FireSim
TARGET_CONFIG: DDR3FRFCFSLLC4MB_WithDefaultFireSimBridges_WithFireSimConfigTweaks_chipyard.CVA6Config
PLATFORM_CONFIG: WithAutoILA_BaseF1Config
deploy_quadruplet: null
deploy_quintuplet: null
platform_config_args:
fpga_frequency: 90
build_strategy: TIMING
@ -103,11 +108,12 @@ firesim_cva6_singlecore_no_nic_l2_llc4mb_ddr3:
# Single-core, Rocket-based recipes with Gemmini
firesim_rocket_singlecore_gemmini_no_nic_l2_llc4mb_ddr3:
PLATFORM: f1
TARGET_PROJECT: firesim
DESIGN: FireSim
TARGET_CONFIG: DDR3FRFCFSLLC4MB_WithDefaultFireSimBridges_WithFireSimConfigTweaks_chipyard.GemminiRocketConfig
PLATFORM_CONFIG: WithAutoILA_BaseF1Config
deploy_quadruplet: null
deploy_quintuplet: null
platform_config_args:
fpga_frequency: 110
build_strategy: TIMING
@ -117,11 +123,12 @@ firesim_rocket_singlecore_gemmini_no_nic_l2_llc4mb_ddr3:
# RAM Optimizations enabled by adding _MCRams PLATFORM_CONFIG string
firesim_boom_singlecore_no_nic_l2_llc4mb_ddr3_ramopts:
PLATFORM: f1
TARGET_PROJECT: firesim
DESIGN: FireSim
TARGET_CONFIG: DDR3FRFCFSLLC4MB_WithDefaultFireSimBridges_WithFireSimTestChipConfigTweaks_chipyard.LargeBoomConfig
PLATFORM_CONFIG: WithAutoILA_MCRams_BaseF1Config
deploy_quadruplet: null
deploy_quintuplet: null
platform_config_args:
fpga_frequency: 90
build_strategy: TIMING
@ -131,11 +138,12 @@ firesim_boom_singlecore_no_nic_l2_llc4mb_ddr3_ramopts:
# Supernode configurations -- multiple instances of an SoC in a single simulator
firesim_supernode_rocket_singlecore_nic_l2_lbp:
PLATFORM: f1
TARGET_PROJECT: firesim
DESIGN: FireSim
TARGET_CONFIG: WithNIC_SupernodeFireSimRocketConfig
PLATFORM_CONFIG: WithAutoILA_BaseF1Config
deploy_quadruplet: null
deploy_quintuplet: null
platform_config_args:
fpga_frequency: 85
build_strategy: TIMING
@ -145,11 +153,12 @@ firesim_supernode_rocket_singlecore_nic_l2_lbp:
# MIDAS Examples -- BUILD SUPPORT ONLY; Can't launch driver correctly on run farm
midasexamples_gcd:
PLATFORM: f1
TARGET_PROJECT: midasexamples
DESIGN: GCD
TARGET_CONFIG: NoConfig
PLATFORM_CONFIG: DefaultF1Config
deploy_quadruplet: null
deploy_quintuplet: null
platform_config_args:
fpga_frequency: 75
build_strategy: TIMING
@ -159,11 +168,12 @@ midasexamples_gcd:
# Additional Tutorial Config
firesim_rocket_singlecore_no_nic_l2_lbp:
PLATFORM: f1
TARGET_PROJECT: firesim
DESIGN: FireSim
TARGET_CONFIG: WithDefaultFireSimBridges_WithFireSimHighPerfConfigTweaks_chipyard.RocketConfig
PLATFORM_CONFIG: BaseF1Config
deploy_quadruplet: null
deploy_quintuplet: null
platform_config_args:
fpga_frequency: 90
build_strategy: TIMING
@ -173,11 +183,12 @@ firesim_rocket_singlecore_no_nic_l2_lbp:
# Additional Tutorial Config
firesim_rocket_singlecore_sha3_nic_l2_llc4mb_ddr3:
PLATFORM: f1
TARGET_PROJECT: firesim
DESIGN: FireSim
TARGET_CONFIG: WithNIC_DDR3FRFCFSLLC4MB_WithDefaultFireSimBridges_WithFireSimHighPerfConfigTweaks_chipyard.Sha3RocketConfig
PLATFORM_CONFIG: BaseF1Config
deploy_quadruplet: null
deploy_quintuplet: null
platform_config_args:
fpga_frequency: 65
build_strategy: TIMING
@ -187,11 +198,12 @@ firesim_rocket_singlecore_sha3_nic_l2_llc4mb_ddr3:
# Additional Tutorial Config
firesim_rocket_singlecore_sha3_no_nic_l2_llc4mb_ddr3:
PLATFORM: f1
TARGET_PROJECT: firesim
DESIGN: FireSim
TARGET_CONFIG: DDR3FRFCFSLLC4MB_WithDefaultFireSimBridges_WithFireSimHighPerfConfigTweaks_chipyard.Sha3RocketConfig
PLATFORM_CONFIG: BaseF1Config
deploy_quadruplet: null
deploy_quintuplet: null
platform_config_args:
fpga_frequency: 65
build_strategy: TIMING
@ -201,11 +213,12 @@ firesim_rocket_singlecore_sha3_no_nic_l2_llc4mb_ddr3:
# Additional Tutorial Config
firesim_rocket_singlecore_sha3_no_nic_l2_llc4mb_ddr3_printf:
PLATFORM: f1
TARGET_PROJECT: firesim
DESIGN: FireSim
TARGET_CONFIG: DDR3FRFCFSLLC4MB_WithDefaultFireSimBridges_WithFireSimHighPerfConfigTweaks_chipyard.Sha3RocketPrintfConfig
PLATFORM_CONFIG: WithPrintfSynthesis_BaseF1Config
deploy_quadruplet: null
deploy_quintuplet: null
platform_config_args:
fpga_frequency: 30
build_strategy: TIMING
@ -213,13 +226,14 @@ firesim_rocket_singlecore_sha3_no_nic_l2_llc4mb_ddr3_printf:
metasim_customruntimeconfig: null
bit_builder_recipe: bit-builder-recipes/f1.yaml
# Additional Vitis/XRT-only Config
# Additional Xilinx Vitis/XRT-only Config
vitis_firesim_rocket_singlecore_no_nic:
PLATFORM: vitis
TARGET_PROJECT: firesim
DESIGN: FireSim
TARGET_CONFIG: FireSimRocketMMIOOnlyConfig
PLATFORM_CONFIG: BaseVitisConfig
deploy_quadruplet: null
deploy_quintuplet: null
platform_config_args:
fpga_frequency: 140
build_strategy: TIMING
@ -228,13 +242,14 @@ vitis_firesim_rocket_singlecore_no_nic:
bit_builder_recipe: bit-builder-recipes/vitis.yaml
# Additional Tutorial Config
# Additional Vitis/XRT-only Config
# Additional Xilinx Vitis/XRT-only Config
vitis_firesim_gemmini_rocket_singlecore_no_nic:
PLATFORM: vitis
TARGET_PROJECT: firesim
DESIGN: FireSim
TARGET_CONFIG: FireSimLeanGemminiRocketMMIOOnlyConfig
PLATFORM_CONFIG: BaseVitisConfig
deploy_quadruplet: null
deploy_quintuplet: null
platform_config_args:
fpga_frequency: 30
build_strategy: TIMING
@ -244,11 +259,12 @@ vitis_firesim_gemmini_rocket_singlecore_no_nic:
# Additional Tutorial Config
firesim_gemmini_rocket_singlecore_no_nic:
PLATFORM: f1
TARGET_PROJECT: firesim
DESIGN: FireSim
TARGET_CONFIG: FireSimLeanGemminiRocketConfig
PLATFORM_CONFIG: BaseF1Config
deploy_quadruplet: null
deploy_quintuplet: null
platform_config_args:
fpga_frequency: 30 # AJG: conservative for now, later sweep for higher frequency
build_strategy: TIMING
@ -258,14 +274,45 @@ firesim_gemmini_rocket_singlecore_no_nic:
# Additional Tutorial Config
firesim_gemmini_printf_rocket_singlecore_no_nic:
PLATFORM: f1
TARGET_PROJECT: firesim
DESIGN: FireSim
TARGET_CONFIG: FireSimLeanGemminiPrintfRocketConfig
PLATFORM_CONFIG: WithPrintfSynthesis_BaseF1Config
deploy_quadruplet: null
deploy_quintuplet: null
platform_config_args:
fpga_frequency: 10 # AJG: conservative for now, later sweep for higher frequency
build_strategy: TIMING
post_build_hook: null
metasim_customruntimeconfig: null
bit_builder_recipe: bit-builder-recipes/f1.yaml
# Additional Xilinx Alveo U250-only Config
alveo_u250_firesim_rocket_singlecore_no_nic:
PLATFORM: xilinx_alveo_u250
TARGET_PROJECT: firesim
DESIGN: FireSim
TARGET_CONFIG: FireSimRocketConfig
PLATFORM_CONFIG: BaseXilinxAlveoConfig
deploy_quintuplet: null
platform_config_args:
fpga_frequency: 15
build_strategy: TIMING
post_build_hook: null
metasim_customruntimeconfig: null
bit_builder_recipe: bit-builder-recipes/xilinx_alveo_u250.yaml
# Additional Xilinx Alveo U280-only Config
alveo_u280_firesim_rocket_singlecore_no_nic:
PLATFORM: xilinx_alveo_u280
TARGET_PROJECT: firesim
DESIGN: FireSim
TARGET_CONFIG: FireSimRocketConfig
PLATFORM_CONFIG: BaseXilinxAlveoConfig
deploy_quintuplet: null
platform_config_args:
fpga_frequency: 15
build_strategy: TIMING
post_build_hook: null
metasim_customruntimeconfig: null
bit_builder_recipe: bit-builder-recipes/xilinx_alveo_u280.yaml

View File

@ -1,7 +1,7 @@
# Hardware config database for FireSim Simulation Manager
# See https://docs.fires.im/en/stable/Advanced-Usage/Manager/Manager-Configuration-Files.html for documentation of all of these params.
# Hardware configs represent a combination of an agfi, a deployquadruplet override
# Hardware configs represent a combination of an agfi, a deployquintuplet override
# (if needed), and a custom runtime config (if needed)
# The AGFIs provided below are public and available to all users.
@ -12,54 +12,64 @@
# DOCREF START: Example HWDB Entry
firesim_boom_singlecore_nic_l2_llc4mb_ddr3:
agfi: agfi-0ac731f61d3f31817
deploy_quadruplet_override: null
deploy_quintuplet_override: null
custom_runtime_config: null
# DOCREF END: Example HWDB Entry
firesim_boom_singlecore_no_nic_l2_llc4mb_ddr3:
agfi: agfi-0a60b1241fe70aad8
deploy_quadruplet_override: null
deploy_quintuplet_override: null
custom_runtime_config: null
firesim_rocket_quadcore_nic_l2_llc4mb_ddr3:
agfi: agfi-0c82dc422cf6408a9
deploy_quadruplet_override: null
deploy_quintuplet_override: null
custom_runtime_config: null
firesim_rocket_quadcore_no_nic_l2_llc4mb_ddr3:
agfi: agfi-09a9331f468822063
deploy_quadruplet_override: null
deploy_quintuplet_override: null
custom_runtime_config: null
firesim_supernode_rocket_singlecore_nic_l2_lbp:
agfi: agfi-074d5fb88949da9e3
deploy_quadruplet_override: null
deploy_quintuplet_override: null
custom_runtime_config: null
firesim_rocket_singlecore_no_nic_l2_lbp:
agfi: agfi-0bd3e59a6291be8d8
deploy_quadruplet_override: null
deploy_quintuplet_override: null
custom_runtime_config: null
firesim_rocket_singlecore_sha3_nic_l2_llc4mb_ddr3:
agfi: agfi-0a3aa8485fc964a28
deploy_quadruplet_override: null
deploy_quintuplet_override: null
custom_runtime_config: null
firesim_rocket_singlecore_sha3_no_nic_l2_llc4mb_ddr3:
agfi: agfi-037fd4a1261e58c73
deploy_quadruplet_override: null
deploy_quintuplet_override: null
custom_runtime_config: null
firesim_rocket_singlecore_sha3_no_nic_l2_llc4mb_ddr3_printf:
agfi: agfi-037978a7a54662358
deploy_quadruplet_override: null
deploy_quintuplet_override: null
custom_runtime_config: null
firesim_gemmini_printf_rocket_singlecore_no_nic:
agfi: agfi-03d8cc99122d5fb41
deploy_quadruplet_override: null
deploy_quintuplet_override: null
custom_runtime_config: null
firesim_gemmini_rocket_singlecore_no_nic:
agfi: agfi-09127fbc65317005a
deploy_quadruplet_override: null
deploy_quintuplet_override: null
custom_runtime_config: null
vitis_firesim_rocket_singlecore_no_nic:
xclbin: https://firesim-ci-vitis-xclbins.s3.us-west-2.amazonaws.com/vitis_firesim_rocket_singlecore_no_nic_c12936.xclbin
deploy_quadruplet_override: firesim-FireSim-FireSimRocketMMIOOnlyConfig-BaseVitisConfig
deploy_quintuplet_override: vitis-firesim-FireSim-FireSimRocketMMIOOnlyConfig-BaseVitisConfig
custom_runtime_config: null
vitis_firesim_gemmini_rocket_singlecore_no_nic:
xclbin: https://firesim-ci-vitis-xclbins.s3.us-west-2.amazonaws.com/vitis_firesim_gemmini_rocket_singlecore_no_nic_1ea5c4.xclbin
deploy_quadruplet_override: firesim-FireSim-FireSimLeanGemminiRocketMMIOOnlyConfig-BaseVitisConfig
deploy_quintuplet_override: vitis-firesim-FireSim-FireSimLeanGemminiRocketMMIOOnlyConfig-BaseVitisConfig
custom_runtime_config: null
# DOCREF START: Xilinx Alveo HWDB Entries
alveo_u250_firesim_rocket_singlecore_no_nic:
bitstream_tar: REPLACE_THIS
deploy_quintuplet_override: null
custom_runtime_config: null
alveo_u280_firesim_rocket_singlecore_no_nic:
bitstream_tar: REPLACE_THIS
deploy_quintuplet_override: null
custom_runtime_config: null
# DOCREF END: Xilinx Alveo HWDB Entries

View File

@ -244,7 +244,7 @@ class TestConfigBuildAPI:
testing_recipe_name:
DESIGN: TopModule
TARGET_CONFIG: Config
deploy_quadruplet: null
deploy_quintuplet: null
PLATFORM_CONFIG: Config
platform_config_args:
fpga_frequency: 123

View File

@ -13,7 +13,7 @@ def firesim_input(prompt: object = None) -> str:
Log the entered text as DEBUG so that the log contains it.
Don't pass the prompt to builtins.input() because we don't need StreamLogger to also
be trying to log the prompt.
See 'streamlogger.py' and its use at the end of 'firesim.py'
"""
@ -35,7 +35,7 @@ def downloadURI(uri: str, local_dest_path: str, tries: int = 4) -> None:
uri: uri of an object to be fetched
local_dest_path: path on the local file system to store the uri object
tries: The number of times to try the download. A 1 second sleep will occur after each failure.
"""
"""
# TODO consider using fsspec
# filecache https://filesystem-spec.readthedocs.io/en/latest/features.html#caching-files-locally
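# Illustrative usage sketch only, not part of the change above; the URI and local
# destination are made up. Per the docstring, the download is retried up to
# `tries` times, with a one-second sleep after each failure.
from util.io import downloadURI

downloadURI(
    uri="https://example.com/bitstreams/firesim.tar.gz",  # hypothetical object
    local_dest_path="/tmp/firesim.tar.gz",
    tries=4,
)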

View File

@ -1,12 +1,12 @@
Non-Source Dependency Management
================================
In :doc:`/Initial-Setup/Setting-up-your-Manager-Instance`, we quickly copy-pasted the contents
In the AWS EC2 F1 setup, in :doc:`/Getting-Started-Guides/AWS-EC2-F1-Tutorial/Initial-Setup/Setting-up-your-Manager-Instance`, we quickly copy-pasted the contents
of ``scripts/machine-launch-script.sh`` into the EC2 Management Console and
that script installed many dependencies that FireSim needs using
`conda <https://conda.io/en/latest/index.html>`_, a platform-agnostic package
manager, specifically using packages from the `conda-forge community <https://conda-forge.org/#about>`_
(or in the case of :doc:`/Initial-OnPrem-Setup/Setting-up-your-On-Premises-Machine`, we ran ``scripts/machine-launch-script.sh``).
(or in the case of :doc:`/Getting-Started-Guides/AWS-EC2-F1-Tutorial/Initial-Setup/Setting-up-your-Manager-Instance`, we ran ``scripts/machine-launch-script.sh``).
In many situations, you may not need to know anything about ``conda``. By default, the
``machine-launch-script.sh`` installs ``conda`` into ``/opt/conda`` and all of the FireSim dependencies into
@ -23,7 +23,7 @@ is that you are able to write into the install location. See ``machine-launch-s
To :ref:`run a simulation on a F1 FPGA <running_simulations>` , FireSim currently requires that
you are able to act as root via ``sudo``.
However, you can do many things without having root, like :doc:`/Building-a-FireSim-AFI`,
However, you can do many things without having root, like :doc:`/Getting-Started-Guides/AWS-EC2-F1-Tutorial/Building-a-FireSim-AFI`,
`<meta-simulation>`_ of a FireSim system using Verilator or even developing new features in FireSim.
Updating a Package Version
@ -32,7 +32,9 @@ Updating a Package Version
If you need a newer version of package, the most expedient method to see whether there
is a newer version available on `conda-forge`_ is to run ``conda update <package-name>``. If you are lucky,
and the dependencies of the package you want to update are simple, you'll see output that looks something like
this ::
this:
.. code-block:: bash
bash-4.2$ conda update moto
Collecting package metadata (current_repodata.json): done

View File

@ -27,7 +27,7 @@ terminate.
An example of an assertion caught in a dual-core instance of BOOM is given
below:
::
.. code-block:: text
id: 1190, module: IssueSlot_4, path: FireSimNoNIC.tile_1.core.issue_units_0.slots_3]
Assertion failed

View File

@ -31,7 +31,7 @@ In order to annotate a signal, we must import the
``midas.targetutils.FpgaDebug`` annotator. FpgaDebug's apply method accepts a
vararg of chisel3.Data. Invoke it as follows:
::
.. code-block:: scala
import midas.targetutils.FpgaDebug
@ -59,12 +59,10 @@ configured much like the desired HostFrequency by appending a mixin to the
Below is an example `PLATFORM_CONFIG` that can be used in the `build_recipes` config file.
::
.. code-block:: bash
PLATFORM_CONFIG=ILADepth8192_BaseF1Config
Using the ILA at Runtime
------------------------
@ -74,7 +72,7 @@ In order to use the ILA, we must enable the GUI interface on our manager instanc
In the past, AWS had a custom ``setup_gui.sh`` script. However, this was recently deprecated due to compatibility
issues with various packages. Therefore, AWS currently recommends using `NICE DCV <https://docs.aws.amazon.com/dcv/latest/adminguide/what-is-dcv.html>`__ as a GUI client. You should `download a DCV client <https://docs.aws.amazon.com/dcv/latest/userguide/client.html>`__, and then run the following commands on your FireSim manager instance:
::
.. code-block:: bash
sudo yum -y groupinstall "GNOME Desktop"
sudo yum -y install glx-utils

View File

@ -8,7 +8,7 @@ Golden Gate can synthesize printfs present in Chisel/FIRRTL (implemented as
Rocket and BOOM have printfs of their commit logs and other useful transaction
streams.
::
.. code-block:: text
C0: 409 [1] pc=[008000004c] W[r10=0000000000000000][1] R[r 0=0000000000000000] R[r20=0000000000000003] inst=[f1402573] csrr a0, mhartid
C0: 410 [0] pc=[008000004c] W[r 0=0000000000000000][0] R[r 0=0000000000000000] R[r20=0000000000000003] inst=[f1402573] csrr a0, mhartid

View File

@ -235,14 +235,14 @@ Running Metasimulations through Make
Metasimulations are run out of the ``firesim/sim`` directory.
::
.. code-block:: bash
[in firesim/sim]
make <verilator|vcs>
To compile a simulator with full-visibility waveforms, type:
::
.. code-block:: bash
make <verilator|vcs>-debug
@ -252,7 +252,7 @@ for running suites of assembly tests. MIDAS puts this in
Make sure your ``$RISCV`` environment variable is set by sourcing
``firesim/sourceme-f1-manager.sh`` or ``firesim/env.sh``, and type:
::
.. code-block:: bash
make run-<asm|bmark>-tests EMUL=<vcs|verilator>
@ -260,7 +260,7 @@ Make sure your ``$RISCV`` environment variable is set by sourcing
To run only a single test, the make target is the full path to the output.
Specifically:
::
.. code-block:: bash
make EMUL=<vcs|verilator> $PWD/output/f1/<DESIGN>-<TARGET_CONFIG>-<PLATFORM_CONFIG>/<RISCV-TEST-NAME>.<vpd|out>
@ -269,7 +269,7 @@ whereas a ``.out`` target will use the faster waveform-less simulator.
Additionally, you can run a unique binary in the following way:
::
.. code-block:: bash
make SIM_BINARY=<PATH_TO_BINARY> run-<vcs|verilator>
make SIM_BINARY=<PATH_TO_BINARY> run-<vcs|verilator>-debug
@ -280,7 +280,7 @@ Examples
Run all RISCV-tools assembly and benchmark tests on a Verilated simulator.
::
.. code-block:: bash
[in firesim/sim]
make
@ -290,7 +290,7 @@ Run all RISCV-tools assembly and benchmark tests on a Verilated simulator.
Run all RISCV-tools assembly and benchmark tests on a Verilated simulator with
waveform dumping.
::
.. code-block:: bash
make verilator-debug
make -j run-asm-tests-debug
@ -298,7 +298,7 @@ waveform dumping.
Run ``rv64ui-p-simple`` (a single assembly test) on a Verilated simulator.
::
.. code-block:: bash
make
make $(pwd)/output/f1/FireSim-FireSimRocketConfig-BaseF1Config/rv64ui-p-simple.out
@ -306,7 +306,7 @@ Run ``rv64ui-p-simple`` (a single assembly test) on a Verilated simulator.
Run ``rv64ui-p-simple`` (a single assembly test) on a VCS simulator with
waveform dumping.
::
.. code-block:: bash
make vcs-debug
make EMUL=vcs $(pwd)/output/f1/FireSim-FireSimRocketConfig-BaseF1Config/rv64ui-p-simple.vpd
@ -347,4 +347,3 @@ a scientific comparison between simulators. VCS numbers collected on a local
Berkeley machine, Verilator numbers collected on a ``c4.4xlarge``.
(metasimulation Verilator version: 4.002, target-level Verilator version:
3.904)

View File

@ -7,7 +7,7 @@ I just bumped the FireSim repository to a newer commit and simulations aren't ru
Anytime there is an AGFI bump, FireSim simulations will break/hang due to an outdated AGFI.
To get the new default AGFIs, you must run the manager initialization again by doing the following:
::
.. code-block:: bash
cd firesim
source sourceme-f1-manager.sh
@ -19,7 +19,7 @@ Is there a good way to keep track of what AGFI corresponds to what FireSim commi
When building an AGFI during ``firesim buildbitstream``, FireSim keeps track of what FireSim repository commit was used to build the AGFI.
To view a list of AGFIs that you have built and what you have access to, you can run the following command:
::
.. code-block:: bash
cd firesim
source sourceme-f1-manager.sh
@ -27,7 +27,7 @@ To view a list of AGFI's that you have built and what you have access to, you ca
You can also view a specific AGFI image by giving the AGFI ID (found in ``deploy/config_hwdb.ini``) through the following command:
::
.. code-block:: bash
cd firesim
source sourceme-f1-manager.sh
@ -74,7 +74,7 @@ For example, if you want to use ZFS to transparently compress data:
Creating the zpool will destroy all pre-existing data on that partition.
Double-check that the device node is correct before running any commands.
::
.. code-block:: bash
# replace /dev/nvme1n1 with the proper device node
zpool create -o ashift=12 -O compression=on <POOL_NAME> /dev/nvme1n1

View File

@ -195,7 +195,7 @@ Projects have the following directory structure:
Specifying A Target Instance
----------------------------
To generate a specific instance of a target, the build system leverages four Make variables:
To generate a specific instance of a target, the build system leverages five Make variables:
1. ``TARGET_PROJECT``: this points the Makefile (`sim/Makefile`) at the right
target-specific Makefrag, which defines the generation and metasimulation
@ -214,6 +214,10 @@ To generate a specific instance of a target, the build system leverages four Mak
parameters, such as whether to enable assertion synthesis, or multi-ported RAM optimizations.
Common platform configs are described in ``firesim-lib/sim/src/main/scala/configs/CompilerConfigs.scala``).
5. ``PLATFORM``: this points the Makefile (`sim/Makefile`) at the right
FPGA platform to build for. This must correspond to a platform
defined at :gh-file-ref:`platforms`.
``TARGET_CONFIG`` and ``PLATFORM_CONFIG`` are strings that are used to construct a
``Config`` instance (derives from RocketChip's parameterization system, ``Config``, see the
`CDE repo
@ -284,21 +288,21 @@ Three design classes use Rocket scalar in-order pipelines.
Single core, Rocket pipeline (default)
::
.. code-block:: bash
make TARGET_CONFIG=FireSimRocketConfig
Single-core, Rocket pipeline, with network interface
::
.. code-block:: bash
make TARGET_CONFIG=WithNIC_FireSimRocketChipConfig
Quad-core, Rocket pipeline
::
.. code-block:: bash
make TARGET_CONFIG=FireSimQuadRocketConfig
@ -311,13 +315,13 @@ The BOOM (`Berkeley Out-of-Order Machine <https://github.com/ucb-bar/riscv-boom>
Single-core BOOM
::
.. code-block:: bash
make TARGET_CONFIG=FireSimLargeBoomConfig
Single-core BOOM, with network interface
::
.. code-block:: bash
make TARGET_CONFIG=WithNIC_FireSimBoomConfig
@ -332,14 +336,14 @@ use the Makefile-defined defaults of ``DESIGN=FireSim PLATFORM_CONFIG=BaseF1Conf
Quad-rank DDR3 first-ready, first-come first-served memory access scheduler
::
.. code-block:: bash
make TARGET_CONFIG=DDR3FRFCFS_FireSimRocketConfig
As above, but with a 4 MiB (maximum simulatable capacity) last-level-cache model
::
.. code-block:: bash
make TARGET_CONFIG=DDR3FRFCFSLLC4MB_FireSimRocketConfig
@ -370,7 +374,7 @@ Examples
To generate the GCD midasexample:
::
.. code-block:: bash
make DESIGN=GCD TARGET_PROJECT=midasexamples
@ -387,12 +391,12 @@ Examples
Generate a synthesizable AXI4Fuzzer (based off of Rocket Chip's TL fuzzer), driving a
DDR3 FR-FCFS-based FASED instance.
::
.. code-block:: bash
make TARGET_PROJECT=fasedtests DESIGN=AXI4Fuzzer TARGET_CONFIG=FRFCFSConfig
As above, now configured to drive 10 million transactions through the instance.
::
.. code-block:: bash
make TARGET_PROJECT=fasedtests DESIGN=AXI4Fuzzer TARGET_CONFIG=NT10e7_FRFCFSConfig

View File

@ -6,7 +6,6 @@ populated with metadata that helps the manager decide how to deploy
a simulation. The important metadata is listed below, along with how each field
is set and used:
- ``firesim-buildquadruplet``: This always reflects the quadruplet combination used to BUILD the AGFI.
- ``firesim-deployquadruplet``: This reflects the quadruplet combination that is used to DEPLOY the AGFI. By default, this is the same as ``firesim-buildquadruplet``. In certain cases however, your users may not have access to a particular configuration, but a simpler configuration may be sufficient for building a compatible software driver (e.g. if you have proprietary RTL in your FPGA image that doesn't interface with the outside system). In this case, you can specify a custom deployquadruplet at build time. If you do not do so, the manager will automatically set this to be the same as ``firesim-buildquadruplet``.
- ``firesim-buildquintuplet``: This always reflects the quintuplet combination used to BUILD the AGFI.
- ``firesim-deployquintuplet``: This reflects the quintuplet combination that is used to DEPLOY the AGFI. By default, this is the same as ``firesim-buildquintuplet``. In certain cases however, your users may not have access to a particular configuration, but a simpler configuration may be sufficient for building a compatible software driver (e.g. if you have proprietary RTL in your FPGA image that doesn't interface with the outside system). In this case, you can specify a custom deployquintuplet at build time. If you do not do so, the manager will automatically set this to be the same as ``firesim-buildquintuplet``.
- ``firesim-commit``: This is the commit hash of the version of FireSim used to build this AGFI. If the AGFI was created from a dirty copy of the FireSim repo, "-dirty" will be appended to the commit hash.
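As a rough, made-up illustration of how the fields listed above relate (none of the values below are real):

.. code-block:: python

   tags = {
       "firesim-buildquintuplet": "f1-firesim-FireSim-FireSimRocketConfig-BaseF1Config",
       # Unless a custom deploy quintuplet was requested at build time, this simply
       # mirrors firesim-buildquintuplet.
       "firesim-deployquintuplet": "f1-firesim-FireSim-FireSimRocketConfig-BaseF1Config",
       "firesim-commit": "abcdef0",  # gains a "-dirty" suffix if built from a dirty repo
   }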

View File

@ -3,7 +3,8 @@ usage: firesim [-h] [-c RUNTIMECONFIGFILE] [-b BUILDCONFIGFILE]
[-x OVERRIDECONFIGDATA] [-f TERMINATESOMEF116]
[-g TERMINATESOMEF12] [-i TERMINATESOMEF14]
[-m TERMINATESOMEM416] [--terminatesome TERMINATESOME] [-q]
[-t LAUNCHTIME] [--platform {f1,vitis}]
[-t LAUNCHTIME]
[--platform {f1,vitis,xilinx_alveo_u250,xilinx_alveo_u280}]
{managerinit,infrasetup,boot,kill,runworkload,buildbitstream,builddriver,tar2afi,runcheck,launchrunfarm,terminaterunfarm,shareagfi}
FireSim Simulation Manager.
@ -75,6 +76,6 @@ options:
Give the "Y-m-d--H-M-S" prefix of results-build
directory. Useful for tar2afi when finishing a partial
buildbitstream
--platform {f1,vitis}
--platform {f1,vitis,xilinx_alveo_u250,xilinx_alveo_u280}
Required argument for "managerinit" to specify which
platform you will be using

View File

@ -6,7 +6,7 @@ Manager Configuration Files
This page contains a centralized reference for all of the configuration options
in ``config_runtime.yaml``, ``config_build.yaml``, ``config_build_farm.yaml``,
``config_build_recipes.yaml``, and ``config_hwdb.yaml``. It also contains
references for all build and run farm recipes (in ``deploy/build-farm-recipes/`` and ``deploy/run-farm-recipes/``).
references for all build and run farm recipes (in :gh-file-ref:`deploy/build-farm-recipes` and :gh-file-ref:`deploy/run-farm-recipes`).
.. _config-runtime:
@ -306,7 +306,7 @@ for a particular call to the ``buildbitstream`` command (see
example, if we want to run the builds named ``awesome_firesim_config`` and ``quad_core_awesome_firesim_config``, we would
write:
::
.. code-block:: yaml
builds_to_run:
- awesome_firesim_config
@ -323,17 +323,17 @@ users specified in the next (``share_with_accounts``) section. In this section,
you should specify the section title (i.e. the name you made up) for a hardware
configuration in ``config_hwdb.yaml``. For example, to share the hardware config:
::
.. code-block:: yaml
firesim_rocket_quadcore_nic_l2_llc4mb_ddr3:
# this is a comment that describes my favorite configuration!
agfi: agfi-0a6449b5894e96e53
deploy_quadruplet_override: null
deploy_quintuplet_override: null
custom_runtime_config: null
you would use:
::
.. code-block:: yaml
agfis_to_share:
- firesim_rocket_quadcore_nic_l2_llc4mb_ddr3
@ -392,7 +392,7 @@ Targets<generating-different-targets>`).
This specifies parameters to pass to the compiler (Golden Gate). Notably,
PLATFORM_CONFIG can be used to enable debugging tools like assertion synthesis,
and resource optimizations like instance multithreading. Critically, it also
calls out the host-platform (e.g., F1 or Vitis) to compile against: this
calls out the host-platform (e.g., F1) to compile against: this
defines the widths of internal simulation interfaces and specifies resource
limits (e.g., how much DRAM is available on the platform).
@ -413,7 +413,7 @@ Specifies the host FPGA frequency for a bitstream build.
Specifies a pre-canned set of strategies and directives to pass to the
bitstream build. Note, these are implemented differently on different host
platforms, but try to optimize for the same things. Strategies supported across both Vitis and EC2 F1 include:
platforms, but try to optimize for the same things. Strategies supported across Vitis, Xilinx Alveo U250/U280, and EC2 F1 include:
- ``TIMING``: Optimize for improved fmax.
- ``AREA``: Optimize for reduced resource utilization.
@ -430,11 +430,18 @@ Setting ``TARGET_PROJECT`` is required for building the MIDAS examples
(``TARGET_PROJECT: midasexamples``) with the manager, or for building a
user-provided target project.
``deploy_quadruplet``
``PLATFORM`` `(Optional)`
"""""""""""""""""""""""""""""""
This specifies the platform that the target will be built for (this is described
in greater detail :ref:`here<generating-different-targets>`). If
``PLATFORM`` is undefined, the manager will default to ``f1``.
``deploy_quintuplet``
""""""""""""""""""""""""""
This allows you to override the ``deployquadruplet`` stored with the AGFI.
Otherwise, the ``TARGET_PROJECT``/``DESIGN``/``TARGET_CONFIG``/``PLATFORM_CONFIG`` you specify
This allows you to override the ``deployquintuplet`` stored with the AGFI.
Otherwise, the ``PLATFORM``/``TARGET_PROJECT``/``DESIGN``/``TARGET_CONFIG``/``PLATFORM_CONFIG`` you specify
above will be used. See the AGFI Tagging section for more details. Most likely,
you should leave this set to ``null``. This is usually only used if you have
proprietary RTL that you bake into an FPGA image, but don't want to share with
@ -490,12 +497,12 @@ Here is a sample of this configuration file:
This file tracks hardware configurations that you can deploy as simulated nodes
in FireSim. Each such configuration contains a name for easy reference in higher-level
configurations, defined in the section header, an handle to a bitstream (an AGFI or ``xclbin`` path), which represents the
FPGA image, a custom runtime config, if one is needed, and a deploy quadruplet
configurations, defined in the section header, a handle to a bitstream (i.e. an AGFI or ``xclbin`` path), which represents the
FPGA image, a custom runtime config, if one is needed, and a deploy quintuplet
override if one is necessary.
When you build a new bitstream, you should put the default version of it in this
file so that it can be referenced from your other configuration files (the AGFI ID or ``xclbin`` path).
file so that it can be referenced from your other configuration files (i.e. the AGFI ID or ``xclbin`` path).
The following is an example section from this file - you can add as many of
these as necessary:
@ -526,11 +533,18 @@ Indicates where the bitstream (FPGA Image) is located, may be one of:
* A Uniform Resource Identifier (URI), (see :ref:`uri-path-support` for details)
* A filesystem path available to the manager. Local paths are relative to the `deploy` folder.
``deploy_quadruplet_override``
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
``bitstream_tar``
"""""""""""""""""
Indicates where the bitstream (FPGA Image) and the metadata associated with it are located, may be one of:
* A Uniform Resource Identifier (URI), (see :ref:`uri-path-support` for details)
* A filesystem path available to the manager. Local paths are relative to the `deploy` folder.
``deploy_quintuplet_override``
""""""""""""""""""""""""""""""
This is an advanced feature - under normal conditions, you should leave this set to ``null``, so that the
manager uses the configuration quadruplet that is automatically stored with the
manager uses the configuration quintuplet that is automatically stored with the
bitstream metadata at build time. Advanced users can set this to a different
value to build and use a different driver when deploying simulations. Since
the driver depends on logic now hardwired into the
@ -572,7 +586,7 @@ Add more hardware config sections, like ``NAME_GOES_HERE_2``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
You can add as many of these entries to ``config_hwdb.yaml`` as you want, following the format
discussed above (i.e. you provide ``agfi`` or ``xclbin``, ``deploy_quadruplet_override``, and ``custom_runtime_config``).
discussed above (i.e. you provide ``agfi`` or ``xclbin``, ``deploy_quintuplet_override``, and ``custom_runtime_config``).
.. _run-farm-recipe:
@ -738,7 +752,7 @@ simulations across all run farm hosts.
For example, this class manages how to flash FPGAs with bitstreams, how to copy back results, and how to check if a simulation is running.
By default, deploy platform classes can be found in :gh-file-ref:`deploy/runtools/run_farm_deploy_managers.py`. However, you can specify
your own custom run farm classes by adding your python file to the ``PYTHONPATH``.
There are two default deploy managers / platforms that correspond to AWS EC2 F1 FPGAs and Vitis FPGAs, ``EC2InstanceDeployManager`` and ``VitisInstanceDeployManager``, respectively.
There are default deploy managers / platforms that correspond to AWS EC2 F1 FPGAs, Vitis FPGAs, and Xilinx Alveo U250/U280 FPGAs: ``EC2InstanceDeployManager``, ``VitisInstanceDeployManager``, and ``XilinxAlveo{U250,U280}InstanceDeployManager``, respectively.
For example, to use the ``EC2InstanceDeployManager`` deploy platform class, you would write ``default_platform: EC2InstanceDeployManager``.
``default_simulation_dir``
@ -927,13 +941,23 @@ When enabled, this appends the current users AWS user ID and region to the ``s3_
``vitis.yaml`` bit builder recipe
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
This bit builder recipe configures a build farm host to build an Vitis U250 (FPGA bitstream called an ``xclbin``).
This bit builder recipe configures a build farm host to build a Vitis bitstream (an FPGA bitstream called an ``xclbin``).
``device``
""""""""""""""""""""""""""
This specifies a Vitis platform to compile against, for example: ``xilinx_u250_gen3x16_xdma_3_1_202020_1``.
This specifies a Vitis platform to compile against, for example: ``xilinx_u250_gen3x16_xdma_3_1_202020_1`` when targeting a Vitis-enabled Alveo U250 FPGA.
Here is an example of this configuration file:
.. literalinclude:: /../deploy/bit-builder-recipes/vitis.yaml
:language: yaml
``xilinx_alveo_u250.yaml`` bit builder recipe
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
This bit builder recipe configures a build farm host to build a Xilinx Alveo U250 bitstream.
``xilinx_alveo_u280.yaml`` bit builder recipe
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
This bit builder recipe configures a build farm host to build a Xilinx Alveo U280 bitstream.
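For reference, a build recipe in ``deploy/config_build_recipes.yaml`` selects one of these bit builders through its ``bit_builder_recipe`` field. A minimal sketch, with placeholder ``TARGET_CONFIG`` and ``PLATFORM_CONFIG`` values:

.. code-block:: yaml

   # hypothetical entry in deploy/config_build_recipes.yaml;
   # TARGET_CONFIG and PLATFORM_CONFIG values are placeholders
   alveo_u250_firesim_rocket_singlecore_no_nic:
       DESIGN: FireSim
       TARGET_CONFIG: FireSimRocketConfig
       PLATFORM_CONFIG: BaseXilinxAlveoConfig
       deploy_quintuplet: null
       bit_builder_recipe: bit-builder-recipes/xilinx_alveo_u250.yaml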

View File

@ -5,7 +5,7 @@ This page outlines all of the tasks that the FireSim manager supports.
.. _firesim-managerinit:
``firesim managerinit --platform {f1,vitis}``
``firesim managerinit``
---------------------------------------------
This is a setup command that does the following:
@ -27,6 +27,14 @@ Then, do platform-specific init steps for the given ``--platform``.
* Setup the ``config_runtime.yaml`` and ``config_build.yaml`` files with externally provisioned run/build farm arguments.
.. tab:: ``xilinx_alveo_u250``
* Setup the ``config_runtime.yaml`` and ``config_build.yaml`` files with externally provisioned run/build farm arguments.
.. tab:: ``xilinx_alveo_u280``
* Setup the ``config_runtime.yaml`` and ``config_build.yaml`` files with externally provisioned run/build farm arguments.
You can re-run this whenever you want to get clean configuration files.
.. note:: In the case of ``f1``, you can just hit Enter when prompted for ``aws configure`` credentials and your email
@ -74,9 +82,20 @@ For each config, the build process entails:
7. [Local or Remote] Run Vitis Synthesis and P&R for the configuration
8. [Local/Remote] Copy back all output generated by Vitis (including ``xclbin`` bitstream)
.. tab:: Xilinx Alveo U250/U280
1. [Locally] Run the elaboration process for your hardware configuration
2. [Locally] FAME-1 transform the design with MIDAS
3. [Locally] Attach simulation models (I/O widgets, memory model, etc.)
4. [Locally] Emit Verilog to run through the FPGA Flow
5. Use a build farm configuration to launch/use build hosts for each configuration you want to build
6. [Local/Remote] Prep build hosts, copy generated Verilog for hardware configuration to build instance
7. [Local or Remote] Run Vivado Synthesis and P&R for the configuration
8. [Local/Remote] Copy back all output generated by Vivado (including ``bit`` bitstream)
This process happens in parallel for all of the builds you specify. The command
will exit when all builds are completed (but you will get notified as
INDIVIDUAL builds complete if on F1) and indicate whether all builds passed or a
INDIVIDUAL builds complete if on F1) and indicate whether all builds passed or a
build failed by the exit code.
.. Note:: **It is highly recommended that you either run this command in a ``screen`` or use
@ -100,6 +119,12 @@ This directory will contain:
This contains reports, ``stdout`` from the build, and the final bitstream ``xclbin`` file produced by Vitis.
This also contains a copy of the generated verilog (``FireSim-generated.sv``) used to produce this build.
.. tab:: Xilinx Alveo U250/U280
The Vivado project collateral that built the FPGA image, in the state it was in when the Vivado build process completed.
This contains reports, ``stdout`` from the build, and the final ``bitstream_tar`` bitstream/metadata file produced by Vivado.
This also contains a copy of the generated verilog (``FireSim-generated.sv``) used to produce this build.
If this command is cancelled by a SIGINT, it will prompt for confirmation
that you want to terminate the build instances.
If you respond in the affirmative, it will move forward with the termination.
@ -108,7 +133,7 @@ command in a script), you can give the command the ``--forceterminate`` command
line argument. For example, the following will terminate all build instances in the
build farm without prompting for confirmation if a SIGINT is received:
::
.. code-block:: bash
firesim buildbitstream --forceterminate
@ -266,7 +291,7 @@ command in a script), you can give the command the ``--forceterminate`` command
line argument. For example, the following will TERMINATE ALL INSTANCES IN THE
RUN FARM WITHOUT PROMPTING FOR CONFIRMATION:
::
.. code-block:: bash
firesim terminaterunfarm --forceterminate
@ -282,7 +307,7 @@ instance as you specify.
Here are some examples:
::
.. code-block:: bash
[ start with 2 f1.16xlarges, 2 f1.2xlarges, 2 m4.16xlarges ]
@ -291,7 +316,7 @@ Here are some examples:
[ now, we have: 1 f1.16xlarges, 2 f1.2xlarges, 2 m4.16xlarges ]
::
.. code-block:: bash
[ start with 2 f1.16xlarges, 2 f1.2xlarges, 2 m4.16xlarges ]
@ -412,5 +437,3 @@ Here is an example of such a diagram (click to expand/zoom):
:alt: Example diagram from running ``firesim runcheck``
Example diagram for an 8-node cluster with one ToR switch

View File

@ -18,7 +18,7 @@ FPGA Dev AMI Remote Desktop Setup
To Remote Desktop into your manager instance, you must do the following:
::
.. code-block:: bash
curl https://s3.amazonaws.com/aws-fpga-developer-ami/1.5.0/Scripts/setup_gui.sh -o /home/centos/src/scripts/setup_gui.sh
sudo sed -i 's/enabled=0/enabled=1/g' /etc/yum.repos.d/CentOS-CR.repo
@ -48,7 +48,7 @@ the simulated node:
4. Go into the newest directory that is prefixed with ``switch0-``
5. Edit the ``switchconfig.h`` file so that it looks like this:
::
.. code-block:: c
// THIS FILE IS MACHINE GENERATED. SEE deploy/buildtools/switchmodelconfig.py
@ -72,7 +72,7 @@ the simulated node:
7. Run ``scp switch0 YOUR_RUN_FARM_INSTANCE_IP:switch_slot_0/switch0``
8. On the RUN FARM INSTANCE, run:
::
.. code-block:: bash
sudo ip tuntap add mode tap dev tap0 user $USER
sudo ip link set tap0 up
@ -85,7 +85,7 @@ the simulated node:
10. To ssh into the simulated machine, you will need to first ssh onto the Run Farm instance, then ssh into the IP address of the simulated node (172.16.0.2) with username ``root``. You should also prefix the ``ssh`` command with ``TERM=linux`` to get backspace to work correctly, like so:
::
.. code-block:: bash
ssh YOUR_RUN_FARM_INSTANCE_IP
# from within the run farm instance:
@ -95,7 +95,7 @@ the simulated node:
11. To also be able to access the internet from within the simulation, run the following
on the RUN FARM INSTANCE:
::
.. code-block:: bash
sudo sysctl -w net.ipv4.ip_forward=1
export EXT_IF_TO_USE=$(ifconfig -a | sed 's/[ \t].*//;/^\(lo:\|\)$/d' | sed 's/[ \t].*//;/^\(tap0:\|\)$/d' | sed 's/://g')
@ -106,7 +106,7 @@ on the RUN FARM INSTANCE:
12. Then run the following in the simulation:
::
.. code-block:: bash
route add default gw 172.16.0.1 eth0
echo "nameserver 8.8.8.8" >> /etc/resolv.conf
@ -129,7 +129,7 @@ by running ``./gen-tags.sh`` in your FireSim repo.
For example, to use these tags to jump around the codebase in ``vim``, add the following to
your ``.vimrc``:
::
.. code-block:: bash
set tags=tags;/

View File

@ -34,7 +34,7 @@ The Supernode target configuration wrapper can be found in Chipyard in
``chipyard/generators/firechip/src/main/scala/TargetConfigs.scala``. An example wrapper
configuration is:
::
.. code-block:: scala
class SupernodeFireSimRocketConfig extends Config(
new WithNumNodes(4) ++
@ -48,7 +48,7 @@ different target configuration, we will generate a new supernode wrapper, with
the new target configuration. For example, to simulate 4 quad-core nodes on one
FPGA, you can use:
::
.. code-block:: scala
class SupernodeFireSimQuadRocketConfig extends Config(
new WithNumNodes(4) ++
@ -64,19 +64,19 @@ the wrapper configuration that was defined in
``PLATFORM_CONFIG`` can be selected the same as in regular FireSim
configurations. For example:
::
.. code-block:: yaml
DESIGN: FireSim
TARGET_CONFIG: SupernodeFireSimQuadRocketConfig
PLATFORM_CONFIG: BaseF1Config
deploy_quadruplet: null
deploy_quintuplet: null
We currently provide a single pre-built AGFI for supernode of 4 quad-core
RocketChips with DDR3 memory models. You can build your own AGFI, using the supplied samples in
``config_build_recipes.yaml``. Importantly, in order to meet FPGA timing
constraints, the Supernode target may require lower host clock frequencies.
Host clock frequencies can be configured as parts of the ``platform_config_args``
Host clock frequencies can be configured as part of the ``platform_config_args``
(this must be done using ``PLATFORM_CONFIG`` if not using F1) in ``config_build_recipes.yaml``.
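As a hedged illustration (the recipe name, frequency value, and the ``fpga_frequency``/``build_strategy`` argument names are assumptions patterned after the F1 sample recipes):

.. code-block:: yaml

   # hypothetical Supernode build recipe with a reduced host clock;
   # the recipe name and frequency value are placeholders
   supernode_quadcore_example:
       DESIGN: FireSim
       TARGET_CONFIG: SupernodeFireSimQuadRocketConfig
       PLATFORM_CONFIG: BaseF1Config
       deploy_quintuplet: null
       platform_config_args:
           fpga_frequency: 75
           build_strategy: TIMING
       bit_builder_recipe: bit-builder-recipes/f1.yaml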
Running Supernode Simulations
@ -110,7 +110,7 @@ Supernode topologies.
A sample Supernode topology of 4 simulated target nodes which can fit on a
single ``f1.2xlarge`` is:
::
.. code-block:: python
def supernode_example_4config(self):
self.roots = [FireSimSwitchNode()]
@ -121,7 +121,7 @@ single ``f1.2xlarge`` is:
A sample Supernode topology of 32 simulated target nodes which can fit on a
single ``f1.16xlarge`` is:
::
.. code-block:: python
def supernode_example_32config(self):
self.roots = [FireSimSwitchNode()]

View File

@ -53,7 +53,7 @@ Let's take a look at this file:
There is also a corresponding directory named after this workload/file:
::
.. code-block:: bash
centos@ip-192-168-2-7.ec2.internal:~/firesim/deploy/workloads/linux-uniform$ ls -la
total 4
@ -136,7 +136,7 @@ Additionally, let's take a look at the state of the ``ping-latency`` directory
AFTER the workload is built (assume that a tool like :ref:`firemarshal` already
created the rootfses and linux images):
::
.. code-block:: bash
centos@ip-172-30-2-111.us-west-2.compute.internal:~/firesim-new/deploy/workloads/ping-latency$ ls -la
total 15203216

View File

@ -56,7 +56,7 @@ Let's take a look at this file:
There is also a corresponding directory named after this workload/file:
::
.. code-block:: bash
centos@ip-172-30-2-111.us-west-2.compute.internal:~/firesim-new/deploy/workloads/linux-uniform$ ls -la
total 4
@ -132,7 +132,7 @@ job per simulated node.
Additionally, let's take a look at the state of the ``ping-latency`` directory
AFTER the workload is built:
::
.. code-block:: bash
centos@ip-172-30-2-111.us-west-2.compute.internal:~/firesim-new/deploy/workloads/ping-latency$ ls -la
total 15203216
@ -160,7 +160,7 @@ First, let's identify some of these files:
Additionally, let's look at the ``overlay`` subdirectory:
::
.. code-block:: bash
centos@ip-172-30-2-111.us-west-2.compute.internal:~/firesim-new/deploy/workloads/ping-latency/overlay$ ls -la */*
-rwxrwxr-x 1 centos centos 249 May 17 21:58 bin/pinglatency.sh
@ -190,7 +190,7 @@ overlay directory, and the base rootfses generated in ``firesim-software``,
the following command will automatically generate all of the rootfses that you
see in the ``ping-latency`` directory.
::
.. code-block:: bash
[ from the workloads/ directory ]
./gen-benchmark-rootfs.py -w ping-latency.json -r -b ../../sw/firesim-software/images/firechip/br-base/br-base.img -s ping-latency/overlay

View File

@ -12,7 +12,7 @@ Running PyTests Locally
Assuming the FireSim repository is setup properly, PyTests can be run by doing the following:
::
.. code-block:: bash
cd <FireSim Root>
cd deploy/

View File

@ -11,7 +11,7 @@ If developers want to update the requirements files, they should also update the
There are two different methods:
#. Running ``build-setup.sh --unpinned-deps``. This will update the lock file in place so that it can be committed and will re-setup the FireSim repository.
#. Running ``./scripts/generate-conda-lockfile.sh``. This will update the lock file in place without setting up your directory.
#. Running :gh-file-ref:`scripts/generate-conda-lockfile.sh`. This will update the lock file in place without setting up your directory.
Caveats of the Conda Lock File and CI
=====================================

View File

@ -11,7 +11,7 @@ Please follow along with the following steps to get setup if you already have an
2. Run the following commands:
::
.. code-block:: bash
#!/bin/bash
@ -109,7 +109,7 @@ Please follow along with the following steps to get setup if you already have an
3. Next copy the following contents and replace your entire ``~/.bashrc`` file with this:
::
.. code-block:: bash
# .bashrc
# Source global definitions

View File

@ -119,39 +119,18 @@ on the simulated RISC-V system (*target*-software) or on a host x86 machine (*ho
The FIRRTL compiler used by FireSim to convert target RTL into a decoupled
simulator. Formerly named MIDAS.
Using FireSim/The FireSim Workflow
-------------------------------------
Get Started
-----------
The tutorials that follow this page will guide you through the complete flow for
getting an example FireSim simulation up and running using AWS EC2 F1. At the end of this
tutorial, you'll have a simulation that simulates a single quad-core Rocket
Chip-based node with a 4 MB last level cache, 16 GB DDR3, and no NIC. After
this, you can continue to a tutorial that shows you how to simulate
a globally-cycle-accurate cluster-scale FireSim simulation. The final tutorial
will show you how to build your own FPGA images with customized hardware.
After you complete these tutorials, you can look at the Advanced documentation
in the sidebar to the left.
FireSim supports many types of FPGAs and FPGA platforms!
Click one of the following links to get started with your particular platform.
Here's a high-level outline of what we'll be doing in our AWS EC2 tutorials:
.. warning:: If using a Xilinx Alveo U250 or U280, we recommend the FPGA-specific flows instead of the Xilinx Vitis flow.
#. **Initial Setup/Installation**
* :doc:`/Getting-Started-Guides/AWS-EC2-F1-Tutorial/index`
a. First-time AWS User Setup: You can skip this if you already have an AWS
account/payment method set up.
* :doc:`/Getting-Started-Guides/On-Premises-FPGA-Tutorial/Xilinx-Alveo-U250-FPGAs`
#. Configuring required AWS resources in your account: This sets up the
appropriate VPCs/subnets/security groups required to run FireSim.
* :doc:`/Getting-Started-Guides/On-Premises-FPGA-Tutorial/Xilinx-Alveo-U280-FPGAs`
#. Setting up a "Manager Instance" from which you will coordinate building
and deploying simulations.
#. **Single-node simulation tutorial**: This tutorial guides you through the process of running one simulation on a Run Farm consisting of a single ``f1.2xlarge``, using our pre-built public FireSim AGFIs.
#. **Cluster simulation tutorial**: This tutorial guides you through the process of running an 8-node cluster simulation on a Run Farm consisting of one ``f1.16xlarge``, using our pre-built public FireSim AGFIs and switch models.
#. **Building your own hardware designs tutorial (Chisel to FPGA Image)**: This tutorial guides you through the full process of taking Rocket Chip RTL and any custom RTL plugged into Rocket Chip and producing a FireSim AGFI to plug into your simulations. This automatically runs Chisel elaboration, FAME-1 Transformation, and the Vivado FPGA flow.
Generally speaking, you only need to follow step 4 if you're modifying Chisel
RTL or changing non-runtime configurable hardware parameters.
Now, hit Next to proceed with setup.
* :doc:`/Getting-Started-Guides/On-Premises-FPGA-Tutorial/Xilinx-Vitis-FPGAs`

View File

@ -16,16 +16,16 @@ to specify a name.
So, choose a bucket name, e.g. ``firesim``. Bucket names must be
globally unique. If you choose one that's already taken, the manager
will notice and complain when you tell it to build an AFI. To set your
bucket name, open ``deploy/bit-builder-recipes/f1.yaml`` in your editor and under the
bucket name, open :gh-file-ref:`deploy/bit-builder-recipes/f1.yaml` in your editor and under the
particular recipe you plan to build, replace
::
.. code-block:: yaml
s3_bucket_name: firesim
with your own bucket name, e.g.:
::
.. code-block:: yaml
s3_bucket_name: firesim
@ -51,7 +51,7 @@ This is a design that has four cores, no nic, and uses the 4MB LLC + DDR3 memory
To do so, comment out all of the other build entries in ``deploy/config_build.ini``, besides the one we want. So, you should
end up with something like this (a line beginning with a ``#`` is a comment):
::
.. code-block:: yaml
builds_to_run:
# this section references builds defined in config_build_recipes.ini
@ -64,7 +64,7 @@ Running a Build
Now, we can run a build like so:
::
.. code-block:: bash
firesim buildbitstream

View File

@ -75,13 +75,13 @@ Run scripts from the t2.nano
SSH into the ``t2.nano`` like so:
::
.. code-block:: bash
ssh -i firesim.pem ec2-user@INSTANCE_PUBLIC_IP
Which should present you with something like:
::
.. code-block:: text
, #_
~\_ ####_ Amazon Linux 2023
@ -97,7 +97,7 @@ Which should present you with something like:
On this machine, run the following:
::
.. code-block:: bash
aws configure
[follow prompts]
@ -108,18 +108,18 @@ output format to ``json``. You will need to generate an AWS access key in the "S
Again on the ``t2.nano`` instance, do the following:
.. parsed-literal::
.. code-block:: bash
sudo yum install -y python3-pip
sudo python3 -m pip install boto3
sudo python3 -m pip install --upgrade awscli
wget https://raw.githubusercontent.com/firesim/firesim/|version|/deploy/awstools/aws_setup.py
chmod +x aws_setup.py
./aws_setup.py
The final command should print the following:
::
.. code-block:: text
Creating VPC for FireSim...
Success!

View File

@ -37,7 +37,7 @@ https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-resource-limits.html
You'll probably want to start out with the following request, depending on your existing limits:
::
.. code-block:: text
Limit Type: EC2 Instances
Region: US East (Northern Virginia)

View File

@ -86,7 +86,7 @@ setup is complete.
In either case, ``ssh`` into your instance (e.g. ``ssh -i firesim.pem centos@YOUR_INSTANCE_IP``) and wait until the
``/tmp/machine-launchstatus`` file contains all the following text:
::
.. code-block:: bash
$ cat /tmp/machine-launchstatus
machine launch script started
@ -110,13 +110,13 @@ Setting up the FireSim Repo
We're finally ready to fetch FireSim's sources. Run:
.. parsed-literal::
.. code-block:: bash
git clone https://github.com/firesim/firesim
cd firesim
# checkout latest official firesim release
# note: this may not be the latest release if the documentation version != "stable"
git checkout |version|
git checkout |overall_version|
./build-setup.sh
The ``build-setup.sh`` script will validate that you are on a tagged branch,
@ -126,7 +126,7 @@ other dependencies.
Next, run:
::
.. code-block:: bash
source sourceme-f1-manager.sh
@ -147,7 +147,7 @@ Completing Setup Using the Manager
The FireSim manager contains a command that will interactively guide you
through the rest of the FireSim setup process. To run it, do the following:
::
.. code-block:: bash
firesim managerinit --platform f1

View File

@ -16,7 +16,7 @@ Returning to a clean configuration
If you already ran the single-node tutorial, let's return to a clean FireSim
manager configuration by doing the following:
::
.. code-block:: bash
cd firesim/deploy
cp sample-backup-configs/sample_config_runtime.yaml config_runtime.yaml
@ -34,7 +34,7 @@ Linux distribution on each of the nodes in your simulated cluster. To do so,
we'll need to build our FireSim-compatible RISC-V Linux distro. You can do
this like so:
::
.. code-block:: bash
cd firesim/sw/firesim-software
./marshal -v build br-base.json
@ -89,7 +89,7 @@ section.
As a final sanity check, your ``config_runtime.yaml`` file should now look like this:
::
.. code-block:: yaml
run_farm:
base_recipe: run-farm-recipes/aws_ec2.yaml
@ -144,13 +144,13 @@ When you do this, you will start getting charged for the running EC2 instances
To launch your run farm, run:
::
.. code-block:: bash
firesim launchrunfarm
You should expect output like the following:
::
.. code-block:: bash
centos@ip-172-30-2-111.us-west-2.compute.internal:~/firesim-new/deploy$ firesim launchrunfarm
FireSim Manager. Docs: http://docs.fires.im
@ -186,14 +186,14 @@ case). The manager will also handle
flashing FPGAs. To tell the manager to setup our simulation infrastructure,
let's run:
::
.. code-block:: bash
firesim infrasetup
For a complete run, you should expect output like the following:
::
.. code-block:: bash
centos@ip-172-30-2-111.us-west-2.compute.internal:~/firesim-new/deploy$ firesim infrasetup
FireSim Manager. Docs: http://docs.fires.im
@ -256,7 +256,7 @@ Running a simulation!
Finally, let's run our simulation! To do so, run:
::
.. code-block:: bash
firesim runworkload
@ -265,7 +265,7 @@ This command boots up the 8-port switch simulation and then starts 8 Rocket Chip
FPGA Simulations, then prints out the live status of the simulated
nodes and switch every 10s. When you do this, you will initially see output like:
::
.. code-block:: bash
centos@ip-172-30-2-111.us-west-2.compute.internal:~/firesim-new/deploy$ firesim runworkload
FireSim Manager. Docs: http://docs.fires.im
@ -291,7 +291,7 @@ nodes and switch every 10s. When you do this, you will initially see output like
If you don't look quickly, you might miss it, because it will be replaced with
a live status page once simulations are kicked-off:
::
.. code-block:: text
FireSim Simulation Status @ 2018-05-19 06:28:56.087472
--------------------------------------------------------------------------------
@ -336,7 +336,7 @@ the status page, **from your manager instance**. In our case, from the above
output, we see that our simulated system is running on the instance with IP
``172.30.2.178``. So, run:
::
.. code-block:: bash
[RUN THIS ON YOUR MANAGER INSTANCE!]
ssh 172.30.2.178
@ -351,14 +351,14 @@ here for performance reasons.
For example, if we want to enter commands into node zero, we can attach
to its console like so:
::
.. code-block:: bash
screen -r fsim0
Voila! You should now see Linux booting on the simulated node and then be prompted
with a Linux login prompt, like so:
::
.. code-block:: text
[truncated Linux boot output]
[ 0.020000] Registered IceNet NIC 00:12:6d:00:00:02
@ -386,7 +386,7 @@ Now, you can login to the system! The username is ``root``.
At this point, you should be presented with a regular console,
where you can type commands into the simulation and run programs. For example:
::
.. code-block:: bash
Welcome to Buildroot
buildroot login: root
@ -401,7 +401,7 @@ let's poweroff the simulated system and see what the manager does. To do so,
in the console of the simulated system, run ``poweroff -f``:
::
.. code-block:: bash
Welcome to Buildroot
buildroot login: root
@ -412,7 +412,7 @@ in the console of the simulated system, run ``poweroff -f``:
You should see output like the following from the simulation console:
::
.. code-block:: bash
# poweroff -f
[ 3.748000] reboot: Power down
@ -430,7 +430,7 @@ You should see output like the following from the simulation console:
You'll also notice that the manager polling loop exited! You'll see output like this
from the manager:
::
.. code-block:: text
--------------------------------------------------------------------------------
Instances
@ -493,7 +493,7 @@ simulation.
If you take a look at the workload output directory given in the manager output (in this case, ``/home/centos/firesim-new/deploy/results-workload/2018-05-19--06-39-35-linux-uniform/``), you'll see the following:
::
.. code-block:: bash
centos@ip-172-30-2-111.us-west-2.compute.internal:~/firesim-new/deploy/results-workload/2018-05-19--06-39-35-linux-uniform$ ls -la */*
-rw-rw-r-- 1 centos centos 797 May 19 06:45 linux-uniform0/memory_stats.csv
@ -533,13 +533,13 @@ each simulated node and each simulated switch in the cluster. The
For now, let's wrap-up our tutorial by terminating the ``f1.16xlarge`` instance
that we launched. To do so, run:
::
.. code-block:: bash
firesim terminaterunfarm
Which should present you with the following:
::
.. code-block:: bash
centos@ip-172-30-2-111.us-west-2.compute.internal:~/firesim-new/deploy$ firesim terminaterunfarm
FireSim Manager. Docs: http://docs.fires.im
@ -560,7 +560,7 @@ Which should present you with the following:
You must type ``yes`` then hit enter here to have your instances terminated. Once
you do so, you will see:
::
.. code-block:: text
[ truncated output from above ]
Type yes, then press enter, to continue. Otherwise, the operation will be cancelled.

View File

@ -19,7 +19,7 @@ simulated node. To do so, we'll need to build our FireSim-compatible RISC-V
Linux distro. For this tutorial, we will use a simple buildroot-based
distribution. You can do this like so:
::
.. code-block:: bash
cd firesim/sw/firesim-software
./init-submodules.sh
@ -63,7 +63,7 @@ a workload) that you may be operating -- but more on that later.
Since we only want to simulate a single node, let's switch to using one
``f1.2xlarge``. To do so, change the ``run_farm_hosts_to_use`` sequence to the following:
::
.. code-block:: yaml
run_farm_hosts_to_use:
- f1.16xlarge: 0
@ -83,7 +83,7 @@ Now, let's verify that the ``target_config`` mapping will model the correct targ
By default, it is set to model a single-node with no network.
It should look like the following:
::
.. code-block:: yaml
target_config:
topology: no_net_config
@ -114,7 +114,7 @@ feature is an advanced feature that you can learn more about in the
As a final sanity check, in the mappings we changed, the ``config_runtime.yaml`` file should now look like this:
::
.. code-block:: yaml
run_farm:
base_recipe: run-farm-recipes/aws_ec2.yaml
@ -171,13 +171,13 @@ When you do this, you will start getting charged for the running EC2 instances
To launch your run farm, run:
::
.. code-block:: bash
firesim launchrunfarm
You should expect output like the following:
::
.. code-block:: bash
centos@ip-172-30-2-111.us-west-2.compute.internal:~/firesim-new/deploy$ firesim launchrunfarm
FireSim Manager. Docs: http://docs.fires.im
@ -212,14 +212,14 @@ components necessary to run your simulation. The manager will also handle
flashing FPGAs. To tell the manager to setup our simulation infrastructure,
let's run:
::
.. code-block:: bash
firesim infrasetup
For a complete run, you should expect output like the following:
::
.. code-block:: bash
centos@ip-172-30-2-111.us-west-2.compute.internal:~/firesim-new/deploy$ firesim infrasetup
FireSim Manager. Docs: http://docs.fires.im
@ -257,7 +257,7 @@ Running a simulation!
Finally, let's run our simulation! To do so, run:
::
.. code-block:: bash
firesim runworkload
@ -265,7 +265,7 @@ Finally, let's run our simulation! To do so, run:
This command boots up a simulation and prints out the live status of the simulated
nodes every 10s. When you do this, you will initially see output like:
::
.. code-block:: bash
centos@ip-172-30-2-111.us-west-2.compute.internal:~/firesim-new/deploy$ firesim runworkload
FireSim Manager. Docs: http://docs.fires.im
@ -281,7 +281,7 @@ nodes every 10s. When you do this, you will initially see output like:
If you don't look quickly, you might miss it, since it will get replaced with a
live status page:
::
.. code-block:: text
FireSim Simulation Status @ 2018-05-19 00:38:56.062737
--------------------------------------------------------------------------------
@ -317,7 +317,7 @@ printed by the status page, **from your manager instance**. In our case, from
the above output, we see that our simulated system is running on the instance with
IP ``172.30.2.174``. So, run:
::
.. code-block:: bash
[RUN THIS ON YOUR MANAGER INSTANCE!]
ssh 172.30.2.174
@ -325,7 +325,7 @@ IP ``172.30.2.174``. So, run:
This will log you into the instance running the simulation. Then, to attach to the
console of the simulated system, run:
::
.. code-block:: bash
screen -r fsim0
@ -333,7 +333,7 @@ Voila! You should now see Linux booting on the simulated system and then be prom
with a Linux login prompt, like so:
::
.. code-block:: bash
[truncated Linux boot output]
[ 0.020000] VFS: Mounted root (ext2 filesystem) on device 254:0.
@ -362,7 +362,7 @@ Now, you can login to the system! The username is ``root``.
At this point, you should be presented with a regular console,
where you can type commands into the simulation and run programs. For example:
::
.. code-block:: bash
Welcome to Buildroot
buildroot login: root
@ -377,7 +377,7 @@ let's poweroff the simulated system and see what the manager does. To do so,
in the console of the simulated system, run ``poweroff -f``:
::
.. code-block:: bash
Welcome to Buildroot
buildroot login: root
@ -388,7 +388,7 @@ in the console of the simulated system, run ``poweroff -f``:
You should see output like the following from the simulation console:
::
.. code-block:: bash
# poweroff -f
[ 12.456000] reboot: Power down
@ -406,7 +406,7 @@ You should see output like the following from the simulation console:
You'll also notice that the manager polling loop exited! You'll see output like this
from the manager:
::
.. code-block:: bash
FireSim Simulation Status @ 2018-05-19 00:46:50.075885
--------------------------------------------------------------------------------
@ -440,7 +440,7 @@ from the manager:
If you take a look at the workload output directory given in the manager output (in this case, ``/home/centos/firesim-new/deploy/results-workload/2018-05-19--00-38-52-linux-uniform/``), you'll see the following:
::
.. code-block:: bash
centos@ip-172-30-2-111.us-west-2.compute.internal:~/firesim-new/deploy/results-workload/2018-05-19--00-38-52-linux-uniform$ ls -la */*
-rw-rw-r-- 1 centos centos 797 May 19 00:46 linux-uniform0/memory_stats.csv
@ -456,13 +456,13 @@ useful for running benchmarks automatically. The
For now, let's wrap-up our tutorial by terminating the ``f1.2xlarge`` instance
that we launched. To do so, run:
::
.. code-block:: bash
firesim terminaterunfarm
Which should present you with the following:
::
.. code-block:: bash
centos@ip-172-30-2-111.us-west-2.compute.internal:~/firesim-new/deploy$ firesim terminaterunfarm
FireSim Manager. Docs: http://docs.fires.im
@ -482,7 +482,7 @@ Which should present you with the following:
You must type ``yes`` then hit enter here to have your instances terminated. Once
you do so, you will see:
::
.. code-block:: text
[ truncated output from above ]
Type yes, then press enter, to continue. Otherwise, the operation will be cancelled.

View File

@ -0,0 +1,41 @@
AWS EC2 F1 Getting Started
==========================
The tutorials that follow this page will guide you through the complete flow for
getting an example FireSim simulation up and running using AWS EC2 F1. At the end of this
tutorial, you'll have a simulation that simulates a single quad-core Rocket
Chip-based node with a 4 MB last level cache, 16 GB DDR3, and no NIC. After
this, you can continue to a tutorial that shows you how to simulate
a globally-cycle-accurate cluster-scale FireSim simulation. The final tutorial
will show you how to build your own FPGA images with customized hardware.
After you complete these tutorials, you can look at the "Advanced Docs"
in the sidebar to the left.
Here's a high-level outline of what we'll be doing in our AWS EC2 F1 tutorials:
#. **Initial Setup/Installation**
a. First-time AWS User Setup: You can skip this if you already have an AWS
account/payment method set up.
#. Configuring required AWS resources in your account: This sets up the
appropriate VPCs/subnets/security groups required to run FireSim.
#. Setting up a "Manager Instance" from which you will coordinate building
and deploying simulations.
#. **Single-node simulation tutorial**: This tutorial guides you through the process of running one simulation on a Run Farm consisting of a single ``f1.2xlarge``, using our pre-built public FireSim AGFIs.
#. **Cluster simulation tutorial**: This tutorial guides you through the process of running an 8-node cluster simulation on a Run Farm consisting of one ``f1.16xlarge``, using our pre-built public FireSim AGFIs and switch models.
#. **Building your own hardware designs tutorial (Chisel to FPGA Image)**: This tutorial guides you through the full process of taking Rocket Chip RTL and any custom RTL plugged into Rocket Chip and producing a FireSim AGFI to plug into your simulations. This automatically runs Chisel elaboration, FAME-1 Transformation, and the Vivado FPGA flow.
Generally speaking, you only need to follow step 4 if you're modifying Chisel
RTL or changing non-runtime configurable hardware parameters.
.. toctree::
:maxdepth: 2
Initial-Setup/index
Running-Simulations-Tutorial/index
Building-a-FireSim-AFI

View File

@ -0,0 +1,8 @@
.. |fpga_name| replace:: Xilinx Alveo U250
.. |hwdb_entry_name| replace:: ``alveo_u250_firesim_rocket_singlecore_no_nic``
.. |hwdb_entry_name_non_code| replace:: alveo_u250_firesim_rocket_singlecore_no_nic
.. |bit_file_type| replace:: ``bitstream_tar``
.. |builder_name| replace:: Xilinx Vivado
.. |bit_builder_path| replace:: ``bit-builder-recipes/xilinx_alveo_u250.yaml``
.. include:: Xilinx-Bitstream-Template.rst

View File

@ -0,0 +1,8 @@
.. |fpga_name| replace:: Xilinx Alveo U280
.. |hwdb_entry_name| replace:: ``alveo_u280_firesim_rocket_singlecore_no_nic``
.. |hwdb_entry_name_non_code| replace:: alveo_u280_firesim_rocket_singlecore_no_nic
.. |bit_file_type| replace:: ``bitstream_tar``
.. |builder_name| replace:: Xilinx Vivado
.. |bit_builder_path| replace:: ``bit-builder-recipes/xilinx_alveo_u280.yaml``
.. include:: Xilinx-Bitstream-Template.rst

View File

@ -1,15 +1,16 @@
Building Your Own Hardware Designs (FireSim Vitis FPGA Xclbins)
===============================================================
Building Your Own Hardware Designs
==================================
This section will guide you through building a U250 FPGA xclbin (FPGA image) for a FireSim
simulation.
This section will guide you through building a |fpga_name| FPGA |bit_file_type| (FPGA image) for a FireSim simulation.
Build Recipes
---------------
We already provide for you a build recipe (i.e. hardware configuration) called ``vitis_firesim_rocket_singlecore_no_nic`` that was used to pre-build a U250 FPGA xclbin.
We already provide for you a build recipe (i.e. hardware configuration) called |hwdb_entry_name| that was used to pre-build a |fpga_name| FPGA |bit_file_type|.
You can find this in the ``config_build_recipes.yaml`` file.
This configuration is a simple singlecore Rocket configuration with a single DRAM channel and no debugging features.
This configuration is a simple singlecore Rocket configuration with a single DRAM channel and no debugging features (as indicated by some of the variables like ``TARGET_CONFIG``).
Additionally, this configuration has a field called ``bit_builder_recipe`` pointing to |bit_builder_path|.
This file, found in the :gh-file-ref:`deploy` directory, tells the FireSim build system what combination of commands to run to build the |bit_file_type|.
Next, let's build the bitstream corresponding to the build recipe and specify the Build Farm to run on.
In the ``deploy/config_build.yaml`` file, you will notice at least two mappings: ``build_farm`` and ``builds_to_run``.
@ -21,25 +22,25 @@ Next, let's look at the ``build_farm_hosts`` list that has a single element ``lo
This list indicates the IP addresses of machines already booted and ready to use for builds.
In our case, we are building locally so we provide our own IP address, ``localhost``.
Finally, let's look at and modify the ``default_build_dir`` mapping to a directory of your choice that will store
temporary Vitis build files during builds.
temporary |builder_name| build files during builds.
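As a rough sketch of just these build farm settings (shown out of their enclosing recipe structure; the build directory is a placeholder):

.. code-block:: yaml

   # hypothetical sketch of the build farm settings discussed above
   build_farm_hosts:
       - localhost
   default_build_dir: /scratch/firesim-builds   # placeholder path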
Continuing to the next section in the ``deploy/config_build.yaml`` file, you will notice that the ``builds_to_run``
section currently contains several lines, which
indicates to the build system that you want to run all of these builds on the machines provided, with the parameters listed in the relevant section of the
``deploy/config_build_recipes.yaml`` file.
To start out, let's build our simple design, ``vitis_firesim_rocket_singlecore_no_nic``, that we previously added.
To do so, comment out all of the other build entries in ``deploy/config_build.yaml``, and uncomment the ``- vitis_firesim_rocket_singlecore_no_nic`` line.
So, you should
end up with something like this (a line beginning with a ``#`` is a comment):
To start out, let's build our simple design, |hwdb_entry_name|, that we previously added.
To do so, comment out all of the other build entries in ``deploy/config_build.yaml``, and uncomment the "- |hwdb_entry_name_non_code|" line.
So, you should end up with something like this (a line beginning with a ``#`` is a comment):
::
.. code-block:: text
:substitutions:
builds_to_run:
# this section references builds defined in config_build_recipes.yaml
# if you add a build here, it will be built when you run buildbitstream
# Many other commented lines...
- vitis_firesim_rocket_singlecore_no_nic
- |hwdb_entry_name_non_code|
Running a Build
@ -47,21 +48,20 @@ Running a Build
Now, we can run a build like so:
::
.. code-block:: bash
firesim buildbitstream
This will run through the entire build process, taking the Chisel RTL
and producing an U250 FPGA xclbin that runs on the FPGA. This whole process will
and producing a |fpga_name| FPGA |bit_file_type| that runs on the FPGA. This whole process will
usually take a few hours. When the build
completes, you will see a directory in
``deploy/results-build/``, named after your build parameter
settings, that contains all of the outputs of the Vitis build process.
settings, that contains all of the outputs of the |builder_name| build process.
Additionally, the manager will print out a path to a log file
that describes everything that happened, in-detail, during this run (this is a
good file to send us if you encounter problems).
Now that you know how to generate your own FPGA image, you can modify the target-design
to add your own features, then build a FireSim-compatible FPGA image automatically!
To learn more advanced FireSim features, you can choose a link under the "Advanced
Docs" section to the left.
To learn more advanced FireSim features, you can choose a link under the "Advanced Docs" section to the left.

View File

@ -0,0 +1,8 @@
.. |fpga_name| replace:: Xilinx Vitis-enabled U250
.. |hwdb_entry_name| replace:: ``vitis_firesim_rocket_singlecore_no_nic``
.. |hwdb_entry_name_non_code| replace:: vitis_firesim_rocket_singlecore_no_nic
.. |bit_file_type| replace:: ``xclbin``
.. |builder_name| replace:: Xilinx Vitis
.. |bit_builder_path| replace:: ``bit-builder-recipes/vitis.yaml``
.. include:: Xilinx-Bitstream-Template.rst

View File

@ -0,0 +1,126 @@
FPGA and Tool Setup
===================
Requirements and Installations
------------------------------
We require a base machine that is able to support the |fpga_name| and run Xilinx Vivado.
Please refer to the minimum system requirements given in the following link: https://docs.xilinx.com/r/en-US/ug1301-getting-started-guide-alveo-accelerator-cards/Minimum-System-Requirements.
Next, install the |fpga_name| FPGA as indicated: https://docs.xilinx.com/r/en-US/ug1301-getting-started-guide-alveo-accelerator-cards/Card-Installation-Procedures
We require the following programs/packages installed from the Xilinx website in addition to a physical |fpga_name| installation:
* Vivado 2021.1 or 2022.2
* |fpga_name| board package (corresponding with Vivado 2021.1 or 2022.2)
* Ensure you complete the "Installing the Deployment Software" and "Card Bring-Up and Validation" sections in the following link: https://docs.xilinx.com/r/en-US/ug1301-getting-started-guide-alveo-accelerator-cards/Installing-the-Deployment-Software
* Ensure that the board package is installed to a Vivado accessible location: https://support.xilinx.com/s/article/The-board-file-location-with-the-latest-Vivado-tools?language=en_US
Importantly, using this FPGA with FireSim requires that you have **passwordless** ``sudo`` access to the machine with the FPGA.
This is needed to flash the FPGA bitstream onto the FPGA.
FPGA Setup
----------
.. warning:: Currently, FireSim only supports a single |fpga_name| installed on a machine. Future support will address this.
After installing the |fpga_name| using the Xilinx instructions and installing the specific version of Vivado, we need to flash the |fpga_name| with a dummy XDMA-enabled design to finish setup.
First, let's install the XDMA kernel module in a location known to FireSim:
.. code-block:: bash
cd /tmp # or any location you prefer
git clone https://github.com/Xilinx/dma_ip_drivers
cd dma_ip_drivers
git checkout 2022.1.5
cd XDMA/linux-kernel/xdma
sudo make clean && sudo make && sudo make install
Next, let's add the kernel module:
.. code-block:: bash
# the module should be installed in the following location
# by the `make install` previously run
sudo insmod /lib/modules/$(uname -r)/extra/xdma.ko poll_mode=1
Next, let's determine the BDF (unique ID) of each FPGA you want to use with FireSim.
.. code-block:: bash
:substitutions:
# determine BDF of FPGA that you want to use / re-flash
lspci | grep -i xilinx
# example output of a system with two |fpga_name| FPGAs:
# 04:00.0 Processing accelerators: Xilinx Corporation Device 5004
# 04:00.1 Processing accelerators: Xilinx Corporation Device 5005
# 83:00.0 Processing accelerators: Xilinx Corporation Device 5004
# 83:00.1 Processing accelerators: Xilinx Corporation Device 5005
# BDF would be 04:00.0 if you want to flash the '04' FPGA
# the extended BDF would be 0000: + the BDF from before (i.e. 0000:04:00.0)
# note: the BDF to use is the one ending in .0
Keep note of the **extended BDF** of the FPGA you would like to setup.
Next, let's flash each |fpga_name| that you would like to use with the dummy bitstream.
To obtain the sample bitstream, let's find the URL to download the file to the machine with the FPGA.
Below find the HWDB entry called |hwdb_entry_name|.
.. literalinclude:: /../deploy/sample-backup-configs/sample_config_hwdb.yaml
:language: yaml
:start-after: DOCREF START: Xilinx Alveo HWDB Entries
:end-before: DOCREF END: Xilinx Alveo HWDB Entries
Look for the ``bitstream_tar: <URL>`` line within |hwdb_entry_name| and keep note of the URL.
Next, we will do the following for each FPGA that will be used with FireSim.
#. Create a temporary flashing area that we will delete after flashing the FPGA.
#. Download the bitstream file.
#. Download a temporary FireSim repository to have access to the flashing scripts.
#. Flash the FPGA (with the extended BDF obtained) and the bitstream file.
#. Delete the temporary flashing area.
.. code-block:: bash
:substitutions:
mkdir /tmp/tempdownload
cd /tmp/tempdownload
wget <BIT_TAR URL SAVED FROM PREVIOUSLY>
tar xvf firesim.tar.gz
cd |platform_name|
git clone --branch |overall_version| https://github.com/firesim/firesim
EXTENDED_DEVICE_BDF1=<YOUR BDF HERE> ./firesim/platforms/|platform_name|/scripts/program_fpga.sh ./firesim.bit |board_name|
rm -rf /tmp/tempdownload
Next, **warm reboot** the computer.
This will reconfigure your PCI-E settings such that FireSim can detect the XDMA-enabled bitstream.
After the machine is rebooted, you may need to re-insert the XDMA kernel module.
Then verify that you can see the XDMA module with:
.. code-block:: bash
cat /proc/devices | grep xdma
Also, verify that the FPGA programming worked by seeing if the ``lspci`` output has changed.
For example, we should see ``Serial controller`` for BDFs that were flashed.
.. code-block:: bash
lspci | grep -i xilinx
# example output if only the 0000:04:00.0 FPGA was programmed
04:00.0 Serial controller: Xilinx Corporation Device 903f (rev ff)
83:00.0 Processing accelerators: Xilinx Corporation Device 5004
83:00.1 Processing accelerators: Xilinx Corporation Device 5005
.. Warning:: Anytime the host computer is rebooted you may need to re-run parts of the setup process (i.e. re-insert the XDMA kernel module).
Before continuing to FireSim simulations after a host computer reboot, ensure that the previously mentioned ``cat /proc/devices | grep xdma`` command is successful.
Also ensure that you see ``Serial controller`` for the BDF of the FPGA you would like to use (otherwise, re-run this setup).
Now you're ready to continue with other FireSim setup!

View File

@ -0,0 +1,6 @@
.. |fpga_name| replace:: Xilinx Alveo U250
.. |hwdb_entry_name| replace:: ``alveo_u250_firesim_rocket_singlecore_no_nic``
.. |platform_name| replace:: xilinx_alveo_u250
.. |board_name| replace:: au250
.. include:: Xilinx-Alveo-Template.rst

View File

@ -0,0 +1,6 @@
.. |fpga_name| replace:: Xilinx Alveo U280
.. |hwdb_entry_name| replace:: ``alveo_u280_firesim_rocket_singlecore_no_nic``
.. |platform_name| replace:: xilinx_alveo_u280
.. |board_name| replace:: au280
.. include:: Xilinx-Alveo-Template.rst

View File

@ -0,0 +1,45 @@
.. |fpga_name| replace:: Xilinx Vitis-enabled U250
.. |vitis_version| replace:: 2022.1
.. |vitis_link| replace:: https://www.xilinx.com/products/design-tools/vitis/vitis-whats-new.html#20221
FPGA and Tool Setup
===================
Requirements and Installations
------------------------------
We require a base machine that is able to support a |fpga_name| and run Xilinx Vitis.
For the purposes of this tutorial, we assume you are running with a |fpga_name|.
Please refer to the minimum system requirements given in the following link: https://docs.xilinx.com/r/en-US/ug1301-getting-started-guide-alveo-accelerator-cards/Minimum-System-Requirements.
``sudo`` access is not needed for the machine except for when the |fpga_name| and the corresponding software are installed.
Next, install the |fpga_name| as indicated: https://docs.xilinx.com/r/en-US/ug1301-getting-started-guide-alveo-accelerator-cards/Card-Installation-Procedures
We require the following programs/packages installed from the Xilinx website in addition to a physical |fpga_name| installation:
* Xilinx Vitis |vitis_version|
* Installation link: |vitis_link|
* Xilinx XRT and |fpga_name| board package (corresponding with Vitis |vitis_version|)
* Ensure you complete the "Installing the Deployment Software" and "Card Bring-Up and Validation" sections in the following link: https://docs.xilinx.com/r/en-US/ug1301-getting-started-guide-alveo-accelerator-cards/Installing-the-Deployment-Software
Setup Validation
----------------
After installing the |fpga_name| using the Xilinx instructions and installing the specific versions of Vitis/XRT, let's verify that the |fpga_name| can be used for emulations.
Ensure that you can run the following XRT commands without errors:
.. code-block:: bash
:substitutions:
xbutil examine # obtain the BDF associated with your installed |fpga_name|
xbutil validate --device <CARD_BDF_INSTALLED> --verbose
The ``xbutil validate`` command runs simple tests to ensure that the FPGA can be properly flashed with a bitstream by using XRT.
.. Warning:: Anytime the host computer is rebooted you may need to re-run parts of the setup process (i.e. re-flash the shell).
Before continuing to FireSim simulations after a host computer reboot, ensure that the previously mentioned ``xbutil`` command is successful.
Now you're ready to continue with other FireSim setup!

View File

@ -3,48 +3,12 @@ Setting up your On-Premises Machine
This tutorial sets up a single-node cluster (i.e. running FPGA bitstream builds and simulations on a single machine) for FireSim use.
This single machine will serve as the "Manager Machine", a "head" node on which all work will be completed.
``sudo`` access is not needed for the machine being setup except for when the U250 FPGA and corresponding software is installed.
Xilinx Alveo FPGA Software Installation and Validation
------------------------------------------------------
On-Premises FPGA support currently only supports Xilinx Alveo U250 FPGAs (further referred to as a U250 FPGA).
Requirements
~~~~~~~~~~~~
We require a base machine that is able to support the U250 FPGA and running Xilinx Vivado/Vitis.
Please refer to the minimum system requirements given in the following link: https://docs.xilinx.com/r/en-US/ug1301-getting-started-guide-alveo-accelerator-cards/Minimum-System-Requirements.
Next, install the U250 FPGA as indicated: https://docs.xilinx.com/r/en-US/ug1301-getting-started-guide-alveo-accelerator-cards/Card-Installation-Procedures
We require the following programs/packages installed from the Xilinx website in addition to a physical U250 installation:
* Xilinx Vitis 2022.1
* Installation link: https://www.xilinx.com/products/design-tools/vitis/vitis-whats-new.html#20221
* Xilinx XRT and U250 board package (corresponding with Vitis 2022.1)
* Ensure you complete the "Installing the Deployment Software" and "Card Bring-Up and Validation" sections in the following link: https://docs.xilinx.com/r/en-US/ug1301-getting-started-guide-alveo-accelerator-cards/Installing-the-Deployment-Software
Setup Validation
~~~~~~~~~~~~~~~~
After installing the U250 FPGA using the Xilinx instructions and installing the specific versions of Vitis/XRT, let's verify that the U250 FPGA can be used for emulations.
Ensure that you can run the following XRT commands without errors:
.. parsed-literal::
xbutil examine # obtain the BDF associated with your installed U250 FPGA
xbutil validate --device <CARD_BDF_INSTALLED> --verbose
The ``xbutil validate`` command runs simple tests to ensure that the FPGA can be properly flashed with a bitstream by using XRT.
.. Warning:: Anytime the host computer is rebooted you may need to re-run parts of the setup process (i.e. re-flash the shell).
Before continuing to FireSim simulations after a host computer reboot, ensure that the previously mentioned ``xbutil`` command is successful.
Finally, ensure that the XRT/Vitis tools are sourced in your shell setup (i.e. ``.bashrc`` and or ``.bash_profile``) so that any shell can use the corresponding programs.
Finally, ensure that the |tool_type| tools are sourced in your shell setup (i.e. ``.bashrc`` and/or ``.bash_profile``) so that any shell can use the corresponding programs.
The environment variables should be visible to any non-interactive shells that are spawned.
You can check this by ensuring that the output of the following command shows that the XRT/Vitis tools are present in the environment variables (i.e. ``XILINX_XRT``):
You can check this by ensuring that the output of the following command shows that the |tool_type| tools are present in the environment variables (i.e. "|example_var|"):
.. parsed-literal::
.. code-block:: bash
ssh localhost printenv
@ -55,7 +19,7 @@ Additionally, you should be able to run ``ssh localhost`` without needing a pass
The FireSim manager program runs all commands by ``ssh``-ing into a BuildFarm/RunFarm machine given an IP address then running the command.
To do so non-interactively, it needs passwordless access to the machines (in our case, ``localhost``) to build/run on.
Finally, if you are running this tutorial without ``sudo`` access you should also install the ``guestmount`` program and ensure it runs properly.
Finally, you should also install the ``guestmount`` program and ensure it runs properly.
This is needed by a variety of FireSim steps that mount disk images in order to copy in/out results of simulations out of the images.
Most likely you will need to follow the instructions `here <https://askubuntu.com/questions/1046828/how-to-run-libguestfs-tools-tools-such-as-virt-make-fs-without-sudo>`_ to ensure ``guestmount`` doesn't error.
@ -68,18 +32,19 @@ Setting up the FireSim Repo
We're finally ready to fetch FireSim's sources. Run:
.. parsed-literal::
.. code-block:: bash
:substitutions:
git clone https://github.com/firesim/firesim
cd firesim
# checkout latest official firesim release
# note: this may not be the latest release if the documentation version != "stable"
git checkout |version|
git checkout |overall_version|
Next, we will bootstrap the machine by installing Miniforge Conda, our software package manager, and set up a default software environment using Conda.
First run the following to see the options to the bootstrap script:
.. parsed-literal::
.. code-block:: bash
./scripts/machine-launch-script.sh --help
@ -95,19 +60,19 @@ Below we will give a few examples on how to run the command (choose the command
.. tab:: With ``sudo`` access (newly install Conda)
.. parsed-literal::
.. code-block:: bash
sudo ./scripts/machine-launch-script.sh
.. tab:: Without ``sudo`` access (install Conda to user-specified location)
.. parsed-literal::
.. code-block:: bash
./scripts/machine-launch-script.sh --prefix REPLACE_USER_SPECIFIED_LOCATION
.. tab:: Without ``sudo`` access (use existing Conda)
.. parsed-literal::
.. code-block:: bash
./scripts/machine-launch-script.sh --prefix REPLACE_PATH_TO_CONDA
@ -118,19 +83,19 @@ After re-logging back into the machine, you should be in the ``firesim`` Conda e
environment in the ``machine-launch-script.sh``).
Verify this by running:
.. parsed-literal::
.. code-block:: bash
conda env list
If you are not in the ``firesim`` environment and the environment exists, you can run the following to "activate" or enter the environment:
.. parsed-literal::
.. code-block:: bash
conda activate firesim # or whatever the environment is called
Next run:
.. parsed-literal::
.. code-block:: bash
./build-setup.sh
@ -141,7 +106,7 @@ other dependencies.
Next, run:
.. parsed-literal::
.. code-block:: bash
source sourceme-f1-manager.sh --skip-ssh-setup
@ -156,23 +121,23 @@ Final Environment Check
Finally, let's verify that the environment variables are correctly set up for the tutorial. Run:
.. parsed-literal::
.. code-block:: bash
echo $PATH
You should see that both the Xilinx Vitis and XRT tools are located in the ``PATH`` are are **after**
You should see that both the |tool_type| tools are located in the ``PATH`` and are **after**
the conda environment path. Next run:
.. parsed-literal::
.. code-block:: bash
echo $LD_LIBRARY_PATH
You should see that the XRT tools are located on your ``LD_LIBRARY_PATH`` and that there
You should see that the |tool_type| tools are located on your ``LD_LIBRARY_PATH`` and that there
is no trailing ``:`` (otherwise compilation will error later).
Finally verify that Xilinx Vitis and XRT tools are found when running locally through ``ssh``. Run:
Finally, verify that the |tool_type| tools are found when running locally through ``ssh``. Run:
.. parsed-literal::
.. code-block:: bash
ssh localhost printenv
@ -185,9 +150,10 @@ Completing Setup Using the Manager
The FireSim manager contains a command that will finish the rest of the FireSim setup process.
To run it, do the following:
.. parsed-literal::
.. code-block:: bash
:substitutions:
firesim managerinit --platform vitis
firesim managerinit --platform |platform_name|
It will create initial configuration files, which we will edit in later
sections.

View File

@ -0,0 +1,5 @@
.. |platform_name| replace:: xilinx_alveo_u250
.. |tool_type| replace:: Xilinx Vivado
.. |example_var| replace:: XILINX_VIVADO
.. include:: ./Setting-Up-Template.rst

View File

@ -0,0 +1,5 @@
.. |platform_name| replace:: xilinx_alveo_u280
.. |tool_type| replace:: Xilinx Vivado
.. |example_var| replace:: XILINX_VIVADO
.. include:: ./Setting-Up-Template.rst

View File

@ -0,0 +1,5 @@
.. |platform_name| replace:: vitis
.. |tool_type| replace:: Xilinx XRT/Vitis
.. |example_var| replace:: XILINX_XRT
.. include:: ./Setting-Up-Template.rst

View File

@ -0,0 +1,36 @@
|fpga_name| Getting Started
=======================================
The tutorials that follow this page will guide you through the complete flow for
getting an example FireSim simulation up and running using an on-premises |fpga_name| FPGA.
This tutorial sets up a single-node on-premises cluster (i.e. running FPGA bitstream builds and simulations on a single machine) for FireSim use.
This single machine will serve as the "Manager Machine", a "head" node on which all work will be completed.
At the end of this
tutorial, you'll have a simulation that simulates a single quad-core Rocket
Chip-based node with a 4 MB last level cache, 16 GB DDR3, and no NIC.
The final tutorial
will show you how to build your own FPGA images with customized hardware.
After you complete these tutorials, you can look at the "Advanced Docs"
in the sidebar to the left.
Here's a high-level outline of what we'll be doing in our tutorials:
#. **FPGA Setup**: Installing the FPGA board and relevant software.
#. **On-Premises Machine Setup**
#. Setting up a "Manager Machine" from which you will coordinate building
and deploying simulations locally.
#. **Single-node simulation tutorial**: This tutorial guides you through the
process of running one simulation locally consisting of a single
|fpga_name|, using our pre-built public FireSim |bit_type| bitstream.
#. **Building your own hardware designs tutorial (Chisel to FPGA Image)**:
This tutorial guides you through the full process of taking Rocket Chip RTL
and any custom RTL plugged into Rocket Chip and producing a FireSim bitstream
to plug into your simulations. This automatically runs Chisel elaboration,
FAME-1 Transformation, and the |build_type| FPGA flow.
Generally speaking, you only need to follow Step 4 if you're modifying Chisel
RTL or changing non-runtime configurable hardware parameters.

View File

@ -3,10 +3,9 @@ Running a Single Node Simulation
Now that we've completed the setup of our manager machine, it's time to run
a simulation! In this section, we will simulate **1 target node**, for which we
will need a single U250 FPGA.
will need a single |fpga_type|.
**Make sure you have sourced
``sourceme-f1-manager.sh --skip-ssh-setup`` before running any of these commands.**
**Make sure you have sourced** ``sourceme-f1-manager.sh --skip-ssh-setup`` **before running any of these commands.**
Building target software
------------------------
@ -16,7 +15,7 @@ simulated node. To do so, we'll need to build our FireSim-compatible RISC-V
Linux distro. For this tutorial, we will use a simple buildroot-based
distribution. You can do this like so:
::
.. code-block:: bash
cd firesim/sw/firesim-software
./init-submodules.sh
@ -49,23 +48,23 @@ you have not modified it):
We'll need to modify a couple of these lines.
First, let's tell the manager to use the single U250 FPGA.
First, let's tell the manager to use the single |fpga_type| FPGA.
You'll notice the ``run_farm`` mapping, which describes and specifies the machines to run simulations on.
First, notice that the ``base_recipe`` maps to ``run-farm-recipes/externally_provisioned.yaml``.
This indicates to the FireSim manager that the machines allocated to run simulations will be provided by the user through IP addresses
instead of automatically launched and allocated (e.g. launching instances on-demand in AWS).
Let's modify the ``default_platform`` to be ``VitisInstanceDeployManager`` so that we can launch simulations using Vitis/XRT.
Let's modify the ``default_platform`` to be |deploy_manager_code| so that we can launch simulations using |runner|.
Next, modify the ``default_simulation_dir`` to a directory in which you want to store temporary simulation collateral.
When running simulations, this directory is used to store any temporary files that the simulator creates (e.g. a uartlog emitted by a Linux simulation).
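Make sure the chosen directory actually exists and is writable before launching simulations, for example (the path below is purely a placeholder; pick any location with enough space):

.. code-block:: bash

   # hypothetical scratch area for simulation collateral
   mkdir -p /scratch/$USER/firesim-sim-dir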
Next, let's modify the ``run_farm_hosts_to_use`` mapping.
This maps IP addresses (i.e. ``localhost``) to a description/specification of the simulation machine.
In this case, we have only one U250 FPGA so we will change the description of ``localhost`` to ``one_fpga_spec``.
In this case, we have only one |fpga_type| FPGA so we will change the description of ``localhost`` to ``one_fpga_spec``.
Now, let's verify that the ``target_config`` mapping will model the correct target design.
By default, it is set to model a single node with no network.
It should look like the following:
::
.. code-block:: yaml
target_config:
topology: no_net_config
@ -85,9 +84,9 @@ Note ``topology`` is set to
``no_net_num_nodes`` is set to ``1``, indicating that we only want to simulate
one node. Lastly, the ``default_hw_config`` is
``firesim_rocket_quadcore_no_nic_l2_llc4mb_ddr3``.
Let's modify the ``default_hw_config`` (the target design) to ``vitis_firesim_rocket_singlecore_no_nic``.
Let's modify the ``default_hw_config`` (the target design) to "|hwdb_entry|".
This new hardware configuration does not
have a NIC and is pre-built for the U250 FPGA.
have a NIC and is pre-built for the |fpga_type| FPGA.
This hardware configuration models a Single-core Rocket Chip SoC and **no** network interface card.
We will leave the ``workload`` mapping unchanged here, since we do
@ -97,12 +96,13 @@ feature is an advanced feature that you can learn more about in the
As a final sanity check, in the mappings we changed, the ``config_runtime.yaml`` file should now look like this (with ``PATH_TO_SIMULATION_AREA`` replaced with your simulation collateral temporary directory):
::
.. code-block:: text
:substitutions:
run_farm:
base_recipe: run-farm-recipes/externally_provisioned.yaml
recipe_arg_overrides:
default_platform: VitisInstanceDeployManager
default_platform: |deploy_manager|
default_simulation_dir: <PATH_TO_SIMULATION_AREA>
run_farm_hosts_to_use:
- localhost: one_fpga_spec
@ -114,7 +114,7 @@ As a final sanity check, in the mappings we changed, the ``config_runtime.yaml``
switching_latency: 10
net_bandwidth: 200
profile_interval: -1
default_hw_config: vitis_firesim_rocket_singlecore_no_nic
default_hw_config: |hwdb_entry|
plusarg_passthrough: ""
workload:
@ -133,7 +133,7 @@ Starting the Run Farm
First, we will tell the manager to launch our Run Farm with a single machine called ``localhost``. Run:
::
.. code-block:: bash
firesim launchrunfarm
@ -142,7 +142,7 @@ this command should not launch any machine and should be quick.
You should expect output like the following:
::
.. code-block:: bash
$ firesim launchrunfarm
FireSim Manager. Docs: https://docs.fires.im
@ -160,19 +160,19 @@ components necessary to run your simulation. The manager will also handle
flashing FPGAs. To tell the manager to setup our simulation infrastructure,
let's run:
::
.. code-block:: bash
firesim infrasetup
For a complete run, you should expect output like the following:
::
.. code-block:: bash
   $ firesim infrasetup
   FireSim Manager. Docs: https://docs.fires.im
Running: infrasetup
Building FPGA software driver for FireSim-FireSimRocketConfig-BaseVitisConfig
Building FPGA software driver for |quintuplet|
...
[localhost] Checking if host instance is up...
[localhost] Copying FPGA simulation infrastructure for slot: 0.
@ -195,7 +195,7 @@ Running a simulation!
Finally, let's run our simulation! To do so, run:
::
.. code-block:: bash
firesim runworkload
@ -203,7 +203,7 @@ Finally, let's run our simulation! To do so, run:
This command boots up a simulation and prints out the live status of the simulated
nodes every 10s. When you do this, you will initially see output like:
::
.. code-block:: bash
$ firesim runworkload
FireSim Manager. Docs: https://docs.fires.im
@ -216,7 +216,7 @@ nodes every 10s. When you do this, you will initially see output like:
If you don't look quickly, you might miss it, since it will get replaced with a
live status page:
::
.. code-block:: text
FireSim Simulation Status @ 2018-05-19 00:38:56.062737
--------------------------------------------------------------------------------
@ -251,13 +251,13 @@ Next, let's ``ssh`` into the simulation machine.
In this case, since we are running the simulation on the same machine (i.e. ``localhost``)
we can run the following:
::
.. code-block:: bash
ssh localhost
Next, we can directly attach to the console of the simulated system using ``screen``, run:
::
.. code-block:: bash
screen -r fsim0
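If ``screen`` is new to you, two quick tips (this is generic ``screen`` behavior, not FireSim-specific): you can list the active simulation consoles, and you can detach from one without stopping the simulation:

.. code-block:: bash

   screen -ls   # list active sessions (e.g. fsim0)
   # inside a session, press Ctrl-a then d to detach without killing the simulation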
@ -265,7 +265,7 @@ Voila! You should now see Linux booting on the simulated system and then be prom
with a Linux login prompt, like so:
::
.. code-block:: bash
[truncated Linux boot output]
[ 0.020000] VFS: Mounted root (ext2 filesystem) on device 254:0.
@ -294,7 +294,7 @@ Now, you can login to the system! The username is ``root``.
At this point, you should be presented with a regular console,
where you can type commands into the simulation and run programs. For example:
::
.. code-block:: bash
Welcome to Buildroot
buildroot login: root
@ -309,7 +309,7 @@ let's power off the simulated system and see what the manager does. To do so,
in the console of the simulated system, run ``poweroff -f``:
::
.. code-block:: bash
Welcome to Buildroot
buildroot login: root
@ -320,7 +320,7 @@ in the console of the simulated system, run ``poweroff -f``:
You should see output like the following from the simulation console:
::
.. code-block:: bash
# poweroff -f
[ 12.456000] reboot: Power down
@ -338,7 +338,7 @@ You should see output like the following from the simulation console:
You'll also notice that the manager polling loop exited! You'll see output like this
from the manager:
::
.. code-block:: text
FireSim Simulation Status @ 2018-05-19 00:46:50.075885
--------------------------------------------------------------------------------
@ -372,7 +372,7 @@ from the manager:
If you take a look at the workload output directory given in the manager output (in this case, ``.../firesim/deploy/results-workload/2018-05-19--00-38-52-linux-uniform/``), you'll see the following:
::
.. code-block:: bash
$ ls -la firesim/deploy/results-workload/2018-05-19--00-38-52-linux-uniform/*/*
-rw-rw-r-- 1 centos centos 797 May 19 00:46 linux-uniform0/memory_stats.csv
@ -388,13 +388,13 @@ useful for running benchmarks automatically. The
For now, let's wrap up our tutorial by terminating the Run Farm that we launched.
To do so, run:
::
.. code-block:: bash
firesim terminaterunfarm
Which should present you with the following:
::
.. code-block:: bash
$ firesim terminaterunfarm
FireSim Manager. Docs: https://docs.fires.im
@ -410,11 +410,3 @@ Congratulations on running your first FireSim simulation! At this point, you can
check out some of the advanced features of FireSim in the sidebar to the left
(for example, we expect that many people will be interested in the ability to
automatically run the SPEC17 benchmarks: :ref:`spec-2017`).
.. warning:: Currently, FireSim simulations with bridges that use the Vitis PCI-E DMA interface are not supported (i.e. TracerV, NIC, Dromajo, Printfs).
This will be added in a future FireSim release.
.. warning:: In some cases, simulation may fail because you might need to update the U250 DRAM offset that is currently hard coded in both the FireSim Vitis/XRT driver code and platform shim.
To verify this, run ``xclbinutil --info --input <YOURXCLBIN>``, obtain the ``bank0`` ``MEM_DDR4`` offset. If it differs from the hardcoded ``0x40000000`` given in
driver code (``u250_dram_expected_offset`` variable in ``sim/midas/src/main/cc/simif_vitis.cc``) and platform shim (``araddr``/``awaddr`` offset in
``sim/midas/src/main/scala/midas/platform/VitisShim.scala``) replace both areas with the new offset given by ``xclbinutil`` and regenerate the bitstream.

View File

@ -0,0 +1,8 @@
.. |fpga_type| replace:: Xilinx Alveo U250
.. |deploy_manager| replace:: XilinxAlveoU250InstanceDeployManager
.. |deploy_manager_code| replace:: ``XilinxAlveoU250InstanceDeployManager``
.. |runner| replace:: Xilinx Vivado
.. |hwdb_entry| replace:: alveo_u250_firesim_rocket_singlecore_no_nic
.. |quintuplet| replace:: xilinx_alveo_u250-firesim-FireSim-FireSimRocketConfig-BaseXilinxAlveoConfig
.. include:: Running-Single-Node-Simulation-Template.rst

View File

@ -0,0 +1,8 @@
.. |fpga_type| replace:: Xilinx Alveo U280
.. |deploy_manager| replace:: XilinxAlveoU280InstanceDeployManager
.. |deploy_manager_code| replace:: ``XilinxAlveoU280InstanceDeployManager``
.. |runner| replace:: Xilinx Vivado
.. |hwdb_entry| replace:: alveo_u280_firesim_rocket_singlecore_no_nic
.. |quintuplet| replace:: xilinx_alveo_u280-firesim-FireSim-FireSimRocketConfig-BaseXilinxAlveoConfig
.. include:: Running-Single-Node-Simulation-Template.rst

View File

@ -0,0 +1,17 @@
.. |fpga_type| replace:: Xilinx Vitis-enabled U250
.. |deploy_manager| replace:: VitisInstanceDeployManager
.. |deploy_manager_code| replace:: ``VitisInstanceDeployManager``
.. |runner| replace:: Xilinx XRT/Vitis
.. |hwdb_entry| replace:: vitis_firesim_rocket_singlecore_no_nic
.. |quintuplet| replace:: vitis-firesim-FireSim-FireSimRocketConfig-BaseVitisConfig
.. include:: Running-Single-Node-Simulation-Template.rst
.. warning:: Currently, FireSim simulations with bridges that use the PCI-E DMA interface are not supported (i.e. TracerV, NIC, Dromajo, Printfs) with |fpga_type| FPGAs.
This will be added in a future FireSim release.
.. warning:: In some cases, simulation may fail because you might need to update the |fpga_type| DRAM offset that is currently hard-coded in both the FireSim |runner| driver code and the platform shim.
   To verify this, run ``xclbinutil --info --input <YOUR_XCL_BIN>`` and obtain the ``bank0`` ``MEM_DDR4`` offset.
   If it differs from the hardcoded ``0x40000000`` given in the driver code (``u250_dram_expected_offset`` variable in :gh-file-ref:`sim/midas/src/main/cc/simif_vitis.cc`) and
   the platform shim (``araddr``/``awaddr`` offset in :gh-file-ref:`sim/midas/src/main/scala/midas/platform/VitisShim.scala`), replace both locations with the new offset given by
   ``xclbinutil`` and regenerate the bitstream.
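For reference, a filtered query such as the following can pull the memory topology information out of that report (a sketch; the exact layout of the ``xclbinutil`` output may vary between XRT releases):

.. code-block:: bash

   # look for the bank0 MEM_DDR4 entry and its offset
   xclbinutil --info --input <YOUR_XCL_BIN> | grep -A 4 "MEM_DDR4"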

View File

@ -0,0 +1,13 @@
.. |fpga_name| replace:: Xilinx Alveo U250
.. |bit_type| replace:: ``bitstream_tar``
.. |build_type| replace:: Xilinx Vivado
.. include:: Intro-Template.rst
.. toctree::
:maxdepth: 3
FPGA-Setup/Xilinx-Alveo-U250
Initial-Setup/Setting-Up-Xilinx-Alveo-U250
Running-Simulations/Running-Single-Node-Simulation-Xilinx-Alveo-U250
Building-a-FireSim-Bitstream/Xilinx-Alveo-U250

View File

@ -0,0 +1,13 @@
.. |fpga_name| replace:: Xilinx Alveo U280
.. |bit_type| replace:: ``bitstream_tar``
.. |build_type| replace:: Xilinx Vivado
.. include:: Intro-Template.rst
.. toctree::
:maxdepth: 3
FPGA-Setup/Xilinx-Alveo-U280
Initial-Setup/Setting-Up-Xilinx-Alveo-U280
Running-Simulations/Running-Single-Node-Simulation-Xilinx-Alveo-U280
Building-a-FireSim-Bitstream/Xilinx-Alveo-U280

View File

@ -0,0 +1,13 @@
.. |fpga_name| replace:: Xilinx Vitis-enabled U250
.. |bit_type| replace:: ``xclbin``
.. |build_type| replace:: Xilinx Vitis
.. include:: Intro-Template.rst
.. toctree::
:maxdepth: 3
FPGA-Setup/Xilinx-Vitis-FPGAs
Initial-Setup/Setting-Up-Xilinx-Vitis
Running-Simulations/Running-Single-Node-Simulation-Xilinx-Vitis
Building-a-FireSim-Bitstream/Xilinx-Vitis

View File

@ -116,7 +116,7 @@ a concrete bridge driver must implement:
:end-before: DOC include end: Bridge Driver Interface
The declaration of the Uart bridge
driver lives at ``sim/firesim-lib/src/main/cc/bridges/uart.h``. It is inlined
driver lives at :gh-file-ref:`sim/firesim-lib/src/main/cc/bridges/uart.h`. It is inlined
below:
.. include:: ../../sim/firesim-lib/src/main/cc/bridges/uart.h
@ -137,7 +137,7 @@ target RTL: SBT will make sure those classes are available on the runtime
classpath. If you're hosting your bridge driver sources outside of the existing
directories, you'll need to modify your target-project make fragments to include
them. The default Chipyard/Rocket Chip-based one lives here:
``sim/src/main/makefrag/firesim/``
:gh-file-ref:`sim/src/main/makefrag/firesim`.
Here the main order of business is to add header and source files to
``DRIVER_H`` and ``DRIVER_CC`` respectively in `driver.mk`, by modifying the

View File

@ -41,13 +41,13 @@ provided for the cores' register files, which are the most FPGA-hostile memories
Next, with these annotations in place, enabling the optimization requires mixing in the ``MCRams``
class to the platform configuration, as shown in the following example build recipe:
::
.. code-block:: yaml
firesim-boom-mem-opt:
DESIGN: FireSim
TARGET_CONFIG: WithNIC_DDR3FRFCFSLLC4MB_FireSimLargeBoomConfig
PLATFORM_CONFIG: MCRams_BaseF1Config
deploy_quadruplet: null
deploy_quintuplet: null
Multi-Threading of Repeated Instances
@ -65,13 +65,13 @@ pre-annotated for both Rocket- and BOOM-based systems. To enable this tile multi
necessary to mix in the ``MTModels`` class to the platform configuration, as shown in the following
example build recipe:
::
.. code-block:: yaml
firesim-threaded-cores-opt:
DESIGN: FireSim
TARGET_CONFIG: WithNIC_DDR3FRFCFSLLC4MB_FireSimQuadRocketConfig
PLATFORM_CONFIG: MTModels_BaseF1Config
deploy_quadruplet: null
deploy_quintuplet: null
This simulator configuration will rely on a single threaded model to simulate the four Rocket tiles.
However, it will still produce bit- and cycle-identical results to any other platform configuration
@ -83,10 +83,10 @@ reduced throughput relative to unoptimized FireSim simulators, very large SoCs t
never fit on a single FPGA can be simulated without the cost and performance drawbacks of
partitioning.
::
.. code-block:: yaml
firesim-optimized-big-soc:
DESIGN: FireSim
TARGET_CONFIG: MyMultiCoreBoomConfig
PLATFORM_CONFIG: MTModels_MCRams_BaseF1Config
deploy_quadruplet: null
deploy_quintuplet: null

View File

@ -53,7 +53,7 @@ Trigger sources and sinks are Boolean signals, synchronous to a particular
clock domain, that have been annotated as such. The ``midas.targetutils``
package provides chisel-facing utilities for annotating these signals in your
design. We describe these utilities below, the source for which can be found in
``sim/midas/targetutils/src/main/scala/annotations.scala``.
:gh-file-ref:`sim/midas/targetutils/src/main/scala/midas/annotations.scala`.
Trigger Sources
***************

View File

@ -1,14 +0,0 @@
.. _onprem-initial-setup:
Initial Setup/Installation
================================
This section will guide you through initial setup of a single on-premises (self-owned) machine to support
FireSim, as well as cloning/installing FireSim on that machine.
Please refer to the "Advanced Docs" for additional ways to extend the on-premises support to arbitrary externally provisioned clusters or multiple FPGAs.
.. toctree::
:maxdepth: 2
:caption: Initial Setup/Installation:
Setting-up-your-On-Premises-Machine

View File

@ -2,7 +2,7 @@
#
# You can set these variables from the command line.
SPHINXOPTS = -w warnings.txt -n -W
SPHINXOPTS = -w warnings.txt -n
SPHINXBUILD = python3 -msphinx
SPHINXPROJ = FireSim
SOURCEDIR = .

View File

@ -1,15 +0,0 @@
.. _running_onpre_simulations:
Running FireSim Simulations
================================
This guide will walk you through running a simulation of a single-node, non-networked target, using a pre-generated
hardware image.
Hit Next to get started!
.. toctree::
:maxdepth: 2
:caption: Running FireSim Simulations:
Running-a-Single-Node-Simulation

View File

@ -87,7 +87,9 @@ release = version
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx_tabs.tabs'
'sphinx_tabs.tabs',
'sphinx_copybutton',
'sphinx_substitution_extensions',
]
# Add any paths that contain templates here, relative to this directory.
@ -125,13 +127,11 @@ pygments_style = 'sphinx'
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'collapse_navigation': False,
'logo_only': True,
@ -165,9 +165,10 @@ html_context = {
"version": version
}
# add rst to end of each rst source file
# add rst to beginning of each rst source file
# can put custom strings here that are generated from this file
rst_epilog = f"""
# you can use these in .. code-block:: directives if you give the :substitutions: option underneath
rst_prolog = f"""
.. |overall_version| replace:: {version}
"""

View File

@ -3,30 +3,16 @@ Welcome to FireSim's documentation (version "|version|")!
New to FireSim? Jump to the :ref:`firesim-basics` page for more info.
.. toctree::
:maxdepth: 3
:caption: Getting Started:
FireSim-Basics
.. toctree::
:maxdepth: 3
:caption: AWS EC2 F1 Tutorial:
:numbered:
Initial-Setup/index
Running-Simulations-Tutorial/index
Building-a-FireSim-AFI
.. toctree::
:maxdepth: 3
:caption: On-Premises FPGA Tutorial:
:numbered:
Initial-OnPrem-Setup/index
Running-OnPrem-Simulations-Tutorial/index
Building-a-FireSim-Xclbin
Getting-Started-Guides/AWS-EC2-F1-Tutorial/index
Getting-Started-Guides/On-Premises-FPGA-Tutorial/Xilinx-Alveo-U250-FPGAs
Getting-Started-Guides/On-Premises-FPGA-Tutorial/Xilinx-Alveo-U280-FPGAs
Getting-Started-Guides/On-Premises-FPGA-Tutorial/Xilinx-Vitis-FPGAs
.. toctree::
:maxdepth: 3

View File

@ -1,6 +0,0 @@
Sphinx==4.5.0
Pygments==2.11.2
sphinx-autobuild
sphinx_rtd_theme==1.0.0
requests==2.27.1
sphinx-tabs

View File

@ -4,7 +4,7 @@ and populate it with the necessary sources. We'll call this subdirectory, WORKDI
# Bitstream Builds
`make bitstream` to build an XCLBIN that can be deployed to a U250. Bitstream builds run under the $WORDIR/bitstream
Run `make bitstream` to build an XCLBIN that can be deployed to a U250. Bitstream builds run under the $WORKDIR/bitstream directory.
# FPGA-level Metasimulation

View File

@ -0,0 +1,5 @@
.*.swp
.*.swo
cl_*
!cl_firesim

View File

@ -0,0 +1,75 @@
#!/bin/bash
# This script is called by FireSim's bitbuilder to create a bit file
# exit script if any command fails
set -e
set -o pipefail
usage() {
echo "usage: ${0} [OPTIONS]"
echo ""
echo "Options"
echo " --cl_dir : Custom logic directory to build Vivado bitstream from"
echo " --frequency : Frequency in MHz of the desired FPGA host clock."
echo " --strategy : A string to a precanned set of build directives.
See aws-fpga documentation for more info/.
For this platform TIMING and AREA supported."
echo " --board : FPGA board {au250,au280}."
echo " --help : Display this message"
exit "$1"
}
CL_DIR=""
FREQUENCY=""
STRATEGY=""
BOARD=""
# getopts does not support long options, and is inflexible
while [ "$1" != "" ];
do
case $1 in
--help)
            usage 0 ;;
--cl_dir )
shift
CL_DIR=$1 ;;
--strategy )
shift
STRATEGY=$1 ;;
--frequency )
shift
FREQUENCY=$1 ;;
--board )
shift
BOARD=$1 ;;
* )
echo "invalid option $1"
usage 1 ;;
esac
shift
done
if [ -z "$CL_DIR" ] ; then
echo "no cl directory specified"
usage 1
fi
if [ -z "$FREQUENCY" ] ; then
echo "No --frequency specified"
usage 1
fi
if [ -z "$STRATEGY" ] ; then
echo "No --strategy specified"
usage 1
fi
if [ -z "$BOARD" ] ; then
echo "No --board specified"
usage 1
fi
# run the Vivado build in batch mode, passing the frequency, strategy, and board to the Tcl entry point
cd "$CL_DIR"
vivado -mode batch -source "$CL_DIR/scripts/main.tcl" -tclargs "$FREQUENCY" "$STRATEGY" "$BOARD"
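
# Example invocation (hypothetical script name and values; in normal use, FireSim's bitbuilder
# invokes this script automatically):
#   ./build-bitstream.sh --cl_dir "$PWD/cl_firesim" --frequency 90 --strategy TIMING --board au250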

View File

@ -0,0 +1,66 @@
`timescale 1ns/1ps
//////////////////////////////////////////////////////////////////////////////////
// Company:
// Engineer:
//
// Create Date: 09/10/2021 06:05:42 PM
// Design Name:
// Module Name: axi_tieoff_master
// Project Name:
// Target Devices:
// Tool Versions:
// Description:
//
// Dependencies:
//
// Revision:
// Revision 0.01 - File Created
// Additional Comments:
//
//////////////////////////////////////////////////////////////////////////////////
module axi_tieoff_master(
TIEOFF_M_AXI_CTRL_0_araddr,
TIEOFF_M_AXI_CTRL_0_arready,
TIEOFF_M_AXI_CTRL_0_arvalid,
TIEOFF_M_AXI_CTRL_0_awaddr,
TIEOFF_M_AXI_CTRL_0_awready,
TIEOFF_M_AXI_CTRL_0_awvalid,
TIEOFF_M_AXI_CTRL_0_bready,
TIEOFF_M_AXI_CTRL_0_bresp,
TIEOFF_M_AXI_CTRL_0_bvalid,
TIEOFF_M_AXI_CTRL_0_rdata,
TIEOFF_M_AXI_CTRL_0_rready,
TIEOFF_M_AXI_CTRL_0_rresp,
TIEOFF_M_AXI_CTRL_0_rvalid,
TIEOFF_M_AXI_CTRL_0_wdata,
TIEOFF_M_AXI_CTRL_0_wready,
TIEOFF_M_AXI_CTRL_0_wvalid);
(* X_INTERFACE_PARAMETER = "XIL_INTERFACENAME TIEOFF_M_AXI_CTRL_0, ADDR_WIDTH 32, ARUSER_WIDTH 0, AWUSER_WIDTH 0, BUSER_WIDTH 0, DATA_WIDTH 32, HAS_BRESP 1, HAS_BURST 0, HAS_CACHE 0, HAS_LOCK 0, HAS_PROT 0, HAS_QOS 0, HAS_REGION 0, HAS_RRESP 1, HAS_WSTRB 0, ID_WIDTH 0, INSERT_VIP 0, MAX_BURST_LENGTH 1, NUM_READ_OUTSTANDING 1, NUM_READ_THREADS 1, NUM_WRITE_OUTSTANDING 1, NUM_WRITE_THREADS 1, FREQ_HZ 300000000, PHASE 0.0, PROTOCOL AXI4LITE, READ_WRITE_MODE READ_WRITE, RUSER_BITS_PER_BYTE 0, RUSER_WIDTH 0, SUPPORTS_NARROW_BURST 0, WUSER_BITS_PER_BYTE 0, WUSER_WIDTH 0" *)
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 TIEOFF_M_AXI_CTRL_0 ARADDR" *) output[31:0] TIEOFF_M_AXI_CTRL_0_araddr;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 TIEOFF_M_AXI_CTRL_0 ARREADY" *) input TIEOFF_M_AXI_CTRL_0_arready;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 TIEOFF_M_AXI_CTRL_0 ARVALID" *) output TIEOFF_M_AXI_CTRL_0_arvalid;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 TIEOFF_M_AXI_CTRL_0 AWADDR" *) output[31:0] TIEOFF_M_AXI_CTRL_0_awaddr;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 TIEOFF_M_AXI_CTRL_0 AWREADY" *) input TIEOFF_M_AXI_CTRL_0_awready;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 TIEOFF_M_AXI_CTRL_0 AWVALID" *) output TIEOFF_M_AXI_CTRL_0_awvalid;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 TIEOFF_M_AXI_CTRL_0 BREADY" *) output TIEOFF_M_AXI_CTRL_0_bready;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 TIEOFF_M_AXI_CTRL_0 BRESP" *) input[1:0] TIEOFF_M_AXI_CTRL_0_bresp;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 TIEOFF_M_AXI_CTRL_0 BVALID" *) input TIEOFF_M_AXI_CTRL_0_bvalid;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 TIEOFF_M_AXI_CTRL_0 RDATA" *) input[31:0] TIEOFF_M_AXI_CTRL_0_rdata;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 TIEOFF_M_AXI_CTRL_0 RREADY" *) output TIEOFF_M_AXI_CTRL_0_rready;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 TIEOFF_M_AXI_CTRL_0 RRESP" *) input[1:0] TIEOFF_M_AXI_CTRL_0_rresp;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 TIEOFF_M_AXI_CTRL_0 RVALID" *) input TIEOFF_M_AXI_CTRL_0_rvalid;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 TIEOFF_M_AXI_CTRL_0 WDATA" *) output[31:0] TIEOFF_M_AXI_CTRL_0_wdata;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 TIEOFF_M_AXI_CTRL_0 WREADY" *) input TIEOFF_M_AXI_CTRL_0_wready;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 TIEOFF_M_AXI_CTRL_0 WVALID" *) output TIEOFF_M_AXI_CTRL_0_wvalid;
assign TIEOFF_M_AXI_CTRL_0_araddr = 32'b0;
assign TIEOFF_M_AXI_CTRL_0_arvalid = 1'b0;
assign TIEOFF_M_AXI_CTRL_0_awaddr = 32'b0;
assign TIEOFF_M_AXI_CTRL_0_awvalid = 1'b0;
assign TIEOFF_M_AXI_CTRL_0_bready = 1'b0;
assign TIEOFF_M_AXI_CTRL_0_rready = 1'b0;
assign TIEOFF_M_AXI_CTRL_0_wdata = 32'b0;
assign TIEOFF_M_AXI_CTRL_0_wvalid = 1'b0;
endmodule

View File

@ -0,0 +1,521 @@
`timescale 1ns/1ps
//////////////////////////////////////////////////////////////////////////////////
// Company:
// Engineer:
//
// Create Date: 09/10/2021 07:33:50 PM
// Design Name:
// Module Name: firesim_wrapper
// Project Name:
// Target Devices:
// Tool Versions:
// Description:
//
// Dependencies:
//
// Revision:
// Revision 0.01 - File Created
// Additional Comments:
//
//////////////////////////////////////////////////////////////////////////////////
module firesim_wrapper(
S_AXI_CTRL_araddr,
S_AXI_CTRL_arprot,
S_AXI_CTRL_arready,
S_AXI_CTRL_arvalid,
S_AXI_CTRL_awaddr,
S_AXI_CTRL_awprot,
S_AXI_CTRL_awready,
S_AXI_CTRL_awvalid,
S_AXI_CTRL_bready,
S_AXI_CTRL_bresp,
S_AXI_CTRL_bvalid,
S_AXI_CTRL_rdata,
S_AXI_CTRL_rready,
S_AXI_CTRL_rresp,
S_AXI_CTRL_rvalid,
S_AXI_CTRL_wdata,
S_AXI_CTRL_wready,
S_AXI_CTRL_wstrb,
S_AXI_CTRL_wvalid,
S_AXI_DMA_araddr,
S_AXI_DMA_arburst,
S_AXI_DMA_arcache,
S_AXI_DMA_arid,
S_AXI_DMA_arlen,
S_AXI_DMA_arlock,
S_AXI_DMA_arprot,
S_AXI_DMA_arqos,
S_AXI_DMA_arready,
S_AXI_DMA_arregion,
S_AXI_DMA_arsize,
S_AXI_DMA_arvalid,
S_AXI_DMA_awaddr,
S_AXI_DMA_awburst,
S_AXI_DMA_awcache,
S_AXI_DMA_awid,
S_AXI_DMA_awlen,
S_AXI_DMA_awlock,
S_AXI_DMA_awprot,
S_AXI_DMA_awqos,
S_AXI_DMA_awready,
S_AXI_DMA_awregion,
S_AXI_DMA_awsize,
S_AXI_DMA_awvalid,
S_AXI_DMA_bid,
S_AXI_DMA_bready,
S_AXI_DMA_bresp,
S_AXI_DMA_bvalid,
S_AXI_DMA_rdata,
S_AXI_DMA_rid,
S_AXI_DMA_rlast,
S_AXI_DMA_rready,
S_AXI_DMA_rresp,
S_AXI_DMA_rvalid,
S_AXI_DMA_wdata,
S_AXI_DMA_wlast,
S_AXI_DMA_wready,
S_AXI_DMA_wstrb,
S_AXI_DMA_wvalid,
M_AXI_DDR0_araddr,
M_AXI_DDR0_arburst,
M_AXI_DDR0_arcache,
M_AXI_DDR0_arid,
M_AXI_DDR0_arlen,
M_AXI_DDR0_arlock,
M_AXI_DDR0_arprot,
M_AXI_DDR0_arqos,
M_AXI_DDR0_arready,
M_AXI_DDR0_arregion,
M_AXI_DDR0_arsize,
M_AXI_DDR0_arvalid,
M_AXI_DDR0_awaddr,
M_AXI_DDR0_awburst,
M_AXI_DDR0_awcache,
M_AXI_DDR0_awid,
M_AXI_DDR0_awlen,
M_AXI_DDR0_awlock,
M_AXI_DDR0_awprot,
M_AXI_DDR0_awqos,
M_AXI_DDR0_awready,
M_AXI_DDR0_awregion,
M_AXI_DDR0_awsize,
M_AXI_DDR0_awvalid,
M_AXI_DDR0_bready,
M_AXI_DDR0_bid,
M_AXI_DDR0_bresp,
M_AXI_DDR0_bvalid,
M_AXI_DDR0_rdata,
M_AXI_DDR0_rid,
M_AXI_DDR0_rlast,
M_AXI_DDR0_rready,
M_AXI_DDR0_rresp,
M_AXI_DDR0_rvalid,
M_AXI_DDR0_wdata,
M_AXI_DDR0_wlast,
M_AXI_DDR0_wready,
M_AXI_DDR0_wstrb,
M_AXI_DDR0_wvalid,
sys_clk_30,
sys_reset_n
);
(* X_INTERFACE_PARAMETER = "XIL_INTERFACENAME S_AXI_CTRL, ADDR_WIDTH 32, ARUSER_WIDTH 0, AWUSER_WIDTH 0, BUSER_WIDTH 0, CLK_DOMAIN design_1_clk_wiz_0_0_clk_out1, DATA_WIDTH 32, HAS_BRESP 1, HAS_BURST 0, HAS_CACHE 0, HAS_LOCK 0, HAS_PROT 1, HAS_QOS 0, HAS_REGION 0, HAS_RRESP 1, HAS_WSTRB 1, ID_WIDTH 0, INSERT_VIP 0, MAX_BURST_LENGTH 1, NUM_READ_OUTSTANDING 1, NUM_READ_THREADS 1, NUM_WRITE_OUTSTANDING 1, NUM_WRITE_THREADS 1, PHASE 0, PROTOCOL AXI4LITE, READ_WRITE_MODE READ_WRITE, RUSER_BITS_PER_BYTE 0, RUSER_WIDTH 0, SUPPORTS_NARROW_BURST 0, WUSER_BITS_PER_BYTE 0, WUSER_WIDTH 0" *)
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_CTRL ARADDR" *) input[31:0] S_AXI_CTRL_araddr;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_CTRL ARPROT" *) input[2:0] S_AXI_CTRL_arprot;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_CTRL ARREADY" *) output S_AXI_CTRL_arready;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_CTRL ARVALID" *) input S_AXI_CTRL_arvalid;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_CTRL AWADDR" *) input[31:0] S_AXI_CTRL_awaddr;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_CTRL AWPROT" *) input[2:0] S_AXI_CTRL_awprot;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_CTRL AWREADY" *) output S_AXI_CTRL_awready;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_CTRL AWVALID" *) input S_AXI_CTRL_awvalid;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_CTRL BREADY" *) input S_AXI_CTRL_bready;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_CTRL BRESP" *) output[1:0] S_AXI_CTRL_bresp;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_CTRL BVALID" *) output S_AXI_CTRL_bvalid;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_CTRL RDATA" *) output[31:0] S_AXI_CTRL_rdata;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_CTRL RREADY" *) input S_AXI_CTRL_rready;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_CTRL RRESP" *) output[1:0] S_AXI_CTRL_rresp;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_CTRL RVALID" *) output S_AXI_CTRL_rvalid;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_CTRL WDATA" *) input[31:0] S_AXI_CTRL_wdata;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_CTRL WREADY" *) output S_AXI_CTRL_wready;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_CTRL WSTRB" *) input[3:0] S_AXI_CTRL_wstrb;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_CTRL WVALID" *) input S_AXI_CTRL_wvalid;
(* X_INTERFACE_PARAMETER = "XIL_INTERFACENAME S_AXI_DMA, ADDR_WIDTH 64, ARUSER_WIDTH 0, AWUSER_WIDTH 0, BUSER_WIDTH 0, CLK_DOMAIN design_1_clk_wiz_0_0_clk_out1, DATA_WIDTH 512, HAS_BRESP 1, HAS_BURST 0, HAS_CACHE 1, HAS_LOCK 1, HAS_PROT 1, HAS_QOS 0, HAS_REGION 0, HAS_RRESP 1, HAS_WSTRB 1, ID_WIDTH 4, INSERT_VIP 0, MAX_BURST_LENGTH 256, NUM_READ_OUTSTANDING 32, NUM_READ_THREADS 4, NUM_WRITE_OUTSTANDING 16, NUM_WRITE_THREADS 4, PHASE 0, PROTOCOL AXI4, READ_WRITE_MODE READ_WRITE, RUSER_BITS_PER_BYTE 0, RUSER_WIDTH 0, SUPPORTS_NARROW_BURST 0, WUSER_BITS_PER_BYTE 0, WUSER_WIDTH 0" *)
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_DMA ARADDR" *) input[63:0] S_AXI_DMA_araddr;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_DMA ARBURST" *) input[1:0] S_AXI_DMA_arburst;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_DMA ARCACHE" *) input[3:0] S_AXI_DMA_arcache;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_DMA ARID" *) input[3:0] S_AXI_DMA_arid;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_DMA ARLEN" *) input[7:0] S_AXI_DMA_arlen;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_DMA ARLOCK" *) input[0:0] S_AXI_DMA_arlock;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_DMA ARPROT" *) input[2:0] S_AXI_DMA_arprot;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_DMA ARQOS" *) input[3:0] S_AXI_DMA_arqos;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_DMA ARREADY" *) output S_AXI_DMA_arready;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_DMA ARREGION" *) input[3:0] S_AXI_DMA_arregion;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_DMA ARSIZE" *) input[2:0] S_AXI_DMA_arsize;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_DMA ARVALID" *) input S_AXI_DMA_arvalid;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_DMA AWADDR" *) input[63:0] S_AXI_DMA_awaddr;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_DMA AWBURST" *) input[1:0] S_AXI_DMA_awburst;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_DMA AWCACHE" *) input[3:0] S_AXI_DMA_awcache;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_DMA AWID" *) input[3:0] S_AXI_DMA_awid;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_DMA AWLEN" *) input[7:0] S_AXI_DMA_awlen;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_DMA AWLOCK" *) input[0:0] S_AXI_DMA_awlock;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_DMA AWPROT" *) input[2:0] S_AXI_DMA_awprot;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_DMA AWQOS" *) input[3:0] S_AXI_DMA_awqos;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_DMA AWREADY" *) output S_AXI_DMA_awready;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_DMA AWREGION" *) input[3:0] S_AXI_DMA_awregion;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_DMA AWSIZE" *) input[2:0] S_AXI_DMA_awsize;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_DMA AWVALID" *) input S_AXI_DMA_awvalid;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_DMA BID" *) output[3:0] S_AXI_DMA_bid;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_DMA BREADY" *) input S_AXI_DMA_bready;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_DMA BRESP" *) output[1:0] S_AXI_DMA_bresp;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_DMA BVALID" *) output S_AXI_DMA_bvalid;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_DMA RDATA" *) output[511:0] S_AXI_DMA_rdata;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_DMA RID" *) output[3:0] S_AXI_DMA_rid;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_DMA RLAST" *) output S_AXI_DMA_rlast;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_DMA RREADY" *) input S_AXI_DMA_rready;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_DMA RRESP" *) output[1:0] S_AXI_DMA_rresp;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_DMA RVALID" *) output S_AXI_DMA_rvalid;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_DMA WDATA" *) input[511:0] S_AXI_DMA_wdata;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_DMA WLAST" *) input S_AXI_DMA_wlast;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_DMA WREADY" *) output S_AXI_DMA_wready;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_DMA WSTRB" *) input[63:0] S_AXI_DMA_wstrb;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 S_AXI_DMA WVALID" *) input S_AXI_DMA_wvalid;
(* X_INTERFACE_PARAMETER = "XIL_INTERFACENAME M_AXI_DDR0, ADDR_WIDTH 34, ARUSER_WIDTH 0, AWUSER_WIDTH 0, BUSER_WIDTH 0, CLK_DOMAIN design_1_clk_wiz_0_0_clk_out1, DATA_WIDTH 64, HAS_BRESP 1, HAS_BURST 1, HAS_CACHE 1, HAS_LOCK 1, HAS_PROT 1, HAS_QOS 1, HAS_REGION 1, HAS_RRESP 1, HAS_WSTRB 1, ID_WIDTH 16, INSERT_VIP 0, MAX_BURST_LENGTH 256, NUM_READ_OUTSTANDING 2, NUM_READ_THREADS 1, NUM_WRITE_OUTSTANDING 2, NUM_WRITE_THREADS 1, PHASE 0, PROTOCOL AXI4, READ_WRITE_MODE READ_WRITE, RUSER_BITS_PER_BYTE 0, RUSER_WIDTH 0, SUPPORTS_NARROW_BURST 1, WUSER_BITS_PER_BYTE 0, WUSER_WIDTH 0" *)
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 M_AXI_DDR0 ARADDR" *) output[33:0] M_AXI_DDR0_araddr;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 M_AXI_DDR0 ARBURST" *) output[1:0] M_AXI_DDR0_arburst;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 M_AXI_DDR0 ARCACHE" *) output[3:0] M_AXI_DDR0_arcache;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 M_AXI_DDR0 ARID" *) output[15:0] M_AXI_DDR0_arid;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 M_AXI_DDR0 ARLEN" *) output[7:0] M_AXI_DDR0_arlen;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 M_AXI_DDR0 ARLOCK" *) output[0:0] M_AXI_DDR0_arlock;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 M_AXI_DDR0 ARPROT" *) output[2:0] M_AXI_DDR0_arprot;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 M_AXI_DDR0 ARQOS" *) output[3:0] M_AXI_DDR0_arqos;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 M_AXI_DDR0 ARREADY" *) input M_AXI_DDR0_arready;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 M_AXI_DDR0 ARREGION" *) output[3:0] M_AXI_DDR0_arregion; // TODO: connect
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 M_AXI_DDR0 ARSIZE" *) output[2:0] M_AXI_DDR0_arsize;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 M_AXI_DDR0 ARVALID" *) output M_AXI_DDR0_arvalid;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 M_AXI_DDR0 AWADDR" *) output[33:0] M_AXI_DDR0_awaddr;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 M_AXI_DDR0 AWBURST" *) output[1:0] M_AXI_DDR0_awburst;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 M_AXI_DDR0 AWCACHE" *) output[3:0] M_AXI_DDR0_awcache;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 M_AXI_DDR0 AWID" *) output[15:0] M_AXI_DDR0_awid;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 M_AXI_DDR0 AWLEN" *) output[7:0] M_AXI_DDR0_awlen;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 M_AXI_DDR0 AWLOCK" *) output[0:0] M_AXI_DDR0_awlock;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 M_AXI_DDR0 AWPROT" *) output[2:0] M_AXI_DDR0_awprot;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 M_AXI_DDR0 AWQOS" *) output[3:0] M_AXI_DDR0_awqos;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 M_AXI_DDR0 AWREADY" *) input M_AXI_DDR0_awready;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 M_AXI_DDR0 AWREGION" *) output[3:0] M_AXI_DDR0_awregion; // TODO: connect
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 M_AXI_DDR0 AWSIZE" *) output[2:0] M_AXI_DDR0_awsize;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 M_AXI_DDR0 AWVALID" *) output M_AXI_DDR0_awvalid;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 M_AXI_DDR0 BID" *) input[15:0] M_AXI_DDR0_bid;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 M_AXI_DDR0 BREADY" *) output M_AXI_DDR0_bready;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 M_AXI_DDR0 BRESP" *) input[1:0] M_AXI_DDR0_bresp;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 M_AXI_DDR0 BVALID" *) input M_AXI_DDR0_bvalid;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 M_AXI_DDR0 RDATA" *) input[63:0] M_AXI_DDR0_rdata;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 M_AXI_DDR0 RID" *) input[15:0] M_AXI_DDR0_rid;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 M_AXI_DDR0 RLAST" *) input M_AXI_DDR0_rlast;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 M_AXI_DDR0 RREADY" *) output M_AXI_DDR0_rready;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 M_AXI_DDR0 RRESP" *) input[1:0] M_AXI_DDR0_rresp;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 M_AXI_DDR0 RVALID" *) input M_AXI_DDR0_rvalid;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 M_AXI_DDR0 WDATA" *) output[63:0] M_AXI_DDR0_wdata;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 M_AXI_DDR0 WLAST" *) output M_AXI_DDR0_wlast;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 M_AXI_DDR0 WREADY" *) input M_AXI_DDR0_wready;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 M_AXI_DDR0 WSTRB" *) output[7:0] M_AXI_DDR0_wstrb;
(* X_INTERFACE_INFO = "xilinx.com:interface:aximm_rtl:1.0 M_AXI_DDR0 WVALID" *) output M_AXI_DDR0_wvalid;
(* X_INTERFACE_PARAMETER = "XIL_INTERFACENAME CLK.SYS_CLK_30, ASSOCIATED_BUSIF S_AXI_CTRL:S_AXI_DMA:M_AXI_DDR0, CLK_DOMAIN design_1_clk_wiz_0_0_clk_out1, INSERT_VIP 0, PHASE 0" *)
(* X_INTERFACE_INFO = "xilinx.com:signal:clock:1.0 CLK.SYS_CLK_30 CLK" *) input sys_clk_30;
(* X_INTERFACE_PARAMETER = "XIL_INTERFACENAME RST.SYS_RESET_N, INSERT_VIP 0, POLARITY ACTIVE_LOW" *)
(* X_INTERFACE_INFO = "xilinx.com:signal:reset:1.0 RST.SYS_RESET_N RST" *) input[0:0] sys_reset_n;
F1Shim firesim_top(
.clock(sys_clk_30),
.reset(!sys_reset_n),
.io_master_aw_ready(S_AXI_CTRL_awready),
.io_master_aw_valid(S_AXI_CTRL_awvalid),
.io_master_aw_bits_addr(S_AXI_CTRL_awaddr[24:0]),
.io_master_aw_bits_len(8'h0),
.io_master_aw_bits_size(3'h2),
.io_master_aw_bits_burst(2'h1),
.io_master_aw_bits_lock(1'h0),
.io_master_aw_bits_cache(4'h0),
.io_master_aw_bits_prot(3'h0), //unused? (could connect?) S_AXI_CTRL_awprot
.io_master_aw_bits_qos(4'h0),
.io_master_aw_bits_region(4'h0),
.io_master_aw_bits_id(12'h0),
.io_master_aw_bits_user(1'h0),
.io_master_w_ready(S_AXI_CTRL_wready),
.io_master_w_valid(S_AXI_CTRL_wvalid),
.io_master_w_bits_data(S_AXI_CTRL_wdata),
.io_master_w_bits_last(1'h1),
.io_master_w_bits_id(12'h0),
.io_master_w_bits_strb(S_AXI_CTRL_wstrb), //OR 8'hff
.io_master_w_bits_user(1'h0),
.io_master_b_ready(S_AXI_CTRL_bready),
.io_master_b_valid(S_AXI_CTRL_bvalid),
.io_master_b_bits_resp(S_AXI_CTRL_bresp),
.io_master_b_bits_id(), // UNUSED at top level
.io_master_b_bits_user(), // UNUSED at top level
.io_master_ar_ready(S_AXI_CTRL_arready),
.io_master_ar_valid(S_AXI_CTRL_arvalid),
.io_master_ar_bits_addr(S_AXI_CTRL_araddr[24:0]),
.io_master_ar_bits_len(8'h0),
.io_master_ar_bits_size(3'h2),
.io_master_ar_bits_burst(2'h1),
.io_master_ar_bits_lock(1'h0),
.io_master_ar_bits_cache(4'h0),
.io_master_ar_bits_prot(3'h0), // S_AXI_CTRL_arprot
.io_master_ar_bits_qos(4'h0),
.io_master_ar_bits_region(4'h0),
.io_master_ar_bits_id(12'h0),
.io_master_ar_bits_user(1'h0),
.io_master_r_ready(S_AXI_CTRL_rready),
.io_master_r_valid(S_AXI_CTRL_rvalid),
.io_master_r_bits_resp(S_AXI_CTRL_rresp),
.io_master_r_bits_data(S_AXI_CTRL_rdata),
.io_master_r_bits_last(), //UNUSED at top level
.io_master_r_bits_id(), // UNUSED at top level
.io_master_r_bits_user(), // UNUSED at top level
// special NIC master interface
.io_dma_aw_ready(S_AXI_DMA_awready),
.io_dma_aw_valid(S_AXI_DMA_awvalid),
.io_dma_aw_bits_addr(S_AXI_DMA_awaddr),
.io_dma_aw_bits_len(S_AXI_DMA_awlen),
.io_dma_aw_bits_size(S_AXI_DMA_awsize),
.io_dma_aw_bits_burst(2'h1), // S_AXI_DMA_awburst
.io_dma_aw_bits_lock(1'h0), // S_AXI_DMA_awlock
.io_dma_aw_bits_cache(4'h0), // S_AXI_DMA_awcache
.io_dma_aw_bits_prot(3'h0), //unused? (could connect?) S_AXI_DMA_awprot
.io_dma_aw_bits_qos(4'h0), // S_AXI_DMA_awqos
.io_dma_aw_bits_region(4'h0), // S_AXI_DMA_awregion
.io_dma_aw_bits_id(S_AXI_DMA_awid),
.io_dma_aw_bits_user(1'h0),
.io_dma_w_ready(S_AXI_DMA_wready),
.io_dma_w_valid(S_AXI_DMA_wvalid),
.io_dma_w_bits_data(S_AXI_DMA_wdata),
.io_dma_w_bits_last(S_AXI_DMA_wlast),
.io_dma_w_bits_id(4'h0),
.io_dma_w_bits_strb(S_AXI_DMA_wstrb),
.io_dma_w_bits_user(1'h0),
.io_dma_b_ready(S_AXI_DMA_bready),
.io_dma_b_valid(S_AXI_DMA_bvalid),
.io_dma_b_bits_resp(S_AXI_DMA_bresp),
.io_dma_b_bits_id(S_AXI_DMA_bid),
.io_dma_b_bits_user(), // UNUSED at top level
.io_dma_ar_ready(S_AXI_DMA_arready),
.io_dma_ar_valid(S_AXI_DMA_arvalid),
.io_dma_ar_bits_addr(S_AXI_DMA_araddr),
.io_dma_ar_bits_len(S_AXI_DMA_arlen),
.io_dma_ar_bits_size(S_AXI_DMA_arsize),
.io_dma_ar_bits_burst(2'h1), // S_AXI_DMA_arburst
.io_dma_ar_bits_lock(1'h0), // S_AXI_DMA_arlock
.io_dma_ar_bits_cache(4'h0), // S_AXI_DMA_arcache
.io_dma_ar_bits_prot(3'h0), // S_AXI_DMA_arprot
.io_dma_ar_bits_qos(4'h0), // S_AXI_DMA_arqos
.io_dma_ar_bits_region(4'h0), // S_AXI_DMA_arregion
.io_dma_ar_bits_id(S_AXI_DMA_arid),
.io_dma_ar_bits_user(1'h0),
.io_dma_r_ready(S_AXI_DMA_rready),
.io_dma_r_valid(S_AXI_DMA_rvalid),
.io_dma_r_bits_resp(S_AXI_DMA_rresp),
.io_dma_r_bits_data(S_AXI_DMA_rdata),
.io_dma_r_bits_last(S_AXI_DMA_rlast),
.io_dma_r_bits_id(S_AXI_DMA_rid),
.io_dma_r_bits_user(), // UNUSED at top level
// `include "firesim_ila_insert_ports.v"
.io_slave_0_aw_ready(M_AXI_DDR0_awready),
.io_slave_0_aw_valid(M_AXI_DDR0_awvalid),
.io_slave_0_aw_bits_addr(M_AXI_DDR0_awaddr),
.io_slave_0_aw_bits_len(M_AXI_DDR0_awlen),
.io_slave_0_aw_bits_size(M_AXI_DDR0_awsize),
.io_slave_0_aw_bits_burst(M_AXI_DDR0_awburst), // not available on DDR IF
.io_slave_0_aw_bits_lock(M_AXI_DDR0_awlock), // not available on DDR IF
.io_slave_0_aw_bits_cache(M_AXI_DDR0_awcache), // not available on DDR IF
.io_slave_0_aw_bits_prot(M_AXI_DDR0_awprot), // not available on DDR IF
.io_slave_0_aw_bits_qos(M_AXI_DDR0_awqos), // not available on DDR IF
.io_slave_0_aw_bits_id(M_AXI_DDR0_awid),
.io_slave_0_w_ready(M_AXI_DDR0_wready),
.io_slave_0_w_valid(M_AXI_DDR0_wvalid),
.io_slave_0_w_bits_data(M_AXI_DDR0_wdata),
.io_slave_0_w_bits_last(M_AXI_DDR0_wlast),
.io_slave_0_w_bits_strb(M_AXI_DDR0_wstrb),
.io_slave_0_b_ready(M_AXI_DDR0_bready),
.io_slave_0_b_valid(M_AXI_DDR0_bvalid),
.io_slave_0_b_bits_resp(M_AXI_DDR0_bresp),
.io_slave_0_b_bits_id(M_AXI_DDR0_bid),
.io_slave_0_ar_ready(M_AXI_DDR0_arready),
.io_slave_0_ar_valid(M_AXI_DDR0_arvalid),
.io_slave_0_ar_bits_addr(M_AXI_DDR0_araddr),
.io_slave_0_ar_bits_len(M_AXI_DDR0_arlen),
.io_slave_0_ar_bits_size(M_AXI_DDR0_arsize),
.io_slave_0_ar_bits_burst(M_AXI_DDR0_arburst), // not available on DDR IF
.io_slave_0_ar_bits_lock(M_AXI_DDR0_arlock), // not available on DDR IF
.io_slave_0_ar_bits_cache(M_AXI_DDR0_arcache), // not available on DDR IF
.io_slave_0_ar_bits_prot(M_AXI_DDR0_arprot), // not available on DDR IF
.io_slave_0_ar_bits_qos(M_AXI_DDR0_arqos), // not available on DDR IF
.io_slave_0_ar_bits_id(M_AXI_DDR0_arid), // not available on DDR IF
.io_slave_0_r_ready(M_AXI_DDR0_rready),
.io_slave_0_r_valid(M_AXI_DDR0_rvalid),
.io_slave_0_r_bits_resp(M_AXI_DDR0_rresp),
.io_slave_0_r_bits_data(M_AXI_DDR0_rdata),
.io_slave_0_r_bits_last(M_AXI_DDR0_rlast),
.io_slave_0_r_bits_id(M_AXI_DDR0_rid)
// .io_slave_1_aw_ready(fsimtop_s_1_axi_awready),
// .io_slave_1_aw_valid(fsimtop_s_1_axi_awvalid),
// .io_slave_1_aw_bits_addr(fsimtop_s_1_axi_awaddr_small),
// .io_slave_1_aw_bits_len(fsimtop_s_1_axi_awlen),
// .io_slave_1_aw_bits_size(fsimtop_s_1_axi_awsize),
// .io_slave_1_aw_bits_burst(fsimtop_s_1_axi_awburst), // not available on DDR IF
// .io_slave_1_aw_bits_lock(fsimtop_s_1_axi_awlock), // not available on DDR IF
// .io_slave_1_aw_bits_cache(fsimtop_s_1_axi_awcache), // not available on DDR IF
// .io_slave_1_aw_bits_prot(fsimtop_s_1_axi_awprot), // not available on DDR IF
// .io_slave_1_aw_bits_qos(fsimtop_s_1_axi_awqos), // not available on DDR IF
// .io_slave_1_aw_bits_id(fsimtop_s_1_axi_awid),
//
// .io_slave_1_w_ready(fsimtop_s_1_axi_wready),
// .io_slave_1_w_valid(fsimtop_s_1_axi_wvalid),
// .io_slave_1_w_bits_data(fsimtop_s_1_axi_wdata),
// .io_slave_1_w_bits_last(fsimtop_s_1_axi_wlast),
// .io_slave_1_w_bits_strb(fsimtop_s_1_axi_wstrb),
//
// .io_slave_1_b_ready(fsimtop_s_1_axi_bready),
// .io_slave_1_b_valid(fsimtop_s_1_axi_bvalid),
// .io_slave_1_b_bits_resp(fsimtop_s_1_axi_bresp),
// .io_slave_1_b_bits_id(fsimtop_s_1_axi_bid),
//
// .io_slave_1_ar_ready(fsimtop_s_1_axi_arready),
// .io_slave_1_ar_valid(fsimtop_s_1_axi_arvalid),
// .io_slave_1_ar_bits_addr(fsimtop_s_1_axi_araddr_small),
// .io_slave_1_ar_bits_len(fsimtop_s_1_axi_arlen),
// .io_slave_1_ar_bits_size(fsimtop_s_1_axi_arsize),
// .io_slave_1_ar_bits_burst(fsimtop_s_1_axi_arburst), // not available on DDR IF
// .io_slave_1_ar_bits_lock(fsimtop_s_1_axi_arlock), // not available on DDR IF
// .io_slave_1_ar_bits_cache(fsimtop_s_1_axi_arcache), // not available on DDR IF
// .io_slave_1_ar_bits_prot(fsimtop_s_1_axi_arprot), // not available on DDR IF
// .io_slave_1_ar_bits_qos(fsimtop_s_1_axi_arqos), // not available on DDR IF
// .io_slave_1_ar_bits_id(fsimtop_s_1_axi_arid), // not available on DDR IF
//
// .io_slave_1_r_ready(fsimtop_s_1_axi_rready),
// .io_slave_1_r_valid(fsimtop_s_1_axi_rvalid),
// .io_slave_1_r_bits_resp(fsimtop_s_1_axi_rresp),
// .io_slave_1_r_bits_data(fsimtop_s_1_axi_rdata),
// .io_slave_1_r_bits_last(fsimtop_s_1_axi_rlast),
// .io_slave_1_r_bits_id(fsimtop_s_1_axi_rid),
//
// .io_slave_2_aw_ready(fsimtop_s_2_axi_awready),
// .io_slave_2_aw_valid(fsimtop_s_2_axi_awvalid),
// .io_slave_2_aw_bits_addr(fsimtop_s_2_axi_awaddr_small),
// .io_slave_2_aw_bits_len(fsimtop_s_2_axi_awlen),
// .io_slave_2_aw_bits_size(fsimtop_s_2_axi_awsize),
// .io_slave_2_aw_bits_burst(fsimtop_s_2_axi_awburst), // not available on DDR IF
// .io_slave_2_aw_bits_lock(fsimtop_s_2_axi_awlock), // not available on DDR IF
// .io_slave_2_aw_bits_cache(fsimtop_s_2_axi_awcache), // not available on DDR IF
// .io_slave_2_aw_bits_prot(fsimtop_s_2_axi_awprot), // not available on DDR IF
// .io_slave_2_aw_bits_qos(fsimtop_s_2_axi_awqos), // not available on DDR IF
// .io_slave_2_aw_bits_id(fsimtop_s_2_axi_awid),
//
// .io_slave_2_w_ready(fsimtop_s_2_axi_wready),
// .io_slave_2_w_valid(fsimtop_s_2_axi_wvalid),
// .io_slave_2_w_bits_data(fsimtop_s_2_axi_wdata),
// .io_slave_2_w_bits_last(fsimtop_s_2_axi_wlast),
// .io_slave_2_w_bits_strb(fsimtop_s_2_axi_wstrb),
//
// .io_slave_2_b_ready(fsimtop_s_2_axi_bready),
// .io_slave_2_b_valid(fsimtop_s_2_axi_bvalid),
// .io_slave_2_b_bits_resp(fsimtop_s_2_axi_bresp),
// .io_slave_2_b_bits_id(fsimtop_s_2_axi_bid),
//
// .io_slave_2_ar_ready(fsimtop_s_2_axi_arready),
// .io_slave_2_ar_valid(fsimtop_s_2_axi_arvalid),
// .io_slave_2_ar_bits_addr(fsimtop_s_2_axi_araddr_small),
// .io_slave_2_ar_bits_len(fsimtop_s_2_axi_arlen),
// .io_slave_2_ar_bits_size(fsimtop_s_2_axi_arsize),
// .io_slave_2_ar_bits_burst(fsimtop_s_2_axi_arburst), // not available on DDR IF
// .io_slave_2_ar_bits_lock(fsimtop_s_2_axi_arlock), // not available on DDR IF
// .io_slave_2_ar_bits_cache(fsimtop_s_2_axi_arcache), // not available on DDR IF
// .io_slave_2_ar_bits_prot(fsimtop_s_2_axi_arprot), // not available on DDR IF
// .io_slave_2_ar_bits_qos(fsimtop_s_2_axi_arqos), // not available on DDR IF
// .io_slave_2_ar_bits_id(fsimtop_s_2_axi_arid), // not available on DDR IF
//
// .io_slave_2_r_ready(fsimtop_s_2_axi_rready),
// .io_slave_2_r_valid(fsimtop_s_2_axi_rvalid),
// .io_slave_2_r_bits_resp(fsimtop_s_2_axi_rresp),
// .io_slave_2_r_bits_data(fsimtop_s_2_axi_rdata),
// .io_slave_2_r_bits_last(fsimtop_s_2_axi_rlast),
// .io_slave_2_r_bits_id(fsimtop_s_2_axi_rid),
//
// .io_slave_3_aw_ready(fsimtop_s_3_axi_awready),
// .io_slave_3_aw_valid(fsimtop_s_3_axi_awvalid),
// .io_slave_3_aw_bits_addr(fsimtop_s_3_axi_awaddr_small),
// .io_slave_3_aw_bits_len(fsimtop_s_3_axi_awlen),
// .io_slave_3_aw_bits_size(fsimtop_s_3_axi_awsize),
// .io_slave_3_aw_bits_burst(fsimtop_s_3_axi_awburst), // not available on DDR IF
// .io_slave_3_aw_bits_lock(fsimtop_s_3_axi_awlock), // not available on DDR IF
// .io_slave_3_aw_bits_cache(fsimtop_s_3_axi_awcache), // not available on DDR IF
// .io_slave_3_aw_bits_prot(fsimtop_s_3_axi_awprot), // not available on DDR IF
// .io_slave_3_aw_bits_qos(fsimtop_s_3_axi_awqos), // not available on DDR IF
// .io_slave_3_aw_bits_id(fsimtop_s_3_axi_awid),
//
// .io_slave_3_w_ready(fsimtop_s_3_axi_wready),
// .io_slave_3_w_valid(fsimtop_s_3_axi_wvalid),
// .io_slave_3_w_bits_data(fsimtop_s_3_axi_wdata),
// .io_slave_3_w_bits_last(fsimtop_s_3_axi_wlast),
// .io_slave_3_w_bits_strb(fsimtop_s_3_axi_wstrb),
//
// .io_slave_3_b_ready(fsimtop_s_3_axi_bready),
// .io_slave_3_b_valid(fsimtop_s_3_axi_bvalid),
// .io_slave_3_b_bits_resp(fsimtop_s_3_axi_bresp),
// .io_slave_3_b_bits_id(fsimtop_s_3_axi_bid),
//
// .io_slave_3_ar_ready(fsimtop_s_3_axi_arready),
// .io_slave_3_ar_valid(fsimtop_s_3_axi_arvalid),
// .io_slave_3_ar_bits_addr(fsimtop_s_3_axi_araddr_small),
// .io_slave_3_ar_bits_len(fsimtop_s_3_axi_arlen),
// .io_slave_3_ar_bits_size(fsimtop_s_3_axi_arsize),
// .io_slave_3_ar_bits_burst(fsimtop_s_3_axi_arburst), // not available on DDR IF
// .io_slave_3_ar_bits_lock(fsimtop_s_3_axi_arlock), // not available on DDR IF
// .io_slave_3_ar_bits_cache(fsimtop_s_3_axi_arcache), // not available on DDR IF
// .io_slave_3_ar_bits_prot(fsimtop_s_3_axi_arprot), // not available on DDR IF
// .io_slave_3_ar_bits_qos(fsimtop_s_3_axi_arqos), // not available on DDR IF
// .io_slave_3_ar_bits_id(fsimtop_s_3_axi_arid), // not available on DDR IF
//
// .io_slave_3_r_ready(fsimtop_s_3_axi_rready),
// .io_slave_3_r_valid(fsimtop_s_3_axi_rvalid),
// .io_slave_3_r_bits_resp(fsimtop_s_3_axi_rresp),
// .io_slave_3_r_bits_data(fsimtop_s_3_axi_rdata),
// .io_slave_3_r_bits_last(fsimtop_s_3_axi_rlast),
// .io_slave_3_r_bits_id(fsimtop_s_3_axi_rid)
);
endmodule : firesim_wrapper

View File

@ -0,0 +1,24 @@
# *************************************************************************
#
# Copyright 2020 Xilinx, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# *************************************************************************
# Adapted from https://github.com/Xilinx/open-nic-shell
set part xcu250-figd2104-2l-e
set board_part xilinx.com:au250:part0:1.3
set zynq_family 0
set hw_device xcu250_0

View File

@ -0,0 +1,384 @@
################################################################
# This is a generated script based on design: design_1
#
# Though there are limitations about the generated script,
# the main purpose of this utility is to make learning
# IP Integrator Tcl commands easier.
################################################################
namespace eval _tcl {
proc get_script_folder {} {
set script_path [file normalize [info script]]
set script_folder [file dirname $script_path]
return $script_folder
}
}
variable script_folder
set script_folder [_tcl::get_script_folder]
################################################################
# Check if script is running in correct Vivado version.
################################################################
set scripts_vivado_version 2021.1
set current_vivado_version [version -short]
if { [string first $scripts_vivado_version $current_vivado_version] == -1 } {
puts ""
catch {common::send_gid_msg -ssname BD::TCL -id 2041 -severity "ERROR" "This script was generated using Vivado <$scripts_vivado_version> and is being run in <$current_vivado_version> of Vivado. Please run the script in Vivado <$scripts_vivado_version> then open the design in Vivado <$current_vivado_version>. Upgrade the design by running \"Tools => Report => Report IP Status...\", then run write_bd_tcl to create an updated script."}
return 1
}
################################################################
# START
################################################################
# The design that will be created by this Tcl script contains the following
# module references:
# axi_tieoff_master, firesim_wrapper
# Please add the sources of those modules before sourcing this Tcl script.
# CHANGE DESIGN NAME HERE
variable design_name
set design_name design_1
# If you do not already have an existing IP Integrator design open,
# you can create a design using the following command:
# create_bd_design $design_name
# Creating design if needed
set errMsg ""
set nRet 0
set cur_design [current_bd_design -quiet]
set list_cells [get_bd_cells -quiet]
if { ${design_name} eq "" } {
# USE CASES:
# 1) Design_name not set
set errMsg "Please set the variable <design_name> to a non-empty value."
set nRet 1
} elseif { ${cur_design} ne "" && ${list_cells} eq "" } {
# USE CASES:
# 2): Current design opened AND is empty AND names same.
# 3): Current design opened AND is empty AND names diff; design_name NOT in project.
# 4): Current design opened AND is empty AND names diff; design_name exists in project.
if { $cur_design ne $design_name } {
common::send_gid_msg -ssname BD::TCL -id 2001 -severity "INFO" "Changing value of <design_name> from <$design_name> to <$cur_design> since current design is empty."
set design_name [get_property NAME $cur_design]
}
common::send_gid_msg -ssname BD::TCL -id 2002 -severity "INFO" "Constructing design in IPI design <$cur_design>..."
} elseif { ${cur_design} ne "" && $list_cells ne "" && $cur_design eq $design_name } {
# USE CASES:
# 5) Current design opened AND has components AND same names.
set errMsg "Design <$design_name> already exists in your project, please set the variable <design_name> to another value."
set nRet 1
} elseif { [get_files -quiet ${design_name}.bd] ne "" } {
# USE CASES:
# 6) Current opened design, has components, but diff names, design_name exists in project.
# 7) No opened design, design_name exists in project.
set errMsg "Design <$design_name> already exists in your project, please set the variable <design_name> to another value."
set nRet 2
} else {
# USE CASES:
# 8) No opened design, design_name not in project.
# 9) Current opened design, has components, but diff names, design_name not in project.
common::send_gid_msg -ssname BD::TCL -id 2003 -severity "INFO" "Currently there is no design <$design_name> in project, so creating one..."
create_bd_design $design_name
common::send_gid_msg -ssname BD::TCL -id 2004 -severity "INFO" "Making design <$design_name> as current_bd_design."
current_bd_design $design_name
}
common::send_gid_msg -ssname BD::TCL -id 2005 -severity "INFO" "Currently the variable <design_name> is equal to \"$design_name\"."
if { $nRet != 0 } {
catch {common::send_gid_msg -ssname BD::TCL -id 2006 -severity "ERROR" $errMsg}
return $nRet
}
set bCheckIPsPassed 1
##################################################################
# CHECK IPs
##################################################################
set bCheckIPs 1
if { $bCheckIPs == 1 } {
set list_check_ips "\
xilinx.com:ip:axi_clock_converter:2.1\
xilinx.com:ip:axi_dwidth_converter:2.1\
xilinx.com:ip:ddr4:2.2\
xilinx.com:ip:proc_sys_reset:5.0\
xilinx.com:ip:util_vector_logic:2.0\
xilinx.com:ip:util_ds_buf:2.2\
xilinx.com:ip:xdma:4.1\
xilinx.com:ip:xlconstant:1.1\
"
set list_ips_missing ""
common::send_gid_msg -ssname BD::TCL -id 2011 -severity "INFO" "Checking if the following IPs exist in the project's IP catalog: $list_check_ips ."
foreach ip_vlnv $list_check_ips {
set ip_obj [get_ipdefs -all $ip_vlnv]
if { $ip_obj eq "" } {
lappend list_ips_missing $ip_vlnv
}
}
if { $list_ips_missing ne "" } {
catch {common::send_gid_msg -ssname BD::TCL -id 2012 -severity "ERROR" "The following IPs are not found in the IP Catalog:\n $list_ips_missing\n\nResolution: Please add the repository containing the IP(s) to the project." }
set bCheckIPsPassed 0
}
}
##################################################################
# CHECK Modules
##################################################################
set bCheckModules 1
if { $bCheckModules == 1 } {
set list_check_mods "\
axi_tieoff_master\
firesim_wrapper\
"
set list_mods_missing ""
common::send_gid_msg -ssname BD::TCL -id 2020 -severity "INFO" "Checking if the following modules exist in the project's sources: $list_check_mods ."
foreach mod_vlnv $list_check_mods {
if { [can_resolve_reference $mod_vlnv] == 0 } {
lappend list_mods_missing $mod_vlnv
}
}
if { $list_mods_missing ne "" } {
catch {common::send_gid_msg -ssname BD::TCL -id 2021 -severity "ERROR" "The following module(s) are not found in the project: $list_mods_missing" }
common::send_gid_msg -ssname BD::TCL -id 2022 -severity "INFO" "Please add source files for the missing module(s) above."
set bCheckIPsPassed 0
}
}
if { $bCheckIPsPassed != 1 } {
common::send_gid_msg -ssname BD::TCL -id 2023 -severity "WARNING" "Will not continue with creation of design due to the error(s) above."
return 3
}
##################################################################
# DESIGN PROCs
##################################################################
# Procedure to create entire design; Provide argument to make
# procedure reusable. If parentCell is "", will use root.
proc create_root_design { parentCell firesim_freq } {
variable script_folder
variable design_name
if { $parentCell eq "" } {
set parentCell [get_bd_cells /]
}
# Get object for parentCell
set parentObj [get_bd_cells $parentCell]
if { $parentObj == "" } {
catch {common::send_gid_msg -ssname BD::TCL -id 2090 -severity "ERROR" "Unable to find parent cell <$parentCell>!"}
return
}
# Make sure parentObj is hier blk
set parentType [get_property TYPE $parentObj]
if { $parentType ne "hier" } {
catch {common::send_gid_msg -ssname BD::TCL -id 2091 -severity "ERROR" "Parent <$parentObj> has TYPE = <$parentType>. Expected to be <hier>."}
return
}
# Save current instance; Restore later
set oldCurInst [current_bd_instance .]
# Set parent object as current
current_bd_instance $parentObj
# Create interface ports
set ddr4_sdram_c0 [ create_bd_intf_port -mode Master -vlnv xilinx.com:interface:ddr4_rtl:1.0 ddr4_sdram_c0 ]
set default_300mhz_clk0 [ create_bd_intf_port -mode Slave -vlnv xilinx.com:interface:diff_clock_rtl:1.0 default_300mhz_clk0 ]
set_property -dict [ list \
CONFIG.FREQ_HZ {300000000} \
] $default_300mhz_clk0
set pci_express_x16 [ create_bd_intf_port -mode Master -vlnv xilinx.com:interface:pcie_7x_mgt_rtl:1.0 pci_express_x16 ]
set pcie_refclk [ create_bd_intf_port -mode Slave -vlnv xilinx.com:interface:diff_clock_rtl:1.0 pcie_refclk ]
set_property -dict [ list \
CONFIG.FREQ_HZ {100000000} \
] $pcie_refclk
# Create ports
set pcie_perstn [ create_bd_port -dir I -type rst pcie_perstn ]
set_property -dict [ list \
CONFIG.POLARITY {ACTIVE_LOW} \
] $pcie_perstn
set resetn [ create_bd_port -dir I -type rst resetn ]
set_property -dict [ list \
CONFIG.POLARITY {ACTIVE_LOW} \
] $resetn
# Create instance: axi_clock_converter_0, and set properties
set axi_clock_converter_0 [ create_bd_cell -type ip -vlnv xilinx.com:ip:axi_clock_converter:2.1 axi_clock_converter_0 ]
# Create instance: axi_clock_converter_1, and set properties
set axi_clock_converter_1 [ create_bd_cell -type ip -vlnv xilinx.com:ip:axi_clock_converter:2.1 axi_clock_converter_1 ]
# Create instance: axi_dwidth_converter_0, and set properties
set axi_dwidth_converter_0 [ create_bd_cell -type ip -vlnv xilinx.com:ip:axi_dwidth_converter:2.1 axi_dwidth_converter_0 ]
# Clock conversion is only available in the upsizer FIFO mode; by default the converter is in downsizer mode, so the correct output width must be entered manually.
set_property -dict [list CONFIG.MI_DATA_WIDTH.VALUE_SRC USER] $axi_dwidth_converter_0
set_property -dict [list CONFIG.MI_DATA_WIDTH {512}] $axi_dwidth_converter_0
set_property -dict [ list \
CONFIG.SI_DATA_WIDTH {64} \
CONFIG.SI_ID_WIDTH {16} \
CONFIG.FIFO_MODE {2} \
CONFIG.ACLK_ASYNC {1} \
] $axi_dwidth_converter_0
# Create instance: axi_tieoff_master_0, and set properties
set block_name axi_tieoff_master
set block_cell_name axi_tieoff_master_0
if { [catch {set axi_tieoff_master_0 [create_bd_cell -type module -reference $block_name $block_cell_name] } errmsg] } {
catch {common::send_gid_msg -ssname BD::TCL -id 2095 -severity "ERROR" "Unable to add referenced block <$block_name>. Please add the files for ${block_name}'s definition into the project."}
return 1
} elseif { $axi_tieoff_master_0 eq "" } {
catch {common::send_gid_msg -ssname BD::TCL -id 2096 -severity "ERROR" "Unable to referenced block <$block_name>. Please add the files for ${block_name}'s definition into the project."}
return 1
}
# Create instance: ddr4_0, and set properties
set ddr4_0 [ create_bd_cell -type ip -vlnv xilinx.com:ip:ddr4:2.2 ddr4_0 ]
set_property -dict [ list \
CONFIG.C0.DDR4_AUTO_AP_COL_A3 {true} \
CONFIG.C0.DDR4_InputClockPeriod {3332} \
CONFIG.C0.DDR4_MCS_ECC {false} \
CONFIG.C0_CLOCK_BOARD_INTERFACE {default_300mhz_clk0} \
CONFIG.C0_DDR4_BOARD_INTERFACE {ddr4_sdram_c0} \
CONFIG.Debug_Signal {Disable} \
CONFIG.RESET_BOARD_INTERFACE {resetn} \
] $ddr4_0
# Create instance: firesim_wrapper_0, and set properties
set block_name firesim_wrapper
set block_cell_name firesim_wrapper_0
if { [catch {set firesim_wrapper_0 [create_bd_cell -type module -reference $block_name $block_cell_name] } errmsg] } {
catch {common::send_gid_msg -ssname BD::TCL -id 2095 -severity "ERROR" "Unable to add referenced block <$block_name>. Please add the files for ${block_name}'s definition into the project."}
return 1
} elseif { $firesim_wrapper_0 eq "" } {
catch {common::send_gid_msg -ssname BD::TCL -id 2096 -severity "ERROR" "Unable to referenced block <$block_name>. Please add the files for ${block_name}'s definition into the project."}
return 1
}
# Create instance: proc_sys_reset_0, and set properties
set proc_sys_reset_0 [ create_bd_cell -type ip -vlnv xilinx.com:ip:proc_sys_reset:5.0 proc_sys_reset_0 ]
# Create instance: proc_sys_reset_1, and set properties
set proc_sys_reset_1 [ create_bd_cell -type ip -vlnv xilinx.com:ip:proc_sys_reset:5.0 proc_sys_reset_1 ]
# Create instance: resetn_inv_0, and set properties
set resetn_inv_0 [ create_bd_cell -type ip -vlnv xilinx.com:ip:util_vector_logic:2.0 resetn_inv_0 ]
set_property -dict [ list \
CONFIG.C_OPERATION {not} \
CONFIG.C_SIZE {1} \
] $resetn_inv_0
# Create instance: util_ds_buf, and set properties
set util_ds_buf [ create_bd_cell -type ip -vlnv xilinx.com:ip:util_ds_buf:2.2 util_ds_buf ]
set_property -dict [ list \
CONFIG.C_BUF_TYPE {IBUFDSGTE} \
CONFIG.DIFF_CLK_IN_BOARD_INTERFACE {pcie_refclk} \
CONFIG.USE_BOARD_FLOW {true} \
] $util_ds_buf
# Create instance: xdma_0, and set properties
set xdma_0 [ create_bd_cell -type ip -vlnv xilinx.com:ip:xdma:4.1 xdma_0 ]
set_property -dict [ list \
CONFIG.PCIE_BOARD_INTERFACE {pci_express_x16} \
CONFIG.SYS_RST_N_BOARD_INTERFACE {pcie_perstn} \
CONFIG.axilite_master_en {true} \
CONFIG.axilite_master_size {32} \
CONFIG.pciebar2axibar_axist_bypass {0x0000000000000000} \
CONFIG.pf0_msix_cap_pba_bir {BAR_1} \
CONFIG.pf0_msix_cap_table_bir {BAR_1} \
CONFIG.xdma_axi_intf_mm {AXI_Memory_Mapped} \
CONFIG.xdma_rnum_chnl {4} \
CONFIG.xdma_wnum_chnl {4} \
] $xdma_0
# Create instance: xlconstant_0, and set properties
set xlconstant_0 [ create_bd_cell -type ip -vlnv xilinx.com:ip:xlconstant:1.1 xlconstant_0 ]
set_property -dict [ list \
CONFIG.CONST_VAL {0} \
] $xlconstant_0
set clk_wiz_0 [create_bd_cell -type ip -vlnv xilinx.com:ip:clk_wiz:6.0 clk_wiz_0]
set_property -dict [list CONFIG.CLKOUT1_REQUESTED_OUT_FREQ $firesim_freq CONFIG.USE_LOCKED {false}] $clk_wiz_0
# Create interface connections
connect_bd_intf_net -intf_net axi_clock_converter_0_M_AXI [get_bd_intf_pins axi_clock_converter_0/M_AXI] [get_bd_intf_pins firesim_wrapper_0/S_AXI_DMA]
connect_bd_intf_net -intf_net axi_clock_converter_1_M_AXI [get_bd_intf_pins axi_clock_converter_1/M_AXI] [get_bd_intf_pins firesim_wrapper_0/S_AXI_CTRL]
connect_bd_intf_net -intf_net axi_dwidth_converter_0_M_AXI [get_bd_intf_pins axi_dwidth_converter_0/M_AXI] [get_bd_intf_pins ddr4_0/C0_DDR4_S_AXI]
connect_bd_intf_net -intf_net axi_tieoff_master_0_TIEOFF_M_AXI_CTRL_0 [get_bd_intf_pins axi_tieoff_master_0/TIEOFF_M_AXI_CTRL_0] [get_bd_intf_pins ddr4_0/C0_DDR4_S_AXI_CTRL]
connect_bd_intf_net -intf_net ddr4_0_C0_DDR4 [get_bd_intf_ports ddr4_sdram_c0] [get_bd_intf_pins ddr4_0/C0_DDR4]
connect_bd_intf_net -intf_net default_300mhz_clk0_1 [get_bd_intf_ports default_300mhz_clk0] [get_bd_intf_pins ddr4_0/C0_SYS_CLK]
connect_bd_intf_net -intf_net firesim_wrapper_0_M_AXI_DDR0 [get_bd_intf_pins axi_dwidth_converter_0/S_AXI] [get_bd_intf_pins firesim_wrapper_0/M_AXI_DDR0]
connect_bd_intf_net -intf_net pcie_refclk_1 [get_bd_intf_ports pcie_refclk] [get_bd_intf_pins util_ds_buf/CLK_IN_D]
connect_bd_intf_net -intf_net xdma_0_M_AXI [get_bd_intf_pins axi_clock_converter_0/S_AXI] [get_bd_intf_pins xdma_0/M_AXI]
connect_bd_intf_net -intf_net xdma_0_M_AXI_LITE [get_bd_intf_pins axi_clock_converter_1/S_AXI] [get_bd_intf_pins xdma_0/M_AXI_LITE]
connect_bd_intf_net -intf_net xdma_0_pcie_mgt [get_bd_intf_ports pci_express_x16] [get_bd_intf_pins xdma_0/pcie_mgt]
# Create port connections
connect_bd_net -net sys_clk_30 [get_bd_pins axi_clock_converter_0/m_axi_aclk] [get_bd_pins axi_clock_converter_1/m_axi_aclk] [get_bd_pins axi_dwidth_converter_0/s_axi_aclk] [get_bd_pins firesim_wrapper_0/sys_clk_30] [get_bd_pins proc_sys_reset_0/slowest_sync_clk] [get_bd_pins clk_wiz_0/clk_out1]
connect_bd_net -net ddr4_0_c0_ddr4_ui_clk [get_bd_pins axi_dwidth_converter_0/m_axi_aclk] [get_bd_pins ddr4_0/c0_ddr4_ui_clk] [get_bd_pins proc_sys_reset_1/slowest_sync_clk] [get_bd_pins clk_wiz_0/clk_in1]
connect_bd_net -net pcie_perstn_1 [get_bd_ports pcie_perstn] [get_bd_pins xdma_0/sys_rst_n]
connect_bd_net -net proc_sys_reset_0_interconnect_aresetn [get_bd_pins axi_clock_converter_0/m_axi_aresetn] [get_bd_pins axi_clock_converter_1/m_axi_aresetn] [get_bd_pins axi_dwidth_converter_0/s_axi_aresetn] [get_bd_pins firesim_wrapper_0/sys_reset_n] [get_bd_pins proc_sys_reset_0/interconnect_aresetn]
connect_bd_net -net resetn_1 [get_bd_ports resetn] [get_bd_pins proc_sys_reset_0/ext_reset_in] [get_bd_pins proc_sys_reset_1/ext_reset_in] [get_bd_pins resetn_inv_0/Op1]
connect_bd_net -net resetn_inv_0_Res [get_bd_pins ddr4_0/sys_rst] [get_bd_pins resetn_inv_0/Res] [get_bd_pins clk_wiz_0/reset]
connect_bd_net -net rst_ddr4_0_300M_interconnect_aresetn [get_bd_pins axi_dwidth_converter_0/m_axi_aresetn] [get_bd_pins ddr4_0/c0_ddr4_aresetn] [get_bd_pins proc_sys_reset_1/interconnect_aresetn]
connect_bd_net -net util_ds_buf_IBUF_DS_ODIV2 [get_bd_pins util_ds_buf/IBUF_DS_ODIV2] [get_bd_pins xdma_0/sys_clk]
connect_bd_net -net util_ds_buf_IBUF_OUT [get_bd_pins util_ds_buf/IBUF_OUT] [get_bd_pins xdma_0/sys_clk_gt]
connect_bd_net -net xdma_0_axi_aclk [get_bd_pins axi_clock_converter_0/s_axi_aclk] [get_bd_pins axi_clock_converter_1/s_axi_aclk] [get_bd_pins xdma_0/axi_aclk]
connect_bd_net -net xdma_0_axi_aresetn [get_bd_pins axi_clock_converter_0/s_axi_aresetn] [get_bd_pins axi_clock_converter_1/s_axi_aresetn] [get_bd_pins xdma_0/axi_aresetn]
connect_bd_net -net xlconstant_0_dout [get_bd_pins xdma_0/usr_irq_req] [get_bd_pins xlconstant_0/dout]
# Create address segments
# Restore current instance
current_bd_instance $oldCurInst
validate_bd_design
save_bd_design
}
# End of create_root_design()
##################################################################
# MAIN FLOW
##################################################################
create_root_design "" $desired_host_frequency
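For context, this generated block-design script does not stand alone: the including build flow is expected to define the requested host clock before the create_root_design call above. A minimal sketch of that handoff, with illustrative values and an illustrative script path (the real caller is main.tcl, shown later in this diff):

    # Sketch only: main.tcl derives these values from its command-line tclargs.
    set root_dir [pwd]
    set desired_host_frequency 30   ;# requested FireSim host clock, in MHz
    source ${root_dir}/scripts/create_bd_2021.1.tcl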

View File

@ -0,0 +1,389 @@
################################################################
# This is a generated script based on design: design_1
#
# Though there are limitations about the generated script,
# the main purpose of this utility is to make learning
# IP Integrator Tcl commands easier.
################################################################
namespace eval _tcl {
proc get_script_folder {} {
set script_path [file normalize [info script]]
set script_folder [file dirname $script_path]
return $script_folder
}
}
variable script_folder
set script_folder [_tcl::get_script_folder]
################################################################
# Check if script is running in correct Vivado version.
################################################################
set scripts_vivado_version 2022.2
set current_vivado_version [version -short]
if { [string first $scripts_vivado_version $current_vivado_version] == -1 } {
puts ""
catch {common::send_gid_msg -ssname BD::TCL -id 2041 -severity "ERROR" "This script was generated using Vivado <$scripts_vivado_version> and is being run in <$current_vivado_version> of Vivado. Please run the script in Vivado <$scripts_vivado_version> then open the design in Vivado <$current_vivado_version>. Upgrade the design by running \"Tools => Report => Report IP Status...\", then run write_bd_tcl to create an updated script."}
return 1
}
################################################################
# START
################################################################
# The design that will be created by this Tcl script contains the following
# module references:
# axi_tieoff_master, firesim_wrapper
# Please add the sources of those modules before sourcing this Tcl script.
# CHANGE DESIGN NAME HERE
variable design_name
set design_name design_1
# If you do not already have an existing IP Integrator design open,
# you can create a design using the following command:
# create_bd_design $design_name
# Creating design if needed
set errMsg ""
set nRet 0
set cur_design [current_bd_design -quiet]
set list_cells [get_bd_cells -quiet]
if { ${design_name} eq "" } {
# USE CASES:
# 1) Design_name not set
set errMsg "Please set the variable <design_name> to a non-empty value."
set nRet 1
} elseif { ${cur_design} ne "" && ${list_cells} eq "" } {
# USE CASES:
# 2): Current design opened AND is empty AND names same.
# 3): Current design opened AND is empty AND names diff; design_name NOT in project.
# 4): Current design opened AND is empty AND names diff; design_name exists in project.
if { $cur_design ne $design_name } {
common::send_gid_msg -ssname BD::TCL -id 2001 -severity "INFO" "Changing value of <design_name> from <$design_name> to <$cur_design> since current design is empty."
set design_name [get_property NAME $cur_design]
}
common::send_gid_msg -ssname BD::TCL -id 2002 -severity "INFO" "Constructing design in IPI design <$cur_design>..."
} elseif { ${cur_design} ne "" && $list_cells ne "" && $cur_design eq $design_name } {
# USE CASES:
# 5) Current design opened AND has components AND same names.
set errMsg "Design <$design_name> already exists in your project, please set the variable <design_name> to another value."
set nRet 1
} elseif { [get_files -quiet ${design_name}.bd] ne "" } {
# USE CASES:
# 6) Current opened design, has components, but diff names, design_name exists in project.
# 7) No opened design, design_name exists in project.
set errMsg "Design <$design_name> already exists in your project, please set the variable <design_name> to another value."
set nRet 2
} else {
# USE CASES:
# 8) No opened design, design_name not in project.
# 9) Current opened design, has components, but diff names, design_name not in project.
common::send_gid_msg -ssname BD::TCL -id 2003 -severity "INFO" "Currently there is no design <$design_name> in project, so creating one..."
create_bd_design $design_name
common::send_gid_msg -ssname BD::TCL -id 2004 -severity "INFO" "Making design <$design_name> as current_bd_design."
current_bd_design $design_name
}
common::send_gid_msg -ssname BD::TCL -id 2005 -severity "INFO" "Currently the variable <design_name> is equal to \"$design_name\"."
if { $nRet != 0 } {
catch {common::send_gid_msg -ssname BD::TCL -id 2006 -severity "ERROR" $errMsg}
return $nRet
}
set bCheckIPsPassed 1
##################################################################
# CHECK IPs
##################################################################
set bCheckIPs 1
if { $bCheckIPs == 1 } {
set list_check_ips "\
xilinx.com:ip:axi_clock_converter:2.1\
xilinx.com:ip:axi_dwidth_converter:2.1\
xilinx.com:ip:clk_wiz:6.0\
xilinx.com:ip:ddr4:2.2\
xilinx.com:ip:proc_sys_reset:5.0\
xilinx.com:ip:util_vector_logic:2.0\
xilinx.com:ip:util_ds_buf:2.2\
xilinx.com:ip:xdma:4.1\
xilinx.com:ip:xlconstant:1.1\
"
set list_ips_missing ""
common::send_gid_msg -ssname BD::TCL -id 2011 -severity "INFO" "Checking if the following IPs exist in the project's IP catalog: $list_check_ips ."
foreach ip_vlnv $list_check_ips {
set ip_obj [get_ipdefs -all $ip_vlnv]
if { $ip_obj eq "" } {
lappend list_ips_missing $ip_vlnv
}
}
if { $list_ips_missing ne "" } {
catch {common::send_gid_msg -ssname BD::TCL -id 2012 -severity "ERROR" "The following IPs are not found in the IP Catalog:\n $list_ips_missing\n\nResolution: Please add the repository containing the IP(s) to the project." }
set bCheckIPsPassed 0
}
}
##################################################################
# CHECK Modules
##################################################################
set bCheckModules 1
if { $bCheckModules == 1 } {
set list_check_mods "\
axi_tieoff_master\
firesim_wrapper\
"
set list_mods_missing ""
common::send_gid_msg -ssname BD::TCL -id 2020 -severity "INFO" "Checking if the following modules exist in the project's sources: $list_check_mods ."
foreach mod_vlnv $list_check_mods {
if { [can_resolve_reference $mod_vlnv] == 0 } {
lappend list_mods_missing $mod_vlnv
}
}
if { $list_mods_missing ne "" } {
catch {common::send_gid_msg -ssname BD::TCL -id 2021 -severity "ERROR" "The following module(s) are not found in the project: $list_mods_missing" }
common::send_gid_msg -ssname BD::TCL -id 2022 -severity "INFO" "Please add source files for the missing module(s) above."
set bCheckIPsPassed 0
}
}
if { $bCheckIPsPassed != 1 } {
common::send_gid_msg -ssname BD::TCL -id 2023 -severity "WARNING" "Will not continue with creation of design due to the error(s) above."
return 3
}
##################################################################
# DESIGN PROCs
##################################################################
# Procedure to create entire design; Provide argument to make
# procedure reusable. If parentCell is "", will use root.
proc create_root_design { parentCell firesim_freq } {
variable script_folder
variable design_name
if { $parentCell eq "" } {
set parentCell [get_bd_cells /]
}
# Get object for parentCell
set parentObj [get_bd_cells $parentCell]
if { $parentObj == "" } {
catch {common::send_gid_msg -ssname BD::TCL -id 2090 -severity "ERROR" "Unable to find parent cell <$parentCell>!"}
return
}
# Make sure parentObj is hier blk
set parentType [get_property TYPE $parentObj]
if { $parentType ne "hier" } {
catch {common::send_gid_msg -ssname BD::TCL -id 2091 -severity "ERROR" "Parent <$parentObj> has TYPE = <$parentType>. Expected to be <hier>."}
return
}
# Save current instance; Restore later
set oldCurInst [current_bd_instance .]
# Set parent object as current
current_bd_instance $parentObj
# Create interface ports
set ddr4_sdram_c0 [ create_bd_intf_port -mode Master -vlnv xilinx.com:interface:ddr4_rtl:1.0 ddr4_sdram_c0 ]
set default_300mhz_clk0 [ create_bd_intf_port -mode Slave -vlnv xilinx.com:interface:diff_clock_rtl:1.0 default_300mhz_clk0 ]
set_property -dict [ list \
CONFIG.FREQ_HZ {300000000} \
] $default_300mhz_clk0
set pci_express_x16 [ create_bd_intf_port -mode Master -vlnv xilinx.com:interface:pcie_7x_mgt_rtl:1.0 pci_express_x16 ]
set pcie_refclk [ create_bd_intf_port -mode Slave -vlnv xilinx.com:interface:diff_clock_rtl:1.0 pcie_refclk ]
set_property -dict [ list \
CONFIG.FREQ_HZ {100000000} \
] $pcie_refclk
# Create ports
set pcie_perstn [ create_bd_port -dir I -type rst pcie_perstn ]
set_property -dict [ list \
CONFIG.POLARITY {ACTIVE_LOW} \
] $pcie_perstn
set resetn [ create_bd_port -dir I -type rst resetn ]
set_property -dict [ list \
CONFIG.POLARITY {ACTIVE_LOW} \
] $resetn
# Create instance: axi_clock_converter_0, and set properties
set axi_clock_converter_0 [ create_bd_cell -type ip -vlnv xilinx.com:ip:axi_clock_converter:2.1 axi_clock_converter_0 ]
# Create instance: axi_clock_converter_1, and set properties
set axi_clock_converter_1 [ create_bd_cell -type ip -vlnv xilinx.com:ip:axi_clock_converter:2.1 axi_clock_converter_1 ]
# Create instance: axi_dwidth_converter_0, and set properties
set axi_dwidth_converter_0 [ create_bd_cell -type ip -vlnv xilinx.com:ip:axi_dwidth_converter:2.1 axi_dwidth_converter_0 ]
set_property -dict [list \
CONFIG.ACLK_ASYNC {1} \
CONFIG.FIFO_MODE {2} \
CONFIG.MI_DATA_WIDTH {512} \
CONFIG.SI_DATA_WIDTH {64} \
CONFIG.SI_ID_WIDTH {16} \
] $axi_dwidth_converter_0
# Create instance: axi_tieoff_master_0, and set properties
set block_name axi_tieoff_master
set block_cell_name axi_tieoff_master_0
if { [catch {set axi_tieoff_master_0 [create_bd_cell -type module -reference $block_name $block_cell_name] } errmsg] } {
catch {common::send_gid_msg -ssname BD::TCL -id 2095 -severity "ERROR" "Unable to add referenced block <$block_name>. Please add the files for ${block_name}'s definition into the project."}
return 1
} elseif { $axi_tieoff_master_0 eq "" } {
catch {common::send_gid_msg -ssname BD::TCL -id 2096 -severity "ERROR" "Unable to referenced block <$block_name>. Please add the files for ${block_name}'s definition into the project."}
return 1
}
# Create instance: clk_wiz_0, and set properties
set clk_wiz_0 [ create_bd_cell -type ip -vlnv xilinx.com:ip:clk_wiz:6.0 clk_wiz_0 ]
set_property -dict [list \
CONFIG.CLKOUT1_REQUESTED_OUT_FREQ $firesim_freq \
CONFIG.USE_LOCKED {false} \
] $clk_wiz_0
# Create instance: ddr4_0, and set properties
set ddr4_0 [ create_bd_cell -type ip -vlnv xilinx.com:ip:ddr4:2.2 ddr4_0 ]
set_property -dict [list \
CONFIG.ADDN_UI_CLKOUT1_FREQ_HZ {100} \
CONFIG.C0.DDR4_AUTO_AP_COL_A3 {true} \
CONFIG.C0.DDR4_AxiAddressWidth {34} \
CONFIG.C0.DDR4_EN_PARITY {true} \
CONFIG.C0.DDR4_MCS_ECC {false} \
CONFIG.C0.DDR4_Mem_Add_Map {ROW_COLUMN_BANK_INTLV} \
CONFIG.C0_CLOCK_BOARD_INTERFACE {default_300mhz_clk0} \
CONFIG.C0_DDR4_BOARD_INTERFACE {ddr4_sdram_c0} \
CONFIG.Debug_Signal {Disable} \
CONFIG.RESET_BOARD_INTERFACE {resetn} \
] $ddr4_0
# Create instance: firesim_wrapper_0, and set properties
set block_name firesim_wrapper
set block_cell_name firesim_wrapper_0
if { [catch {set firesim_wrapper_0 [create_bd_cell -type module -reference $block_name $block_cell_name] } errmsg] } {
catch {common::send_gid_msg -ssname BD::TCL -id 2095 -severity "ERROR" "Unable to add referenced block <$block_name>. Please add the files for ${block_name}'s definition into the project."}
return 1
} elseif { $firesim_wrapper_0 eq "" } {
catch {common::send_gid_msg -ssname BD::TCL -id 2096 -severity "ERROR" "Unable to referenced block <$block_name>. Please add the files for ${block_name}'s definition into the project."}
return 1
}
# Create instance: proc_sys_reset_0, and set properties
set proc_sys_reset_0 [ create_bd_cell -type ip -vlnv xilinx.com:ip:proc_sys_reset:5.0 proc_sys_reset_0 ]
# Create instance: proc_sys_reset_1, and set properties
set proc_sys_reset_1 [ create_bd_cell -type ip -vlnv xilinx.com:ip:proc_sys_reset:5.0 proc_sys_reset_1 ]
# Create instance: resetn_inv_0, and set properties
set resetn_inv_0 [ create_bd_cell -type ip -vlnv xilinx.com:ip:util_vector_logic:2.0 resetn_inv_0 ]
set_property -dict [list \
CONFIG.C_OPERATION {not} \
CONFIG.C_SIZE {1} \
] $resetn_inv_0
# Create instance: util_ds_buf, and set properties
set util_ds_buf [ create_bd_cell -type ip -vlnv xilinx.com:ip:util_ds_buf:2.2 util_ds_buf ]
set_property -dict [list \
CONFIG.DIFF_CLK_IN_BOARD_INTERFACE {pcie_refclk} \
CONFIG.USE_BOARD_FLOW {true} \
] $util_ds_buf
# Create instance: xdma_0, and set properties
set xdma_0 [ create_bd_cell -type ip -vlnv xilinx.com:ip:xdma:4.1 xdma_0 ]
set_property -dict [list \
CONFIG.PCIE_BOARD_INTERFACE {pci_express_x16} \
CONFIG.SYS_RST_N_BOARD_INTERFACE {pcie_perstn} \
CONFIG.axilite_master_en {true} \
CONFIG.axilite_master_size {32} \
CONFIG.xdma_axi_intf_mm {AXI_Memory_Mapped} \
CONFIG.xdma_rnum_chnl {4} \
CONFIG.xdma_wnum_chnl {4} \
] $xdma_0
# Create instance: xlconstant_0, and set properties
set xlconstant_0 [ create_bd_cell -type ip -vlnv xilinx.com:ip:xlconstant:1.1 xlconstant_0 ]
set_property CONFIG.CONST_VAL {0} $xlconstant_0
# Create interface connections
connect_bd_intf_net -intf_net axi_clock_converter_0_M_AXI [get_bd_intf_pins axi_clock_converter_0/M_AXI] [get_bd_intf_pins firesim_wrapper_0/S_AXI_DMA]
connect_bd_intf_net -intf_net axi_clock_converter_1_M_AXI [get_bd_intf_pins axi_clock_converter_1/M_AXI] [get_bd_intf_pins firesim_wrapper_0/S_AXI_CTRL]
connect_bd_intf_net -intf_net axi_dwidth_converter_0_M_AXI [get_bd_intf_pins axi_dwidth_converter_0/M_AXI] [get_bd_intf_pins ddr4_0/C0_DDR4_S_AXI]
connect_bd_intf_net -intf_net axi_tieoff_master_0_TIEOFF_M_AXI_CTRL_0 [get_bd_intf_pins axi_tieoff_master_0/TIEOFF_M_AXI_CTRL_0] [get_bd_intf_pins ddr4_0/C0_DDR4_S_AXI_CTRL]
connect_bd_intf_net -intf_net ddr4_0_C0_DDR4 [get_bd_intf_ports ddr4_sdram_c0] [get_bd_intf_pins ddr4_0/C0_DDR4]
connect_bd_intf_net -intf_net default_300mhz_clk0_1 [get_bd_intf_ports default_300mhz_clk0] [get_bd_intf_pins ddr4_0/C0_SYS_CLK]
connect_bd_intf_net -intf_net firesim_wrapper_0_M_AXI_DDR0 [get_bd_intf_pins axi_dwidth_converter_0/S_AXI] [get_bd_intf_pins firesim_wrapper_0/M_AXI_DDR0]
connect_bd_intf_net -intf_net pcie_refclk_1 [get_bd_intf_ports pcie_refclk] [get_bd_intf_pins util_ds_buf/CLK_IN_D]
connect_bd_intf_net -intf_net xdma_0_M_AXI [get_bd_intf_pins axi_clock_converter_0/S_AXI] [get_bd_intf_pins xdma_0/M_AXI]
connect_bd_intf_net -intf_net xdma_0_M_AXI_LITE [get_bd_intf_pins axi_clock_converter_1/S_AXI] [get_bd_intf_pins xdma_0/M_AXI_LITE]
connect_bd_intf_net -intf_net xdma_0_pcie_mgt [get_bd_intf_ports pci_express_x16] [get_bd_intf_pins xdma_0/pcie_mgt]
# Create port connections
connect_bd_net -net ddr4_0_c0_ddr4_ui_clk [get_bd_pins axi_dwidth_converter_0/m_axi_aclk] [get_bd_pins clk_wiz_0/clk_in1] [get_bd_pins ddr4_0/c0_ddr4_ui_clk] [get_bd_pins proc_sys_reset_1/slowest_sync_clk]
connect_bd_net -net pcie_perstn_1 [get_bd_ports pcie_perstn] [get_bd_pins xdma_0/sys_rst_n]
connect_bd_net -net proc_sys_reset_0_interconnect_aresetn [get_bd_pins axi_clock_converter_0/m_axi_aresetn] [get_bd_pins axi_clock_converter_1/m_axi_aresetn] [get_bd_pins axi_dwidth_converter_0/s_axi_aresetn] [get_bd_pins firesim_wrapper_0/sys_reset_n] [get_bd_pins proc_sys_reset_0/interconnect_aresetn]
connect_bd_net -net resetn_1 [get_bd_ports resetn] [get_bd_pins proc_sys_reset_0/ext_reset_in] [get_bd_pins proc_sys_reset_1/ext_reset_in] [get_bd_pins resetn_inv_0/Op1]
connect_bd_net -net resetn_inv_0_Res [get_bd_pins clk_wiz_0/reset] [get_bd_pins ddr4_0/sys_rst] [get_bd_pins resetn_inv_0/Res]
connect_bd_net -net rst_ddr4_0_300M_interconnect_aresetn [get_bd_pins axi_dwidth_converter_0/m_axi_aresetn] [get_bd_pins ddr4_0/c0_ddr4_aresetn] [get_bd_pins proc_sys_reset_1/interconnect_aresetn]
connect_bd_net -net sys_clk_30 [get_bd_pins axi_clock_converter_0/m_axi_aclk] [get_bd_pins axi_clock_converter_1/m_axi_aclk] [get_bd_pins axi_dwidth_converter_0/s_axi_aclk] [get_bd_pins clk_wiz_0/clk_out1] [get_bd_pins firesim_wrapper_0/sys_clk_30] [get_bd_pins proc_sys_reset_0/slowest_sync_clk]
connect_bd_net -net util_ds_buf_IBUF_DS_ODIV2 [get_bd_pins util_ds_buf/IBUF_DS_ODIV2] [get_bd_pins xdma_0/sys_clk]
connect_bd_net -net util_ds_buf_IBUF_OUT [get_bd_pins util_ds_buf/IBUF_OUT] [get_bd_pins xdma_0/sys_clk_gt]
connect_bd_net -net xdma_0_axi_aclk [get_bd_pins axi_clock_converter_0/s_axi_aclk] [get_bd_pins axi_clock_converter_1/s_axi_aclk] [get_bd_pins xdma_0/axi_aclk]
connect_bd_net -net xdma_0_axi_aresetn [get_bd_pins axi_clock_converter_0/s_axi_aresetn] [get_bd_pins axi_clock_converter_1/s_axi_aresetn] [get_bd_pins xdma_0/axi_aresetn]
connect_bd_net -net xlconstant_0_dout [get_bd_pins xdma_0/usr_irq_req] [get_bd_pins xlconstant_0/dout]
# Create address segments
# Restore current instance
current_bd_instance $oldCurInst
validate_bd_design
save_bd_design
}
# End of create_root_design()
##################################################################
# MAIN FLOW
##################################################################
create_root_design "" $desired_host_frequency

View File

@ -0,0 +1,118 @@
set ml_start_directive Explore
set ml_max_critical_paths 150
set ml_max_strategies 5
set ml_qor_suggestions ${root_dir}/vivado_proj/ml_qor_suggestions.rqs
set ml_strategy_dir ${root_dir}/vivado_proj/ml_strategies
# Cleanup
foreach path [list ${ml_qor_suggestions} ${ml_strategy_dir}] {
if {[file exists ${path}]} {
file delete -force -- ${path}
}
}
set impl_run [get_runs impl_1]
set WNS -1
set WHS -1
reset_runs ${impl_run}
set_property STEPS.OPT_DESIGN.ARGS.DIRECTIVE ${ml_start_directive} ${impl_run}
set_property STEPS.PLACE_DESIGN.ARGS.DIRECTIVE ${ml_start_directive} ${impl_run}
set_property STEPS.ROUTE_DESIGN.ARGS.DIRECTIVE ${ml_start_directive} ${impl_run}
set_property STEPS.PHYS_OPT_DESIGN.IS_ENABLED true ${impl_run}
set_property STEPS.PHYS_OPT_DESIGN.ARGS.DIRECTIVE ${ml_start_directive} ${impl_run}
set_property STEPS.POST_ROUTE_PHYS_OPT_DESIGN.ARGS.DIRECTIVE ${ml_start_directive} ${impl_run}
launch_runs ${impl_run} -to_step route_design -jobs ${jobs}
wait_on_run ${impl_run}
if {[get_property PROGRESS [get_runs ${impl_run}]] != "100%"} {
puts "ERROR: implementation failed"
exit 1
}
set WNS [get_property STATS.WNS [get_runs ${impl_run}]]
set WHS [get_property STATS.WHS [get_runs ${impl_run}]]
if {$WNS < 0 || $WHS < 0} {
set ml_tcls [list]
open_run ${impl_run}
report_qor_suggestions -max_paths ${ml_max_critical_paths} -max_strategies ${ml_max_strategies} -no_split -quiet
write_qor_suggestions -force -strategy_dir ${ml_strategy_dir} ${ml_qor_suggestions}
close_design
for {set i 1} {$i <= ${ml_max_strategies}} {incr i} {
set tclFile ${root_dir}/vivado_proj/ml_strategies/impl_1Project_MLStrategyCreateRun${i}.tcl
if {[file exists ${tclFile}]} {
lappend ml_tcls ${tclFile}
}
}
if {([llength ${ml_tcls}] == 0) && ([file exists ${ml_qor_suggestions}])} {
puts "INFO: no ML strategies were found, using base qor suggestions"
add_files -force -fileset utils_1 ${ml_qor_suggestions}
reset_runs ${impl_run}
set_property RQS_FILES ${ml_qor_suggestions} ${impl_run}
set_property STEPS.OPT_DESIGN.ARGS.DIRECTIVE RQS ${impl_run}
set_property STEPS.PLACE_DESIGN.ARGS.DIRECTIVE RQS ${impl_run}
set_property STEPS.PHYS_OPT_DESIGN.IS_ENABLED true ${impl_run}
set_property STEPS.PHYS_OPT_DESIGN.ARGS.DIRECTIVE RQS ${impl_run}
set_property STEPS.ROUTE_DESIGN.ARGS.DIRECTIVE RQS ${impl_run}
launch_runs ${impl_run} -to_step route_design -jobs ${jobs}
wait_on_run ${impl_run}
if {[get_property PROGRESS [get_runs ${impl_run}]] != "100%"} {
puts "ERROR: implementation failed"
exit 1
}
set WNS [get_property STATS.WNS [get_runs ${impl_run}]]
set WHS [get_property STATS.WHS [get_runs ${impl_run}]]
} else {
foreach tclFile ${ml_tcls} {
puts "INFO: using ML strategy from ${tclFile}"
source ${tclFile}
set impl_run ${ml_strategy_run}
launch_runs ${impl_run} -to_step route_design -jobs ${jobs}
wait_on_run ${impl_run}
if {[get_property PROGRESS [get_runs ${impl_run}]] != "100%"} {
puts "ERROR: implementation failed"
exit 1
}
set WNS [get_property STATS.WNS [get_runs ${impl_run}]]
set WHS [get_property STATS.WHS [get_runs ${impl_run}]]
if {$WNS >= 0 && $WHS >= 0} {
break
}
}
}
if {$WNS < 0 || $WHS < 0} {
puts "INFO: no more ML strategies available"
}
}
if {$WNS < 0 || $WHS < 0} {
puts "ERROR: did not meet timing!"
exit 1
}
launch_runs ${impl_run} -next_step -jobs ${jobs}
wait_on_run ${impl_run}
if {[get_property PROGRESS [get_runs ${impl_run}]] != "100%"} {
puts "ERROR: implementation failed"
exit 1
}
file copy -force ${root_dir}/vivado_proj/firesim.runs/${impl_run}/design_1_wrapper.bit ${root_dir}/vivado_proj/firesim.bit
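The script above repeats the same post-route check after every attempt: implementation is only considered successful when both worst negative slack (WNS) and worst hold slack (WHS) are non-negative. As an aside, that check could be factored into a small helper; a minimal sketch with a hypothetical proc name that is not part of this patch:

    # Hypothetical helper (not in this patch): returns 1 when the named run
    # closed timing, i.e. both setup and hold slack are non-negative.
    proc timing_met {run_name} {
        set wns [get_property STATS.WNS [get_runs $run_name]]
        set whs [get_property STATS.WHS [get_runs $run_name]]
        return [expr {$wns >= 0 && $whs >= 0}]
    }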

View File

@ -0,0 +1,75 @@
variable impl_run [get_runs impl_1]
variable WNS -1
variable WHS -1
set idr_start_directive Explore
set impl_run [get_runs impl_1]
reset_runs ${impl_run}
set_property STEPS.OPT_DESIGN.ARGS.DIRECTIVE ${idr_start_directive} ${impl_run}
set_property STEPS.PLACE_DESIGN.ARGS.DIRECTIVE ${idr_start_directive} ${impl_run}
set_property STEPS.ROUTE_DESIGN.ARGS.DIRECTIVE ${idr_start_directive} ${impl_run}
set_property STEPS.PHYS_OPT_DESIGN.IS_ENABLED true ${impl_run}
set_property STEPS.PHYS_OPT_DESIGN.ARGS.DIRECTIVE ${idr_start_directive} ${impl_run}
set_property STEPS.POST_ROUTE_PHYS_OPT_DESIGN.ARGS.DIRECTIVE ${idr_start_directive} ${impl_run}
launch_runs ${impl_run} -to_step route_design -jobs ${jobs}
wait_on_run ${impl_run}
if {[get_property PROGRESS ${impl_run}] != "100%"} {
puts "ERROR: first normal implementation failed"
exit 1
}
set WNS [get_property STATS.WNS ${impl_run}]
set WHS [get_property STATS.WHS ${impl_run}]
if {$WNS >= 0 && $WHS >= 0} {
launch_runs ${impl_run} -next_step -jobs ${jobs}
wait_on_run ${impl_run}
if {[get_property PROGRESS ${impl_run}] != "100%"} {
puts "ERROR: bitstream generation failed"
exit 1
}
} else {
# Intelligent Design Runs (IDR) Flow
create_run -flow {Vivado IDR Flow 2022} -parent_run synth_1 idr_impl_1
set_property REFERENCE_RUN ${impl_run} [get_runs idr_impl_1]
set impl_run [get_runs idr_impl_1]
launch_runs ${impl_run} -jobs ${jobs}
wait_on_run ${impl_run}
if {[get_property PROGRESS ${impl_run}] != "100%"} {
puts "ERROR: idr implementation failed"
exit 1
}
# We need to figure out which IDR implementation run was successful
foreach sub_impl_run [get_runs ${impl_run}*] {
if {[get_property PROGRESS ${sub_impl_run}] == "100%"} {
set WNS [get_property STATS.WNS ${sub_impl_run}]
set WHS [get_property STATS.WHS ${sub_impl_run}]
if {$WNS >= 0 && $WHS >= 0} {
puts "INFO: timing met in idr run ${sub_impl_run}"
break
}
}
}
if {$WNS < 0 || $WHS < 0} {
puts "ERROR: did not meet timing!"
exit 1
}
puts "INFO: generate bitstream"
launch_runs ${impl_run} -to_step write_bitstream -jobs ${jobs}
wait_on_run ${impl_run}
if {[get_property PROGRESS ${impl_run}] != "100%"} {
puts "ERROR: bitstream generation failed"
exit 1
}
}
file copy -force ${root_dir}/vivado_proj/firesim.runs/${impl_run}/design_1_wrapper.bit ${root_dir}/vivado_proj/firesim.bit
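Like the other implementation fragments in this change, this script is not self-contained: it assumes the variables it reads (notably $root_dir and $jobs) are provided by the scripts that source it. A rough sketch of that surrounding context, with illustrative values:

    # Provided by the including scripts, not defined in this fragment.
    set root_dir [pwd]   ;# set in main.tcl
    set jobs 12          ;# set by the one-line jobs script added in this change
    # main.tcl then sources synthesis.tcl followed by the version-matched
    # implementation script for the detected Vivado release.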

View File

@ -0,0 +1,30 @@
variable impl_run [get_runs impl_1]
variable WNS -1
variable WHS -1
reset_runs ${impl_run}
launch_runs ${impl_run} -to_step route_design -jobs ${jobs}
wait_on_run ${impl_run}
if {[get_property PROGRESS ${impl_run}] != "100%"} {
puts "ERROR: implementation failed"
exit 1
}
set WNS [get_property STATS.WNS ${impl_run}]
set WHS [get_property STATS.WHS ${impl_run}]
if {$WNS < 0 || $WHS < 0} {
puts "ERROR: did not meet timing!"
exit 1
}
launch_runs ${impl_run} -next_step -jobs ${jobs}
wait_on_run ${impl_run}
if {[get_property PROGRESS ${impl_run}] != "100%"} {
puts "ERROR: implementation failed"
exit 1
}
file copy -force ${root_dir}/vivado_proj/firesim.runs/${impl_run}/design_1_wrapper.bit ${root_dir}/vivado_proj/firesim.bit

View File

@ -0,0 +1,75 @@
variable impl_run [get_runs impl_1]
variable WNS -1
variable WHS -1
set idr_start_directive Explore
set impl_run [get_runs impl_1]
reset_runs ${impl_run}
set_property STEPS.OPT_DESIGN.ARGS.DIRECTIVE ${idr_start_directive} ${impl_run}
set_property STEPS.PLACE_DESIGN.ARGS.DIRECTIVE ${idr_start_directive} ${impl_run}
set_property STEPS.ROUTE_DESIGN.ARGS.DIRECTIVE ${idr_start_directive} ${impl_run}
set_property STEPS.PHYS_OPT_DESIGN.IS_ENABLED true ${impl_run}
set_property STEPS.PHYS_OPT_DESIGN.ARGS.DIRECTIVE ${idr_start_directive} ${impl_run}
set_property STEPS.POST_ROUTE_PHYS_OPT_DESIGN.ARGS.DIRECTIVE ${idr_start_directive} ${impl_run}
launch_runs ${impl_run} -to_step route_design -jobs ${jobs}
wait_on_run ${impl_run}
if {[get_property PROGRESS ${impl_run}] != "100%"} {
puts "ERROR: first normal implementation failed"
exit 1
}
set WNS [get_property STATS.WNS ${impl_run}]
set WHS [get_property STATS.WHS ${impl_run}]
if {$WNS >= 0 && $WHS >= 0} {
launch_runs ${impl_run} -next_step -jobs ${jobs}
wait_on_run ${impl_run}
if {[get_property PROGRESS ${impl_run}] != "100%"} {
puts "ERROR: bitstream generation failed"
exit 1
}
} else {
# Intelligent Design Runs (IDR) Flow
create_run -flow {Vivado IDR Flow 2022} -parent_run synth_1 idr_impl_1
set_property REFERENCE_RUN ${impl_run} [get_runs idr_impl_1]
set impl_run [get_runs idr_impl_1]
launch_runs ${impl_run} -jobs ${jobs}
wait_on_run ${impl_run}
if {[get_property PROGRESS ${impl_run}] != "100%"} {
puts "ERROR: idr implementation failed"
exit 1
}
# We need to figure out which IDR implementation run was successful
foreach sub_impl_run [get_runs ${impl_run}*] {
if {[get_property PROGRESS ${sub_impl_run}] == "100%"} {
set WNS [get_property STATS.WNS ${sub_impl_run}]
set WHS [get_property STATS.WHS ${sub_impl_run}]
if {$WNS >= 0 && $WHS >= 0} {
puts "INFO: timing met in idr run ${sub_impl_run}"
break
}
}
}
if {$WNS < 0 || $WHS < 0} {
puts "ERROR: did not meet timing!"
exit 1
}
puts "INFO: generate bitstream"
launch_runs ${impl_run} -to_step write_bitstream -jobs ${jobs}
wait_on_run ${impl_run}
if {[get_property PROGRESS ${impl_run}] != "100%"} {
puts "ERROR: bitstream generation failed"
exit 1
}
}
file copy -force ${root_dir}/vivado_proj/firesim.runs/${impl_run}/design_1_wrapper.bit ${root_dir}/vivado_proj/firesim.bit

View File

@ -0,0 +1,118 @@
set ml_start_directive Explore
set ml_max_critical_paths 150
set ml_max_strategies 5
set ml_qor_suggestions ${root_dir}/vivado_proj/ml_qor_suggestions.rqs
set ml_strategy_dir ${root_dir}/vivado_proj/ml_strategies
# Cleanup
foreach path [list ${ml_qor_suggestions} ${ml_strategy_dir}] {
if {[file exists ${path}]} {
file delete -force -- ${path}
}
}
set impl_run [get_runs impl_1]
set WNS -1
set WHS -1
reset_runs ${impl_run}
set_property STEPS.OPT_DESIGN.ARGS.DIRECTIVE ${ml_start_directive} ${impl_run}
set_property STEPS.PLACE_DESIGN.ARGS.DIRECTIVE ${ml_start_directive} ${impl_run}
set_property STEPS.ROUTE_DESIGN.ARGS.DIRECTIVE ${ml_start_directive} ${impl_run}
set_property STEPS.PHYS_OPT_DESIGN.IS_ENABLED true ${impl_run}
set_property STEPS.PHYS_OPT_DESIGN.ARGS.DIRECTIVE ${ml_start_directive} ${impl_run}
set_property STEPS.POST_ROUTE_PHYS_OPT_DESIGN.ARGS.DIRECTIVE ${ml_start_directive} ${impl_run}
launch_runs ${impl_run} -to_step route_design -jobs ${jobs}
wait_on_run ${impl_run}
if {[get_property PROGRESS [get_runs ${impl_run}]] != "100%"} {
puts "ERROR: implementation failed"
exit 1
}
set WNS [get_property STATS.WNS [get_runs ${impl_run}]]
set WHS [get_property STATS.WHS [get_runs ${impl_run}]]
if {$WNS < 0 || $WHS < 0} {
set ml_tcls [list]
open_run ${impl_run}
report_qor_suggestions -max_paths ${ml_max_critical_paths} -max_strategies ${ml_max_strategies} -no_split -quiet
write_qor_suggestions -force -strategy_dir ${ml_strategy_dir} ${ml_qor_suggestions}
close_design
for {set i 1} {$i <= ${ml_max_strategies}} {incr i} {
set tclFile ${root_dir}/vivado_proj/ml_strategies/impl_1Project_MLStrategyCreateRun${i}.tcl
if {[file exists ${tclFile}]} {
lappend ml_tcls ${tclFile}
}
}
if {([llength ${ml_tcls}] == 0) && ([file exists ${ml_qor_suggestions}])} {
puts "INFO: no ML strategies were found, using base qor suggestions"
add_files -force -fileset utils_1 ${ml_qor_suggestions}
reset_runs ${impl_run}
set_property RQS_FILES ${ml_qor_suggestions} ${impl_run}
set_property STEPS.OPT_DESIGN.ARGS.DIRECTIVE RQS ${impl_run}
set_property STEPS.PLACE_DESIGN.ARGS.DIRECTIVE RQS ${impl_run}
set_property STEPS.PHYS_OPT_DESIGN.IS_ENABLED true ${impl_run}
set_property STEPS.PHYS_OPT_DESIGN.ARGS.DIRECTIVE RQS ${impl_run}
set_property STEPS.ROUTE_DESIGN.ARGS.DIRECTIVE RQS ${impl_run}
launch_runs ${impl_run} -to_step route_design -jobs ${jobs}
wait_on_run ${impl_run}
if {[get_property PROGRESS [get_runs ${impl_run}]] != "100%"} {
puts "ERROR: implementation failed"
exit 1
}
set WNS [get_property STATS.WNS [get_runs ${impl_run}]]
set WHS [get_property STATS.WHS [get_runs ${impl_run}]]
} else {
foreach tclFile ${ml_tcls} {
puts "INFO: using ML strategy from ${tclFile}"
source ${tclFile}
set impl_run ${ml_strategy_run}
launch_runs ${impl_run} -to_step route_design -jobs ${jobs}
wait_on_run ${impl_run}
if {[get_property PROGRESS [get_runs ${impl_run}]] != "100%"} {
puts "ERROR: implementation failed"
exit 1
}
set WNS [get_property STATS.WNS [get_runs ${impl_run}]]
set WHS [get_property STATS.WHS [get_runs ${impl_run}]]
if {$WNS >= 0 && $WHS >= 0} {
break
}
}
}
if {$WNS < 0 || $WHS < 0} {
puts "INFO: no more ML strategies available"
}
}
if {$WNS < 0 || $WHS < 0} {
puts "ERROR: did not meet timing!"
exit 1
}
launch_runs ${impl_run} -next_step -jobs ${jobs}
wait_on_run ${impl_run}
if {[get_property PROGRESS [get_runs ${impl_run}]] != "100%"} {
puts "ERROR: implementation failed"
exit 1
}
file copy -force ${root_dir}/vivado_proj/firesim.runs/${impl_run}/design_1_wrapper.bit ${root_dir}/vivado_proj/firesim.bit

View File

@ -0,0 +1,102 @@
set root_dir [pwd]
set vivado_version [version -short]
set ifrequency [lindex $argv 0]
set istrategy [lindex $argv 1]
set iboard [lindex $argv 2]
proc retrieveVersionedFile {filename version} {
set first [file rootname $filename]
set last [file extension $filename]
if {[file exists ${first}_${version}${last}]} {
return ${first}_${version}${last}
}
return $filename
}
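# Example (illustrative): with vivado_version equal to 2022.2,
#   retrieveVersionedFile ${root_dir}/scripts/create_bd.tcl 2022.2
# returns ${root_dir}/scripts/create_bd_2022.2.tcl when that versioned copy
# exists, and otherwise falls back to the unversioned path unchanged.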
puts $vivado_version
if {![file exists [set sourceFile [retrieveVersionedFile ${root_dir}/scripts/platform_env.tcl $vivado_version]]]} {
puts "ERROR: could not find $sourceFile"
exit 1
}
source $sourceFile
if {![file exists [set sourceFile [retrieveVersionedFile ${root_dir}/scripts/${iboard}.tcl $vivado_version]]]} {
puts "ERROR: could not find $sourceFile"
exit 1
}
source $sourceFile
# Cleanup
foreach path [list ${root_dir}/vivado_proj/firesim.bit] {
if {[file exists ${path}]} {
file delete -force -- ${path}
}
}
create_project -force firesim ${root_dir}/vivado_proj -part $part
set_property board_part $board_part [current_project]
# Loading all the verilog files
foreach addFile [list ${root_dir}/design/axi_tieoff_master.v ${root_dir}/design/firesim_wrapper.v ${root_dir}/design/FireSim-generated.sv ${root_dir}/design/FireSim-generated.defines.vh] {
set addFile [retrieveVersionedFile $addFile $vivado_version]
if {![file exists $addFile]} {
puts "ERROR: could not find file $addFile"
exit 1
}
add_files $addFile
if {[file extension $addFile] == ".vh"} {
set_property IS_GLOBAL_INCLUDE 1 [get_files $addFile]
}
}
set desired_host_frequency $ifrequency
set strategy $istrategy
# Loading create_bd.tcl
if {![file exists [set sourceFile [retrieveVersionedFile ${root_dir}/scripts/create_bd_${vivado_version}.tcl $vivado_version]]]} {
puts "ERROR: could not find $sourceFile"
exit 1
}
source $sourceFile
# Making wrapper
make_wrapper -files [get_files ${root_dir}/vivado_proj/firesim.srcs/sources_1/bd/design_1/design_1.bd] -top
add_files -norecurse ${root_dir}/vivado_proj/firesim.gen/sources_1/bd/design_1/hdl/design_1_wrapper.v
# Adding additional constraint sets
if {[file exists [set constrFile [retrieveVersionedFile ${root_dir}/design/FireSim-generated.synthesis.xdc $vivado_version]]]} {
create_fileset -constrset synth
add_files -fileset synth -norecurse $constrFile
}
if {[file exists [set constrFile [retrieveVersionedFile ${root_dir}/design/FireSim-generated.implementation.xdc $vivado_version]]]} {
create_fileset -constrset impl
add_files -fileset impl -norecurse $constrFile
}
update_compile_order -fileset sources_1
set_property top design_1_wrapper [current_fileset]
update_compile_order -fileset sources_1
if {[llength [get_filesets -quiet synth]]} {
set_property constrset synth [get_runs synth_1]
}
if {[llength [get_filesets -quiet impl]]} {
set_property constrset impl [get_runs impl_1]
}
foreach sourceFile [list ${root_dir}/scripts/synthesis.tcl ${root_dir}/scripts/implementation_${vivado_version}.tcl] {
set sourceFile [retrieveVersionedFile $sourceFile $vivado_version]
if {![file exists $sourceFile]} {
puts "ERROR: could not find $sourceFile"
exit 1
}
source $sourceFile
}
puts "Done!"
exit 0
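To make the entry point above concrete, here is a sketch of the expected invocation; the exact command line and argument values are assumptions rather than something shown in this patch, but the positional tclargs mapping follows directly from the script:

    # Assumed invocation (illustrative):
    #   vivado -mode batch -source main.tcl -tclargs 30 TIMING au250
    # consumed positionally above as:
    #   [lindex $argv 0] -> ifrequency : requested host clock in MHz (drives clk_wiz in create_bd)
    #   [lindex $argv 1] -> istrategy  : implementation strategy label, stored in $strategy
    #   [lindex $argv 2] -> iboard     : board name, used to source scripts/<board>.tcl (e.g. au250)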

View File

@ -0,0 +1 @@
set jobs 12

View File

@ -0,0 +1,10 @@
variable synth_run [get_runs synth_1]
reset_runs ${synth_run}
launch_runs ${synth_run} -jobs ${jobs}
wait_on_run ${synth_run}
if {[get_property PROGRESS ${synth_run}] != "100%"} {
puts "ERROR: synthesis failed"
exit 1
}

View File

@ -0,0 +1 @@
../cl_firesim/scripts/au250.tcl

View File

@ -0,0 +1,194 @@
#!/usr/bin/env python3
import argparse
import os
import subprocess
import sys
import pwd
import re
from pathlib import Path
from typing import Optional, Dict, Any, List
pciDevicesPath = Path('/sys/bus/pci/devices')
def get_bridge_bdf(id: str) -> str:
for entry in pciDevicesPath.iterdir():
        if re.match(r'^0000:' + re.escape(id) + r':[a-fA-F0-9]{2}\.[0-7]$', entry.name):
bridgePath = entry.resolve().absolute().parent
if bridgePath.exists():
return bridgePath.name
print(":ERROR: Unable to obtain bridge BDF")
sys.exit(1)
def get_fpga_bdfs(id: str) -> List[str]:
result = []
for entry in pciDevicesPath.iterdir():
        if re.match(r'^0000:' + re.escape(id) + r':[a-fA-F0-9]{2}\.[0-7]$', entry.name):
result.append(entry.name)
return result
def get_fpga_devs(id: str) -> List[Path]:
def readUevent(path: Path) -> Dict[Any, Any]:
if not (path / 'uevent').exists():
return {}
return { entry[0]: entry[1] for entry in [line.strip('\n\r ').split('=') for line in open(f'{path}/uevent', 'r').readlines()] if len(entry) >= 2 }
def xdmaResolver(path: Path) -> List[Path]:
xdmaDevs = []
for f in ['resource', 'resource0', 'resource1']:
rsrcPath = (path / f)
if rsrcPath.exists():
xdmaDevs.append(rsrcPath)
xdmaPath = (path / 'xdma')
if xdmaPath.is_dir():
ueventEntries = [readUevent(xdmaPath / entry.name) for entry in xdmaPath.iterdir() if (xdmaPath / entry.name).is_dir()]
xdmaDevs.extend([Path('/dev') / uevent['DEVNAME'] for uevent in ueventEntries if 'DEVNAME' in uevent and (Path('/dev') / uevent['DEVNAME']).exists()])
return xdmaDevs
resolvers = {
'xdma' : xdmaResolver
}
returnDevs = []
fpgaDevices = get_fpga_bdfs(id)
for fpgaDev in fpgaDevices:
path = pciDevicesPath / fpgaDev
fpgaDevUevent = readUevent(path)
if 'DRIVER' not in fpgaDevUevent:
print(":WARNING: Verify that 'xdma' driver is loaded")
continue
if fpgaDevUevent['DRIVER'] not in resolvers:
continue
returnDevs.extend(resolvers[fpgaDevUevent['DRIVER']](path.resolve()))
return returnDevs
# clear SERR bit in command register
# https://support.xilinx.com/s/question/0D52E00006hpjPHSAY/dell-r720-poweredge-server-reboots-on-fpga-reprogramming?language=en_US
def clear_serr(id: str) -> None:
bridgeBDF = get_bridge_bdf(id)
run = subprocess.run(['setpci', '-s', bridgeBDF, 'COMMAND=0000:0100'])
if run.returncode != 0:
print(":ERROR: Unable to clear SERR bit")
sys.exit(1)
# clear fatal error reporting enable bit in the device control register
# https://support.xilinx.com/s/question/0D52E00006hpjPHSAY/dell-r720-poweredge-server-reboots-on-fpga-reprogramming?language=en_US
def clear_fatal_error_reporting(id: str) -> None:
bridgeBDF = get_bridge_bdf(id)
run = subprocess.run(['setpci', '-s', bridgeBDF, 'CAP_EXP+8.w=0000:0004'])
if run.returncode != 0:
print(":ERROR: Unable to clear SERR bit")
sys.exit(1)
def write_to_linux_device_path(path: Path, data: str = '1\n') -> None:
try:
open(path, 'w').write(data)
    except OSError:
print(f":ERROR: Cannot write to {path} value: {data}")
sys.exit(1)
def remove(id: str) -> None:
bridgeBDF = get_bridge_bdf(id)
deviceBDFs = get_fpga_bdfs(id)
for deviceBDF in deviceBDFs:
removePath = pciDevicesPath / bridgeBDF / deviceBDF / 'remove'
if removePath.exists():
write_to_linux_device_path(removePath)
def rescan(id: str) -> None:
bridgeBDF = get_bridge_bdf(id)
if bridgeBDF is not None:
rescanPath = pciDevicesPath / bridgeBDF / 'rescan'
write_to_linux_device_path(rescanPath)
else:
write_to_linux_device_path('/sys/bus/pci/rescan')
# enable memory mapped transfers for the fpga
# https://support.xilinx.com/s/question/0D52E00006iHlNoSAK/lspci-reports-bar-0-disabled?language=en_US
def enable_memmapped_transfers(id: str) -> None:
deviceBDFs = get_fpga_bdfs(id)
for deviceBDF in deviceBDFs:
run = subprocess.run(['setpci', '-s', deviceBDF, 'COMMAND=0x02'])
if run.returncode != 0:
print(f":ERROR: Unable to enable memmapped transfers on {deviceBDF}")
sys.exit(1)
def program_fpga(serial: str, board: str, bitstream: str) -> None:
print(":WARNING: This only can target the 1st FPGA on a machine currently...")
pVivado = subprocess.Popen(
[
'vivado',
'-mode', 'tcl',
'-nolog', '-nojournal', '-notrace',
'-source', scriptPath / 'program_fpga.tcl',
'-tclargs',
'-board', board,
'-bitstream_path', bitstream,
],
stdin=subprocess.DEVNULL
)
pVivado.wait()
if pVivado.returncode != 0:
print(":ERROR: Unable to flash FPGA")
sys.exit(1)
def get_serial_from_bdf(id: str) -> str:
    deviceBDFs = get_fpga_bdfs(id)
    if len(deviceBDFs) == 0:
        print(f":ERROR: Unable to obtain Extended Device BDF for {id}")
sys.exit(1)
return "TODO"
def main(args: List[str]) -> int:
parser = argparse.ArgumentParser(description="Program a Xilinx XDMA-enabled FPGA")
megroup = parser.add_mutually_exclusive_group(required=True)
megroup.add_argument("--bus_id", help="Bus number of FPGA to flash (i.e. ****:<THIS>:**.*)")
megroup.add_argument("--serial_no", help="Serial number of FPGA to flash (i.e. what 'get_hw_target' shows in Vivado)")
parser.add_argument("--bitstream", help="Bitstream to flash onto FPGA", required=True, type=Path)
parser.add_argument("--board", help="FPGA board to flash", required=True)
parsed_args = parser.parse_args(args)
scriptPath = Path(__file__).resolve().parent
eUserId = os.geteuid()
sudoUserId = os.getenv('SUDO_UID')
    isAdmin = (eUserId == 0)  # root privileges, whether via sudo or a direct root shell
userId = eUserId if sudoUserId is None else int(sudoUserId)
if not isAdmin:
print(":ERROR: Requires running script with 'sudo'")
sys.exit(1)
if not parsed_args.bitstream.is_file() or not parsed_args.bitstream.exists():
print(f":ERROR: Invalid bitstream: {parsed_args.bitstream}")
sys.exit(1)
else:
parsed_args.bitstream = parsed_args.bitstream.absolute()
if parsed_args.bus_id:
        serialNumber = get_serial_from_bdf(parsed_args.bus_id)
clear_serr(parsed_args.bus_id)
clear_fatal_error_reporting(parsed_args.bus_id)
remove(parsed_args.bus_id)
program_fpga(serialNumber, parsed_args.board, parsed_args.bitstream)
rescan(parsed_args.bus_id)
enable_memmapped_transfers(parsed_args.bus_id)
print(f"Successfully programmed FPGA {parsed_args.bus_id} with {parsed_args.bitstream}")
if parsed_args.serial_no:
program_fpga(parsed_args.serial_no, parsed_args.board, parsed_args.bitstream)
print(f"Successfully programmed FPGA {parsed_args.serial_no} with {parsed_args.bitstream}")
print(":WARNING: Please warm reboot the machine")
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))

View File

@ -0,0 +1,58 @@
#!/usr/bin/env bash
# Adapted from https://github.com/Xilinx/open-nic-shell
echo $#
if [[ $# -le 1 ]] || [[ -z "${EXTENDED_DEVICE_BDF1:-}" ]] || [[ -z "${XILINX_VIVADO:-}" ]]; then
    echo "Usage: EXTENDED_DEVICE_BDF1=<EXTENDED_DEVICE_BDF1> program_fpga.sh BITSTREAM_PATH BOARD [PROBES_PATH]"
    echo "Please export EXTENDED_DEVICE_BDF1 (and EXTENDED_DEVICE_BDF2 if needed for 2-port boards)"
    echo "Example: EXTENDED_DEVICE_BDF1=<0000:86:00.0> program_fpga.sh BITSTREAM_PATH BOARD [PROBES_PATH]"
    echo "Please ensure vivado is loaded into the system path."
exit 1
fi
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
set -Eeuo pipefail
set -x
bridge_bdf=""
bitstream_path=$1
board=$2
probes_path="${3:-}"
# ^^ Probes are used for specifying hardware debugger symbols.
# Infer bridge
if [ -e "/sys/bus/pci/devices/$EXTENDED_DEVICE_BDF1" ]; then
bridge_bdf=$(basename $(dirname $(readlink "/sys/bus/pci/devices/$EXTENDED_DEVICE_BDF1")))
# Both devices will be on the same bridge as they are on the same FPGA board.
fi
# Remove
if [[ $bridge_bdf != "" ]]; then
echo 1 | sudo tee "/sys/bus/pci/devices/${bridge_bdf}/${EXTENDED_DEVICE_BDF1}/remove" > /dev/null
if [[ -n "${EXTENDED_DEVICE_BDF2:-}" ]] && [[ -e "/sys/bus/pci/devices/${bridge_bdf}/${EXTENDED_DEVICE_BDF2}" ]]; then
echo 1 | sudo tee "/sys/bus/pci/devices/${bridge_bdf}/${EXTENDED_DEVICE_BDF2}/remove" > /dev/null
fi
else
echo "Could not find bridge_bdf for the device $EXTENDED_DEVICE_BDF1"
echo "If remove was called on the device already, then manually set bridge_bdf here and comment 'exit 1'."
exit 1
fi
# Program fpga
vivado -mode tcl -source $SCRIPT_DIR/program_fpga.tcl \
-tclargs -board $board \
-bitstream_path $bitstream_path \
-probes_path $probes_path
# Rescan
echo 1 | sudo tee "/sys/bus/pci/devices/${bridge_bdf}/rescan" > /dev/null
sudo setpci -s $EXTENDED_DEVICE_BDF1 COMMAND=0x02
if [[ -n "${EXTENDED_DEVICE_BDF2:-}" ]]; then
sudo setpci -s $EXTENDED_DEVICE_BDF2 COMMAND=0x02
fi
echo "program_fpga.sh completed"
echo "Warm reboot machine if the FPGA wasn't initially setup with an XDMA bitstream."

View File

@ -0,0 +1,64 @@
# Adapted from https://github.com/Xilinx/open-nic-shell
# Directory variables
set script_path [file normalize [info script]]
set script_dir [file dirname $script_path]
set root_dir [file dirname $script_dir]
# Loading options
# bitstream_path Path to the bitstream
# board Board name
array set options {
-bitstream_path ""
-probes_path ""
-board au50
}
# Expect arguments in the form of `-argument value`
for {set i 0} {$i < $argc} {incr i 2} {
set arg [lindex $argv $i]
set val [lindex $argv [expr $i+1]]
if {[info exists options($arg)]} {
set options($arg) $val
puts "Set option $arg to $val"
} else {
puts "Skip unknown argument $arg and its value $val"
}
}
# Settings based on defaults or passed in values
foreach {key value} [array get options] {
set [string range $key 1 end] $value
}
source ${script_dir}/${board}.tcl
puts "Program file: $options(-bitstream_path)"
puts "Probes file: $options(-probes_path)"
puts "Board: $options(-board)"
puts "HW device: $hw_device"
open_hw_manager
connect_hw_server -allow_non_jtag
## By default, Vivado opens a default hw target.
#close_hw_target
# Note: listing the hardware targets shows how many FPGAs are attached.
get_hw_targets
# TODO: when no FPGA is programmed yet, an index into get_hw_targets could be
# used to program a specific FPGA once a PCI ID can be associated with it.
# TODO: determine how to map a PCI BDF to the corresponding hw target ID.
open_hw_target
current_hw_device [get_hw_devices $hw_device]
refresh_hw_device -update_hw_probes false [lindex [get_hw_devices $hw_device] 0]
set_property PROBES.FILE ${options(-probes_path)} [get_hw_devices $hw_device]
set_property FULL_PROBES.FILE ${options(-probes_path)} [get_hw_devices $hw_device]
set_property PROGRAM.FILE ${options(-bitstream_path)} [get_hw_devices $hw_device]
program_hw_devices [get_hw_devices $hw_device]
refresh_hw_device [lindex [get_hw_devices $hw_device] 0]
exit
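For reference, $hw_device (together with the $part and $board_part identifiers used at project-creation time) comes from the per-board script sourced above. The U250 values appear in the board Tcl file earlier in this diff:

    # U250 board variables, as defined in the board script earlier in this diff.
    set part xcu250-figd2104-2l-e
    set board_part xilinx.com:au250:part0:1.3
    set zynq_family 0
    set hw_device xcu250_0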

Some files were not shown because too many files have changed in this diff.