Merge remote-tracking branch 'origin/main' into noclone

This commit is contained in:
Jerry Zhao 2023-07-09 23:28:55 -07:00
commit 259c723ffe
90 changed files with 2166 additions and 1472 deletions

View File

@ -26,7 +26,7 @@ to use this instance as a GH-A self-hosted runner (see https://docs.github.com/e
Running FPGA-related Tasks
--------------------------
CI now includes the capability to run FPGA simulations on specific PRs. This requires that you tag your PR on creation with the tag `ci:fpga-deploy`. Adding the tag after the PR is created will not run the FPGA jobs without a resynchronization event (e.g., closing + reopening the PR, adding a new commit, or rebasing the branch).
Debugging Failures
------------------
@ -44,7 +44,7 @@ If the instance is stopped, then you must request a AWS IAM user account from th
Prior CI Jobs for Pull Requests
------------------------------
The default behavior is that a new commit to a PR will cancel any existing workflows that are still running. This is to save resources and is done by the `cancel-prior-workflows` job. If you wish to
allow all prior workflows to keep running, add the `ci:persist-prior-workflows` tag to your PR. Please use this tag sparingly, and with caution.
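For reference, the same "cancel superseded runs" behavior can be expressed with GitHub Actions' built-in `concurrency` setting. The snippet below is only an illustration of that mechanism (the group name is made up) and is not necessarily how the `cancel-prior-workflows` job in this repository is implemented:

```yaml
# Illustrative only: cancel any in-progress run for the same ref when a new run starts.
concurrency:
  group: firesim-ci-${{ github.ref }}
  cancel-in-progress: true
```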
GitHub Secrets
@ -63,4 +63,4 @@ GitHub Secrets
* **FIRESIM_PEM**: Used by the manager on CI manager instances and VMs
* **FIRESIM_PEM_PUBLIC**: Public key of the above secret, used to setup the key in Azure
* **FIRESIM_REPO_DEP_KEY**: Used to push scala doc to GH pages
* **GH_A_PERSONAL_ACCESS_TOKEN**: Used to dynamically register and deregister GitHub Actions runners. See `https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token`, and enable the `workflow` (Update GitHub Action workflows) setting.
* **BARTENDER_PERSONAL_ACCESS_TOKEN**: Used to dynamically register and deregister GitHub Actions runners. See `https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token`, and enable the `workflow` (Update GitHub Action workflows) setting.
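As an example of how these secrets reach the CI scripts, a workflow exposes them to its jobs through the `env` block; this mirrors the pattern used in this repository's workflow files:

```yaml
# Excerpt-style example: secrets are surfaced to jobs as environment variables.
env:
  PERSONAL_ACCESS_TOKEN: ${{ secrets.BARTENDER_PERSONAL_ACCESS_TOKEN }}
  GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
  FIRESIM_PEM: ${{ secrets.FIRESIM_PEM }}
```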

View File

@ -11,7 +11,7 @@ def run_docs_generated_components_check():
with cd(manager_fsim_dir), prefix('source sourceme-manager.sh'):
with prefix("cd deploy"):
run("cat config_runtime.yaml")
path = 'docs/Getting-Started-Guides/AWS-EC2-F1-Tutorial/Running-Simulations-Tutorial/DOCS_EXAMPLE_config_runtime.yaml'
path = 'docs/Getting-Started-Guides/AWS-EC2-F1-Getting-Started/Running-Simulations/DOCS_EXAMPLE_config_runtime.yaml'
run(f"cat ../{path}")
run(f"diff config_runtime.yaml ../{path}")
run("firesim --help")

View File

@ -19,7 +19,7 @@ def run_docs_generated_components_check():
with prefix('source sourceme-manager.sh --skip-ssh-setup'):
with prefix("cd deploy"):
run("cat config_runtime.yaml")
path = "docs/Getting-Started-Guides/On-Premises-FPGA-Tutorial/Running-Simulations/DOCS_EXAMPLE_config_runtime.yaml"
path = "docs/Getting-Started-Guides/On-Premises-FPGA-Getting-Started/Running-Simulations/DOCS_EXAMPLE_config_runtime.yaml"
run(f"cat ../{path}")
run(f"diff config_runtime.yaml ../{path}")

View File

@ -15,7 +15,11 @@ gha_runs_api_url = f"{gha_api_url}/runs"
gha_workflow_api_url = f"{gha_runs_api_url}/{ci_env['GITHUB_RUN_ID']}"
def get_header(gh_token: str) -> Dict[str, str]:
return {"Authorization": f"token {gh_token.strip()}", "Accept": "application/vnd.github+json"}
return {
"Authorization": f"token {gh_token.strip()}",
"Accept": "application/vnd.github+json",
"User-Agent": "bar-tender",
}
def get_runners(gh_token: str) -> List[Dict[str, Any]]:
r = requests.get(gha_runners_api_url, headers=get_header(gh_token))

View File

@ -9,7 +9,7 @@ defaults:
shell: bash -leo pipefail {0}
env:
PERSONAL_ACCESS_TOKEN: ${{ secrets.GH_A_PERSONAL_ACCESS_TOKEN }}
PERSONAL_ACCESS_TOKEN: ${{ secrets.BARTENDER_PERSONAL_ACCESS_TOKEN }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}

View File

@ -24,7 +24,7 @@ env:
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
FIRESIM_PEM: ${{ secrets.FIRESIM_PEM }}
FIRESIM-REPO-DEP-KEY: ${{ secrets.FIRESIM_REPO_DEP_KEY }}
FIRESIM-REPO-DEP-KEY: ${{ secrets.BARTENDER_PRIVATE_SSH_KEY }}
MANAGER_FIRESIM_LOCATION: "~/firesim"
LANG: "en_US.UTF-8" # required by SBT when it sees boost directories
LANGUAGE: "en_US:en"

View File

@ -12,7 +12,7 @@ defaults:
shell: bash -leo pipefail {0}
env:
PERSONAL_ACCESS_TOKEN: ${{ secrets.GH_A_PERSONAL_ACCESS_TOKEN }}
PERSONAL_ACCESS_TOKEN: ${{ secrets.BARTENDER_PERSONAL_ACCESS_TOKEN }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
@ -123,7 +123,7 @@ jobs:
uses: ./.github/actions/change-workflow-instance-states
with:
new-state: terminate
github-token: ${{ secrets.GH_A_PERSONAL_ACCESS_TOKEN }}
github-token: ${{ env.PERSONAL_ACCESS_TOKEN }}
setup-manager:
name: setup-manager
@ -149,7 +149,7 @@ jobs:
uses: ./.github/actions/change-workflow-instance-states
with:
new-state: terminate
github-token: ${{ secrets.GH_A_PERSONAL_ACCESS_TOKEN }}
github-token: ${{ env.PERSONAL_ACCESS_TOKEN }}
build-default-workloads:
# Conditionally build rootfs images only if deploying to FPGA to save CI resources

View File

@ -13,3 +13,9 @@ submodules:
conda:
environment: conda-reqs/docs.yaml
formats:
- pdf
- htmlzip
- epub

View File

@ -3,7 +3,7 @@
![FireSim Documentation Status](https://readthedocs.org/projects/firesim/badge/)
![Github Actions Status](https://github.com/firesim/firesim/actions/workflows/firesim-run-tests.yml/badge.svg)
| We're running the First FireSim and Chipyard User/Developer Workshop at ASPLOS 2023 on March 26, 2023! This workshop will feature a full-day of submitted talks from users and developers in the FireSim and Chipyard community. Learn more and **submit your work** on the [2023 Workshop Page](https://fires.im/workshop-2023/)! |
| We held the First FireSim and Chipyard User/Developer Workshop at ASPLOS 2023 on March 26, 2023! This workshop featured a full-day of talks from users and developers in the FireSim and Chipyard community. YouTube videos of the talks are available on the [2023 Workshop Page](https://fires.im/workshop-2023/)! |
|--------|
## Contents
@ -24,7 +24,7 @@ You can also get help from the FireSim user community on our [User Forum](https:
Additionally, we frequently run tutorials at various conferences
and events; for overview purposes, you can find the most recent slide decks at [fires.im/tutorial-recent](https://fires.im/tutorial-recent) (you should still follow [docs.fires.im](https://docs.fires.im) for the most up to date getting-started guide).
Another good overview from a recent seminar (in video format) can be found [on YouTube](https://www.youtube.com/watch?v=UlYOsRBhtY8).
Another good overview from a recent event (in video format) can be found [on YouTube](https://www.youtube.com/watch?v=_leRHbe5t6M).
## What is FireSim?
@ -36,18 +36,19 @@ ASIC RTL with cycle-accurate hardware and software models for other system compo
scale from individual SoC simulations hosted on on-prem FPGAs (e.g., a single Xilinx Alveo board attached to a desktop)
to massive datacenter-scale simulations harnessing hundreds of cloud FPGAs (e.g., on Amazon EC2 F1).
**Who's using and developing FireSim?** FireSim users across academia and industry have written over 25 papers using FireSim in many areas, including computer architecture, systems, networking, circuits, security, and more (see the [Publications page](https://fires.im/publications/)). FireSim has also been used in the development of shipping commercial silicon. FireSim was originally developed in the [Electrical Engineering and Computer Sciences
**Who's using and developing FireSim?** FireSim users across academia and industry (at 20+ institutions) have published over 40 papers using FireSim in many areas, including computer architecture, systems, networking, security, scientific computing, circuits, design automation, and more (see the [Publications page](https://fires.im/publications/)). FireSim has also been used in the development of shipping commercial silicon. FireSim was originally developed in the [Electrical Engineering and Computer Sciences
Department][eecs] at the [University of California, Berkeley][berkeley], but now has industrial and academic contributors from all over the world.
You can learn more about FireSim in the following places:
* **FireSim website**: https://fires.im
* **FireSim ISCA 2018 Paper**: [Paper PDF](https://sagark.org/assets/pubs/firesim-isca2018.pdf) | [IEEE Xplore](https://ieeexplore.ieee.org/document/8416816) | [ACM DL](https://dl.acm.org/citation.cfm?id=3276543) | [BibTeX](https://sagark.org/assets/pubs/firesim-isca2018.bib.txt) | Selected as one of IEEE Micro's “Top Picks from Computer Architecture Conferences, 2018”.
* **FireSim documentation**: https://docs.fires.im
* **FireSim (+Chipyard) Tutorial**: https://fires.im/tutorial/
* **Scala API Documentation**: https://fires.im/firesim/latest/api/
* **Two-minute lightning talk from ISCA 2018** (FireSim simulating a datacenter): [YouTube](https://www.youtube.com/watch?v=4XwoSe5c8lY)
* **Chisel Community Conference Tutorial**: [YouTube](https://www.youtube.com/watch?v=S3OriQnJXYQ)
* **FireSim ISCA-50 Retrospective**: [Paper PDF](https://sites.coecis.cornell.edu/isca50retrospective/files/2023/06/Karandikar_2018_FireSim.pdf)
* **FireSim documentation**: [https://docs.fires.im](https://docs.fires.im)
* **Scala API Documentation**: [https://fires.im/firesim/latest/api/](https://fires.im/firesim/latest/api/)
* **LatchUp 2023 Update Talk**: [YouTube](https://www.youtube.com/watch?v=_leRHbe5t6M)
* **FireSim (+Chipyard) Tutorial**: [https://fires.im/tutorial-recent/](https://fires.im/tutorial-recent/)
* **ASPLOS 2023 Tutorial Videos**: [YouTube Playlist](https://www.youtube.com/playlist?list=PL-YKJjRMRb9xe1RP4uoM69CRyXZZFy2ta)
* **Updates/News**: [Changelog](/CHANGELOG.md) | [FireSim Blog](https://fires.im/blog/) | [Twitter](https://twitter.com/firesimproject)
## What can I simulate with FireSim?
@ -99,7 +100,7 @@ Head to the [FireSim Website](https://fires.im) to learn more.
### **ISCA 2018**: FireSim: FPGA-Accelerated Cycle-Exact Scale-Out System Simulation in the Public Cloud
You can learn more about FireSim in our ISCA 2018 paper, which covers the overall FireSim infrastructure and large distributed simulations of networked clusters. This paper was **selected as one of IEEE Micros “Top Picks from Computer Architecture Conferences, 2018”.**
You can learn more about FireSim in our ISCA 2018 paper, which covers the overall FireSim infrastructure and large distributed simulations of networked clusters. This paper was selected as one of **IEEE Micro's "Top Picks from Computer Architecture Conferences, 2018"** and for **"ISCA@50 25-year Retrospective 1996-2020"**.
> Sagar Karandikar, Howard Mao, Donggyu Kim, David Biancolin, Alon Amid, Dayeol
Lee, Nathan Pemberton, Emmanuel Amaro, Colin Schmidt, Aditya Chopra, Qijing
@ -139,7 +140,7 @@ Our paper describing FireSim's Compiler, _Golden Gate_:
### **ASPLOS 2020**: FirePerf: FPGA-Accelerated Full-System Hardware/Software Performance Profiling and Co-Design
Our paper to appear in ASPLOS 2020 discusses system-level profiling features in FireSim:
Our paper that discusses system-level profiling features in FireSim:
> Sagar Karandikar, Albert Ou, Alon Amid, Howard Mao, Randy Katz, Borivoje Nikolić, and Krste Asanović, **FirePerf: FPGA-Accelerated Full-System Hardware/Software Performance Profiling and Co-Design**, *In Proceedings of the Twenty-Fifth International Conference on Architectural Support for Programming Languages and Operating Systems (ASPLOS 2020)*, Lausanne, Switzerland, March 2020.
@ -153,6 +154,21 @@ In this special issue, we describe the automated instance-multithreading optimiz
[Article PDF](https://davidbiancolin.github.io/papers/firesim-micro21.pdf)
### **ISCA@50 Retrospective: 1996-2020**: FireSim: FPGA-Accelerated Cycle-Exact Scale-Out System Simulation in the Public Cloud
This retrospective paper, included in the "ISCA@50 Retrospective: 1996-2020" collection, provides an update and retrospective
on FireSim's development and evolution since the original ISCA 2018 paper.
> Sagar Karandikar, Howard Mao, Donggyu Kim, David Biancolin, Alon Amid, Dayeol
Lee, Nathan Pemberton, Emmanuel Amaro, Colin Schmidt, Aditya Chopra, Qijing
Huang, Kyle Kovacs, Borivoje Nikolic, Randy Katz, Jonathan Bachrach, and Krste
Asanović. **FireSim: FPGA-Accelerated Cycle-Exact Scale-Out System Simulation in
the Public Cloud**. *In ISCA@50 Retrospective: 1996-2020*,
Edited by José F. Martínez and Lizy K. John, June 2023.
[Retrospective PDF](https://sites.coecis.cornell.edu/isca50retrospective/files/2023/06/Karandikar_2018_FireSim.pdf)
You can find other publications, including publications that *use* FireSim on the [FireSim Website](https://fires.im/publications/).
[ucb-bar]: http://bar.eecs.berkeley.edu

View File

@ -73,7 +73,7 @@ set -e
if [ "$tag_ret_code" -ne 0 ]; then
if [ "$SKIP_VALIDATE" = false ]; then
printf '\033[2J' # clear screen
read -p "WARNING: You are not on an official release of FireSim."$'\n'"Type \"y\" to continue if this is intended, otherwise see https://docs.fires.im/en/stable/Initial-Setup/Setting-up-your-Manager-Instance.html#setting-up-the-firesim-repo: " validate
read -p "WARNING: You are not on an official release of FireSim."$'\n'"Type \"y\" to continue if this is intended, otherwise see the FireSim Docs for pointers to the latest official release: " validate
[[ "$validate" == [yY] ]] || exit 5
echo "Setting up non-official FireSim release"
fi

View File

@ -836,8 +836,12 @@ class RuntimeConfig:
# setup workload config obj, aka a list of workloads that can be assigned
# to a server
self.workload = WorkloadConfig(self.innerconf.workload_name, self.launch_time,
self.innerconf.suffixtag)
if args.task != 'enumeratefpgas':
self.workload = WorkloadConfig(self.innerconf.workload_name, self.launch_time,
self.innerconf.suffixtag)
else:
self.workload = WorkloadConfig('dummy.json', self.launch_time,
self.innerconf.suffixtag)
# start constructing the target configuration tree
self.firesim_topology_with_passes = FireSimTopologyWithPasses(

View File

@ -63,7 +63,6 @@ vitis_firesim_gemmini_rocket_singlecore_no_nic:
bitstream_tar: https://raw.githubusercontent.com/firesim/firesim-public-bitstreams/0af81b912264abbe3f90f8140987814291090560/vitis/vitis_firesim_gemmini_rocket_singlecore_no_nic.tar.gz
deploy_quintuplet_override: null
custom_runtime_config: null
# DOCREF START: Xilinx Alveo HWDB Entries
alveo_u250_firesim_rocket_singlecore_no_nic:
bitstream_tar: https://raw.githubusercontent.com/firesim/firesim-public-bitstreams/1dc6be48bfe043bbc47e24660c1ef5076a22b7e4/xilinx_alveo_u250/alveo_u250_firesim_rocket_singlecore_no_nic.tar.gz
deploy_quintuplet_override: null
@ -80,7 +79,6 @@ alveo_u280_firesim_rocket_singlecore_no_nic:
bitstream_tar: https://raw.githubusercontent.com/firesim/firesim-public-bitstreams/f445dd689c74d9e9c8e5fdba19e299488f9446ce/xilinx_alveo_u280/alveo_u280_firesim_rocket_singlecore_no_nic.tar.gz
deploy_quintuplet_override: null
custom_runtime_config: null
# DOCREF END: Xilinx Alveo HWDB Entries
xilinx_vcu118_firesim_rocket_singlecore_4GB_no_nic:
bitstream_tar: https://raw.githubusercontent.com/firesim/firesim-public-bitstreams/df66683984628552f25acba52e5247ed78321994/xilinx_vcu118/xilinx_vcu118_firesim_rocket_singlecore_4GB_no_nic.tar.gz
deploy_quintuplet_override: null
@ -88,4 +86,4 @@ xilinx_vcu118_firesim_rocket_singlecore_4GB_no_nic:
nitefury_firesim_rocket_singlecore_no_nic:
bitstream_tar: https://raw.githubusercontent.com/firesim/firesim-public-bitstreams/b06e34569c2e4b350f8adeb96168244f2d43422b/rhsresearch_nitefury_ii/nitefury_firesim_rocket_singlecore_no_nic.tar.gz
deploy_quintuplet_override: null
custom_runtime_config: null

View File

@ -0,0 +1,7 @@
{
"benchmark_name" : "dummy",
"common_bootbinary" : "dummy",
"common_rootfs" : null,
"common_outputs" : [],
"common_simulation_outputs" : ["uartlog", "memory_stats*.csv"]
}

View File

View File

@ -1,12 +1,12 @@
Non-Source Dependency Management
================================
In the AWS EC2 F1 setup, in :doc:`/Getting-Started-Guides/AWS-EC2-F1-Tutorial/Initial-Setup/Setting-up-your-Manager-Instance`, we quickly copy-pasted the contents
In the AWS EC2 F1 setup, in :doc:`/Getting-Started-Guides/AWS-EC2-F1-Getting-Started/Initial-Setup/Setting-up-your-Manager-Instance`, we quickly copy-pasted the contents
of ``scripts/machine-launch-script.sh`` into the EC2 Management Console and
that script installed many dependencies that FireSim needs using
`conda <https://conda.io/en/latest/index.html>`_, a platform-agnostic package
`Conda <https://conda.io/en/latest/index.html>`_, a platform-agnostic package
manager, specifically using packages from the `conda-forge community <https://conda-forge.org/#about>`_
(or in the case of :doc:`/Getting-Started-Guides/AWS-EC2-F1-Tutorial/Initial-Setup/Setting-up-your-Manager-Instance`, we ran ``scripts/machine-launch-script.sh``).
(or in the case of :doc:`/Getting-Started-Guides/AWS-EC2-F1-Getting-Started/Initial-Setup/Setting-up-your-Manager-Instance`, we ran ``scripts/machine-launch-script.sh``).
In many situations, you may not need to know anything about ``conda``. By default, the
``machine-launch-script.sh`` installs ``conda`` into ``/opt/conda`` and all of the FireSim dependencies into
@ -23,7 +23,7 @@ is that you are able to write into the install location. See ``machine-launch-s
To :ref:`run a simulation on a F1 FPGA <running_simulations>` , FireSim currently requires that
you are able to act as root via ``sudo``.
However, you can do many things without having root, like :doc:`/Getting-Started-Guides/AWS-EC2-F1-Tutorial/Building-a-FireSim-AFI`,
However, you can do many things without having root, like :doc:`/Getting-Started-Guides/AWS-EC2-F1-Getting-Started/Building-a-FireSim-AFI`,
`<meta-simulation>`_ of a FireSim system using Verilator or even developing new features in FireSim.
Updating a Package Version
@ -103,7 +103,7 @@ like the following ::
This shows you that the first time ``machine-launch-script.sh`` was run, it created 'revision' 0 of the environment with
many packages. After updating the version of ``moto`` and rerunning, 'revision' 1 was created by updating the version
of ``moto``. At any time, you can revert your conda environment back to an older 'revision' using ``conda install -revision <n>``
of ``moto``. At any time, you can revert your Conda environment back to an older 'revision' using ``conda install --revision <n>``.
Multiple Environments
---------------------
@ -151,7 +151,7 @@ Look for what you need in this order:
mailing list know so that we can help get the addition merged.
#. `PyPI <https://pypi.org/>`_ (for Python packages). While it is possible to install packages with pip into a ``conda``
environment, `there are caveats <https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html?highlight=pip#using-pip-in-an-environment>`_.
In short, you're less likely to create a mess if you use only conda to manage the requirements and dependencies
In short, you're less likely to create a mess if you use only Conda to manage the requirements and dependencies
in your environment.
#. System packages as a last resort. It's very difficult to have the same tools on different platforms when they are being
built and shipped by different systems and organizations. That being said, in a pinch, you can find a section for
@ -187,10 +187,10 @@ If you instead need to enable debugging or possibly actively hack on the source
If you are developing a Python package, it is usually easiest to install all dependencies using ``conda`` and then install
your package in 'development mode' using ``pip install -e <path to clone>`` (and making sure that you are using ``pip`` from your environment).
Running conda with sudo
Running Conda with sudo
-----------------------
``tl;dr;`` run conda like this when using ``sudo``::
``tl;dr;`` run Conda like this when using ``sudo``::
sudo -E $CONDA_EXE <remaining options to conda>
@ -202,16 +202,16 @@ You also probably want to include the ``-E`` option to ``sudo`` (or more specifi
``--preserve-env=CONDA_DEFAULT_ENV``) so that the default choice for the environment to modify
is preserved in the sudo environment.
Running things from your conda environment with sudo
Running things from your Conda environment with sudo
----------------------------------------------------
If you are running other commands using sudo (perhaps to run something under gdb), remember, the ``secure_path``
does not include the conda environment by default and you will need to specify the full path to what you want to run,
does not include the Conda environment by default and you will need to specify the full path to what you want to run,
or in some cases, it is easiest to wrap what you want to run in a full login shell invocation like::
sudo /bin/bash -l -c "<command to run as root>"
The ``-l`` option to ``bash`` ensures that the **default** conda environment is fully activated. In the rare case that
The ``-l`` option to ``bash`` ensures that the **default** Conda environment is fully activated. In the rare case that
you are using a non-default named environment, you will want to activate it before running your command::
sudo /bin/bash -l -c "conda activate <myenv> && <command to run as root>"

View File

@ -159,7 +159,7 @@ are homogeneous and use this value for all nodes.
You should set this to one of the hardware configurations you have defined already in
``config_hwdb.yaml``. You should set this to the NAME (mapping title) of the
hardware configuration from ``config_hwdb.yaml``, NOT the actual AGFI or ``xclbin`` itself
hardware configuration from ``config_hwdb.yaml``, NOT the actual AGFI or ``bitstream_tar`` itself
(NOT something like ``agfi-XYZ...``).
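For example, assuming your ``config_hwdb.yaml`` contains an entry named ``alveo_u250_firesim_rocket_singlecore_no_nic`` (one of the entries shipped in the default HWDB), a minimal sketch of this setting in ``config_runtime.yaml`` would be::

    target_config:
        # Reference the HWDB entry by its mapping title, not by an AGFI or bitstream path.
        default_hw_config: alveo_u250_firesim_rocket_singlecore_no_nic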
@ -316,7 +316,7 @@ write:
``agfis_to_share``
^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. Warning:: This is only used in the AWS EC2 case.
.. Note:: This is only used in the AWS EC2 case.
This is used by the ``shareagfi`` command to share the specified agfis with the
users specified in the next (``share_with_accounts``) section. In this section,
@ -342,7 +342,7 @@ you would use:
``share_with_accounts``
^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. Warning:: This is only used in the AWS EC2 case.
.. Note:: This is only used in the AWS EC2 case.
A list of AWS account IDs that you want to share the AGFIs listed in
``agfis_to_share`` with when calling the manager's ``shareagfi`` command. You
@ -470,7 +470,7 @@ to the relative name of the config. For example,
``bit_builder_recipe``
"""""""""""""""""""""""
This specifies the bitstream type to generate for a particular recipe (ex. build a Vitis ``xclbin``).
This specifies the bitstream type to generate for a particular recipe.
This must point to a file in ``deploy/bit-builder-recipes/``.
See :ref:`bit-builder-recipe` for more details on bit builders and their arguments.
@ -497,12 +497,12 @@ Here is a sample of this configuration file:
This file tracks hardware configurations that you can deploy as simulated nodes
in FireSim. Each such configuration contains a name for easy reference in higher-level
configurations, defined in the section header, an handle to a bitstream (i.e. an AGFI or ``xclbin`` path), which represents the
configurations, defined in the section header, a handle to a bitstream (i.e. an AGFI or ``bitstream_tar`` path), which represents the
FPGA image, a custom runtime config, if one is needed, and a deploy quintuplet
override if one is necessary.
When you build a new bitstream, you should put the default version of it in this
file so that it can be referenced from your other configuration files (i.e. the AGFI ID or ``xclbin`` path).
When you build a new bitstream, you should put it in this
file so that it can be referenced from your other configuration files.
The following is an example section from this file - you can add as many of
these as necessary:
@ -512,10 +512,12 @@ these as necessary:
:start-after: DOCREF START: Example HWDB Entry
:end-before: DOCREF END: Example HWDB Entry
``NAME_GOES_HERE``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Here are the components of these entries:
In this example, ``firesim_rocket_quadcore_nic_l2_llc4mb_ddr3`` is the name that will be
The name: ``firesim_boom_singlecore_nic_l2_llc4mb_ddr3``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
In this example, ``firesim_boom_singlecore_nic_l2_llc4mb_ddr3`` is the name that will be
used to reference this hardware design in other configuration locations. The following
items describe this hardware configuration:
@ -523,22 +525,18 @@ items describe this hardware configuration:
"""""""""""""""
This represents the AGFI (FPGA Image) used by this hardware configuration.
Only used in AWS EC2 F1 FireSim configurations (a ``xclbin`` key/value cannot exist with this
Only used in AWS EC2 F1 FireSim configurations (a ``bitstream_tar`` key/value cannot exist with this
key/value in the same recipe).
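As a rough sketch, an F1 entry using this key looks like the following (the AGFI ID shown is a placeholder)::

    firesim_rocket_quadcore_nic_l2_llc4mb_ddr3:
        agfi: agfi-0123456789abcdef0  # placeholder AGFI ID
        deploy_quintuplet_override: null
        custom_runtime_config: null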
``xclbin``
"""""""""""""""
Indicates where the bitstream (FPGA Image) is located, may be one of:
* A Uniform Resource Identifier (URI), (see :ref:`uri-path-support` for details)
* A filesystem path available to the manager. Local paths are relative to the `deploy` folder.
``bitstream_tar``
"""""""""""""""""
This is not shown in the example entry above, but would be used for an on-premises bitstream.
Indicates where the bitstream (FPGA Image) and its associated metadata are located. It may be one of:
* A Uniform Resource Identifier (URI), (see :ref:`uri-path-support` for details)
* A filesystem path available to the manager. Local paths are relative to the `deploy` folder.
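For instance, the on-premises entries in the default ``config_hwdb.yaml`` follow this pattern (the URL is abridged here)::

    alveo_u250_firesim_rocket_singlecore_no_nic:
        # A URI (here, an HTTPS URL) or a path relative to the deploy/ directory.
        bitstream_tar: https://raw.githubusercontent.com/firesim/firesim-public-bitstreams/<commit>/xilinx_alveo_u250/alveo_u250_firesim_rocket_singlecore_no_nic.tar.gz
        deploy_quintuplet_override: null
        custom_runtime_config: null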
``deploy_quintuplet_override``
""""""""""""""""""""""""""""""
@ -567,11 +565,13 @@ to the relative name of the config. For example,
``driver_tar``
"""""""""""""""""""""""""""""
They key can be one of:
* A Uniform Resource Identifier (URI), (see :ref:`uri-path-support` for details)
* A filesystem path available to the manager. Local paths are relative to the `deploy` folder.
When this key is present, the local driver will not build from source.
The value for this key can be one of:
* A Uniform Resource Identifier (URI), (see :ref:`uri-path-support` for details)
* A filesystem path available to the manager. Local paths are relative to the `deploy` folder.
When this key is present, the FireSim FPGA-driver software will not be built from source.
Instead, during `firesim infrasetup`, this file will be deployed and extracted
into the `sim_slot_X` folder on the run farm instance. This file may
be a `.tar`, `.tar.gz`, `.tar.bz2` or any other format that GNU tar (version 1.26)
@ -586,7 +586,7 @@ Add more hardware config sections, like ``NAME_GOES_HERE_2``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
You can add as many of these entries to ``config_hwdb.yaml`` as you want, following the format
discussed above (i.e. you provide ``agfi`` or ``xclbin``, ``deploy_quintuplet_override``, and ``custom_runtime_config``).
discussed above (i.e. you provide ``agfi`` or ``bitstream_tar``, ``deploy_quintuplet_override``, and ``custom_runtime_config``).
.. _run-farm-recipe:
@ -605,7 +605,7 @@ This key/value specifies a run farm class to use for launching, managing, and te
run farm hosts used for simulations.
By default, run farm classes can be found in :gh-file-ref:`deploy/runtools/run_farm.py`. However, you can specify
your own custom run farm classes by adding your python file to the ``PYTHONPATH``.
For example, to use the ``AWSEC2F1`` build farm class, you would write ``run_farm_type: AWSEC2F1``.
For example, to use the ``AWSEC2F1`` run farm class, you would write ``run_farm_type: AWSEC2F1``.
``args``
^^^^^^^^^^^^^^^^^^^^^
@ -617,12 +617,8 @@ the ``_parse_args`` function in the run farm class given by ``run_farm_type``.
``aws_ec2.yaml`` run farm recipe
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
This run farm recipe configures a FireSim run farm to use AWS EC2 instances.
Here is an example of this configuration file:
.. literalinclude:: /../deploy/run-farm-recipes/aws_ec2.yaml
:language: yaml
The run farm recipe shown above configures a FireSim run farm to use AWS EC2 instances.
It contains several key/value pairs:
``run_farm_tag``
""""""""""""""""
@ -734,7 +730,7 @@ for AWS EC2 simulations.
``externally_provisioned.yaml`` run farm recipe
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
This run farm is an allows users to provide an list of pre-setup unmanaged run farm hosts (by hostname or IP address) that
This run farm allows users to provide a list of pre-setup unmanaged run farm hosts (by hostname or IP address) that
they can run simulations on.
Note that this run farm type does not launch or terminate the run farm hosts. This functionality should be handled by the user.
For example, users can use this run farm type to run simulations locally.
@ -752,7 +748,7 @@ simulations across all run farm hosts.
For example, this class manages how to flash FPGAs with bitstreams, how to copy back results, and how to check if a simulation is running.
By default, deploy platform classes can be found in :gh-file-ref:`deploy/runtools/run_farm_deploy_managers.py`. However, you can specify
your own custom run farm classes by adding your python file to the ``PYTHONPATH``.
There are default deploy managers / platforms that correspond to AWS EC2 F1 FPGAs, Vitis FPGAs, and Xilinx Alveo U250/U280 FPGAs, ``EC2InstanceDeployManager``, ``VitisInstanceDeployManager``, ``XilinxAlveo{U250,U280}InstanceDeployManager``, respectively.
There are default deploy managers / platforms that correspond to AWS EC2 F1 FPGAs, Vitis FPGAs, Xilinx Alveo U250/U280 FPGAs, Xilinx VCU118 FPGAs, and RHS Research Nitefury II FPGAs: ``EC2InstanceDeployManager``, ``VitisInstanceDeployManager``, ``Xilinx{AlveoU250,AlveoU280,VCU118}InstanceDeployManager``, and ``RHSResearchNitefuryIIInstanceDeployManager`` respectively.
For example, to use the ``EC2InstanceDeployManager`` deploy platform class, you would write ``default_platform: EC2InstanceDeployManager``.
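As a hedged sketch (the host name and host spec below are hypothetical placeholders), an override in ``config_runtime.yaml`` selecting a platform for an on-premises run farm might look like::

    run_farm:
        base_recipe: run-farm-recipes/externally_provisioned.yaml
        recipe_arg_overrides:
            # Pick the deploy manager that matches your FPGAs (see the list above).
            default_platform: XilinxAlveoU250InstanceDeployManager
            # Hypothetical host entry: one machine, addressed as localhost.
            run_farm_hosts_to_use:
                - localhost: one_fpga_spec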
``default_simulation_dir``
@ -941,7 +937,7 @@ When enabled, this appends the current users AWS user ID and region to the ``s3_
``vitis.yaml`` bit builder recipe
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
This bit builder recipe configures a build farm host to build an Vitis bitstream (FPGA bitstream called an ``xclbin``).
This bit builder recipe configures a build farm host to build a Vitis bitstream (an FPGA bitstream called an ``xclbin``, packaged into a ``bitstream_tar``).
``device``
""""""""""""""""""""""""""
@ -955,9 +951,20 @@ Here is an example of this configuration file:
``xilinx_alveo_u250.yaml`` bit builder recipe
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
This bit builder recipe configures a build farm host to build an Xilinx Alveo U250 bitstream.
This bit builder recipe configures a build farm host to build a Xilinx Alveo U250 bitstream, packaged into a ``bitstream_tar``.
``xilinx_alveo_u280.yaml`` bit builder recipe
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
This bit builder recipe configures a build farm host to build an Xilinx Alveo U280 bitstream.
This bit builder recipe configures a build farm host to build a Xilinx Alveo U280 bitstream, packaged into a ``bitstream_tar``.
``xilinx_vcu118.yaml`` bit builder recipe
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
This bit builder recipe configures a build farm host to build a Xilinx VCU118 bitstream, packaged into a ``bitstream_tar``.
``rhsresearch_nitefury_ii.yaml`` bit builder recipe
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
This bit builder recipe configures a build farm host to build an RHS Research Nitefury II bitstream, packaged into a ``bitstream_tar``.
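A build recipe in ``config_build_recipes.yaml`` selects one of these recipes through its ``bit_builder_recipe`` key. A minimal, abridged sketch (other required build-recipe keys omitted) is::

    alveo_u250_firesim_rocket_singlecore_no_nic:
        # DESIGN, TARGET_CONFIG, PLATFORM_CONFIG, and other keys omitted for brevity ...
        bit_builder_recipe: bit-builder-recipes/xilinx_alveo_u250.yaml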

View File

@ -23,21 +23,15 @@ Then, do platform-specific init steps for the given ``--platform``.
* Prompt the user for email address and subscribe them to notifications for their own builds.
* Setup the ``config_runtime.yaml`` and ``config_build.yaml`` files with AWS run/build farm arguments.
.. tab:: ``vitis``
.. tab:: All other platforms
* Setup the ``config_runtime.yaml`` and ``config_build.yaml`` files with externally provisioned run/build farm arguments.
.. tab:: ``xilinx_alveo_u250``
* Setup the ``config_runtime.yaml`` and ``config_build.yaml`` files with externally provisioned run/build farm arguments.
.. tab:: ``xilinx_alveo_u280``
This includes platforms such as: ``xilinx_alveo_u250``, ``xilinx_alveo_u280``, ``xilinx_vcu118``, and ``rhsresearch_nitefury_ii``.
* Setup the ``config_runtime.yaml`` and ``config_build.yaml`` files with externally provisioned run/build farm arguments.
You can re-run this whenever you want to get clean configuration files.
.. note:: In the case of ``f1``, you can just hit Enter when prompted for ``aws configure`` credentials and your email
.. note:: For ``f1``, you can just hit Enter when prompted for ``aws configure`` credentials and your email
address, and both will keep your previously specified values.
If you run this command by accident and didn't mean to overwrite your
@ -71,18 +65,7 @@ For each config, the build process entails:
9. [Local/AWS Infra] Submit the tar file to the AWS backend for conversion to an AFI
10. [Local] Wait for the AFI to become available, then notify the user of completion by email
.. tab:: Vitis
1. [Locally] Run the elaboration process for your hardware configuration
2. [Locally] FAME-1 transform the design with MIDAS
3. [Locally] Attach simulation models (I/O widgets, memory model, etc.)
4. [Locally] Emit Verilog to run through the FPGA Flow
5. Use a build farm configuration to launch/use build hosts for each configuration you want to build
6. [Local/Remote] Prep build hosts, copy generated Verilog for hardware configuration to build instance
7. [Local or Remote] Run Vitis Synthesis and P&R for the configuration
8. [Local/Remote] Copy back all output generated by Vitis (including ``xclbin`` bitstream)
.. tab:: Xilinx Alveo U250/U280
.. tab:: XDMA-based On-Prem.
1. [Locally] Run the elaboration process for your hardware configuration
2. [Locally] FAME-1 transform the design with MIDAS
@ -93,14 +76,27 @@ For each config, the build process entails:
7. [Local or Remote] Run Vivado Synthesis and P&R for the configuration
8. [Local/Remote] Copy back all output generated by Vivado (including ``bit`` bitstream)
.. tab:: Vitis-based On-Prem.
1. [Locally] Run the elaboration process for your hardware configuration
2. [Locally] FAME-1 transform the design with MIDAS
3. [Locally] Attach simulation models (I/O widgets, memory model, etc.)
4. [Locally] Emit Verilog to run through the FPGA Flow
5. Use a build farm configuration to launch/use build hosts for each configuration you want to build
6. [Local/Remote] Prep build hosts, copy generated Verilog for hardware configuration to build instance
7. [Local or Remote] Run Vitis Synthesis and P&R for the configuration
8. [Local/Remote] Copy back all output generated by Vitis (including the ``bitstream_tar`` containing the ``xclbin`` bitstream)
This process happens in parallel for all of the builds you specify. The command
will exit when all builds are completed (but you will get notified as
INDIVIDUAL builds complete if on F1) and indicate whether all builds passed or a
build failed by the exit code.
.. Note:: **It is highly recommended that you either run this command in a ``screen`` or use
``mosh`` to access the build instance. Builds will not finish if the manager is
killed due to disconnection to the instance.**
.. Note:: **It is highly recommended that you either run this command in a** ``screen`` **or use**
``mosh`` **to access the manager instance. Builds will not finish if the manager is
killed due to ssh disconnection from the manager instance.**
When you run a build for a particular configuration, a directory named
``LAUNCHTIME-CONFIG_TRIPLET-BUILD_NAME`` is created in ``firesim/deploy/results-build/``.
@ -113,18 +109,18 @@ This directory will contain:
- ``AGFI_INFO``: Describes the state of the AFI being built, while the manager is running. Upon build completion, this contains the AGFI/AFI that was produced, along with its metadata.
- ``cl_firesim``: This directory is essentially the Vivado project that built the FPGA image, in the state it was in when the Vivado build process completed. This contains reports, stdout from the build, and the final tar file produced by Vivado. This also contains a copy of the generated verilog (``FireSim-generated.sv``) used to produce this build.
.. tab:: Vitis
The Vitis project collateral that built the FPGA image, in the state it was in when the Vitis build process completed.
This contains reports, ``stdout`` from the build, and the final bitstream ``xclbin`` file produced by Vitis.
This also contains a copy of the generated verilog (``FireSim-generated.sv``) used to produce this build.
.. tab:: Xilinx Alveo U250/U280
.. tab:: XDMA-based On-Prem.
The Vivado project collateral that built the FPGA image, in the state it was in when the Vivado build process completed.
This contains reports, ``stdout`` from the build, and the final ``bitstream_tar`` bitstream/metadata file produced by Vivado.
This also contains a copy of the generated verilog (``FireSim-generated.sv``) used to produce this build.
.. tab:: Vitis-based On-Prem.
The Vitis project collateral that built the FPGA image, in the state it was in when the Vitis build process completed.
This contains reports, ``stdout`` from the build, and the final ``bitstream_tar`` produced from the Vitis-generated ``xclbin`` bitstream.
This also contains a copy of the generated verilog (``FireSim-generated.sv``) used to produce this build.
If this command is cancelled by a SIGINT, it will prompt for confirmation
that you want to terminate the build instances.
If you respond in the affirmative, it will move forward with the termination.
@ -142,6 +138,13 @@ build farm without prompting for confirmation if a SIGINT is received:
``firesim builddriver``
--------------------------------
For FPGA-based simulations (when ``metasimulation_enabled`` is ``false`` in
``config_runtime.yaml``), this command will build the host-side simulation
driver, also without requiring any simulation hosts to be launched or reachable.
For complicated designs, running this before running ``firesim launchrunfarm``
can reduce the time spent leaving FPGA hosts idling while waiting for
driver build.
For metasimulations (when ``metasimulation_enabled`` is ``true`` in
``config_runtime.yaml``), this command will build the entire software
simulator without requiring any simulation hosts to be launched or reachable.
@ -150,19 +153,12 @@ your primary simulation tool while developing target RTL, since it allows you
to run the Chisel build flow and iterate on your design without
launching/setting up extra machines to run simulations.
.. _firesim-tar2afi:
``firesim tar2afi``
----------------------
.. Warning:: Can only be used in the F1 case.
.. Note:: Can only be used for the F1 platform.
This command can be used to run only steps 9 & 10 from an aborted ``firesim buildbitstream`` for F1 that has been
manually corrected. ``firesim tar2afi`` assumes that you have a
@ -174,8 +170,8 @@ specifying an already existing LAUNCHTIME.
This command will run for the configurations specified in :ref:`config-build` and
:ref:`config-build-recipes` as with :ref:`firesim-buildbitstream`. It is likely that you may want
to comment out ``BUILD_NAME`` that successfully completed :ref:`firesim-buildbitstream` before
running this command.
to comment out build recipe names that successfully completed the :ref:`firesim-buildbitstream` process
before running this command.
.. _firesim-shareagfi:
@ -183,7 +179,7 @@ running this command.
``firesim shareagfi``
----------------------
.. Warning:: Can only be used in the F1 case.
.. Note:: Can only be used for the F1 platform.
This command allows you to share AGFIs that you have already built (that are
listed in :ref:`config-hwdb`) with other users. It will take the
@ -202,8 +198,10 @@ that someone else owns and gave you access to.
``firesim launchrunfarm``
---------------------------
This command launches a **Run Farm** on which you run simulations. Run farms
consist of a set of **run farm hosts** that can be spawned by AWS EC2 or managed by the user.
.. Note:: Can only be used for the F1 platform.
This command launches a **Run Farm** on AWS EC2 on which you run simulations. Run farms
consist of a set of **run farm instances** that can be spawned on AWS EC2.
The ``run_farm`` mapping in ``config_runtime.yaml`` determines the run farm used and its configuration (see :ref:`config-runtime`).
The ``base_recipe`` key/value pair specifies the default set of arguments to use for a particular run farm type.
To change the run farm type, a new ``base_recipe`` file must be provided from ``deploy/run-farm-recipes``.
@ -212,38 +210,22 @@ These keys/values must match the same mapping structure as the ``args`` mapping.
Overridden arguments override recursively such that all key/values present in the override args replace the default arguments given
by the ``base_recipe``. In the case of sequences, an overridden sequence completely replaces the corresponding sequence in the default args.
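For example, with the AWS EC2 ``base_recipe``, a sketch of such an override (instance counts here are illustrative) could look like::

    run_farm:
        base_recipe: run-farm-recipes/aws_ec2.yaml
        recipe_arg_overrides:
            # This sequence completely replaces the default run_farm_hosts_to_use sequence.
            run_farm_hosts_to_use:
                - f1.2xlarge: 1
                - f1.16xlarge: 0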
.. tabs::
An AWS EC2 run farm consists of AWS instances like ``f1.16xlarge``, ``f1.4xlarge``, ``f1.2xlarge``, and ``m4.16xlarge`` instances.
Before you run the command, you define the number of each that you want in the ``recipe_arg_overrides`` section of
``config_runtime.yaml`` or in the ``base_recipe`` itself.
.. tab:: AWS EC2 Run Farm Recipe (``aws_ec2.yaml``)
A launched run farm is tagged with a ``run_farm_tag``,
which is used to disambiguate multiple parallel run
farms; that is, you can have many run farms running, each running a different
experiment at the same time, each with its own unique ``run_farm_tag``. One
convenient feature to add to your AWS management panel is the column for
``fsimcluster``, which contains the ``run_farm_tag`` value. You can see how to do
that in the :ref:`fsimcluster-aws-panel` section.
.. tab:: Externally Provisioned Run Farm Recipe (``externally_provisioned.yaml``)
An Externally Provisioned run farm consists of a set of unmanaged run farm hosts given by the user.
A run farm host is configured by a ``default_platform`` that determines how to run simulations on the host.
Additionally a sequence of hosts is given in ``run_farm_hosts_to_use``.
This sequence consists of a mapping from an unique hostname/IP address to a specification that indicates the
amount of FPGAs it hosts, the number of potential metasimulations it can run, and more.
Before you run the command, you define sequence of run farm hosts in the ``recipe_arg_overrides`` section of
``config_runtime.yaml`` or in the ``base_recipe`` itself.
See the documentation for ``config_runtime.yaml`` for
more details on other arguments (see :ref:`config-runtime`).
The other options in the ``run_farm`` section, ``run_instance_market``,
``spot_interruption_behavior``, and ``spot_max_price`` define *how* instances in
the run farm are launched. See the documentation for ``config_runtime.yaml`` for
more details on other arguments (see :ref:`config-runtime`).
**ERRATA**: One current requirement is that you must define a target config in
the ``target_config`` section of ``config_runtime.yaml`` that does not require
@ -257,7 +239,7 @@ will see the command print out instance IDs for the correct number/types of
instances (you do not need to pay attention to these or record them).
If an error occurs, it will be printed to console.
.. warning:: For the AWS EC2 case, once you run this command, your run farm will continue to run until you call
.. warning:: On AWS EC2, once you run this command, your run farm will continue to run until you call
``firesim terminaterunfarm``. This means you will be charged for the running
instances in your run farm until you call ``terminaterunfarm``. You are
responsible for ensuring that instances are only running when you want them to
@ -268,23 +250,16 @@ If an error occurs, it will be printed to console.
``firesim terminaterunfarm``
-----------------------------
This command potentially terminates some or all of the instances in the Run Farm defined
.. Note:: Can only be used for the F1 platform.
This command terminates some or all of the instances in the Run Farm defined
in your ``config_runtime.yaml`` file by the ``run_farm`` ``base_recipe``, depending on the command line arguments
you supply.
.. tabs::
.. tab:: AWS EC2 Run Farm Recipe (``aws_ec2.yaml``)
By default, running ``firesim terminaterunfarm`` will terminate
ALL instances with the specified ``run_farm_tag``. When you run this command,
it will prompt for confirmation that you want to terminate the listed instances.
If you respond in the affirmative, it will move forward with the termination.
.. tab:: Externally Provisioned Run Farm Recipe (``externally_provisioned.yaml``)
By default, this run of ``firesim terminaterunfarm`` does nothing since externally managed
run farm hosts should be managed by the user (and not by FireSim).
If you do not want to have to confirm the termination (e.g. you are using this
command in a script), you can give the command the ``--forceterminate`` command
@ -295,15 +270,10 @@ RUN FARM WITHOUT PROMPTING FOR CONFIRMATION:
firesim terminaterunfarm --forceterminate
.. Warning:: DEPRECATION: The ``--terminatesome<INSTANCE>`` flags have been changed to a single ``--terminatesome`` flag and will be removed in a future version
.. Warning:: The following ``--terminatesome<INSTANCE>`` flags are only available for AWS EC2.
There a few additional commandline arguments that let you terminate only
some of the instances in a particular Run Farm: ``--terminatesomef116 INT``,
``--terminatesomef14 INT``, ``--terminatesomef12 INT``, and
``--terminatesomem416 INT``, which will terminate ONLY as many of each type of
instance as you specify.
The ``--terminatesome=INSTANCE_TYPE:COUNT`` flag additionally allows you to
terminate only some (``COUNT``) of the instances of a particular type
(``INSTANCE_TYPE``) in a particular Run Farm.
Here are some examples:
@ -311,7 +281,7 @@ Here are some examples:
[ start with 2 f1.16xlarges, 2 f1.2xlarges, 2 m4.16xlarges ]
firesim terminaterunfarm --terminatesomef116 1 --forceterminate
firesim terminaterunfarm --terminatesome=f1.16xlarge:1 --forceterminate
[ now, we have: 1 f1.16xlarges, 2 f1.2xlarges, 2 m4.16xlarges ]
@ -320,12 +290,12 @@ Here are some examples:
[ start with 2 f1.16xlarges, 2 f1.2xlarges, 2 m4.16xlarges ]
firesim terminaterunfarm --terminatesomef116 1 --terminatesomef12 2 --forceterminate
firesim terminaterunfarm --terminatesome=f1.16xlarge:1 --terminatesome=f1.2xlarge:2 --forceterminate
[ now, we have: 1 f1.16xlarges, 0 f1.2xlarges, 2 m4.16xlarges ]
.. warning:: In the AWS EC2 case, Once you call ``launchrunfarm``, you will be charged for running instances in
.. warning:: On AWS EC2, once you call ``launchrunfarm``, you will be charged for running instances in
your Run Farm until you call ``terminaterunfarm``. You are responsible for
ensuring that instances are only running when you want them to be by checking
the AWS EC2 Management Panel.
@ -352,12 +322,12 @@ is a rough outline of what the command does:
Details about setting up your simulation configuration can be found in
:ref:`config-runtime`.
**Once you run a simulation, you should re-run ``firesim infrasetup`` before
**Once you run a simulation, you should re-run** ``firesim infrasetup`` **before
starting another one, even if it is the same exact simulation on the same Run
Farm.**
You can see detailed output from an example run of ``infrasetup`` in the
:ref:`single-node-sim` and :ref:`cluster-sim` Tutorials.
:ref:`single-node-sim` and :ref:`cluster-sim` Getting Started Guides.
.. _firesim-boot:
@ -430,10 +400,30 @@ the workloads, hardware configurations, and abstract host mappings for each
simulation (and optionally, switch) in your design. These diagrams are located
in ``firesim/deploy/generated-topology-diagrams/``, named after your topology.
Here is an example of such a diagram (click to expand/zoom):
Here is an example of such a diagram (click to expand/zoom, it will likely be
illegible without expanding):
.. figure:: runcheck_example.png
:scale: 50 %
:alt: Example diagram from running ``firesim runcheck``
Example diagram for an 8-node cluster with one ToR switch
.. _firesim-enumeratefpgas:
``firesim enumeratefpgas``
-----------------------------------
.. Note:: Can only be used for XDMA-based On-Premises platforms.
This command should be run once for each on-premises Run Farm you plan to use
that contains XDMA-based FPGAs. When run, the command will generate a file
(``/opt/firesim-db.json``) on each Run Farm Machine in the run farm that
contains a mapping from the FPGA ID used for JTAG programming to the PCIe ID
used to run simulations for each FPGA attached to the machine.
If you ever change the physical layout of a Run Farm Machine in your Run Farm
(e.g., which PCIe slot the FPGAs are attached to), you will need to re-run this
command.

View File

@ -17,6 +17,7 @@ many URI protocols we do not test.
Likewise, individual URI protocols will have their own requirements for specifying credentials.
Documentation on supplying credentials is provided by the individual protocol implementations. For
example:
* `adlfs for Azure Data-Lake Gen1 and Gen2 <https://github.com/fsspec/adlfs#details>`_
* `gcfs for Google Cloud Services <https://gcsfs.readthedocs.io/en/latest/#credentials>`_
* `s3fs for AWS S3 <https://s3fs.readthedocs.io/en/latest/#credentials>`_
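For instance, a ``config_hwdb.yaml`` entry could point its ``bitstream_tar`` at an S3 object rather than an HTTPS URL or local path (the bucket and object below are hypothetical)::

    my_custom_design:
        # Hypothetical S3 URI; credentials are resolved by s3fs as described above.
        bitstream_tar: s3://my-bitstream-bucket/my_custom_design.tar.gz
        deploy_quintuplet_override: null
        custom_runtime_config: null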

View File

@ -1,8 +1,8 @@
Managing the Conda Lock File
------------------------------
The default conda environment set by ``build-setup.sh`` uses the `lock file ("*.conda-lock.yml") <https://github.com/conda-incubator/conda-lock>`_ in ``conda-reqs/*``.
This file is derived from the conda requirements files (``*.yaml``) also located at ``conda-reqs/*``.
The default Conda environment set by ``build-setup.sh`` uses the `lock file ("*.conda-lock.yml") <https://github.com/conda-incubator/conda-lock>`_ in ``conda-reqs/*``.
This file is derived from the Conda requirements files (``*.yaml``) also located at ``conda-reqs/*``.
Updating Conda Requirements
===========================
@ -16,7 +16,7 @@ There are two different methods:
Caveats of the Conda Lock File and CI
=====================================
Unfortunately, so far as we know, there is no way to derive the conda requirements files from the conda lock file.
Unfortunately, so far as we know, there is no way to derive the Conda requirements files from the Conda lock file.
Thus, there is no way to verify that a lock file satisfies a set of requirements given by a requirements file(s).
It is recommended that anytime you update a requirements file, you update the lock file in the same PR.
This check is what the ``check-conda-lock-modified`` CI job does.

View File

@ -3,134 +3,107 @@
FireSim Basics
===================================
FireSim is a cycle-accurate, FPGA-accelerated scale-out computer system
simulation platform developed in the Berkeley Architecture Research Group in
the EECS Department at the University of California, Berkeley.
FireSim is an open-source
FPGA-accelerated full-system hardware simulation platform that makes
it easy to validate, profile, and debug RTL hardware implementations
at 10s to 100s of MHz. FireSim simplifies co-simulating
ASIC RTL with cycle-accurate hardware and software models for other system components (e.g. I/Os). FireSim can productively
scale from individual SoC simulations hosted on on-prem FPGAs (e.g., a single Xilinx Alveo board attached to a desktop)
to massive datacenter-scale simulations harnessing hundreds of cloud FPGAs (e.g., on Amazon EC2 F1).
FireSim is capable of simulating from **one to thousands of multi-core compute
nodes**, derived from **silicon-proven** and **open** target-RTL, with an optional
cycle-accurate network simulation tying them together. FireSim runs on FPGAs in **public
cloud** environments like AWS EC2 F1, removing the high capex traditionally
involved in large-scale FPGA-based simulation, as well as on on-premises FPGAs.
FireSim users across academia and industry (at 20+ institutions) have published
over 40 papers using FireSim in many areas, including computer architecture,
systems, networking, security, scientific computing, circuits, design
automation, and more (see the `Publications page <https://fires.im/publications>`__ on
the FireSim website to learn more). FireSim
has also been used in the development of commercially-available silicon. FireSim
was originally developed in the Electrical Engineering and Computer Sciences
Department at the University of California, Berkeley, but
now has industrial and academic contributors from all over the world.
FireSim is useful both for datacenter architecture research as well as running
many single-node architectural experiments in parallel on FPGAs. By harnessing
a standardized host platform and providing a large amount of
automation/tooling, FireSim drastically simplifies the process of building and
deploying large-scale FPGA-based hardware simulations.
This documentation will walk you through getting started with using FireSim on
your platform and also serves as a reference for more advanced FireSim features. For higher-level
technical discussion about FireSim, see the `FireSim website <https://fires.im>`__.
To learn more, see the `FireSim website <https://fires.im>`__ and the FireSim
`ISCA 2018 paper <https://sagark.org/assets/pubs/firesim-isca2018.pdf>`__.
For a two-minute overview that describes how FireSim simulates a datacenter,
see our ISCA 2018 lightning talk `on YouTube <https://www.youtube.com/watch?v=4XwoSe5c8lY>`__.
Common FireSim usage models
---------------------------------------
Three common use cases:
--------------------------
Below are three common usage models for FireSim. The first two are the most common, while the
third model is primarily for those interested in warehouse-scale computer research. The getting
started guides on this documentation site will cover all three models.
Single-Node Simulation In Parallel Using On-Premises FPGAs
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
1. Single-Node Simulations Using One or More On-Premises FPGAs
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
In this mode, FireSim allows for simulation of individual Rocket
Chip-based nodes without a network, which allows individual simulations to run
at ~150 MHz. The FireSim manager has the ability to automatically distribute
jobs to on-premises FPGAs allowing users to harness existing FPGAs for quick turnaround time and
maximum flexibility. For example, users can run all of SPECInt2017 on Rocket Chip
in ~1 day by running the 10 separate workloads in parallel on 10 on-premises FPGAs.
In this usage model, FireSim allows for simulation of targets consisting of
individual SoC designs (e.g., those produced by `Chipyard <https://chipyard.readthedocs.io/>`__)
at 150+ MHz running on on-premises
FPGAs, such as those attached to your local desktop, laptop, or cluster. Just
like on the cloud, the FireSim manager can automatically distribute and manage
jobs on one or more on-premises FPGAs, including running complex workloads like
SPECInt2017 with full reference inputs.
Single-Node Simulation In Parallel Using Cloud FPGAs
2. Single-Node Simulations Using Cloud FPGAs
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
In this mode, FireSim allows for simulation of individual Rocket
Chip-based nodes without a network, which allows individual simulations to run
at ~150 MHz. The FireSim manager has the ability to automatically distribute
jobs to many parallel simulations running on cloud FPGAs, expediting the process of running large
workloads like SPEC. For example, users can run all of SPECInt2017 on Rocket Chip
in ~1 day by running the 10 separate workloads in parallel on 10 FPGAs hosted in the cloud.
This usage model is similar to the previous on-premises case, but instead
deploys simulations on FPGAs attached to cloud instances, rather than requiring
users to obtain and set up on-premises FPGAs. This allows for dynamically
scaling the number of FPGAs in use to match workload requirements. For example,
on AWS EC2 F1, it is just as cost effective to run the 10 workloads in SPECInt2017 in parallel
on 10 cloud FPGAs vs. running them serially on one cloud FPGA.
Datacenter/Cluster Simulation
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. note::
All automation in FireSim works in both the on-premises and cloud
usage models, which enables a **hybrid usage model** where early development happens
on one (or a small cluster of) on-premises FPGA(s), while bursting to a large
number of cloud FPGAs when a high degree of parallelism is necessary.
3. Datacenter/Cluster Simulations on On-Premises or Cloud FPGAs
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
In this mode, FireSim also models a cycle-accurate network with
parameterizable bandwidth and link latency, as well as configurable
topology, to accurately model current and future datacenter-scale
parameterizable bandwidth, link latency, and configurable
topology to accurately model current and future datacenter-scale
systems. For example, FireSim has been used to simulate 1024 quad-core
Rocket Chip-based nodes, interconnected by a 200 Gbps, 2us network. To learn
RISC-V Rocket Chip-based nodes, interconnected by a 200 Gbps, 2us Ethernet network. To learn
more about this use case, see our `ISCA 2018 paper
<https://sagark.org/assets/pubs/firesim-isca2018.pdf>`__ or `two-minute lightning talk
<https://www.youtube.com/watch?v=4XwoSe5c8lY>`__.
<https://sagark.org/assets/pubs/firesim-isca2018.pdf>`__.
Other Use Cases
---------------------
This release does not support the non-cycle-accurate network mode that our `AWS Compute Blog Post/Demo
<https://aws.amazon.com/blogs/compute/bringing-datacenter-scale-hardware-software-co-design-to-the-cloud-with-firesim-and-amazon-ec2-f1-instances/>`__
used. This feature will be restored in a future release.
If you have other use cases that we haven't covered or that don't fit into the above
buckets, feel free to contact us!
If you have other use cases that we haven't covered, feel free to contact us!
Choose your platform to get started
--------------------------------------
FireSim supports many types of FPGAs and FPGA platforms! Click one of the following links to work through the getting started guide for your particular platform.
Background/Terminology
---------------------------
* :doc:`/Getting-Started-Guides/AWS-EC2-F1-Getting-Started/index`
* Status: ✅ All FireSim Features Supported.
.. figure:: img/firesim_env.png
:alt: FireSim Infrastructure Setup
* :doc:`/Getting-Started-Guides/On-Premises-FPGA-Getting-Started/Xilinx-Alveo-U250-FPGAs`
* Status: ✅ All FireSim Features Supported.
FireSim Infrastructure Diagram
* :doc:`/Getting-Started-Guides/On-Premises-FPGA-Getting-Started/Xilinx-Alveo-U280-FPGAs`
* Status: ✅ All FireSim Features Supported.
**FireSim Manager** (``firesim``)
This program (available on your path as ``firesim``
once we source necessary scripts) automates the work required to launch FPGA
builds and run simulations. Most users will primarily interact with the
manager. If you're familiar with tools like Vagrant or Docker, the ``firesim``
command is just like the ``vagrant`` and ``docker`` commands, but for FPGA simulators
instead of VMs/containers.
* :doc:`/Getting-Started-Guides/On-Premises-FPGA-Getting-Started/Xilinx-VCU118-FPGAs`
* Status: ✅ All FireSim Features Supported.
**Manager Instance**
This is the main host (e.g., an AWS EC2 instance or your local machine) that you will
SSH-into and do work on. This is where you'll clone your copy of FireSim and
use the FireSim Manager to deploy builds/simulations from.
* :doc:`/Getting-Started-Guides/On-Premises-FPGA-Getting-Started/RHS-Research-Nitefury-II-FPGAs`
* Status: ✅ All FireSim Features Supported.
**Build Farm**
These are instances that are managed by the FireSim manager when you run FPGA builds.
The manager will automatically ship source for builds to these instances and
run the Verilog -> FPGA Image process on them.
* :doc:`Getting-Started-Guides/On-Premises-FPGA-Getting-Started/Xilinx-Vitis-FPGAs`
* Status: ⚠️ DMA-based Bridges Not Supported. The Vitis-based U250 flow is **not recommended** unless you have specific constraints that require using Vitis. Notably, the Vitis-based flow does not support DMA-based FireSim bridges (e.g., TracerV, Synthesizable Printfs, etc.), while the XDMA-based flows support all FireSim features, as shown above. If you're unsure, use the XDMA-based U250 flow instead: :doc:`/Getting-Started-Guides/On-Premises-FPGA-Getting-Started/Xilinx-Alveo-U250-FPGAs`.
**Run Farm**
These are a collection of instances that the manager
manages and deploys simulations onto. You can operate multiple
Run Farms at the same time to run multiple
separate sets of simulations in parallel.
To disambiguate between the computers being simulated and the computers doing
the simulating, we also define:
**Target**
The design and environment under simulation. Generally, a
group of one or more multi-core RISC-V microprocessors with or without a network between them.
**Host**
The computers executing the FireSim simulation -- the **Run Farm** from above.
We frequently prefix words with these terms. For example, software can run
on the simulated RISC-V system (*target*-software) or on a host x86 machine (*host*-software).
**Golden Gate (MIDAS II)**
The FIRRTL compiler used by FireSim to convert target RTL into a decoupled
simulator. Formerly named MIDAS.
Get Started
-----------
FireSim supports many types of FPGAs and FPGA platforms!
Click one of the following links to get started with your particular platform.
.. warning:: If using a Xilinx Alveo U250 or U280, we recommend the FPGA-specific flows instead of the Xilinx Vitis flow.
* :doc:`/Getting-Started-Guides/AWS-EC2-F1-Tutorial/index`
* :doc:`/Getting-Started-Guides/On-Premises-FPGA-Tutorial/Xilinx-Alveo-U250-FPGAs`
* :doc:`/Getting-Started-Guides/On-Premises-FPGA-Tutorial/Xilinx-Alveo-U280-FPGAs`
* :doc:`Getting-Started-Guides/On-Premises-FPGA-Tutorial/Xilinx-Vitis-FPGAs`

View File

@ -10,45 +10,33 @@ Amazon S3 Setup
During the build process, the build system will need to upload a tar
file to Amazon S3 in order to complete the build process using Amazon's
backend scripts (which convert the Vivado-generated tar into an AFI).
The manager will create this bucket for you automatically; you just need
to specify a name.
The manager will create this bucket for you automatically.
So, choose a bucket name, e.g. ``firesim``. Bucket names must be
globally unique. If you choose one that's already taken, the manager
will notice and complain when you tell it to build an AFI. To set your
bucket name, open :gh-file-ref:`deploy/bit-builder-recipes/f1.yaml` in your editor and under the
particular recipe you plan to build, replace
Bucket names must be globally unique, so the default bucket name used by the
manager will be ``firesim-(YOUR_AWS_USERNAME)-(REGION)``. If the bucket name
that the manager tries to use is inaccessible to you (because someone else has
taken the same name), the manager will notice
and complain when you tell it to build an AFI.
.. code-block:: yaml
s3_bucket_name: firesim
with your own bucket name, e.g.:
.. code-block:: yaml
s3_bucket_name: firesim
.. Note:: This isn't necessary if you set the ``append_userid_region`` key/value pair to ``true``.
In the unlikely event that you need to change the bucket name from the aforementioned default,
you can edit the ``s3_bucket_name`` value in :gh-file-ref:`deploy/bit-builder-recipes/f1.yaml`
and set ``append_userid_region`` to ``false``.
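For reference, here is a minimal sketch of what the relevant keys in
:gh-file-ref:`deploy/bit-builder-recipes/f1.yaml` might look like after such a change
(the bucket name shown is only a placeholder):

.. code-block:: yaml

    # Hypothetical example: use an explicit, globally-unique bucket name
    # instead of the auto-generated firesim-(YOUR_AWS_USERNAME)-(REGION) default.
    s3_bucket_name: firesim-mycustombucket
    # Disable automatic appending of your user ID and region to the bucket name.
    append_userid_region: false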
Build Recipes
---------------
In the ``deploy/config_build.yaml`` file, you will notice that the ``builds_to_run``
section currently contains several lines, which
indicates to the build system that you want to run all of these builds in
parallel, with the parameters listed in the relevant section of the
``deploy/config_build_recipes.yaml`` file. Here you can set parameters of the simulated
system, and also select the type of instance on which the Vivado build will be
deployed. From our experimentation, there are diminishing returns using
anything above a ``z1d.2xlarge``, so we default to that. If you do wish to use a
different build instance type keep in mind that Vivado will consume in excess
of 32 GiB for large designs.
indicates to the build system that you want to run all of the listed builds in
parallel, with the parameters for each listed in the relevant section of the
``deploy/config_build_recipes.yaml`` file. In ``deploy/config_build_recipes.yaml``, you can set parameters of the simulated
system.
To start out, let's build a simple design, ``firesim_rocket_quadcore_no_nic_l2_llc4mb_ddr3``.
To start out, let's build a simple design, ``firesim_rocket_quadcore_no_nic_l2_llc4mb_ddr3``, which
is the same design whose pre-built version we used to run simulations in the earlier single-node simulation guide.
This is a design that has four cores, no nic, and uses the 4MB LLC + DDR3 memory model.
To do so, comment out all of the other build entries in ``deploy/config_build.yaml``, besides the one we want. So, you should
To do so, delete (or comment out) all of the other build recipe names listed in the ``builds_to_run`` section of ``deploy/config_build.yaml``, besides the one we want. So, you should
end up with something like this (a line beginning with a ``#`` is a comment):
.. code-block:: yaml
@ -59,6 +47,17 @@ end up with something like this (a line beginning with a ``#`` is a comment):
- firesim_rocket_quadcore_no_nic_l2_llc4mb_ddr3
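If it helps, here is a hedged sketch of what a complete ``builds_to_run`` section might look
like after this edit (the commented-out recipe names are purely illustrative):

.. code-block:: yaml

    builds_to_run:
        # Other recipes you are not building right now can stay commented out:
        # - firesim_rocket_quadcore_nic_l2_llc4mb_ddr3
        # - firesim_boom_singlecore_no_nic_l2_llc4mb_ddr3
        - firesim_rocket_quadcore_no_nic_l2_llc4mb_ddr3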
Build Farm Instance Types
-------------------------------
FireSim will run Vivado for each build on its own ``z1d.2xlarge`` instance. You
can change the instance type used by modifying the ``instance_type`` value in :gh-file-ref:`deploy/build-farm-recipes/aws_ec2.yaml`.
From our experimentation, there are diminishing returns using
anything larger than a ``z1d.2xlarge``. If you do wish to use a
different build instance type, keep in mind that Vivado will consume in excess
of 32 GiB of DRAM for large designs.
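As a rough sketch, the relevant portion of :gh-file-ref:`deploy/build-farm-recipes/aws_ec2.yaml`
might look like the following (only ``instance_type`` is discussed in this guide; the surrounding
layout is an assumption for illustration):

.. code-block:: yaml

    args:
        # EC2 instance type used for each parallel Vivado build.
        # z1d.2xlarge is the default; larger types show diminishing returns.
        instance_type: z1d.2xlarge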
Running a Build
----------------------
@ -68,7 +67,7 @@ Now, we can run a build like so:
firesim buildbitstream
This will run through the entire build process, taking the Chisel RTL
This will run through the entire build process, taking the Chisel (or Verilog) RTL
and producing an AFI/AGFI that runs on the FPGA. This whole process will
usually take a few hours. When the build
completes, you will see a directory in
@ -87,6 +86,13 @@ that should look something like this:
Build Completion Email
In addition to being included in the email, the manager will also print the
entry that can be added to ``config_hwdb.yaml`` so that the generated AGFI can
be used to run simulations. Note that on AWS, you will **not** have access to a
physical bitstream file. The final bitstream is stored in a backend managed by
AWS and the only piece of information we need to program the bitstream onto AWS
F1 FPGAs is the value of the ``agfi:`` key in the ``config_hwdb.yaml`` entry.
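For reference, a hypothetical ``config_hwdb.yaml`` entry for an AWS build might look like the
sketch below; the ``agfi:`` value shown is a placeholder, and the other keys are assumptions
based on typical pre-existing entries:

.. code-block:: yaml

    firesim_rocket_quadcore_no_nic_l2_llc4mb_ddr3:
        # The AGFI ID is the only handle needed to program AWS F1 FPGAs
        # (placeholder value shown).
        agfi: agfi-0123456789abcdef0
        deploy_quintuplet_override: null
        custom_runtime_config: null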
Now that you know how to generate your own FPGA image, you can modify the target-design
to add your own features, then build a FireSim-compatible FPGA image automatically!
To learn more advanced FireSim features, you can choose a link under the "Advanced

View File

@ -0,0 +1,15 @@
Background/Terminology
==============================
.. |manager_machine| replace:: **Manager Instance**
.. |build_farm_machine| replace:: **Build Farm Instances**
.. |run_farm_machine| replace:: **Run Farm Instances**
.. |mach_or_inst| replace:: Instance
.. |mach_or_inst_l| replace:: instances
.. |mach_details| replace:: a "vanilla" AWS EC2 instance without an FPGA attached
.. |mach_or_inst2| replace:: cloud instances
.. |simple_setup| replace:: Later parts of this guide will explain in further detail how each of these instance types is launched and managed.
.. include:: ../../Terminology-Template.rst

View File

@ -31,22 +31,37 @@ private key locally as ``firesim.pem``. You can use this key to access
all instances from your local machine. We will copy this file to our
manager instance later, so that the manager can also use it.
Check your EC2 Instance Limits
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Double Check your EC2 Instance Limits
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
AWS limits access to particular instance types for new/infrequently used
accounts to protect their infrastructure. You should make sure that your
account has access to ``f1.2xlarge``, ``f1.4xlarge``, ``f1.16xlarge``,
``m4.16xlarge``, and ``c5.4xlarge`` instances by looking at the "Limits" page
in the EC2 panel, which you can access
`here <https://console.aws.amazon.com/ec2/v2/home#Limits:>`__. The
values listed on this page represent the maximum number of any of these
accounts to protect their infrastructure. You can learn more about how
these limits/quotas work `here <https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-on-demand-instances.html#ec2-on-demand-instances-limits>`__.
You should make sure that your
account has the ability to launch a sufficient number of instances to follow
this guide by looking at the "Service Quotas" page in the AWS Console, which you can access
`here <https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/>`__.
Be sure that the correct region is selected once you open this page.
The values listed on this page represent the maximum number of vCPUs of any of these
instances that you can run at once, which will limit the size of
simulations (# of nodes) that you can run. If you need to increase your
limits, follow the instructions on the
:ref:`limitincrease` page.
To follow this guide, you need to be able to run one ``f1.2xlarge`` instance
and two ``c5.4xlarge`` instances.
simulations (e.g., number of parallel FPGAs) that you can run. If you need to
increase your limits, follow the instructions below.
To complete this guide, you need to have the following limits:
* ``Running On-Demand F instances``: 64 vCPUs.
* This is sufficient for 8 parallel FPGAs. Each FPGA requires 8 vCPUs.
* ``Running On-Demand Standard (A, C, D, H, I, M, R, T, Z) instances``: 24 vCPUs.
* This is sufficient for one ``c5.4xlarge`` manager instance and one ``z1d.2xlarge`` build farm instance.
If you have insufficient limits, follow the instructions on the :ref:`limitincrease` page.
Start a t2.nano instance to run the remaining configuration commands
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@ -70,6 +85,8 @@ Launch a ``t2.nano`` by following these instructions:
settings)
6. Click on the instance ID and note the instance's public IP address.
.. _run-scripts-t2:
Run scripts from the t2.nano
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@ -104,7 +121,7 @@ On this machine, run the following:
Within the prompt, you should specify the same region that you chose
above (one of ``us-east-1``, ``us-west-2``, ``eu-west-1``) and set the default
output format to ``json``. You will need to generate an AWS access key in the "Security Credentials" menu of your AWS settings (as instructed in https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey ). You can also learn more about the ``aws configure`` command on the following page: https://docs.aws.amazon.com/cli/latest/reference/configure/index.html
output format to ``json``. You will need to generate an AWS access key in the "Security Credentials" menu of your AWS settings (as instructed in https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey ). You should keep the AWS access key information in a safe place, so that you can refer to it again when setting up the manager instance. You can learn more about the ``aws configure`` command on the following page: https://docs.aws.amazon.com/cli/latest/reference/configure/index.html
Again on the ``t2.nano`` instance, do the following:

View File

@ -0,0 +1,54 @@
.. _first-time-aws:
First-time AWS User Setup
==============================
If you've never used AWS before and don't have an account, follow the instructions
below to get started.
Creating an AWS Account
-----------------------
First, you'll need an AWS account. Create one by going to
`aws.amazon.com <https://aws.amazon.com>`__ and clicking "Sign Up."
You'll want to create a personal account. You will have to give it a
credit card number.
.. _limitincrease:
Requesting Limit Increases
--------------------------
AWS limits access to particular instance types for new/infrequently used
accounts to protect their infrastructure. You can learn more about how
these limits/quotas work `here <https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-on-demand-instances.html#ec2-on-demand-instances-limits>`__.
You should make sure that your
account has the ability to launch a sufficient number of instances to follow
this guide by looking at the "Service Quotas" page in the AWS Console, which you can access
`here <https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/>`__.
Be sure that the correct region is selected once you open this page.
The values listed on this page represent the maximum number of vCPUs of any of these
instances that you can run at once, which will limit the size of
simulations (e.g., number of parallel FPGAs) that you can run. If you need to
increase your limits, follow the instructions below.
To complete this guide, you need to have the following limits:
* ``Running On-Demand F instances``: 64 vCPUs.
* This is sufficient for 8 parallel FPGAs. Each FPGA requires 8 vCPUs.
* ``Running On-Demand Standard (A, C, D, H, I, M, R, T, Z) instances``: 24 vCPUs.
* This is sufficient for one ``c5.4xlarge`` manager instance and one ``z1d.2xlarge`` build farm instance.
If you have insufficient limits, request a limit increase by following these steps:
https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-resource-limits.html#request-increase
In your request, enter the vCPU limits for the two instance classes shown above.
This process sometimes has a human in the loop, so you should submit it ASAP. At
this point, you should wait for the response to this request.
Hit Next below to continue.

View File

@ -12,7 +12,7 @@ Launching a "Manager Instance"
Now, we need to launch a "Manager Instance" that acts as a
"head" node that we will ``ssh`` or ``mosh`` into to work from.
Since we will deploy the heavy lifting to separate ``c5.4xlarge`` and
Since we will deploy the heavy lifting to separate ``z1d.2xlarge`` and
``f1`` instances later, the Manager Instance can be a relatively cheap instance.
In this guide, however, we will use a ``c5.4xlarge``,
running the AWS FPGA Developer AMI. (Be sure to subscribe to the AMI
@ -34,11 +34,15 @@ To launch a manager instance, follow these steps:
#. In the *Application and OS Images* search box, search for
``FPGA Developer AMI - 1.12.2-40257ab5-6688-4c95-97d1-e251a40fd1fc`` and
select the AMI that appears under the **Community AMIs** tab (there
should be only one). **DO NOT USE ANY OTHER VERSION.** For example, **do not** use `FPGA Developer AMI` from the *AWS Marketplace AMIs* tab, as you will likely get an incorrect version of the AMI.
If you find that there are no results for this search, you can try incrementing the last part of the version number in the search string, e.g., ``1.12.2 -> 1.12.3``. Other parts of the search term should be unchanged.
should be only one).
* If you find that there are no results for this search, you can try incrementing the last part of the **version number** (``Z`` in ``X.Y.Z``) in the search string, e.g., ``1.12.2 -> 1.12.3``. Other parts of the search string should be unchanged.
* **Do not** use `FPGA Developer AMI` from the *AWS Marketplace AMIs* tab, as you will likely get an incorrect version of the AMI.
#. In the *Instance Type* drop-down, select the instance type of
your choosing. A good choice is a ``c5.4xlarge`` (16 cores, 32 GiB) or a ``z1d.2xlarge`` (8 cores, 64 GiB).
#. In the *Key pair (login)* drop-down, select the ``firesim`` key pair we setup earlier.
your choosing. A good choice is a ``c5.4xlarge`` (16 cores, 32 GiB DRAM) or a ``z1d.2xlarge`` (8 cores, 64 GiB DRAM).
#. In the *Key pair (login)* drop-down, select the ``firesim`` key pair we set up earlier.
#. In the *Network settings* drop-down click *edit* and modify the following settings:
#. Under *VPC - required*, select the ``firesim`` VPC. Any subnet within the ``firesim`` VPC is fine.
@ -47,11 +51,11 @@ To launch a manager instance, follow these steps:
created for you earlier. Do **NOT** select the ``for-farms-only-firesim`` security group that might also be in the list (it is also fine if this group does not appear in your list).
#. In the *Configure storage* section, increase the size of the root
volume to at least 300GB. The default of 85GB can quickly become too small as
volume to at least 300GB. The default of 120GB can quickly become too small as
you accumulate large Vivado reports/outputs, large waveforms, XSim outputs,
and large root filesystems for simulations. You should remove the
small (5-8GB) secondary volume that is added by default.
#. In the *Advanced details* drop-down, we'll leave most settings unchanged. The exceptions being:
#. In the *Advanced details* drop-down, change the following:
#. Under *Termination protection*, select Enable. This adds a layer of
protection to prevent your manager instance from being terminated by
@ -62,7 +66,7 @@ To launch a manager instance, follow these steps:
.. include:: /../scripts/machine-launch-script.sh
:code: bash
When your instance boots, this will install a compatible set of all the dependencies needed to run FireSim on your instance using conda.
When your instance boots, this will install a compatible set of all the dependencies needed to run FireSim on your instance using Conda.
#. Double check your configuration. The most common misconfigurations that may require repeating this process include:
@ -110,7 +114,10 @@ In either case, ``ssh`` into your instance (e.g. ``ssh -i firesim.pem centos@YOU
machine launch script started
machine launch script completed
Once this line appears, exit and re-``ssh`` into the system. If you want
You can also view the live output of the installation process by running ``tail -f /tmp/machine-launchstatus.log``.
Once ``machine launch script completed`` appears in
``/tmp/machine-launchstatus``, exit and re-``ssh`` into the system. If you want
to use ``mosh``, ``mosh`` back into the system.
Key Setup, Part 2
@ -155,7 +162,7 @@ first time will take some time -- however each time after that should be instant
Also, if your ``firesim.pem`` key requires a passphrase, you will be asked for
it here and ``ssh-agent`` should cache it.
**Every time you log in to your manager instance to use FireSim, you should ``cd`` into
**Every time you log in to your manager instance to use FireSim, you should** ``cd`` **into
your firesim directory and source this file again.**
@ -170,10 +177,11 @@ through the rest of the FireSim setup process. To run it, do the following:
firesim managerinit --platform f1
This will first prompt you to setup AWS credentials on the instance, which allows
the manager to automatically manage build/simulation nodes. See
https://docs.aws.amazon.com/cli/latest/userguide/tutorial-ec2-ubuntu.html#configure-cli-launch-ec2
for more about these credentials. When prompted, you should specify the same
region that you chose above and set the default output format to ``json``.
the manager to automatically manage build/simulation nodes. You can use the same
AWS access key you created when running setup commands on the ``t2.nano``
instance earlier (in :ref:`run-scripts-t2`). When prompted, you should specify the same
region that you've been selecting thus far (one of ``us-east-1``, ``us-west-2``, or
``eu-west-1``) and set the default output format to ``json``.
Next, it will prompt you for an email address, which is used to
send email notifications upon FPGA build completion and optionally for

View File

@ -10,6 +10,7 @@ FireSim, as well as cloning/installing FireSim on your manager instance.
:maxdepth: 2
:caption: Initial Setup/Installation:
Background-Terminology
First-time-AWS-User-Setup
Configuring-Required-Infrastructure-in-Your-AWS-Account
Setting-up-your-Manager-Instance

View File

@ -10,23 +10,11 @@ This will require one ``f1.16xlarge`` (8 FPGA) instance.
Make sure you are ``ssh`` or ``mosh``'d into your manager instance and have sourced
``sourceme-manager.sh`` before running any of these commands.
Returning to a clean configuration
-------------------------------------
If you already ran the single-node tutorial, let's return to a clean FireSim
manager configuration by doing the following:
.. code-block:: bash
cd firesim/deploy
cp sample-backup-configs/sample_config_runtime.yaml config_runtime.yaml
Building target software
------------------------
If you already built target software during the single-node tutorial, you can
skip to the next part (Setting up the manager configuration). If you haven't followed the single-node tutorial,
If you already built target software during the single-node getting started guide, you can
skip to the next part (Setting up the manager configuration). If you haven't followed the single-node getting started guide,
continue with this section.
In these instructions, we'll assume that you want to boot the buildroot-based
@ -37,20 +25,22 @@ this like so:
.. code-block:: bash
cd firesim/sw/firesim-software
./init-submodules.sh
./marshal -v build br-base.json
This process will take about 10 to 15 minutes on a ``c5.4xlarge`` instance.
Once this is completed, you'll have the following files:
- ``firesim/sw/firesim-software/images/firechip/br-base/br-disk-bin`` - a bootloader + Linux
- ``firesim/sw/firesim-software/images/firechip/br-base/br-base-bin`` - a bootloader + Linux
kernel image for the nodes we will simulate.
- ``firesim/sw/firesim-software/images/firechip/br-base/br-disk.img`` - a disk image for
- ``firesim/sw/firesim-software/images/firechip/br-base/br-base.img`` - a disk image for
each of the nodes we will simulate
These files will be used to form base images to either build more complicated
workloads (see the :ref:`defining-custom-workloads` section) or to copy around
for deploying.
Setting up the manager configuration
-------------------------------------
@ -66,68 +56,35 @@ you have not modified it):
:code: yaml
For the 8-node cluster simulation, the defaults in this file are close to what
we want but require slight modification. Let's outline the important parameters:
we want but require slight modification. Let's outline the important parameters
we need to change:
* ``f1.16xlarges:``: Change this parameter to ``1``. This tells the manager that we want to launch one ``f1.16xlarge`` when we call the ``launchrunfarm`` command.
* ``f1.2xlarges:``: Change this parameter to ``0``. This tells the manager to not launch any ``f1.2xlarge`` machines when we call the ``launchrunfarm`` command.
* ``topology:``: Change this parameter to ``example_8config``. This tells the manager to use the topology named ``example_8config`` which is defined in ``deploy/runtools/user_topology.py``. This topology simulates an 8-node cluster with one ToR switch.
* ``default_hw_config:`` Change this parameter to ``firesim_rocket_quadcore_nic_l2_llc4mb_ddr3``. This tells the manager that we want to simulate a quad-core Rocket Chip configuration with 512 KB of L2, 4 MB of L3 (LLC), 16 GB of DDR3, and a NIC, for each of the simulated nodes in the topology.
.. attention::
**[Advanced users] Simulating BOOM instead of Rocket Chip**: If you would like to simulate a single-core `BOOM <https://github.com/ucb-bar/riscv-boom>`__ as a target, set ``default_hw_config`` to ``firesim_boom_singlecore_nic_l2_llc4mb_ddr3``.
There are also some parameters that we won't need to change, but are worth highlighting:
* ``f1.16xlarges: 1``: Change this parameter. This tells the manager that we want to launch one ``f1.16xlarge`` when we call the ``launchrunfarm`` command.
* ``f1.4xlarges: 0``: Change this parameter. This tells the manager to not launch any ``f1.4xlarge`` machines when we call the ``launchrunfarm`` command.
* ``topology: example_8config``: This tells the manager to use the topology named ``example_8config`` which is defined in ``deploy/runtools/user_topology.py``. This topology simulates an 8-node cluster with one ToR switch.
* ``link_latency: 6405``: This models a network with 6405 cycles of link latency. Since we are modeling processors running at 3.2 GHz, 1 cycle = 1/3.2 ns, so 6405 cycles is roughly 2 microseconds.
* ``switching_latency: 10``: This models switches with a minimum port-to-port latency of 10 cycles.
* ``net_bandwidth: 200``: This sets the bandwidth of the NICs to 200 Gbit/s. Currently you can set any integer value less than this without making hardware modifications.
* ``default_hw_config: firesim_rocket_quadcore_nic_l2_llc4mb_ddr3``: This tells the manager to use a quad-core Rocket Chip configuration with 512 KB of L2, 4 MB of L3 (LLC) and 16 GB of DDR3, with a NIC, for each of the simulated nodes in the topology.
You'll see other parameters here, like ``run_instance_market``,
``spot_interruption_behavior``, and ``spot_max_price``. If you're an experienced
AWS user, you can see what these do by looking at the
:ref:`manager-configuration-files` section. Otherwise, don't change them.
As in the single-node tutorial, we will leave the ``workload:`` mapping
unchanged here, since we do want to run the buildroot-based Linux on our
As in the single-node getting started guide, we will leave the ``workload:`` mapping
unchanged here, since we want to run the default buildroot-based Linux on our
simulated system. The ``terminate_on_completion`` feature is an advanced feature
that you can learn more about in the :ref:`manager-configuration-files`
section.
As a final sanity check, your ``config_runtime.yaml`` file should now look like this:
.. code-block:: yaml
run_farm:
    base_recipe: run-farm-recipes/aws_ec2.yaml
    recipe_arg_overrides:
        run_farm_tag: mainrunfarm
        always_expand_run_farm: true
        launch_instances_timeout_minutes: 60
        run_instance_market: ondemand
        spot_interruption_behavior: terminate
        spot_max_price: ondemand
        default_simulation_dir: /home/centos
        run_farm_hosts_to_use:
            - f1.16xlarge: 1
            - f1.4xlarge: 0
            - f1.2xlarge: 0
            - m4.16xlarge: 0
            - z1d.3xlarge: 0
            - z1d.6xlarge: 0

target_config:
    topology: example_8config
    no_net_num_nodes: 1
    link_latency: 6405
    switching_latency: 10
    net_bandwidth: 200
    profile_interval: -1
    default_hw_config: firesim_rocket_quadcore_nic_l2_llc4mb_ddr3
    plusarg_passthrough: ""

workload:
    workload_name: linux-uniform.json
    terminate_on_completion: no
    suffix_tag: null
.. attention::
**[Advanced users] Simulating BOOM instead of Rocket Chip**: If you would like to simulate a single-core `BOOM <https://github.com/ucb-bar/riscv-boom>`__ as a target, set ``default_hw_config`` to ``firesim_boom_singlecore_nic_l2_llc4mb_ddr3``.
Launching a Simulation!
-----------------------------
@ -183,7 +140,7 @@ Setting up the simulation infrastructure
The manager will also take care of building and deploying all software
components necessary to run your simulation (including switches for the networked
case). The manager will also handle
flashing FPGAs. To tell the manager to setup our simulation infrastructure,
programming FPGAs. To tell the manager to set up our simulation infrastructure,
let's run:
.. code-block:: bash
@ -251,7 +208,7 @@ infrastructure necessary to run everything in our simulation.
So, let's launch our simulation!
Running a simulation!
Running the simulation
^^^^^^^^^^^^^^^^^^^^^^^^^
Finally, let's run our simulation! To do so, run:
@ -327,7 +284,7 @@ a live status page once simulations are kicked-off:
--------------------------------------------------------------------------------
In cycle-accurate networked mode, this will only exit when any ONE of the
In cycle-accurate networked mode, this will exit when any ONE of the
simulated nodes shuts down. So, let's let it run and open another ssh
connection to the manager instance. From there, ``cd`` into your firesim
directory again and ``source sourceme-manager.sh`` again to get our ssh key
@ -382,7 +339,7 @@ If you also ran the single-node no-nic simulation you'll notice a difference
in this boot output -- here, Linux sees the NIC and its assigned MAC address and
automatically brings up the ``eth0`` interface at boot.
Now, you can log in to the system! The username is ``root``.
Now, you can log in to the system! The username is ``root`` and there is no password.
At this point, you should be presented with a regular console,
where you can type commands into the simulation and run programs. For example:
@ -396,7 +353,7 @@ where you can type commands into the simulation and run programs. For example:
#
At this point, you can run workloads as you'd like. To finish off this tutorial,
At this point, you can run workloads as you'd like. To finish off this getting started guide,
let's poweroff the simulated system and see what the manager does. To do so,
in the console of the simulated system, run ``poweroff -f``:
@ -530,7 +487,7 @@ useful for running benchmarks automatically. Note that there is a directory for
each simulated node and each simulated switch in the cluster. The
:ref:`defining-custom-workloads` section describes this process in detail.
For now, let's wrap-up our tutorial by terminating the ``f1.16xlarge`` instance
For now, let's wrap-up our guide by terminating the ``f1.16xlarge`` instance
that we launched. To do so, run:
.. code-block:: bash
@ -575,5 +532,5 @@ responsible for ensuring that your instances are terminated appropriately.**
Congratulations on running a cluster FireSim simulation! At this point, you can
check-out some of the advanced features of FireSim in the sidebar to the left.
Or, hit next to continue to a tutorial that shows you how to build your own
Or, hit next to continue to a guide that shows you how to build your own
custom FPGA images.

View File

@ -16,7 +16,7 @@ Building target software
In these instructions, we'll assume that you want to boot Linux on your
simulated node. To do so, we'll need to build our FireSim-compatible RISC-V
Linux distro. For this tutorial, we will use a simple buildroot-based
Linux distro. For this guide, we will use a simple buildroot-based
distribution. You can do this like so:
.. code-block:: bash
@ -51,37 +51,28 @@ you have not modified it):
.. include:: DOCS_EXAMPLE_config_runtime.yaml
:code: yaml
We'll need to modify a couple of these lines.
We won't have to modify any of the defaults for this single-node simulation guide,
but let's walk through several of the key parts of the file.
First, let's tell the manager to use the correct numbers and types of instances.
You'll notice that in the ``run_farm`` mapping, the manager is configured to
launch a Run Farm named ``mainrunfarm`` (given by the ``run_farm_tag``). Notice that under ``run_farm_hosts_to_use`` no ``f1.16xlarge``\ s,
``m4.16xlarge``\ s, ``f1.4xlarge``\ s, or ``f1.2xlarge``\ s are used. The tag specified here allows the
manager to differentiate amongst many parallel run farms (each running
a workload) that you may be operating -- but more on that later.
First, let's see how the correct numbers and types of instances are specified to the manager:
Since we only want to simulate a single node, let's switch to using one
``f1.2xlarge``. To do so, change the ``run_farm_hosts_to_use`` sequence to the following:
* You'll notice first that in the ``run_farm`` mapping, the manager is
configured to launch a Run Farm named ``mainrunfarm`` (given by the
``run_farm_tag``). The tag specified here allows the manager to differentiate
amongst many parallel run farms (each running some workload on some target design) that you may be
operating. In this case, the default is fine since we're only running
a single run farm.
* Notice that under ``run_farm_hosts_to_use``, the only non-zero value is for ``f1.2xlarge``,
which should be set to ``1``. This is exactly what we'll need for this guide.
* You'll see other parameters in the ``run_farm`` mapping, like
``run_instance_market``, ``spot_interruption_behavior``, and
``spot_max_price``. If you're an experienced AWS user, you can see what these
do by looking at the :ref:`manager-configuration-files` section. Otherwise,
don't change them.
.. code-block:: yaml
run_farm_hosts_to_use:
    - f1.16xlarge: 0
    - f1.4xlarge: 0
    - f1.2xlarge: 1
    - m4.16xlarge: 0
    - z1d.3xlarge: 0
    - z1d.6xlarge: 0
    - z1d.12xlarge: 0
You'll see other parameters in the ``run_farm`` mapping, like ``run_instance_market``,
``spot_interruption_behavior``, and ``spot_max_price``. If you're an experienced
AWS user, you can see what these do by looking at the
:ref:`manager-configuration-files` section. Otherwise, don't change them.
Now, let's verify that the ``target_config`` mapping will model the correct target design.
By default, it is set to model a single-node with no network.
It should look like the following:
Next, let's look at how the target design is specified to the manager. This is located
in the ``target_config`` section of ``firesim/deploy/config_runtime.yaml``, shown below
(with comments removed):
.. code-block:: yaml
@ -93,67 +84,47 @@ It should look like the following:
net_bandwidth: 200
profile_interval: -1
# This references a section from config_hwdb.yaml
# In homogeneous configurations, use this to set the hardware config deployed
# for all simulators
default_hw_config: firesim_rocket_quadcore_no_nic_l2_llc4mb_ddr3
plusarg_passthrough: ""
Note ``topology`` is set to
``no_net_config``, indicating that we do not want a network. Then,
``no_net_num_nodes`` is set to ``1``, indicating that we only want to simulate
one node. Lastly, the ``default_hw_config`` is
``firesim_rocket_quadcore_no_nic_l2_llc4mb_ddr3``. This uses a hardware configuration that does not
have a NIC. This hardware configuration models a Quad-core Rocket Chip with 4
MB of L2 cache and 16 GB of DDR3, and **no** network interface card.
We will leave the ``workload`` mapping unchanged here, since we do
want to run the buildroot-based Linux on our simulated system. The ``terminate_on_completion``
feature is an advanced feature that you can learn more about in the
:ref:`manager-configuration-files` section.
Here are some highlights of this section:
As a final sanity check, in the mappings we changed, the ``config_runtime.yaml`` file should now look like this:
.. code-block:: yaml
run_farm:
    base_recipe: run-farm-recipes/aws_ec2.yaml
    recipe_arg_overrides:
        run_farm_tag: mainrunfarm
        always_expand_run_farm: true
        launch_instances_timeout_minutes: 60
        run_instance_market: ondemand
        spot_interruption_behavior: terminate
        spot_max_price: ondemand
        default_simulation_dir: /home/centos
        run_farm_hosts_to_use:
            - f1.16xlarge: 0
            - f1.4xlarge: 0
            - f1.2xlarge: 1
            - m4.16xlarge: 0
            - z1d.3xlarge: 0
            - z1d.6xlarge: 0

target_config:
    topology: no_net_config
    no_net_num_nodes: 1
    link_latency: 6405
    switching_latency: 10
    net_bandwidth: 200
    profile_interval: -1
    default_hw_config: firesim_rocket_quadcore_no_nic_l2_llc4mb_ddr3
    plusarg_passthrough: ""

workload:
    workload_name: linux-uniform.json
    terminate_on_completion: no
    suffix_tag: null
* ``topology`` is set to ``no_net_config``, indicating that we do not want a
network.
* ``no_net_num_nodes`` is set to ``1``, indicating that we only want to
simulate one node.
* ``default_hw_config`` is ``firesim_rocket_quadcore_no_nic_l2_llc4mb_ddr3``.
This references a pre-built, publicly-available AWS FPGA Image that is
specified in ``firesim/deploy/config_hwdb.yaml``. This pre-built image
models a quad-core Rocket Chip with 4 MB of L2 cache and 16 GB
of DDR3, and no network interface card.
.. attention::
**[Advanced users] Simulating BOOM instead of Rocket Chip**: If you would like to simulate a single-core `BOOM <https://github.com/ucb-bar/riscv-boom>`__ as a target, set ``default_hw_config`` to ``firesim_boom_singlecore_no_nic_l2_llc4mb_ddr3``.
Finally, let's take a look at the ``workload`` section, which defines the
target software that we'd like to run on the simulated target design. By
default, it should look like this:
.. code-block:: yaml
workload:
    workload_name: linux-uniform.json
    terminate_on_completion: no
    suffix_tag: null
We'll also leave the ``workload`` mapping unchanged here, since we
want to run the specified buildroot-based Linux (``linux-uniform.json``) on our
simulated system. The ``terminate_on_completion`` feature is an advanced
feature that you can learn more about in the :ref:`manager-configuration-files`
section.
Launching a Simulation!
-----------------------------
@ -209,7 +180,7 @@ Setting up the simulation infrastructure
The manager will also take care of building and deploying all software
components necessary to run your simulation. The manager will also handle
flashing FPGAs. To tell the manager to setup our simulation infrastructure,
programming FPGAs. To tell the manager to set up our simulation infrastructure,
let's run:
.. code-block:: bash
@ -252,7 +223,7 @@ necessary to run a simulation.
So, let's launch our simulation!
Running a simulation!
Running the simulation
^^^^^^^^^^^^^^^^^^^^^^^^^
Finally, let's run our simulation! To do so, run:
@ -312,7 +283,7 @@ live status page:
This will only exit once all of the simulated nodes have shut down. So, let's let it
run and open another ssh connection to the manager instance. From there, ``cd`` into
your firesim directory again and ``source sourceme-manager.sh`` again to get
our ssh key setup. To access our simulated system, ssh into the IP address being
our ssh key set up. To access our simulated system, ssh into the IP address being
printed by the status page, **from your manager instance**. In our case, from
the above output, we see that our simulated system is running on the instance with
IP ``172.30.2.174``. So, run:
@ -358,7 +329,7 @@ with a Linux login prompt, like so:
You can ignore the messages about the network -- that is expected because we
are simulating a design without a NIC.
Now, you can log in to the system! The username is ``root``.
Now, you can log in to the system! The username is ``root`` and there is no password.
At this point, you should be presented with a regular console,
where you can type commands into the simulation and run programs. For example:
@ -372,7 +343,7 @@ where you can type commands into the simulation and run programs. For example:
#
At this point, you can run workloads as you'd like. To finish off this tutorial,
At this point, you can run workloads as you'd like. To finish off this guide,
let's poweroff the simulated system and see what the manager does. To do so,
in the console of the simulated system, run ``poweroff -f``:
@ -453,7 +424,7 @@ automatically copied back to our manager after we run a simulation, which is
useful for running benchmarks automatically. The
:ref:`defining-custom-workloads` section describes this process in detail.
For now, let's wrap-up our tutorial by terminating the ``f1.2xlarge`` instance
For now, let's wrap-up our guide by terminating the ``f1.2xlarge`` instance
that we launched. To do so, run:
.. code-block:: bash
@ -499,4 +470,4 @@ Congratulations on running your first FireSim simulation! At this point, you can
check-out some of the advanced features of FireSim in the sidebar to the left
(for example, we expect that many people will be interested in the ability to
automatically run the SPEC17 benchmarks: :ref:`spec-2017`), or you can continue
on with the cluster simulation tutorial.
on with the cluster simulation guide.

View File

@ -0,0 +1,44 @@
AWS EC2 F1 Getting Started Guide
=====================================
The getting started guides that follow this page will guide you through the complete flow for
getting an example FireSim simulation up and running using AWS EC2 F1. At the end of this
guide, you'll have a simulation that simulates a single quad-core Rocket
Chip-based node with a 4 MB last level cache, 16 GB DDR3, and no NIC. After
this, you can continue to a guide that shows you how to simulate
a globally-cycle-accurate cluster-scale FireSim simulation. The final guide
will show you how to build your own FPGA images with customized hardware.
After you complete these guides, you can look at the "Advanced Docs"
in the sidebar to the left.
Here's a high-level outline of what we'll be doing in our AWS EC2 F1 getting started guides:
#. **Initial Setup/Installation**
a. Background/Terminology: We will discuss some key terminology that will be used in
the rest of the guides.
#. First-time AWS User Setup: You can skip this if you already have an AWS
account/payment method set up.
#. Configuring required AWS resources in your account: This sets up the
appropriate VPCs/subnets/security groups required to run FireSim.
#. Setting up a "Manager Instance" from which you will coordinate building
and deploying simulations.
#. **Single-node simulation guide**: This guide walks you through the process of running one simulation on a Run Farm consisting of a single ``f1.2xlarge``, using our pre-built public FireSim AGFIs.
#. **Cluster simulation guide**: This guide walks you through the process of running an 8-node cluster simulation on a Run Farm consisting of one ``f1.16xlarge``, using our pre-built public FireSim AGFIs and switch models.
#. **Building your own hardware designs guide (Chisel to FPGA Image)**: This guide walks you through the full process of taking Rocket Chip RTL and any custom RTL plugged into Rocket Chip and producing a FireSim AGFI to plug into your simulations. This automatically runs Chisel elaboration, FAME-1 Transformation, and the Vivado FPGA flow.
Generally speaking, you only need to follow step 4 if you're modifying Chisel
RTL or changing non-runtime configurable hardware parameters.
.. toctree::
:maxdepth: 2
Initial-Setup/index
Running-Simulations/index
Building-a-FireSim-AFI

View File

@ -1,63 +0,0 @@
.. _first-time-aws:
First-time AWS User Setup
==============================
If you've never used AWS before and don't have an account, follow the instructions
below to get started.
Creating an AWS Account
-----------------------
First, you'll need an AWS account. Create one by going to
`aws.amazon.com <https://aws.amazon.com>`__ and clicking "Sign Up."
You'll want to create a personal account. You will have to give it a
credit card number.
AWS Credit at Berkeley
----------------------
If you're an internal user at Berkeley and affiliated with UCB-BAR or the RISE
Lab, see the `RISE Lab Wiki
<https://rise.cs.berkeley.edu/wiki/resources/aws>`__ for instructions on
getting access to the AWS credit pool. Otherwise, continue with the following section.
.. _limitincrease:
Requesting Limit Increases
--------------------------
In our experience, new AWS accounts do not have access to EC2 F1 instances by
default. In order to get access, you should file a limit increase
request. You can learn more about EC2 instance limits here: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-on-demand-instances.html#ec2-on-demand-instances-limits
To request a limit increase, follow these steps:
https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-resource-limits.html
You'll probably want to start out with the following request, depending on your existing limits:
.. code-block:: text
Limit Type: EC2 Instances
Region: US East (Northern Virginia)
Primary Instance Type: All F instances
Limit: Instance Limit
New limit value: 64
This limit of 64 vCPUs for F instances allows you to run one node on the ``f1.2xlarge`` or eight nodes on the
``f1.16xlarge``.
For the "Use Case Description", you should describe your project and write
something about hardware simulation and mention that information about the tool
you're using can be found at: https://fires.im
This process has a human in the loop, so you should submit it ASAP. At
this point, you should wait for the response to this request.
If you're at Berkeley/UCB-BAR, you also need to wait until your account has
been added to the RISE billing pool, otherwise your personal CC will be charged
for AWS usage.
Hit Next below to continue.

View File

@ -1,41 +0,0 @@
AWS EC2 F1 Getting Started
==========================
The tutorials that follow this page will guide you through the complete flow for
getting an example FireSim simulation up and running using AWS EC2 F1. At the end of this
tutorial, you'll have a simulation that simulates a single quad-core Rocket
Chip-based node with a 4 MB last level cache, 16 GB DDR3, and no NIC. After
this, you can continue to a tutorial that shows you how to simulate
a globally-cycle-accurate cluster-scale FireSim simulation. The final tutorial
will show you how to build your own FPGA images with customized hardware.
After you complete these tutorials, you can look at the "Advanced Docs"
in the sidebar to the left.
Here's a high-level outline of what we'll be doing in our AWS EC2 F1 tutorials:
#. **Initial Setup/Installation**
a. First-time AWS User Setup: You can skip this if you already have an AWS
account/payment method set up.
#. Configuring required AWS resources in your account: This sets up the
appropriate VPCs/subnets/security groups required to run FireSim.
#. Setting up a "Manager Instance" from which you will coordinate building
and deploying simulations.
#. **Single-node simulation tutorial**: This tutorial guides you through the process of running one simulation on a Run Farm consisting of a single ``f1.2xlarge``, using our pre-built public FireSim AGFIs.
#. **Cluster simulation tutorial**: This tutorial guides you through the process of running an 8-node cluster simulation on a Run Farm consisting of one ``f1.16xlarge``, using our pre-built public FireSim AGFIs and switch models.
#. **Building your own hardware designs tutorial (Chisel to FPGA Image)**: This tutorial guides you through the full process of taking Rocket Chip RTL and any custom RTL plugged into Rocket Chip and producing a FireSim AGFI to plug into your simulations. This automatically runs Chisel elaboration, FAME-1 Transformation, and the Vivado FPGA flow.
Generally speaking, you only need to follow step 4 if you're modifying Chisel
RTL or changing non-runtime configurable hardware parameters.
.. toctree::
:maxdepth: 2
Initial-Setup/index
Running-Simulations-Tutorial/index
Building-a-FireSim-AFI

View File

@ -0,0 +1,19 @@
.. |fpga_name| replace:: RHS Research Nitefury II
.. |hwdb_entry_name| replace:: ``nitefury_firesim_rocket_singlecore_no_nic``
.. |hwdb_entry_name_non_code| replace:: nitefury_firesim_rocket_singlecore_no_nic
.. |builder_name| replace:: Xilinx Vivado
.. |bit_builder_path| replace:: ``bit-builder-recipes/rhsresearch_nitefury_ii.yaml``
.. |vivado_with_version| replace:: Vivado 2022.1
.. |vivado_version_number_only| replace:: 2022.1
.. |vivado_default_install_path| replace:: ``/tools/Xilinx/Vivado/2022.1``
.. |board_package_install| replace:: No special board support package is required for the Nitefury II. Move on to the next step.
Building Your Own Hardware Designs
===================================================================
This section will guide you through building a |fpga_name| FPGA bitstream to run FireSim simulations.
.. include:: Xilinx-XDMA-Build-Farm-Setup-Template.rst
.. include:: Xilinx-All-Bitstream-Template.rst

View File

@ -0,0 +1,71 @@
Configuring a Build in the Manager
-------------------------------------
In the ``deploy/config_build.yaml`` file, you will notice that the ``builds_to_run``
section currently contains several lines, which
indicates to the build system that you want to run all of these "build recipes" in
parallel, with the parameters for each "build recipe" listed in the relevant section of the
``deploy/config_build_recipes.yaml`` file.
In this guide, we'll build the default FireSim design for the |fpga_name|, which is specified
by the |hwdb_entry_name| section in ``deploy/config_build_recipes.yaml``.
This was the same configuration used to build the pre-built bitstream that you used to run
simulations in the guide to running a simulation.
Looking at the |hwdb_entry_name| section in ``deploy/config_build_recipes.yaml``,
there are a few notable items:
* ``TARGET_CONFIG`` specifies that this configuration is a simple singlecore RISC-V Rocket with a single DRAM channel.
* ``bit_builder_recipe`` points to |bit_builder_path|, which is found in the :gh-file-ref:`deploy` directory and tells the FireSim build system how to build bitstreams for this FPGA.
Having looked at this entry, let's now set up the build in ``deploy/config_build.yaml``. First, we'll set up the ``build_farm`` mapping, which specifies the Build Farm Machines that are available to build FPGA bitstreams (a sketch of the resulting mapping is shown after the list below).
* ``base_recipe`` will map to ``build-farm-recipes/externally_provisioned.yaml``. This indicates to the FireSim manager that the machines used to run builds are existing machines that have been set up by the user, instead of cloud instances that are automatically provisioned.
* ``default_build_dir`` is the directory in which builds will run out of on your Build Farm Machines. Change the default ``null`` to a path where you would like temporary build data to be stored on your Build Farm Machines.
* ``build_farm_hosts`` is a section that contains a list of IP addresses or hostnames of machines in your Build Farm. By default, ``localhost`` is specified. If you are using a separate Build Farm Machine, you should replace this with the IP address or hostname of the Build Farm Machine on which you would like to run the build.
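Putting these together, a hedged sketch of the resulting ``build_farm`` mapping in
``deploy/config_build.yaml`` might look like the following (the build directory path is a
placeholder, and the exact nesting of the overridden arguments is an assumption):

.. code-block:: yaml

    build_farm:
        base_recipe: build-farm-recipes/externally_provisioned.yaml
        recipe_arg_overrides:
            # Where temporary build data is stored on each Build Farm Machine
            # (placeholder path; replace with your own).
            default_build_dir: /scratch/firesim-build
            build_farm_hosts:
                # Replace with the IP address or hostname of your Build Farm
                # Machine, or keep localhost to build on this machine.
                - localhost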
Having configured our Build Farm, let's specify the design we'd like to build. To do this, edit the ``builds_to_run`` section in ``deploy/config_build.yaml`` so that it looks like the following:
.. code-block:: text
:substitutions:
builds_to_run:
- |hwdb_entry_name_non_code|
In essence, you should delete or comment out all the other items in the ``builds_to_run`` section besides |hwdb_entry_name|.
Running the Build
----------------------
Now, we can run a build like so:
.. code-block:: bash
firesim buildbitstream
This will run through the entire build process, taking the Chisel (or Verilog) RTL
and producing a bitstream that runs on the |fpga_name| FPGA. This whole process will
usually take a few hours. When the build
completes, you will see a directory in
``deploy/results-build/``, named after your build parameter
settings, that contains all of the outputs of the |builder_name| build process.
Additionally, the manager will print out a path to a log file
that describes everything that happened, in detail, during this run (this is a
good file to send us if you encounter problems).
The manager will also print an entry that can be added to ``config_hwdb.yaml`` so that the
bitstream can be used to run simulations. This entry will contain a ``bitstream_tar`` key whose
value is the path to the final generated bitstream file. You can share generated bitstreams
with others by sharing the file listed in ``bitstream_tar`` and the ``config_hwdb.yaml``
entry for it.
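For illustration only, a hypothetical ``config_hwdb.yaml`` entry printed by the manager might look roughly like the following; the entry name, path, and any additional keys will match whatever the manager actually prints for your build:

.. code-block:: yaml

   # hypothetical entry name and path -- use what the manager prints for your build
   my_custom_firesim_design:
       bitstream_tar: file:///path/to/firesim/deploy/results-build/YOUR_BUILD_DIR/firesim.tar.gz
       deploy_quintuplet_override: null
       custom_runtime_config: null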
Now that you know how to generate your own FPGA image, you can modify the target-design
to add your own features, then build a FireSim-compatible FPGA image automatically!
This is the end of the Getting Started Guide. To learn more advanced FireSim
features, you can choose a link under the "Advanced Docs" section to the left.

View File

@ -0,0 +1,19 @@
.. |fpga_name| replace:: Xilinx Alveo U250
.. |hwdb_entry_name| replace:: ``alveo_u250_firesim_rocket_singlecore_no_nic``
.. |hwdb_entry_name_non_code| replace:: alveo_u250_firesim_rocket_singlecore_no_nic
.. |builder_name| replace:: Xilinx Vivado
.. |bit_builder_path| replace:: ``bit-builder-recipes/xilinx_alveo_u250.yaml``
.. |vivado_with_version| replace:: Vivado 2021.1
.. |vivado_version_number_only| replace:: 2021.1
.. |vivado_default_install_path| replace:: ``/tools/Xilinx/Vivado/2021.1``
.. |board_package_install| replace:: Download the ``au250`` board support package directory from https://github.com/Xilinx/open-nic-shell/tree/main/board_files/Xilinx and place the directory in ``/tools/Xilinx/Vivado/2021.1/data/xhub/boards/XilinxBoardStore/boards/Xilinx/``.
Building Your Own Hardware Designs
===================================================================
This section will guide you through building a |fpga_name| FPGA bitstream to run FireSim simulations.
.. include:: Xilinx-XDMA-Build-Farm-Setup-Template.rst
.. include:: Xilinx-All-Bitstream-Template.rst

View File

@ -0,0 +1,19 @@
.. |fpga_name| replace:: Xilinx Alveo U280
.. |hwdb_entry_name| replace:: ``alveo_u280_firesim_rocket_singlecore_no_nic``
.. |hwdb_entry_name_non_code| replace:: alveo_u280_firesim_rocket_singlecore_no_nic
.. |builder_name| replace:: Xilinx Vivado
.. |bit_builder_path| replace:: ``bit-builder-recipes/xilinx_alveo_u280.yaml``
.. |vivado_with_version| replace:: Vivado 2021.1
.. |vivado_version_number_only| replace:: 2021.1
.. |vivado_default_install_path| replace:: ``/tools/Xilinx/Vivado/2021.1``
.. |board_package_install| replace:: Download the ``au280`` board support package directory from https://github.com/Xilinx/open-nic-shell/tree/main/board_files/Xilinx and place the directory in ``/tools/Xilinx/Vivado/2021.1/data/xhub/boards/XilinxBoardStore/boards/Xilinx/``.
Building Your Own Hardware Designs
===================================================================
This section will guide you through building a |fpga_name| FPGA bitstream to run FireSim simulations.
.. include:: Xilinx-XDMA-Build-Farm-Setup-Template.rst
.. include:: Xilinx-All-Bitstream-Template.rst

View File

@ -0,0 +1,19 @@
.. |fpga_name| replace:: Xilinx VCU118
.. |hwdb_entry_name| replace:: ``xilinx_vcu118_firesim_rocket_singlecore_4GB_no_nic``
.. |hwdb_entry_name_non_code| replace:: xilinx_vcu118_firesim_rocket_singlecore_4GB_no_nic
.. |builder_name| replace:: Xilinx Vivado
.. |bit_builder_path| replace:: ``bit-builder-recipes/xilinx_vcu118.yaml``
.. |vivado_with_version| replace:: Vivado 2019.1
.. |vivado_version_number_only| replace:: 2019.1
.. |vivado_default_install_path| replace:: ``/tools/Xilinx/Vivado/2019.1``
.. |board_package_install| replace:: No special board support package is required for the VCU118. Move on to the next step.
Building Your Own Hardware Designs
===================================================================
This section will guide you through building a |fpga_name| FPGA bitstream to run FireSim simulations.
.. include:: Xilinx-XDMA-Build-Farm-Setup-Template.rst
.. include:: Xilinx-All-Bitstream-Template.rst

View File

@ -0,0 +1,20 @@
.. |fpga_name| replace:: Xilinx Vitis-enabled U250
.. |hwdb_entry_name| replace:: ``vitis_firesim_rocket_singlecore_no_nic``
.. |hwdb_entry_name_non_code| replace:: vitis_firesim_rocket_singlecore_no_nic
.. |builder_name| replace:: Xilinx Vitis
.. |bit_builder_path| replace:: ``bit-builder-recipes/vitis.yaml``
.. warning:: ⚠️ **We highly recommend using the XDMA-based U250 flow instead of this
Vitis-based flow. You can find the XDMA-based flow here:** :ref:`u250-standard-flow`.
The Vitis-based flow does not support DMA-based FireSim bridges (e.g.,
TracerV, Synthesizable Printfs, etc.), while the XDMA-based flows support
all FireSim features. If you're unsure, use the XDMA-based U250 flow
instead: :ref:`u250-standard-flow`
Building Your Own Hardware Designs
===================================================================
This section will guide you through building a |fpga_name| FPGA bitstream to run FireSim simulations.
.. include:: Xilinx-All-Bitstream-Template.rst

View File

@ -0,0 +1,72 @@
System Setup
----------------------------------
Here, we'll do some final one-time setup for your Build Farm Machines so that we
can build bitstreams for FireSim simulations automatically.
**These steps assume that you have already followed the earlier setup steps
required to run simulations.**
As noted earlier, it is highly recommended that you use Ubuntu 20.04 LTS as the
host operating system for all machine types in an on-premises setup, as this is
the OS recommended by Xilinx.
Also recall that we make a distinction between the Manager Machine, the Build
Farm Machine(s), and the Run Farm Machine(s). In a simple setup, these can
all be a single machine, in which case you should run the Build Farm Machine
setup steps below on your single machine.
1. Install Vivado for Builds
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
**Machines:** Build Farm Machines.
Running builds for |fpga_name| in FireSim requires |vivado_with_version|.
Other versions are unlikely to work out-of-the-box.
On each Build Farm machine, do the following:
1. Install |vivado_with_version| from the `Xilinx Downloads Website <https://www.xilinx.com/support/download.html>`_. By default, Vivado will be installed to |vivado_default_install_path|. We recommend keeping this default. If you change it to something else, you will need to adjust the path in the rest of the setup steps.
2. Add the following to ``~/.bashrc`` so that ``vivado`` is available when ``ssh``-ing into the machine:
.. code-block:: bash
:substitutions:
source /tools/Xilinx/Vivado/|vivado_version_number_only|/settings64.sh
3. |board_package_install|
If you have multiple Build Farm Machines, you should repeat this process for each.
2. Verify Build Farm Machine environment
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
**Machines:** Manager Machine and Build Farm Machines
Finally, let's ensure that |vivado_with_version| is properly sourced in
your shell setup (i.e. ``.bashrc``) so that any shell on your Build Farm Machines
can use the corresponding programs. The environment variables should be
visible to any non-interactive shells that are spawned.
You can check this by running the following on the Manager Machine,
replacing ``BUILD_FARM_IP`` with ``localhost`` if your Build Farm machine
and Manager machine are the same machine, or replacing it with the Build Farm
machine's IP address if they are different machines.
.. code-block:: bash
ssh BUILD_FARM_IP printenv
Ensure that the output of the command shows that the |vivado_with_version| tools are
present in the printed environment variables (i.e., ``PATH`` and ``XILINX_VIVADO``).
If you have multiple Build Farm machines, you should repeat this process for
each Build Farm machine, replacing ``BUILD_FARM_IP`` with a different Build Farm Machine's
IP address.

View File

@ -0,0 +1,16 @@
.. |fpga_name| replace:: RHS Research Nitefury II
.. _fpga_name: https://rhsresearch.com/collections/rhs-public/products/nitefury-xilinx-artix-fpga-kit-in-nvme-ssd-form-factor-2280-key-m
.. |fpga_power_info| replace:: This step is not required for the Nitefury, since all power is delivered via M.2. or Thunderbolt.
.. |hwdb_entry_name| replace:: ``nitefury_firesim_rocket_singlecore_no_nic``
.. |platform_name| replace:: rhsresearch_nitefury_ii
.. |board_name| replace:: nitefury_ii
.. |tool_type| replace:: Xilinx Vivado
.. |tool_type_lab| replace:: Xilinx Vivado Lab
.. |example_var| replace:: ``XILINX_VIVADO``
.. |deploy_manager_code| replace:: ``RHSResearchNitefuryIIInstanceDeployManager``
.. |fpga_spi_part_number| replace:: ``s25fl256xxxxxx0-spi-x1_x2_x4``
.. |fpga_attach_prereq| replace:: into either an open M.2. slot on your machine or into an M.2. to Thunderbolt enclosure (then attach the enclosure to your system via a Thunderbolt cable). We have successfully used this enclosure: https://www.amazon.com/ORICO-Enclosure-Compatible-Thunderbolt-Type-C-M2V01/dp/B08R9DMFFT. Before permanently installing your Nitefury into your M.2. slot or enclosure, ensure that you have attached the ribbon cable that will be used for JTAG to the underside of the board (see step 4 below).
.. |jtag_help| replace:: JTAG. For the Nitefury, this requires attaching the 14-pin JTAG adapter included with the board to the board using the included ribbon cable, then attaching a USB to JTAG adapter such as the Digilent HS2: https://digilent.com/shop/jtag-hs2-programming-cable/.
.. |nitefury_patch_xdma| replace:: The directory you are now in contains the XDMA kernel module. For the Nitefury to work, we will need to make one modification to the driver. Find the line containing ``#define XDMA_ENGINE_XFER_MAX_DESC``. Change the value on this line from ``0x800`` to ``16``. Then, build and install the driver:
.. include:: Xilinx-XDMA-Template.rst

View File

@ -0,0 +1,16 @@
.. |fpga_name| replace:: Xilinx Alveo U250
.. _fpga_name: https://www.xilinx.com/products/boards-and-kits/alveo/u250.html
.. |fpga_power_info| replace:: For the U250, this is usually PCIe power coming directly from the system's PSU.
.. |hwdb_entry_name| replace:: ``alveo_u250_firesim_rocket_singlecore_no_nic``
.. |platform_name| replace:: xilinx_alveo_u250
.. |board_name| replace:: au250
.. |tool_type| replace:: Xilinx Vivado
.. |tool_type_lab| replace:: Xilinx Vivado Lab
.. |example_var| replace:: ``XILINX_VIVADO``
.. |deploy_manager_code| replace:: ``XilinxAlveoU250InstanceDeployManager``
.. |fpga_spi_part_number| replace:: ``mt25qu01g-spi-x1_x2_x4``
.. |fpga_attach_prereq| replace:: into an open PCIe slot in the machine.
.. |jtag_help| replace:: JTAG.
.. |nitefury_patch_xdma| replace:: The directory you are now in contains the XDMA kernel module. Now, let's build and install it:
.. include:: Xilinx-XDMA-Template.rst

View File

@ -0,0 +1,16 @@
.. |fpga_name| replace:: Xilinx Alveo U280
.. _fpga_name: https://www.xilinx.com/products/boards-and-kits/alveo/u280.html
.. |fpga_power_info| replace:: For the U280, this is usually PCIe power coming directly from the system's PSU.
.. |hwdb_entry_name| replace:: ``alveo_u280_firesim_rocket_singlecore_no_nic``
.. |platform_name| replace:: xilinx_alveo_u280
.. |board_name| replace:: au280
.. |tool_type| replace:: Xilinx Vivado
.. |tool_type_lab| replace:: Xilinx Vivado Lab
.. |example_var| replace:: ``XILINX_VIVADO``
.. |deploy_manager_code| replace:: ``XilinxAlveoU280InstanceDeployManager``
.. |fpga_spi_part_number| replace:: ``mt25qu01g-spi-x1_x2_x4``
.. |fpga_attach_prereq| replace:: into an open PCIe slot in the machine.
.. |jtag_help| replace:: JTAG.
.. |nitefury_patch_xdma| replace:: The directory you are now in contains the XDMA kernel module. Now, let's build and install it:
.. include:: Xilinx-XDMA-Template.rst

View File

@ -0,0 +1,16 @@
.. |fpga_name| replace:: Xilinx VCU118
.. _fpga_name: https://www.xilinx.com/products/boards-and-kits/vcu118.html
.. |fpga_power_info| replace:: For the VCU118, this is usually ATX 4-pin peripheral power (**NOT** PCIe power) from the system's PSU, attached to the FPGA via the "ATX Power Supply Adapter Cable" that comes with the VCU118.
.. |hwdb_entry_name| replace:: ``xilinx_vcu118_firesim_rocket_singlecore_4GB_no_nic``
.. |platform_name| replace:: xilinx_vcu118
.. |board_name| replace:: vcu118
.. |tool_type| replace:: Xilinx Vivado
.. |tool_type_lab| replace:: Xilinx Vivado Lab
.. |example_var| replace:: ``XILINX_VIVADO``
.. |deploy_manager_code| replace:: ``XilinxVCU118InstanceDeployManager``
.. |fpga_spi_part_number| replace:: ``mt25qu01g-spi-x1_x2_x4``
.. |fpga_attach_prereq| replace:: into an open PCIe slot in the machine. Also, ensure that the SW16 switches on the FPGA are set to ``0101`` to enable QSPI flashing over JTAG (i.e., ``position 1 = 0``, ``position 2 = 1``, ``position 3 = 0``, and ``position 4 = 1``. Having the switch set to the side of the position label indicates 0.)
.. |jtag_help| replace:: JTAG.
.. |nitefury_patch_xdma| replace:: The directory you are now in contains the XDMA kernel module. Now, let's build and install it:
.. include:: Xilinx-XDMA-Template.rst

View File

@ -1,7 +1,82 @@
Setting up your On-Premises Machine
===================================
.. |fpga_name| replace:: Xilinx Vitis-enabled U250
.. |vitis_version| replace:: 2022.1
.. |vitis_link| replace:: https://www.xilinx.com/products/design-tools/vitis/vitis-whats-new.html#20221
.. |platform_name| replace:: vitis
.. |tool_type| replace:: Xilinx XRT/Vitis
.. |example_var| replace:: XILINX_XRT
This guide walks you through setting up a single-node cluster (i.e. running FPGA bitstream builds and simulations on a single machine) for FireSim use.
.. |manager_machine| replace:: **Manager Machine**
.. |build_farm_machine| replace:: **Build Farm Machines**
.. |run_farm_machine| replace:: **Run Farm Machines**
Initial Setup/Installation
==============================
.. warning:: ⚠️ **We highly recommend using the XDMA-based U250 flow instead of this
Vitis-based flow. You can find the XDMA-based flow here:** :ref:`u250-standard-flow`.
The Vitis-based flow does not support DMA-based FireSim bridges (e.g.,
TracerV, Synthesizable Printfs, etc.), while the XDMA-based flows support
all FireSim features. If you're unsure, use the XDMA-based U250 flow
instead: :ref:`u250-standard-flow`
Background/Terminology
--------------------------
.. |mach_or_inst| replace:: Machine
.. |mach_or_inst_l| replace:: machines
.. |mach_details| replace:: your local desktop or server
.. |mach_or_inst2| replace:: local machines
.. |simple_setup| replace:: In the simplest setup, a single host machine (e.g. your desktop) can serve the function of all three of these: as the manager machine, the build farm machine (assuming Vivado is installed), and the run farm machine (assuming an FPGA is attached).
.. include:: ../../Terminology-Template.rst
FPGA and Tool Setup
------------------------------
Requirements and Installations
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
We require a base machine that is able to support a |fpga_name| and run Xilinx Vitis.
For the purposes of this guide, we assume you are running with a |fpga_name|.
Please refer to the minimum system requirements given in the following link: https://docs.xilinx.com/r/en-US/ug1301-getting-started-guide-alveo-accelerator-cards/Minimum-System-Requirements.
``sudo`` access is not needed on the machine except when the |fpga_name| and its corresponding software are installed.
Next, install the |fpga_name| as indicated: https://docs.xilinx.com/r/en-US/ug1301-getting-started-guide-alveo-accelerator-cards/Card-Installation-Procedures
We require the following programs/packages installed from the Xilinx website in addition to a physical |fpga_name| installation:
* Xilinx Vitis |vitis_version|
* Installation link: |vitis_link|
* Xilinx XRT and |fpga_name| board package (corresponding with Vitis |vitis_version|)
* Ensure you complete the "Installing the Deployment Software" and "Card Bring-Up and Validation" sections in the following link: https://docs.xilinx.com/r/en-US/ug1301-getting-started-guide-alveo-accelerator-cards/Installing-the-Deployment-Software
Setup Validation
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
After installing the |fpga_name| using the Xilinx instructions and installing the specific versions of Vitis/XRT, let's verify that the |fpga_name| can be used for emulations.
Ensure that you can run the following XRT commands without errors:
.. code-block:: bash
:substitutions:
xbutil examine # obtain the BDF associated with your installed |fpga_name|
xbutil validate --device <CARD_BDF_INSTALLED> --verbose
The ``xbutil validate`` command runs simple tests to ensure that the FPGA can be properly flashed with a bitstream by using XRT.
.. Warning:: Anytime the host computer is rebooted you may need to re-run parts of the setup process (i.e. re-flash the shell).
Before continuing to FireSim simulations after a host computer reboot, ensure that the previously mentioned ``xbutil`` command is successful.
Now you're ready to continue with other FireSim setup!
Setting up your On-Premises Machine
--------------------------------------
This guide will walk you through setting up a single node cluster (i.e. running FPGA bitstream builds and simulations on a single machine) for FireSim use.
This single machine will serve as the "Manager Machine" that acts as a "head" node that all work will be completed on.
Finally, ensure that the |tool_type| tools are sourced in your shell setup (i.e. ``.bashrc`` and/or ``.bash_profile``) so that any shell can use the corresponding programs.
@ -13,7 +88,7 @@ You can check this by ensuring that the output of the following command shows th
ssh localhost printenv
Other Miscellaneous Setup
-------------------------
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Additionally, you should be able to run ``ssh localhost`` without needing a password.
The FireSim manager program runs all commands by ``ssh``-ing into a BuildFarm/RunFarm machine given an IP address then running the command.
@ -51,7 +126,7 @@ Most likely you will need to follow the instructions `here <https://askubuntu.co
does not reside on an NFS mount.
Setting up the FireSim Repo
---------------------------
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
We're finally ready to fetch FireSim's sources. Run:
@ -140,16 +215,16 @@ path. Sourcing this the first time will take some time -- however each time afte
your FireSim directory and source this file again with the argument given.**
Final Environment Check
-----------------------
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Finally, lets verify that the environment variables are correctly setup for the tutorial. Run:
Finally, let's verify that the environment variables are correctly set up for the rest of this guide. Run:
.. code-block:: bash
echo $PATH
You should see that both the |tool_type| tools are located in the ``PATH`` and are **after**
the conda environment path. Next run:
the Conda environment path. Next run:
.. code-block:: bash
@ -168,7 +243,7 @@ Inspect that both the ``PATH`` and ``LD_LIBRARY_PATH`` are setup similarly to ru
locally (without ``ssh localhost``).
Completing Setup Using the Manager
----------------------------------
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The FireSim manager contains a command that will finish the rest of the FireSim setup process.
To run it, do the following:

View File

@ -0,0 +1,361 @@
Initial Setup/Installation
==============================
Background/Terminology
--------------------------
.. |manager_machine| replace:: **Manager Machine**
.. |build_farm_machine| replace:: **Build Farm Machines**
.. |run_farm_machine| replace:: **Run Farm Machines**
.. |mach_or_inst| replace:: Machine
.. |mach_or_inst_l| replace:: machines
.. |mach_details| replace:: your local desktop or server
.. |mach_or_inst2| replace:: local machines
.. |simple_setup| replace:: In the simplest setup, a single host machine (e.g. your desktop) can serve the function of all three of these: as the manager machine, the build farm machine (assuming Vivado is installed), and the run farm machine (assuming an FPGA is attached).
.. include:: ../../Terminology-Template.rst
System Setup
----------------------------------
The below sections outline what you need to install to run FireSim on each
machine type in a FireSim cluster. Note that the below three machine types
can all map to a single machine in your setup; in this case, you should follow
all the installation instructions on your single machine, without duplication
(i.e., don't re-run a step on the same machine if it is required on multiple
machine types).
.. warning::
**We highly recommend using Ubuntu 20.04 LTS as the host operating system for
all machine types in an on-premises setup, as this is the OS recommended by
Xilinx.**
1. Fix default ``.bashrc``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
**Machines:** Manager Machine, Run Farm Machines, Build Farm Machines.
We need various parts of the ``~/.bashrc`` file to execute even in non-interactive mode.
To do so, edit your ``~/.bashrc`` file so that the following section is removed:
.. code-block:: bash
# If not running interactively, don't do anything
case $- in
*i*) ;;
*) return;;
esac
2. Enable password-less sudo
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
**Machines:** Manager Machine and Run Farm Machines.
Enable passwordless sudo by running ``sudo visudo``, then adding
the following line at the end of the file, replacing ``YOUR_USERNAME_HERE``
with your actual username on the machine:
.. code-block:: bash
YOUR_USERNAME_HERE ALL=(ALL) NOPASSWD:ALL
Once you have done so, reboot the machines
and confirm that you are able to run ``sudo true`` without being
prompted for a password.
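As a quick check, you can clear any cached ``sudo`` credentials and confirm that ``sudo`` no longer prompts for a password:

.. code-block:: bash

   sudo -k                                    # drop any cached sudo credentials
   sudo true && echo "passwordless sudo OK"   # should print without prompting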
3. Install Vivado Lab and Cable Drivers
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
**Machines:** Run Farm Machines.
Go to the `Xilinx Downloads Website <https://www.xilinx.com/support/download.html>`_ and download ``Vivado 2023.1: Lab Edition - Linux``.
Extract the downloaded ``.tar.gz`` file, then:
.. code-block:: bash
cd [EXTRACTED VIVADO LAB DIRECTORY]
sudo ./installLibs.sh
sudo ./xsetup --batch Install --agree XilinxEULA,3rdPartyEULA --edition "Vivado Lab Edition (Standalone)"
This will have installed Vivado Lab to ``/tools/Xilinx/Vivado_Lab/2023.1/``.
For ease of use, add the following to the end of your ``~/.bashrc``:
.. code-block:: bash
source /tools/Xilinx/Vivado_Lab/2023.1/settings64.sh
Then, open a new terminal or source your ``~/.bashrc``.
Next, install the cable drivers like so:
.. code-block:: bash
cd /tools/Xilinx/Vivado_Lab/2023.1/data/xicom/cable_drivers/lin64/install_script/install_drivers/
sudo ./install_drivers
4. Install the Xilinx XDMA and XVSEC drivers
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
**Machines:** Run Farm Machines.
First, run the following to clone the XDMA kernel module source:
.. code-block:: bash
cd ~/ # or any directory you would like to work from
git clone https://github.com/Xilinx/dma_ip_drivers
cd dma_ip_drivers
git checkout 0e8d321
cd XDMA/linux-kernel/xdma
|nitefury_patch_xdma|
.. code-block:: bash
sudo make install
Now, test that the module can be inserted:
.. code-block:: bash
sudo insmod /lib/modules/$(uname -r)/extra/xdma.ko poll_mode=1
lsmod | grep -i xdma
The second command above should have produced output indicating that the XDMA
driver is loaded.
Next, we will do the same for the XVSEC driver, which is pulled from a separate
repository due to kernel version incompatibility:
.. code-block:: bash
cd ~/ # or any directory you would like to work from
git clone https://github.com/paulmnt/dma_ip_drivers dma_ip_drivers_xvsec
cd dma_ip_drivers_xvsec
git checkout 302856a
cd XVSEC/linux-kernel/
make clean all
sudo make install
Now, test that the module can be inserted:
.. code-block:: bash
sudo modprobe xvsec
lsmod | grep -i xvsec
The second command above should have produced output indicating that the XVSEC
driver is loaded.
Also, make sure you get output for the following (usually, ``/usr/local/sbin/xvsecctl``):
.. code-block:: bash
which xvsecctl
5. Install your FPGA(s)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
**Machines:** Run Farm Machines.
Now, let's attach your |fpga_name|_ FPGA(s) to your Run Farm Machines:
1. Power off your machine.
2. Insert your |fpga_name|_ FPGA |fpga_attach_prereq|
3. Attach any additional power cables between the FPGA and the host machine. |fpga_power_info|
4. Attach the USB cable between the FPGA and the host machine for |jtag_help|
5. Boot the machine.
6. Obtain an existing bitstream tar file for your FPGA by opening the ``bitstream_tar`` URL listed
under |hwdb_entry_name| in the following file: :gh-file-ref:`deploy/sample-backup-configs/sample_config_hwdb.yaml`.
7. Extract the ``.tar.gz`` file to a known location. Inside, you will find
three files; the one we are currently interested in will be called
``firesim.mcs``. Note the full path of this ``firesim.mcs`` file for the
next step.
8. Open Vivado Lab and click "Open Hardware Manager". Then click "Open Target" and "Auto connect".
9. Right-click on your FPGA and click "Add Configuration Memory Device". For a |fpga_name|_, choose |fpga_spi_part_number|
as the Configuration Memory Part. Click "OK" when prompted to program the configuration memory device.
10. For Configuration file, choose the ``firesim.mcs`` file from step 7.
11. Uncheck "Verify" and click OK.
12. When programming the configuration memory device is completed, power off your machine fully (i.e., the FPGA should completely lose power).
13. Cold-boot the machine. A cold boot is required for the FPGA to be successfully re-programmed from its flash.
14. Once the machine has booted, run the following to ensure that your FPGA is set up properly:
.. code-block:: bash
lspci -vvv -d 10ee:903f
If successful, this should show an entry with Xilinx as the manufacturer and
two memory regions. There should be one entry
for each FPGA you've added to the Run Farm Machine.
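One quick way to confirm that the number of detected devices matches the number of FPGAs you installed is to count the entries reported for the same device ID:

.. code-block:: bash

   # should print the number of FPGAs installed in this machine
   lspci -d 10ee:903f | wc -l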
6. Install sshd
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
**Machines:** Manager Machine, Run Farm Machines, and Build Farm Machines
On Ubuntu, install ``openssh-server`` like so:
.. code-block:: bash
sudo apt install openssh-server
7. Set up SSH Keys
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
**Machines:** Manager Machine.
On the manager machine, generate a keypair that you will use to ssh from the
manager machine into the manager machine itself (``ssh localhost``), the run farm machines,
and the build farm machines:
.. code-block:: bash
cd ~/.ssh
ssh-keygen -t ed25519 -C "firesim.pem" -f firesim.pem
[create passphrase]
Next, add this key to the ``authorized_keys`` file on the manager machine:
.. code-block:: bash
cd ~/.ssh
cat firesim.pem.pub >> authorized_keys
chmod 0600 authorized_keys
You should also copy this public key into the ``~/.ssh/authorized_keys`` files
on all of your Run Farm and Build Farm Machines.
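One way to do this (assuming you can still ssh into those machines with a password; the username and hostname below are placeholders) is with ``ssh-copy-id``:

.. code-block:: bash

   # repeat for each Run Farm and Build Farm Machine
   ssh-copy-id -i ~/.ssh/firesim.pem.pub YOUR_USER@RUN_OR_BUILD_FARM_IP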
Returning to the Manager Machine, let's set up an ``ssh-agent``:
.. code-block:: bash
cd ~/.ssh
ssh-agent -s > AGENT_VARS
source AGENT_VARS
ssh-add firesim.pem
If you reboot your machine (or otherwise kill the ``ssh-agent``), you
will need to re-run the above four commands before using FireSim.
If you open a new terminal (and ``ssh-agent`` is already running),
you can simply run ``source ~/.ssh/AGENT_VARS``.
Finally, confirm that you can now ``ssh localhost`` and ssh into your Run Farm
and Build Farm Machines without being prompted for a passphrase.
8. Install Guestmount
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
**Machines:** Manager Machine and Run Farm Machines
Next, install the ``guestmount`` program:
.. code-block:: bash
sudo chmod +r /boot/vmlinuz-*
sudo apt install libguestfs-tools
sudo chmod +r /boot/vmlinuz-*
This is needed by a variety of FireSim steps that mount disk images in order to copy files (e.g., simulation results) into and out of the images.
Using ``guestmount`` instead of the standard mount commands allows for users to perform these operations without requiring ``sudo`` (after this initial installation).
Let's double check that ``guestmount`` is functioning correctly on your system. To do so, we'll generate a dummy filesystem image:
.. code-block:: bash
cd ~/ # or any scratch area
mkdir sysroot-testing
cd sysroot-testing
mkdir sysroot
dd if=/dev/urandom of=sysroot/myfile bs=1024 count=1024
virt-make-fs --format=qcow2 --type=ext2 sysroot sysroot.qcow2
Ensure that this command completed without producing an error and that the output file ``sysroot.qcow2`` exists.
Assuming all of this completed successfully (i.e., no error from ``virt-make-fs``), you can delete the ``sysroot-testing`` directory,
since we will not need it any longer.
.. warning:: Due to prior issues we've seen with ``guestmount``, ensure that your FireSim repository
does not reside on an NFS mount.
9. Check Hard File Limit
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
**Machine:** Manager Machine
Check the output of the following command:
.. code-block:: bash
ulimit -Hn
If the result is greater than or equal to 16384, you can continue on to "Setting up the FireSim Repo". Otherwise, run:
.. code-block:: bash
echo "* hard nofile 16384" | sudo tee --append /etc/security/limits.conf
Then, reboot your machine.
10. Verify Run Farm Machine environment
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
**Machines:** Manager Machine and Run Farm Machines
Finally, let's ensure that the |tool_type_lab| tools are properly sourced in
your shell setup (i.e. ``.bashrc``) so that any shell on your Run Farm Machines
can use the corresponding programs. The environment variables should be
visible to any non-interactive shells that are spawned.
You can check this by running the following on the Manager Machine,
replacing ``RUN_FARM_IP`` with ``localhost`` if your Run Farm machine
and Manager machine are the same machine, or replacing it with the Run Farm
machine's IP address if they are different machines.
.. code-block:: bash
ssh RUN_FARM_IP printenv
Ensure that the output of the command shows that the |tool_type_lab| tools are
present in the printed environment variables (i.e., ``PATH``).
If you have multiple Run Farm machines, you should repeat this process for
each Run Farm machine, replacing ``RUN_FARM_IP`` with a different Run Farm Machine's
IP address.
Congratulations! We've now set up your machine/cluster to run simulations. Click Next to continue with the guide.

View File

@ -0,0 +1,14 @@
|fpga_name| Getting Started Guide
============================================================================================
The getting started guides that follow this page will walk you through the complete (|flow_name|) flow for
getting an example FireSim simulation up and running using an on-premises |fpga_name_short|_ FPGA,
from scratch.
First, we'll set up your environment, then run a simulation of a single RISC-V Rocket-based
SoC booting Linux, using a pre-built bitstream. Next, we'll show you how to build your own FPGA
bitstreams for a custom hardware design. After you complete these guides, you can
look at the "Advanced Docs" in the sidebar to the left.
Here's a high-level outline of what we'll be doing in this guide:

View File

@ -0,0 +1,15 @@
.. |fpga_name| replace:: RHS Research Nitefury II XDMA-based
.. |fpga_name_short| replace:: RHS Research Nitefury II
.. _fpga_name_short: https://rhsresearch.com/collections/rhs-public/products/nitefury-xilinx-artix-fpga-kit-in-nvme-ssd-form-factor-2280-key-m
.. |flow_name| replace:: XDMA-based
.. |build_type| replace:: Xilinx Vivado
.. include:: Intro-Template.rst
.. toctree::
:maxdepth: 3
Initial-Setup/RHS-Research-Nitefury-II
Repo-Setup/RHS-Research-Nitefury-II
Running-Simulations/Running-Single-Node-Simulation-RHS-Research-Nitefury-II
Building-a-FireSim-Bitstream/RHS-Research-Nitefury-II

View File

@ -0,0 +1,11 @@
.. |fpga_name| replace:: RHS Research Nitefury II
.. _fpga_name: https://rhsresearch.com/collections/rhs-public/products/nitefury-xilinx-artix-fpga-kit-in-nvme-ssd-form-factor-2280-key-m
.. |hwdb_entry_name| replace:: ``nitefury_firesim_rocket_singlecore_no_nic``
.. |platform_name| replace:: rhsresearch_nitefury_ii
.. |board_name| replace:: nitefury_ii
.. |tool_type| replace:: Xilinx Vivado
.. |tool_type_lab| replace:: Xilinx Vivado Lab
.. |example_var| replace:: ``XILINX_VIVADO``
.. |deploy_manager_code| replace:: ``RHSResearchNitefuryIIInstanceDeployManager``
.. include:: Xilinx-XDMA-Template.rst

View File

@ -1,7 +1,11 @@
.. |fpga_name| replace:: Xilinx Alveo U250
.. _fpga_name: https://www.xilinx.com/products/boards-and-kits/alveo/u250.html
.. |hwdb_entry_name| replace:: ``alveo_u250_firesim_rocket_singlecore_no_nic``
.. |platform_name| replace:: xilinx_alveo_u250
.. |board_name| replace:: au250
.. |tool_type| replace:: Xilinx Vivado
.. |tool_type_lab| replace:: Xilinx Vivado Lab
.. |example_var| replace:: ``XILINX_VIVADO``
.. |deploy_manager_code| replace:: ``XilinxAlveoU250InstanceDeployManager``
.. include:: Xilinx-Alveo-Template-Part2.rst
.. include:: Xilinx-XDMA-Template.rst

View File

@ -1,7 +1,11 @@
.. |fpga_name| replace:: Xilinx Alveo U280
.. _fpga_name: https://www.xilinx.com/products/boards-and-kits/alveo/u280.html
.. |hwdb_entry_name| replace:: ``alveo_u280_firesim_rocket_singlecore_no_nic``
.. |platform_name| replace:: xilinx_alveo_u280
.. |board_name| replace:: au280
.. |tool_type| replace:: Xilinx Vivado
.. |tool_type_lab| replace:: Xilinx Vivado Lab
.. |example_var| replace:: ``XILINX_VIVADO``
.. |deploy_manager_code| replace:: ``XilinxAlveoU280InstanceDeployManager``
.. include:: Xilinx-Alveo-Template-Part2.rst
.. include:: Xilinx-XDMA-Template.rst

View File

@ -0,0 +1,11 @@
.. |fpga_name| replace:: Xilinx VCU118
.. _fpga_name: https://www.xilinx.com/products/boards-and-kits/vcu118.html
.. |hwdb_entry_name| replace:: ``xilinx_vcu118_firesim_rocket_singlecore_4GB_no_nic``
.. |platform_name| replace:: xilinx_vcu118
.. |board_name| replace:: vcu118
.. |tool_type| replace:: Xilinx Vivado
.. |tool_type_lab| replace:: Xilinx Vivado Lab
.. |example_var| replace:: ``XILINX_VIVADO``
.. |deploy_manager_code| replace:: ``XilinxVCU118InstanceDeployManager``
.. include:: Xilinx-XDMA-Template.rst

View File

@ -0,0 +1,157 @@
FireSim Repo Setup
==============================
.. |manager_machine| replace:: **Manager Machine**
.. |build_farm_machine| replace:: **Build Farm Machines**
.. |run_farm_machine| replace:: **Run Farm Machines**
.. |mach_or_inst| replace:: Machine
.. |mach_or_inst_l| replace:: machines
.. |mach_details| replace:: your local desktop or server
.. |mach_or_inst2| replace:: local machines
.. |simple_setup| replace:: In the simplest setup, a single host machine (e.g. your desktop) can serve the function of all three of these: as the manager machine, the build farm machine (assuming Vivado is installed), and the run farm machine (assuming an FPGA is attached).
Next, we'll clone FireSim on your Manager Machine and run a few final setup steps
using scripts in the repo.
Setting up the FireSim Repo
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
**Machine:** From this point forward, run everything on your Manager Machine, unless otherwise instructed.
We're finally ready to fetch FireSim's sources. This should be done on your Manager Machine. Run:
.. code-block:: bash
:substitutions:
git clone https://github.com/firesim/firesim
cd firesim
# checkout latest official firesim release
# note: this may not be the latest release if the documentation version != "stable"
git checkout |overall_version|
Next, we will bootstrap the machine by installing Miniforge Conda, our software package manager, and set up a default software environment using Conda.
You should select a location where you want Conda to be installed. This can be an existing Miniforge Conda
install, or a directory (that does not exist) where you would like Conda to be installed.
Replace ``REPLACE_ME_USER_CONDA_LOCATION`` in the command below with your chosen path and run it:
.. code-block:: bash
./scripts/machine-launch-script.sh --prefix REPLACE_ME_USER_CONDA_LOCATION
Among other setup steps, the script will install Miniforge Conda (https://github.com/conda-forge/miniforge) and create a default environment called ``firesim``.
When prompted, you should allow the Conda installer to modify your ``~/.bashrc`` to automatically place you in the Conda environment when opening a new shell.
.. warning::
**Once the** ``machine-launch-script.sh`` **completes, ensure that you log out of the
machine / exit out of the terminal so that the** ``.bashrc`` **modifications can apply**.
After re-logging back into the machine, you should be in the ``firesim`` Conda environment.
Verify this by running:
.. code-block:: bash
conda env list
If you are not in the ``firesim`` environment and the environment exists, you can run the following to "activate" or enter the environment:
.. code-block:: bash
conda activate firesim
Next, return to your clone of the FireSim repo and run:
.. code-block:: bash
./build-setup.sh
The ``build-setup.sh`` script will validate that you are on a tagged branch,
otherwise it will prompt for confirmation. Then, it will automatically
initialize submodules and install the RISC-V tools and other dependencies.
Once ``build-setup.sh`` completes, run:
.. code-block:: bash
source sourceme-manager.sh --skip-ssh-setup
This will perform various environment setup steps, such as adding the RISC-V tools to your
path. Sourcing this the first time will take some time -- however each subsequent sourcing should be instantaneous.
.. warning::
**Every time you want to use FireSim, you should** ``cd`` **into
your FireSim directory and source** ``sourceme-manager.sh`` **again with the arguments shown above.**
Initializing FireSim Config Files
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The FireSim manager contains a command that will automatically provide a fresh
set of configuration files for a given platform.
To run it, do the following:
.. code-block:: bash
:substitutions:
firesim managerinit --platform |platform_name|
This will produce several initial configuration files, which we will edit in the next
section.
Configuring the FireSim manager to understand your Run Farm Machine setup
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
As our final setup step, we will edit FireSim's configuration files so that the
manager understands our Run Farm machine setup and the set of FPGAs attached to
each Run Farm machine.
Inside the cloned FireSim repo, open up the ``deploy/config_runtime.yaml`` file and set the following keys to the indicated values:
* ``default_simulation_dir`` should point to a temporary simulation directory of your choice on your Run Farm Machines. This is the directory that simulations will run out of.
* ``run_farm_hosts_to_use`` should be a list of ``- IP-address: machine_spec`` pairs, one pair for each of your Run Farm Machines. ``IP-address`` should be the IP address or hostname
of the system (that the Manager Machine can use to ssh into the Run Farm Machine) and the ``machine_spec`` should be a value from ``run_farm_host_specs`` in :gh-file-ref:`deploy/run-farm-recipes/externally_provisioned.yaml`. Each spec describes the number of FPGAs attached to a system and other properties about the system.
Here are two examples of how this could be configured:
**Example 1**: Your Run Farm has a single machine with one FPGA attached and this machine is also your Manager Machine:
.. code-block:: yaml
...
run_farm_hosts_to_use:
- localhost: one_fpgas_spec
...
**Example 2**: You have two Run Farm Machines (separate from your Manager Machine). The Run Farm Machines are accessible from your manager machine with the hostnames ``firesim-runner1.berkeley.edu`` and ``firesim-runner2.berkeley.edu``, each with eight FPGAs attached.
.. code-block:: yaml
...
run_farm_hosts_to_use:
- firesim-runner1.berkeley.edu: eight_fpgas_spec
- firesim-runner2.berkeley.edu: eight_fpgas_spec
...
* ``default_hw_config`` should be |hwdb_entry_name|
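For example, within the ``target_config`` section this ends up as a single line (the entry name below is a placeholder; use the entry named above and leave the other keys at their defaults):

.. code-block:: yaml

   target_config:
       # ... other keys unchanged ...
       default_hw_config: YOUR_HWDB_ENTRY_NAME_FROM_ABOVE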
Then, run the following command so that FireSim can generate a mapping from the FPGA ID used for JTAG programming to the PCIe ID used to run simulations. If you ever change the physical layout of the machine (e.g., which PCIe slot the FPGAs are attached to), you will need to re-run this command.
.. code-block:: bash
:substitutions:
firesim enumeratefpgas
This will generate a database file in ``/opt/firesim-db.json`` on each Run Farm Machine that has this mapping.
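If you would like to confirm that the file was generated, you can inspect it on each Run Farm Machine (replace ``RUN_FARM_IP`` with ``localhost`` if your Run Farm Machine and Manager Machine are the same machine):

.. code-block:: bash

   ssh RUN_FARM_IP cat /opt/firesim-db.json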
Now you're ready to run your first FireSim simulation! Hit Next to continue with the guide.

View File

@ -0,0 +1,241 @@
Building and Deploying simulation infrastructure to the Run Farm Machines
----------------------------------------------------------------------------------
The manager automates the process of building and deploying all
components necessary to run your simulation on the Run Farm, including
programming FPGAs. To tell the manager to setup all of our simulation
infrastructure, run the following:
.. code-block:: bash
firesim infrasetup
For a complete run, you should expect output like the following:
.. code-block:: bash
$ firesim infrasetup
FireSim Manager. Docs: https://docs.fires.im
Running: infrasetup
Building FPGA software driver.
...
[localhost] Checking if host instance is up...
[localhost] Copying FPGA simulation infrastructure for slot: 0.
[localhost] Clearing all FPGA Slots.
The full log of this run is:
.../firesim/deploy/logs/2023-03-06--01-22-46-infrasetup-35ZP4WUOX8KUYBF3.log
Many of these tasks will take several minutes, especially on a clean copy of
the repo. The console output here contains the "user-friendly" version of the
output. If you want to see detailed progress as it happens, ``tail -f`` the
latest logfile in ``firesim/deploy/logs/``.
At this point, our single Run Farm machine has all the infrastructure
necessary to run a simulation, so let's launch our simulation!
Running the simulation
-----------------------------------------
Finally, let's run our simulation! To do so, run:
.. code-block:: bash
firesim runworkload
This command boots up a simulation and prints out the live status of the simulated
nodes every 10s. When you do this, you will initially see output like:
.. code-block:: bash
$ firesim runworkload
FireSim Manager. Docs: https://docs.fires.im
Running: runworkload
Creating the directory: .../firesim/deploy/results-workload/2023-03-06--01-25-34-linux-uniform/
[localhost] Checking if host instance is up...
[localhost] Starting FPGA simulation for slot: 0.
If you don't look quickly, you might miss it, since it will get replaced with a
live status page:
.. code-block:: text
FireSim Simulation Status @ 2018-05-19 00:38:56.062737
--------------------------------------------------------------------------------
This workload's output is located in:
.../firesim/deploy/results-workload/2018-05-19--00-38-52-linux-uniform/
This run's log is located in:
.../firesim/deploy/logs/2018-05-19--00-38-52-runworkload-JS5IGTV166X169DZ.log
This status will update every 10s.
--------------------------------------------------------------------------------
Instances
--------------------------------------------------------------------------------
Hostname/IP: localhost | Terminated: False
--------------------------------------------------------------------------------
Simulated Switches
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
Simulated Nodes/Jobs
--------------------------------------------------------------------------------
Hostname/IP: localhost | Job: linux-uniform0 | Sim running: True
--------------------------------------------------------------------------------
Summary
--------------------------------------------------------------------------------
1/1 instances are still running.
1/1 simulations are still running.
--------------------------------------------------------------------------------
This will only exit once all of the simulated nodes have powered off. So, let's let it
run and open another terminal on the manager machine. From there, ``cd`` into
your FireSim directory again and ``source sourceme-manager.sh --skip-ssh-setup``.
Next, let's ``ssh`` into the Run Farm machine. If your Run Farm and Manager Machines are
the same, replace ``RUN_FARM_IP_OR_HOSTNAME`` with ``localhost``, otherwise replace it
with your Run Farm Machine's IP or hostname.
.. code-block:: bash
source ~/.ssh/AGENT_VARS
ssh RUN_FARM_IP_OR_HOSTNAME
Next, we can attach directly to the console of the simulated system using ``screen``. To do so, run:
.. code-block:: bash
screen -r fsim0
Voila! You should now see Linux booting on the simulated system and then be prompted
with a Linux login prompt, like so:
.. code-block:: bash
[truncated Linux boot output]
[ 0.020000] VFS: Mounted root (ext2 filesystem) on device 254:0.
[ 0.020000] devtmpfs: mounted
[ 0.020000] Freeing unused kernel memory: 140K
[ 0.020000] This architecture does not have kernel memory protection.
mount: mounting sysfs on /sys failed: No such device
Starting logging: OK
Starting mdev...
mdev: /sys/dev: No such file or directory
modprobe: can't change directory to '/lib/modules': No such file or directory
Initializing random number generator... done.
Starting network: ip: SIOCGIFFLAGS: No such device
ip: can't find device 'eth0'
FAIL
Starting dropbear sshd: OK
Welcome to Buildroot
buildroot login:
You can ignore the messages about the network -- that is expected because we
are simulating a design without a NIC.
Now, you can log in to the system! The username is ``root`` and there is no password.
At this point, you should be presented with a regular console,
where you can type commands into the simulation and run programs. For example:
.. code-block:: bash
Welcome to Buildroot
buildroot login: root
Password:
# uname -a
Linux buildroot 4.15.0-rc6-31580-g9c3074b5c2cd #1 SMP Thu May 17 22:28:35 UTC 2018 riscv64 GNU/Linux
#
At this point, you can run workloads as you'd like. To finish off this guide,
let's power off the simulated system and see what the manager does. To do so,
in the console of the simulated system, run ``poweroff -f``:
.. code-block:: bash
Welcome to Buildroot
buildroot login: root
Password:
# uname -a
Linux buildroot 4.15.0-rc6-31580-g9c3074b5c2cd #1 SMP Thu May 17 22:28:35 UTC 2018 riscv64 GNU/Linux
# poweroff -f
You should see output like the following from the simulation console:
.. code-block:: bash
# poweroff -f
[ 12.456000] reboot: Power down
Power off
time elapsed: 468.8 s, simulation speed = 88.50 MHz
*** PASSED *** after 41492621244 cycles
Runs 41492621244 cycles
[PASS] FireSim Test
SEED: 1526690334
Script done, file is uartlog
[screen is terminating]
You'll also notice that the manager polling loop exited! You'll see output like this
from the manager:
.. code-block:: text
FireSim Simulation Status @ 2018-05-19 00:46:50.075885
--------------------------------------------------------------------------------
This workload's output is located in:
.../firesim/deploy/results-workload/2018-05-19--00-38-52-linux-uniform/
This run's log is located in:
.../firesim/deploy/logs/2018-05-19--00-38-52-runworkload-JS5IGTV166X169DZ.log
This status will update every 10s.
--------------------------------------------------------------------------------
Instances
--------------------------------------------------------------------------------
Hostname/IP: 172.30.2.174 | Terminated: False
--------------------------------------------------------------------------------
Simulated Switches
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
Simulated Nodes/Jobs
--------------------------------------------------------------------------------
Hostname/IP: 172.30.2.174 | Job: linux-uniform0 | Sim running: False
--------------------------------------------------------------------------------
Summary
--------------------------------------------------------------------------------
1/1 instances are still running.
0/1 simulations are still running.
--------------------------------------------------------------------------------
FireSim Simulation Exited Successfully. See results in:
.../firesim/deploy/results-workload/2018-05-19--00-38-52-linux-uniform/
The full log of this run is:
.../firesim/deploy/logs/2018-05-19--00-38-52-runworkload-JS5IGTV166X169DZ.log
If you take a look at the workload output directory given in the manager output (in this case, ``.../firesim/deploy/results-workload/2018-05-19--00-38-52-linux-uniform/``), you'll see the following:
.. code-block:: bash
$ ls -la firesim/deploy/results-workload/2018-05-19--00-38-52-linux-uniform/*/*
-rw-rw-r-- 1 centos centos 797 May 19 00:46 linux-uniform0/memory_stats.csv
-rw-rw-r-- 1 centos centos 125 May 19 00:46 linux-uniform0/os-release
-rw-rw-r-- 1 centos centos 7316 May 19 00:46 linux-uniform0/uartlog
What are these files? They are specified to the manager in a configuration file
(:gh-file-ref:`deploy/workloads/linux-uniform.json`) as files that we want
automatically copied back from the Run Farm Machine into the ``results-workload`` directory on our manager machine, which is
useful for running benchmarks automatically. The
:ref:`defining-custom-workloads` section describes this process in detail.
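As a rough sketch only (see the actual :gh-file-ref:`deploy/workloads/linux-uniform.json` and the :ref:`defining-custom-workloads` section for the exact schema), such a workload definition names the boot binary and root filesystem to use and the files to copy back, along these lines:

.. code-block:: text

   // hypothetical sketch -- consult the real file for the exact keys
   {
       "benchmark_name": "linux-uniform",
       "common_bootbinary": "br-base-bin",
       "common_rootfs": "br-base.img",
       "common_simulation_outputs": ["uartlog", "memory_stats.csv"]
   }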
Congratulations on running your first FireSim simulation! At this point, you can
check-out some of the advanced features of FireSim in the sidebar to the left.
For example, we expect that many people will be interested in the ability to
automatically run the SPEC17 benchmarks: :ref:`spec-2017`.
Click Next if you'd like to continue on to building your own bitstreams.

View File

@ -0,0 +1,39 @@
Running a Single Node Simulation
===================================
Now that we've completed all the basic setup steps, it's time to run
a simulation! In this section, we will simulate a single target node, for which
we will use a single |fpga_type|.
**Make sure you have sourced** ``sourceme-manager.sh --skip-ssh-setup`` **before running any of these commands.**
Building target software
------------------------
In this guide, we'll boot Linux on our
simulated node. To do so, we'll need to build our RISC-V SoC-compatible
Linux distro. For this guide, we will use a simple buildroot-based
distribution. We can build the Linux distribution like so:
.. code-block:: bash
# assumes you already cd'd into your firesim repo
# and sourced sourceme-manager.sh
#
# then:
cd sw/firesim-software
./init-submodules.sh
./marshal -v build br-base.json
Once this is completed, you'll have the following files:
- ``YOUR_FIRESIM_REPO/sw/firesim-software/images/firechip/br-base/br-base-bin`` - a bootloader + Linux
kernel image for the RISC-V SoC we will simulate.
- ``YOUR_FIRESIM_REPO/sw/firesim-software/images/firechip/br-base/br-base.img`` - a disk image for
the RISC-V SoC we will simulate
These files will be used to form base images to either build more complicated
workloads (see the :ref:`defining-custom-workloads` section) or directly as a
basic, interactive Linux distribution.
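To sanity-check that the build produced these files, you can list the output directory (path as given above):

.. code-block:: bash

   ls -lh YOUR_FIRESIM_REPO/sw/firesim-software/images/firechip/br-base/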

View File

@ -0,0 +1,7 @@
.. |fpga_type| replace:: RHS Research Nitefury II
.. |deploy_manager| replace:: RHSResearchNitefuryIIInstanceDeployManager
.. |deploy_manager_code| replace:: ``RHSResearchNitefuryIIInstanceDeployManager``
.. |runner| replace:: Xilinx Vivado
.. |hwdb_entry_name| replace:: ``nitefury_firesim_rocket_singlecore_no_nic``
.. include:: Running-Single-Node-Simulation-Template.rst

View File

@ -0,0 +1,45 @@
.. include:: Running-Sims-Top-Template.rst
Setting up the manager configuration
-------------------------------------
All runtime configuration options for the manager are located in
``YOUR_FIRESIM_REPO/deploy/config_runtime.yaml``. In this guide, we will explain only the
parts of this file necessary for our purposes. You can find full descriptions of
all of the parameters in the :ref:`manager-configuration-files` section.
Based on the changes we made earlier, this file will already have everything set
correctly to run a simulation.
Below we'll highlight a few of these lines to explain what is happening:
* At the top, you'll notice the ``run_farm`` mapping, which describes and specifies the machines to run simulations on.
* By default, we'll be using a ``base_recipe`` of ``run-farm-recipes/externally_provisioned.yaml``, which means that our
Run Farm machines are pre-configured, and do not require the manager to dynamically launch/terminate them (e.g., as we
would do for cloud instances).
* The ``default_platform`` has automatically been set for our FPGA board, to |deploy_manager_code|.
* The ``default_simulation_dir`` is the directory on the Run Farm Machines where simulations will run out of. The default is likely fine, but you can change it to any directory you have access to on the Run Farm machines.
* ``run_farm_hosts_to_use`` should be a list of ``- IP-address: machine_spec`` pairs,
one pair for each of your Run Farm Machines. ``IP-address`` should be the IP address
or hostname of the system (that the Manager Machine can use to ssh into the Run Farm
Machine) and the ``machine_spec`` should be a value from ``run_farm_host_specs``
in :gh-file-ref:`deploy/run-farm-recipes/externally_provisioned.yaml`. Each spec
describes the number of FPGAs attached to a system and other properties about the system. We configured this already in the previous step.
* The ``target_config`` section describes the system that we'd like to simulate.
* ``topology: no_net_config`` indicates that we're running simulations with no network between them.
* ``no_net_num_nodes: 1`` indicates that we'll be running a simulation of a single SoC.
* The ``default_hw_config`` will be set to a pre-built design (for our FPGA, |hwdb_entry_name|) with a single RISC-V Rocket core. This is usually not set by default, but we already set it in the previous step.
* The ``workload`` section describes the workload that we'd like to run on our simulated systems. In this case, we will leave it as the default, which will boot Linux on all SoCs in the simulation.
.. include:: Running-Sims-Bottom-Template.rst

View File

@ -0,0 +1,96 @@
.. include:: Running-Sims-Top-Template.rst
Setting up the manager configuration
------------------------------------------------------------
All runtime configuration options for the manager are set in a file called
``firesim/deploy/config_runtime.yaml``. In this guide, we will explain only the
parts of this file necessary for our purposes. You can find full descriptions of
all of the parameters in the :ref:`manager-configuration-files` section.
If you open up this file, you will see the following default config (assuming
you have not modified it):
.. include:: DOCS_EXAMPLE_config_runtime.yaml
:code: yaml
We'll need to modify a couple of these lines.
First, let's tell the manager to use the single |fpga_type| FPGA.
You'll notice the ``run_farm`` mapping, which describes and specifies the machines to run simulations on.
First notice that the ``base_recipe`` maps to ``run-farm-recipes/externally_provisioned.yaml``.
This indicates to the FireSim manager that the machines allocated to run simulations will be provided by the user through IP addresses
instead of automatically launched and allocated (e.g. launching instances on-demand in AWS).
Let's modify the ``default_platform`` to be |deploy_manager_code| so that we can launch simulations using |runner|.
Next, modify the ``default_simulation_dir`` to a directory in which you want to store temporary simulation collateral.
When running simulations, this directory is used to store any temporary files that the simulator creates (e.g. a uartlog emitted by a Linux simulation).
Next, let's modify the ``run_farm_hosts_to_use`` mapping.
This maps IP addresses (e.g. ``localhost``) to a description/specification of the simulation machine.
In this case, we have only one |fpga_type| FPGA, so we will change the description of ``localhost`` to ``one_fpga_spec``.
Now, let's verify that the ``target_config`` mapping will model the correct target design.
By default, it is set to model a single-node with no network.
It should look like the following:
.. code-block:: yaml
target_config:
topology: no_net_config
no_net_num_nodes: 1
link_latency: 6405
switching_latency: 10
net_bandwidth: 200
profile_interval: -1
# This references a section from config_hwdb.yaml
# In homogeneous configurations, use this to set the hardware config deployed
# for all simulators
default_hw_config: firesim_rocket_quadcore_no_nic_l2_llc4mb_ddr3
Note ``topology`` is set to
``no_net_config``, indicating that we do not want a network. Then,
``no_net_num_nodes`` is set to ``1``, indicating that we only want to simulate
one node. Lastly, the ``default_hw_config`` is
``firesim_rocket_quadcore_no_nic_l2_llc4mb_ddr3``.
Let's modify the ``default_hw_config`` (the target design) to "|hwdb_entry_name|".
This new hardware configuration does not
have a NIC and is pre-built for the |fpga_type| FPGA.
This hardware configuration models a Single-core Rocket Chip SoC and **no** network interface card.
We will leave the ``workload`` mapping unchanged here, since we do
want to run the buildroot-based Linux on our simulated system. The ``terminate_on_completion``
feature is an advanced feature that you can learn more about in the
:ref:`manager-configuration-files` section.
As a final sanity check, in the mappings we changed, the ``config_runtime.yaml`` file should now look like this (with ``PATH_TO_SIMULATION_AREA`` replaced with your simulation collateral temporary directory):
.. code-block:: text
:substitutions:
run_farm:
base_recipe: run-farm-recipes/externally_provisioned.yaml
recipe_arg_overrides:
default_platform: |deploy_manager|
default_simulation_dir: <PATH_TO_SIMULATION_AREA>
run_farm_hosts_to_use:
- localhost: one_fpga_spec
target_config:
topology: no_net_config
no_net_num_nodes: 1
link_latency: 6405
switching_latency: 10
net_bandwidth: 200
profile_interval: -1
default_hw_config: |hwdb_entry_name|
plusarg_passthrough: ""
workload:
workload_name: linux-uniform.json
terminate_on_completion: no
suffix_tag: null
.. include:: Running-Sims-Bottom-Template.rst

View File

@ -2,7 +2,6 @@
.. |deploy_manager| replace:: XilinxAlveoU250InstanceDeployManager
.. |deploy_manager_code| replace:: ``XilinxAlveoU250InstanceDeployManager``
.. |runner| replace:: Xilinx Vivado
.. |hwdb_entry| replace:: alveo_u250_firesim_rocket_singlecore_no_nic
.. |quintuplet| replace:: xilinx_alveo_u250-firesim-FireSim-FireSimRocketConfig-BaseXilinxAlveoConfig
.. |hwdb_entry_name| replace:: alveo_u250_firesim_rocket_singlecore_no_nic
.. include:: Running-Single-Node-Simulation-Template.rst

View File

@ -2,7 +2,6 @@
.. |deploy_manager| replace:: XilinxAlveoU280InstanceDeployManager
.. |deploy_manager_code| replace:: ``XilinxAlveoU280InstanceDeployManager``
.. |runner| replace:: Xilinx Vivado
.. |hwdb_entry| replace:: alveo_u280_firesim_rocket_singlecore_no_nic
.. |quintuplet| replace:: xilinx_alveo_u280-firesim-FireSim-FireSimRocketConfig-BaseXilinxAlveoConfig
.. |hwdb_entry_name| replace:: alveo_u280_firesim_rocket_singlecore_no_nic
.. include:: Running-Single-Node-Simulation-Template.rst

View File

@ -0,0 +1,7 @@
.. |fpga_type| replace:: Xilinx VCU118
.. |deploy_manager| replace:: XilinxVCU118InstanceDeployManager
.. |deploy_manager_code| replace:: ``XilinxVCU118InstanceDeployManager``
.. |runner| replace:: Xilinx Vivado
.. |hwdb_entry_name| replace:: ``xilinx_vcu118_firesim_rocket_singlecore_4GB_no_nic``
.. include:: Running-Single-Node-Simulation-Template.rst

View File

@ -2,13 +2,16 @@
.. |deploy_manager| replace:: VitisInstanceDeployManager
.. |deploy_manager_code| replace:: ``VitisInstanceDeployManager``
.. |runner| replace:: Xilinx XRT/Vitis
.. |hwdb_entry| replace:: vitis_firesim_rocket_singlecore_no_nic
.. |quintuplet| replace:: vitis-firesim-FireSim-FireSimRocketConfig-BaseVitisConfig
.. |hwdb_entry_name| replace:: vitis_firesim_rocket_singlecore_no_nic
.. include:: Running-Single-Node-Simulation-Template.rst
.. warning:: ⚠️ **We highly recommend using the XDMA-based U250 flow instead of this
Vitis-based flow. You can find the XDMA-based flow here:** :ref:`u250-standard-flow`.
The Vitis-based flow does not support DMA-based FireSim bridges (e.g.,
TracerV, Synthesizable Printfs, etc.), while the XDMA-based flows support
all FireSim features. If you're unsure, use the XDMA-based U250 flow
instead: :ref:`u250-standard-flow`
.. warning:: Currently, FireSim simulations with bridges that use the PCI-E DMA interface (i.e., TracerV, NIC, Dromajo, Printfs) are not supported with |fpga_type| FPGAs.
This will be added in a future FireSim release.
.. include:: Running-Single-Node-Simulation-Vitis-Template.rst
.. warning:: In some cases, simulation may fail because you might need to update the |fpga_type| DRAM offset that is currently hard-coded in both the FireSim |runner| driver code and the platform shim.
To verify this, run ``xclbinutil --info --input <YOUR_XCL_BIN>`` and obtain the ``bank0`` ``MEM_DDR4`` offset.
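For reference, a minimal sketch of that check (``<YOUR_XCL_BIN>`` is a placeholder for the path to your built ``xclbin``):

.. code-block:: bash

   # print the xclbin metadata; note the offset listed for the bank0 MEM_DDR4 entry
   xclbinutil --info --input <YOUR_XCL_BIN>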

View File

@ -0,0 +1,17 @@
.. |fpga_name| replace:: Xilinx Alveo U250 XDMA-based
.. |fpga_name_short| replace:: Xilinx Alveo U250
.. _fpga_name_short: https://www.xilinx.com/products/boards-and-kits/alveo/u250.html
.. |flow_name| replace:: XDMA-based
.. |build_type| replace:: Xilinx Vivado
.. _u250-standard-flow:
.. include:: Intro-Template.rst
.. toctree::
:maxdepth: 3
Initial-Setup/Xilinx-Alveo-U250
Repo-Setup/Xilinx-Alveo-U250
Running-Simulations/Running-Single-Node-Simulation-Xilinx-Alveo-U250
Building-a-FireSim-Bitstream/Xilinx-Alveo-U250

View File

@ -0,0 +1,15 @@
.. |fpga_name| replace:: Xilinx Alveo U280 XDMA-based
.. |fpga_name_short| replace:: Xilinx Alveo U280
.. _fpga_name_short: https://www.xilinx.com/products/boards-and-kits/alveo/u280.html
.. |flow_name| replace:: XDMA-based
.. |build_type| replace:: Xilinx Vivado
.. include:: Intro-Template.rst
.. toctree::
:maxdepth: 3
Initial-Setup/Xilinx-Alveo-U280
Repo-Setup/Xilinx-Alveo-U280
Running-Simulations/Running-Single-Node-Simulation-Xilinx-Alveo-U280
Building-a-FireSim-Bitstream/Xilinx-Alveo-U280

View File

@ -0,0 +1,15 @@
.. |fpga_name| replace:: Xilinx VCU118 XDMA-based
.. |fpga_name_short| replace:: Xilinx VCU118
.. _fpga_name_short: https://www.xilinx.com/products/boards-and-kits/vcu118.html
.. |flow_name| replace:: XDMA-based
.. |build_type| replace:: Xilinx Vivado
.. include:: Intro-Template.rst
.. toctree::
:maxdepth: 3
Initial-Setup/Xilinx-VCU118
Repo-Setup/Xilinx-VCU118
Running-Simulations/Running-Single-Node-Simulation-Xilinx-VCU118
Building-a-FireSim-Bitstream/Xilinx-VCU118

View File

@ -0,0 +1,41 @@
.. |fpga_name| replace:: (Experimental) Xilinx Alveo U250 Vitis-based
.. |fpga_name_short| replace:: Xilinx Alveo U250
.. _fpga_name_short: https://www.xilinx.com/products/boards-and-kits/alveo/u250.html
.. |flow_name| replace:: Vitis-based
.. |build_type| replace:: Xilinx Vitis
.. warning:: ⚠️ **We highly recommend using the XDMA-based U250 flow instead of this
Vitis-based flow. You can find the XDMA-based flow here:** :ref:`u250-standard-flow`.
The Vitis-based flow does not support DMA-based FireSim bridges (e.g.,
TracerV, Synthesizable Printfs, etc.), while the XDMA-based flows support
all FireSim features. If you're unsure, use the XDMA-based U250 flow
instead: :ref:`u250-standard-flow`
.. include:: Intro-Template.rst
#. **FPGA Setup**: Installing the FPGA board and relevant software.
#. **On-Premises Machine Setup**
#. Setting up a "Manager Machine" from which you will coordinate building
and deploying simulations locally.
#. **Single-node simulation guide**: This guide walks you through the
process of running a simulation locally on a single
|fpga_name_short|, using a pre-built, public bitstream.
#. **Building your own hardware designs guide (Chisel to FPGA Image)**:
This guide walks you through the full process of taking Rocket Chip RTL
and any custom RTL plugged into Rocket Chip and producing a FireSim bitstream
to plug into your simulations. This automatically runs Chisel elaboration,
FAME-1 Transformation, and the |build_type| FPGA flow.
Generally speaking, you only need to follow Step 4 if you're modifying Chisel
RTL or changing non-runtime configurable hardware parameters.
.. toctree::
:maxdepth: 3
Initial-Setup/Xilinx-Vitis-FPGAs
Running-Simulations/Running-Single-Node-Simulation-Xilinx-Vitis
Building-a-FireSim-Bitstream/Xilinx-Vitis

View File

@ -1,8 +0,0 @@
.. |fpga_name| replace:: Xilinx Alveo U250
.. |hwdb_entry_name| replace:: ``alveo_u250_firesim_rocket_singlecore_no_nic``
.. |hwdb_entry_name_non_code| replace:: alveo_u250_firesim_rocket_singlecore_no_nic
.. |bit_file_type| replace:: ``bitstream_tar``
.. |builder_name| replace:: Xilinx Vivado
.. |bit_builder_path| replace:: ``bit-builder-recipes/xilinx_alveo_u250.yaml``
.. include:: Xilinx-Bitstream-Template.rst

View File

@ -1,8 +0,0 @@
.. |fpga_name| replace:: Xilinx Alveo U280
.. |hwdb_entry_name| replace:: ``alveo_u280_firesim_rocket_singlecore_no_nic``
.. |hwdb_entry_name_non_code| replace:: alveo_u280_firesim_rocket_singlecore_no_nic
.. |bit_file_type| replace:: ``bitstream_tar``
.. |builder_name| replace:: Xilinx Vivado
.. |bit_builder_path| replace:: ``bit-builder-recipes/xilinx_alveo_u280.yaml``
.. include:: Xilinx-Bitstream-Template.rst

View File

@ -1,67 +0,0 @@
Building Your Own Hardware Designs
==================================
This section will guide you through building a |fpga_name| FPGA |bit_file_type| (FPGA image) for a FireSim simulation.
Build Recipes
---------------
We already provide a build recipe (i.e., a hardware configuration) for you called |hwdb_entry_name| that was used to pre-build a |fpga_name| FPGA |bit_file_type|.
You can find this in the ``config_build_recipes.yaml`` file.
This configuration is a simple single-core Rocket configuration with a single DRAM channel and no debugging features (as indicated by some of the variables like ``TARGET_CONFIG``).
Additionally, this configuration has a field called ``bit_builder_recipe`` pointing to |bit_builder_path|.
This file, found in :gh-file-ref:`deploy`, tells the FireSim build system what combination of commands to run to build the |bit_file_type|.
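Schematically, a build recipe entry pairs the target RTL configuration variables with the bit builder recipe. The following is only a sketch with placeholder values, not the verbatim entry; consult ``config_build_recipes.yaml`` for the authoritative fields:

.. code-block:: yaml

   <YOUR_BUILD_RECIPE_NAME>:
     # target RTL configuration variables (placeholder value shown)
     TARGET_CONFIG: <TARGET_CONFIG_FOR_YOUR_DESIGN>
     # tells the build system which command sequence builds the bitstream for this platform
     bit_builder_recipe: <BIT_BUILDER_PATH>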
Next, let's build the bitstream corresponding to the build recipe and specify the Build Farm to run on.
In the ``deploy/config_build.yaml`` file, you will notice at least two mappings: ``build_farm`` and ``builds_to_run``.
Let's first finish setting up the ``build_farm`` mapping, which specifies the build machines that are available to build FPGA images.
First, notice that the ``base_recipe`` maps to ``build-farm-recipes/externally_provisioned.yaml``.
This indicates to the FireSim manager that the machines allocated to run builds will be provided by the user through IP addresses
instead of being automatically launched and allocated (e.g. launching instances on-demand in AWS).
Next, let's look at the ``build_farm_hosts`` list that has a single element ``localhost``.
This list indicates the IP addresses of machines already booted and ready to use for builds.
In our case, we are building locally so we provide our own IP address, ``localhost``.
Finally, let's modify the ``default_build_dir`` mapping to point to a directory of your choice that will store
temporary |builder_name| build files during builds.
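Putting these together, a sketch of the ``build_farm`` mapping might look like the following. The nesting under ``recipe_arg_overrides`` mirrors the run farm layout shown elsewhere in these docs and is an assumption of this sketch; ``<PATH_TO_BUILD_AREA>`` is a placeholder:

.. code-block:: yaml

   build_farm:
     base_recipe: build-farm-recipes/externally_provisioned.yaml
     recipe_arg_overrides:
       # machines already booted and ready to run builds (here, the local machine)
       build_farm_hosts:
         - localhost
       # temporary build area of your choice
       default_build_dir: <PATH_TO_BUILD_AREA>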
Continuing to the next section in the ``deploy/config_build.yaml`` file, you will notice that the ``builds_to_run``
section currently contains several lines, which
indicates to the build system that you want to run all of these builds on the machines provided, with the parameters listed in the relevant section of the
``deploy/config_build_recipes.yaml`` file.
To start out, let's build our simple design, |hwdb_entry_name|, that we discussed above.
To do so, comment out all of the other build entries in ``deploy/config_build.yaml``, and uncomment the "- |hwdb_entry_name_non_code|" line.
So, you should end up with something like this (a line beginning with a ``#`` is a comment):
.. code-block:: text
:substitutions:
builds_to_run:
# this section references builds defined in config_build_recipes.yaml
# if you add a build here, it will be built when you run buildbitstream
# Many other commented lines...
- |hwdb_entry_name_non_code|
Running a Build
----------------------
Now, we can run a build like so:
.. code-block:: bash
firesim buildbitstream
This will run through the entire build process, taking the Chisel RTL
and producing a |fpga_name| FPGA |bit_file_type| that runs on the FPGA. This whole process will
usually take a few hours. When the build
completes, you will see a directory in
``deploy/results-build/``, named after your build parameter
settings, that contains all of the outputs of the |builder_name| build process.
Additionally, the manager will print out a path to a log file
that describes everything that happened, in-detail, during this run (this is a
good file to send us if you encounter problems).
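As a quick check once the build finishes, you can list the results directory mentioned above (the exact subdirectory name depends on your build parameter settings):

.. code-block:: bash

   ls deploy/results-build/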
Now that you know how to generate your own FPGA image, you can modify the target-design
to add your own features, then build a FireSim-compatible FPGA image automatically!
To learn more advanced FireSim features, you can choose a link under the "Advanced Docs" section to the left.

View File

@ -1,8 +0,0 @@
.. |fpga_name| replace:: Xilinx Vitis-enabled U250
.. |hwdb_entry_name| replace:: ``vitis_firesim_rocket_singlecore_no_nic``
.. |hwdb_entry_name_non_code| replace:: vitis_firesim_rocket_singlecore_no_nic
.. |bit_file_type| replace:: ``xclbin``
.. |builder_name| replace:: Xilinx Vitis
.. |bit_builder_path| replace:: ``bit-builder-recipes/vitis.yaml``
.. include:: Xilinx-Bitstream-Template.rst

View File

@ -1,99 +0,0 @@
FPGA Board Setup
===================
FPGA Setup
----------
.. warning:: Currently, FireSim only supports a single type of FPGA (i.e., only |fpga_name| FPGAs) installed on a machine.
This also means that you cannot mix in Xilinx Vitis/XRT-enabled FPGAs on the same system.
.. Warning:: Power users can skip this setup and just create the database file listed below by hand if they want to target specific FPGAs.
We need to flash the |fpga_name| FPGA(s)' SPI flash with a dummy XDMA-enabled design and determine the PCI-e ID (or BDF) associated with the serial number of each FPGA.
First, we need to flash the FPGA's SPI flash with the dummy XDMA-enabled design so that the PCI-e subsystem can be initially configured.
Afterwards, we will generate the mapping from FPGA serial numbers to BDFs.
We provide a set of scripts to do this.
First, let's obtain the sample bitstream by finding the URL to download the file to the machine with the FPGA.
Below find the HWDB entry called |hwdb_entry_name|.
.. literalinclude:: /../deploy/sample-backup-configs/sample_config_hwdb.yaml
:language: yaml
:start-after: DOCREF START: Xilinx Alveo HWDB Entries
:end-before: DOCREF END: Xilinx Alveo HWDB Entries
Look for the ``bitstream_tar: <URL>`` line within |hwdb_entry_name| and keep note of the URL.
We will replace the ``BITSTREAM_TAR`` bash variable below with that URL.
Next, let's unpack the ``tar`` archive and obtain the ``mcs`` file used to program the FPGA SPI flash.
.. code-block:: bash
:substitutions:
# unpack the file in any area
cd ~
BITSTREAM_TAR=<URL FROM BEFORE>
tar xvf $BITSTREAM_TAR
ls |platform_name|
You should see an ``mcs`` file used to program the SPI flash of the FPGA.
Next, let's flash the SPI flash modules of each |fpga_name| in the system with the dummy bitstream.
Open Xilinx Vivado (or Vivado Lab), connect to each FPGA and program the SPI flash.
You can refer to https://www.fpgadeveloper.com/how-to-program-configuration-flash-with-vivado-hardware-manager/ for examples on how to do this for various boards.
Next, **cold reboot** the computer.
This will reconfigure your PCI-E settings such that FireSim can detect the XDMA-enabled bitstream.
After the machine is rebooted, you may need to re-insert the XDMA kernel module.
Then verify that you can see the XDMA module with:
.. code-block:: bash
lsmod | grep -i xdma
Also, verify that the FPGA programming worked by looking at the ``lspci`` output.
For example, we should see ``Serial controller`` for BDFs that were flashed.
.. code-block:: bash
lspci | grep -i xilinx
# example output
04:00.0 Serial controller: Xilinx Corporation Device 903f (rev ff)
83:00.0 Serial controller: Xilinx Corporation Device 903f (rev ff)
If you don't see similar output, you might need to **warm reboot** your machine until you see the output.
.. Warning:: Anytime the host computer is rebooted, you may need to re-run parts of the setup process (e.g., re-insert the XDMA kernel module).
Before continuing to FireSim simulations after a host computer reboot, ensure that the ``cat /proc/devices | grep xdma`` command is successful.
Also ensure that you see ``Serial controller`` for the BDF of the FPGA you would like to use in ``lspci | grep -i xilinx``.
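For convenience, both post-reboot checks in one place:

.. code-block:: bash

   # confirm the XDMA driver is loaded
   cat /proc/devices | grep xdma
   # confirm the flashed FPGA(s) still show up as Serial controller devices
   lspci | grep -i xilinx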
Next, let's generate the mapping from FPGA serial numbers to the BDF.
Re-enter the FireSim repository and run the following commands to re-setup the repo after reboot.
.. code-block:: bash
:substitutions:
cd firesim
# rerunning this since the machine rebooted
source sourceme-manager.sh --skip-ssh-setup
Next, open up the ``deploy/config_runtime.yaml`` file and set the following keys as described below (a sketch of the result follows this list):
* ``default_platform`` should be |deploy_manager_code|
* ``default_simulation_dir`` should point to a temporary simulation directory of your choice
* ``default_hw_config`` should be |hwdb_entry_name|
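A sketch of the resulting keys (placeholder values shown; substitute the platform, directory, and hardware configuration described above):

.. code-block:: yaml

   run_farm:
     base_recipe: run-farm-recipes/externally_provisioned.yaml
     recipe_arg_overrides:
       default_platform: <DEPLOY_MANAGER_FOR_YOUR_FPGA>
       default_simulation_dir: <PATH_TO_SIMULATION_AREA>
   target_config:
     default_hw_config: <HWDB_ENTRY_NAME>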
Then, run the following command to generate a mapping from a PCI-E BDF to FPGA UID/serial number.
.. code-block:: bash
:substitutions:
firesim enumeratefpgas
This will generate a database file in ``/opt/firesim-db.json`` that has this mapping.
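If you'd like to sanity-check the result, you can simply view the generated file (its exact contents are not shown here):

.. code-block:: bash

   cat /opt/firesim-db.json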
Now you're ready to continue with other FireSim setup!

View File

@ -1,56 +0,0 @@
FPGA Software Setup
===================
Requirements and Installations
------------------------------
We require a base machine that is able to host the |fpga_name| and run Xilinx Vivado.
Please refer to the minimum system requirements given in the following link: https://docs.xilinx.com/r/en-US/ug1301-getting-started-guide-alveo-accelerator-cards/Minimum-System-Requirements.
Next, install the U250 FPGA as indicated: https://docs.xilinx.com/r/en-US/ug1301-getting-started-guide-alveo-accelerator-cards/Card-Installation-Procedures
We require the following programs/packages installed from the Xilinx website in addition to a physical U250 installation:
* Vivado 2021.1 or 2022.2
* U250 board package (corresponding with Vivado 2021.1 or 2022.2)
* Ensure you complete the "Installing the Deployment Software" and "Card Bring-Up and Validation" sections in the following link: https://docs.xilinx.com/r/en-US/ug1301-getting-started-guide-alveo-accelerator-cards/Installing-the-Deployment-Software
* Ensure that the board package is installed to a Vivado accessible location: https://support.xilinx.com/s/article/The-board-file-location-with-the-latest-Vivado-tools?language=en_US
Importantly, using this FPGA with FireSim requires that you have **passwordless** ``sudo`` access on the machine with the FPGA.
This is needed to flash the FPGA bitstream onto the FPGA.
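One quick way to confirm passwordless ``sudo`` works (this is just a suggested check, not a FireSim command):

.. code-block:: bash

   # should print "ok" without prompting for a password
   sudo -n true && echo ok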
XDMA Setup
----------
To communicate with the FPGA over PCI-e, we need to install the Xilinx XDMA kernel module.
First, let's install the XDMA kernel module into a FireSim-known location:
.. code-block:: bash
cd /tmp # or any location you prefer
git clone https://github.com/Xilinx/dma_ip_drivers
cd dma_ip_drivers
git checkout 2022.1.5
cd XDMA/linux-kernel/xdma
sudo make clean && sudo make && sudo make install
Next, let's load the kernel module:
.. code-block:: bash
# the module should be installed in the following location
# by the `make install` previously run
sudo insmod /lib/modules/$(uname -r)/extra/xdma.ko poll_mode=1
By default, FireSim will refer to this location to check if the XDMA driver is loaded.
Verify that you can see the XDMA module with:
.. code-block:: bash
lsmod | grep -i xdma
.. warning:: After the machine is rebooted, you may need to re-insert the XDMA kernel module.
Now you're ready to continue with other FireSim setup!

View File

@ -1,6 +0,0 @@
.. |fpga_name| replace:: Xilinx Alveo U250
.. |hwdb_entry_name| replace:: ``alveo_u250_firesim_rocket_singlecore_no_nic``
.. |platform_name| replace:: xilinx_alveo_u250
.. |board_name| replace:: au250
.. include:: Xilinx-Alveo-Template.rst

View File

@ -1,6 +0,0 @@
.. |fpga_name| replace:: Xilinx Alveo U280
.. |hwdb_entry_name| replace:: ``alveo_u280_firesim_rocket_singlecore_no_nic``
.. |platform_name| replace:: xilinx_alveo_u280
.. |board_name| replace:: au280
.. include:: Xilinx-Alveo-Template.rst

View File

@ -1,45 +0,0 @@
.. |fpga_name| replace:: Xilinx Vitis-enabled U250
.. |vitis_version| replace:: 2022.1
.. |vitis_link| replace:: https://www.xilinx.com/products/design-tools/vitis/vitis-whats-new.html#20221
FPGA and Tool Setup
===================
Requirements and Installations
------------------------------
We require a base machine that is able to host a |fpga_name| and run Xilinx Vitis.
For the purposes of this tutorial, we assume you are running with a |fpga_name|.
Please refer to the minimum system requirements given in the following link: https://docs.xilinx.com/r/en-US/ug1301-getting-started-guide-alveo-accelerator-cards/Minimum-System-Requirements.
``sudo`` access is not needed for the machine except when the |fpga_name| and its corresponding software are installed.
Next, install the |fpga_name| as indicated: https://docs.xilinx.com/r/en-US/ug1301-getting-started-guide-alveo-accelerator-cards/Card-Installation-Procedures
We require the following programs/packages installed from the Xilinx website in addition to a physical |fpga_name| installation:
* Xilinx Vitis |vitis_version|
* Installation link: |vitis_link|
* Xilinx XRT and |fpga_name| board package (corresponding with Vitis |vitis_version|)
* Ensure you complete the "Installing the Deployment Software" and "Card Bring-Up and Validation" sections in the following link: https://docs.xilinx.com/r/en-US/ug1301-getting-started-guide-alveo-accelerator-cards/Installing-the-Deployment-Software
Setup Validation
----------------
After installing the |fpga_name| using the Xilinx instructions and installing the specific versions of Vitis/XRT, let's verify that the |fpga_name| can be used for emulations.
Ensure that you can run the following XRT commands without errors:
.. code-block:: bash
:substitutions:
xbutil examine # obtain the BDF associated with your installed |fpga_name|
xbutil validate --device <CARD_BDF_INSTALLED> --verbose
The ``xbutil validate`` command runs simple tests to ensure that the FPGA can be properly flashed with a bitstream by using XRT.
.. Warning:: Anytime the host computer is rebooted, you may need to re-run parts of the setup process (e.g., re-flash the shell).
Before continuing to FireSim simulations after a host computer reboot, ensure that the previously mentioned ``xbutil`` command is successful.
Now you're ready to continue with other FireSim setup!

View File

@ -1,5 +0,0 @@
.. |platform_name| replace:: xilinx_alveo_u250
.. |tool_type| replace:: Xilinx Vivado
.. |example_var| replace:: XILINX_VIVADO
.. include:: ./Setting-Up-Template.rst

View File

@ -1,5 +0,0 @@
.. |platform_name| replace:: xilinx_alveo_u280
.. |tool_type| replace:: Xilinx Vivado
.. |example_var| replace:: XILINX_VIVADO
.. include:: ./Setting-Up-Template.rst

View File

@ -1,5 +0,0 @@
.. |platform_name| replace:: vitis
.. |tool_type| replace:: Xilinx XRT/Vitis
.. |example_var| replace:: XILINX_XRT
.. include:: ./Setting-Up-Template.rst

View File

@ -1,16 +0,0 @@
|fpga_name| Getting Started
=======================================
The tutorials that follow this page will guide you through the complete flow for
getting an example FireSim simulation up and running using an on-premises |fpga_name| FPGA.
This tutorial sets up a single-node on-premises cluster (i.e., running FPGA bitstream builds and simulations on a single machine) for FireSim use.
This single machine will serve as the "Manager Machine", a "head" node on which all work will be completed.
At the end of this
tutorial, you'll have a simulation that simulates a single quad-core Rocket
Chip-based node with a 4 MB last level cache, 16 GB DDR3, and no NIC.
The final tutorial
will show you how to build your own FPGA images with customized hardware.
After you complete these tutorials, you can look at the "Advanced Docs"
in the sidebar to the left.
Here's a high-level outline of what we'll be doing in our tutorials:

View File

@ -1,412 +0,0 @@
Running a Single Node Simulation
===================================
Now that we've completed the setup of our manager machine, it's time to run
a simulation! In this section, we will simulate **1 target node**, for which we
will need a single |fpga_type|.
**Make sure you have sourced** ``sourceme-manager.sh --skip-ssh-setup`` **before running any of these commands.**
Building target software
------------------------
In these instructions, we'll assume that you want to boot Linux on your
simulated node. To do so, we'll need to build our FireSim-compatible RISC-V
Linux distro. For this tutorial, we will use a simple buildroot-based
distribution. You can do this like so:
.. code-block:: bash
cd firesim/sw/firesim-software
./init-submodules.sh
./marshal -v build br-base.json
Once this is completed, you'll have the following files:
- ``firesim/sw/firesim-software/images/firechip/br-base/br-base-bin`` - a bootloader + Linux
kernel image for the nodes we will simulate.
- ``firesim/sw/firesim-software/images/firechip/br-base/br-base.img`` - a disk image for
each of the nodes we will simulate
These files will be used to form base images to either build more complicated
workloads (see the :ref:`defining-custom-workloads` section) or to copy around
for deploying.
Setting up the manager configuration
-------------------------------------
All runtime configuration options for the manager are set in a file called
``firesim/deploy/config_runtime.yaml``. In this guide, we will explain only the
parts of this file necessary for our purposes. You can find full descriptions of
all of the parameters in the :ref:`manager-configuration-files` section.
If you open up this file, you will see the following default config (assuming
you have not modified it):
.. include:: DOCS_EXAMPLE_config_runtime.yaml
:code: yaml
We'll need to modify a couple of these lines.
First, let's tell the manager to use the single |fpga_type| FPGA.
You'll do this in the ``run_farm`` mapping, which describes and specifies the machines that will run simulations.
First, notice that the ``base_recipe`` maps to ``run-farm-recipes/externally_provisioned.yaml``.
This indicates to the FireSim manager that the machines allocated to run simulations will be provided by the user through IP addresses
instead of automatically launched and allocated (e.g., launching instances on-demand in AWS).
Let's modify the ``default_platform`` to be |deploy_manager_code| so that we can launch simulations using |runner|.
Next, modify ``default_simulation_dir`` to point to a directory where you want to store temporary simulation collateral.
When running simulations, this directory is used to store any temporary files that the simulator creates (e.g., the uartlog emitted by a Linux simulation).
Next, let's modify the ``run_farm_hosts_to_use`` mapping.
This maps IP addresses or hostnames (in this case, ``localhost``) to a description/specification of the simulation machine.
In this case, we have only one |fpga_type| FPGA, so we will change the description of ``localhost`` to ``one_fpga_spec``.
Now, let's verify that the ``target_config`` mapping will model the correct target design.
By default, it is set to model a single-node with no network.
It should look like the following:
.. code-block:: yaml
target_config:
topology: no_net_config
no_net_num_nodes: 1
link_latency: 6405
switching_latency: 10
net_bandwidth: 200
profile_interval: -1
# This references a section from config_hwdb.yaml
# In homogeneous configurations, use this to set the hardware config deployed
# for all simulators
default_hw_config: firesim_rocket_quadcore_no_nic_l2_llc4mb_ddr3
Note ``topology`` is set to
``no_net_config``, indicating that we do not want a network. Then,
``no_net_num_nodes`` is set to ``1``, indicating that we only want to simulate
one node. Lastly, the ``default_hw_config`` is
``firesim_rocket_quadcore_no_nic_l2_llc4mb_ddr3``.
Let's modify the ``default_hw_config`` (the target design) to "|hwdb_entry|".
This new hardware configuration is pre-built for the |fpga_type| FPGA.
It models a single-core Rocket Chip SoC with **no** network interface card.
We will leave the ``workload`` mapping unchanged here, since we do
want to run the buildroot-based Linux on our simulated system. The ``terminate_on_completion``
feature is an advanced feature that you can learn more about in the
:ref:`manager-configuration-files` section.
As a final sanity check, in the mappings we changed, the ``config_runtime.yaml`` file should now look like this (with ``PATH_TO_SIMULATION_AREA`` replaced with your simulation collateral temporary directory):
.. code-block:: text
:substitutions:
run_farm:
base_recipe: run-farm-recipes/externally_provisioned.yaml
recipe_arg_overrides:
default_platform: |deploy_manager|
default_simulation_dir: <PATH_TO_SIMULATION_AREA>
run_farm_hosts_to_use:
- localhost: one_fpga_spec
target_config:
topology: no_net_config
no_net_num_nodes: 1
link_latency: 6405
switching_latency: 10
net_bandwidth: 200
profile_interval: -1
default_hw_config: |hwdb_entry|
plusarg_passthrough: ""
workload:
workload_name: linux-uniform.json
terminate_on_completion: no
suffix_tag: null
Launching a Simulation!
-----------------------------
Now that we've told the manager everything it needs to know in order to run
our single-node simulation, let's actually run it!
Starting the Run Farm
^^^^^^^^^^^^^^^^^^^^^^^^^
First, we will tell the manager to launch our Run Farm with a single machine called ``localhost``. Run:
.. code-block:: bash
firesim launchrunfarm
In this case, since we are already running the machine with the FPGA (``localhost``),
this command should not launch any machine and should be quick.
You should expect output like the following:
.. code-block:: bash
$ firesim launchrunfarm
FireSim Manager. Docs: https://docs.fires.im
Running: launchrunfarm
WARNING: Skipping launchrunfarm since run hosts are externally provisioned.
The full log of this run is:
.../firesim/deploy/logs/2023-03-06--00-20-37-launchrunfarm-24T0KOGRHBMSHAV5.log
Setting up the simulation infrastructure
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The manager will also take care of building and deploying all software
components necessary to run your simulation. The manager will also handle
flashing FPGAs. To tell the manager to setup our simulation infrastructure,
let's run:
.. code-block:: bash
firesim infrasetup
For a complete run, you should expect output like the following:
.. code-block:: bash
$ firesim infrasetup
FireSim Manager. Docs: https://docs.fires.im
Running: infrasetup
Building FPGA software driver for |quintuplet|
...
[localhost] Checking if host instance is up...
[localhost] Copying FPGA simulation infrastructure for slot: 0.
[localhost] Clearing all FPGA Slots.
The full log of this run is:
.../firesim/deploy/logs/2023-03-06--01-22-46-infrasetup-35ZP4WUOX8KUYBF3.log
Many of these tasks will take several minutes, especially on a clean copy of
the repo. The console output here contains the "user-friendly" version of the
output. If you want to see detailed progress as it happens, ``tail -f`` the
latest logfile in ``firesim/deploy/logs/``.
At this point, our single Run Farm ``localhost`` machine has all the infrastructure
necessary to run a simulation.
So, let's launch our simulation!
Running a simulation!
^^^^^^^^^^^^^^^^^^^^^^^^^
Finally, let's run our simulation! To do so, run:
.. code-block:: bash
firesim runworkload
This command boots up a simulation and prints out the live status of the simulated
nodes every 10s. When you do this, you will initially see output like:
.. code-block:: bash
$ firesim runworkload
FireSim Manager. Docs: https://docs.fires.im
Running: runworkload
Creating the directory: .../firesim/deploy/results-workload/2023-03-06--01-25-34-linux-uniform/
[localhost] Checking if host instance is up...
[localhost] Starting FPGA simulation for slot: 0.
If you don't look quickly, you might miss it, since it will get replaced with a
live status page:
.. code-block:: text
FireSim Simulation Status @ 2018-05-19 00:38:56.062737
--------------------------------------------------------------------------------
This workload's output is located in:
.../firesim/deploy/results-workload/2018-05-19--00-38-52-linux-uniform/
This run's log is located in:
.../firesim/deploy/logs/2018-05-19--00-38-52-runworkload-JS5IGTV166X169DZ.log
This status will update every 10s.
--------------------------------------------------------------------------------
Instances
--------------------------------------------------------------------------------
Hostname/IP: localhost | Terminated: False
--------------------------------------------------------------------------------
Simulated Switches
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
Simulated Nodes/Jobs
--------------------------------------------------------------------------------
Hostname/IP: localhost | Job: linux-uniform0 | Sim running: True
--------------------------------------------------------------------------------
Summary
--------------------------------------------------------------------------------
1/1 instances are still running.
1/1 simulations are still running.
--------------------------------------------------------------------------------
This will only exit once all of the simulated nodes have completed simulations. So, let's let it
run and open another terminal to the manager machine. From there, ``cd`` into
your FireSim directory again and ``source sourceme-manager.sh --skip-ssh-setup``.
Next, let's ``ssh`` into the simulation machine.
In this case, since we are running the simulation on the same machine (i.e. ``localhost``)
we can run the following:
.. code-block:: bash
ssh localhost
Next, we can directly attach to the console of the simulated system using ``screen``. To do so, run:
.. code-block:: bash
screen -r fsim0
Voila! You should now see Linux booting on the simulated system and then be prompted
with a Linux login prompt, like so:
.. code-block:: bash
[truncated Linux boot output]
[ 0.020000] VFS: Mounted root (ext2 filesystem) on device 254:0.
[ 0.020000] devtmpfs: mounted
[ 0.020000] Freeing unused kernel memory: 140K
[ 0.020000] This architecture does not have kernel memory protection.
mount: mounting sysfs on /sys failed: No such device
Starting logging: OK
Starting mdev...
mdev: /sys/dev: No such file or directory
modprobe: can't change directory to '/lib/modules': No such file or directory
Initializing random number generator... done.
Starting network: ip: SIOCGIFFLAGS: No such device
ip: can't find device 'eth0'
FAIL
Starting dropbear sshd: OK
Welcome to Buildroot
buildroot login:
You can ignore the messages about the network -- that is expected because we
are simulating a design without a NIC.
Now, you can login to the system! The username is ``root``.
At this point, you should be presented with a regular console,
where you can type commands into the simulation and run programs. For example:
.. code-block:: bash
Welcome to Buildroot
buildroot login: root
Password:
# uname -a
Linux buildroot 4.15.0-rc6-31580-g9c3074b5c2cd #1 SMP Thu May 17 22:28:35 UTC 2018 riscv64 GNU/Linux
#
At this point, you can run workloads as you'd like. To finish off this tutorial,
let's power off the simulated system and see what the manager does. To do so,
in the console of the simulated system, run ``poweroff -f``:
.. code-block:: bash
Welcome to Buildroot
buildroot login: root
Password:
# uname -a
Linux buildroot 4.15.0-rc6-31580-g9c3074b5c2cd #1 SMP Thu May 17 22:28:35 UTC 2018 riscv64 GNU/Linux
# poweroff -f
You should see output like the following from the simulation console:
.. code-block:: bash
# poweroff -f
[ 12.456000] reboot: Power down
Power off
time elapsed: 468.8 s, simulation speed = 88.50 MHz
*** PASSED *** after 41492621244 cycles
Runs 41492621244 cycles
[PASS] FireSim Test
SEED: 1526690334
Script done, file is uartlog
[screen is terminating]
You'll also notice that the manager polling loop exited! You'll see output like this
from the manager:
.. code-block:: text
FireSim Simulation Status @ 2018-05-19 00:46:50.075885
--------------------------------------------------------------------------------
This workload's output is located in:
.../firesim/deploy/results-workload/2018-05-19--00-38-52-linux-uniform/
This run's log is located in:
.../firesim/deploy/logs/2018-05-19--00-38-52-runworkload-JS5IGTV166X169DZ.log
This status will update every 10s.
--------------------------------------------------------------------------------
Instances
--------------------------------------------------------------------------------
Hostname/IP: 172.30.2.174 | Terminated: False
--------------------------------------------------------------------------------
Simulated Switches
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
Simulated Nodes/Jobs
--------------------------------------------------------------------------------
Hostname/IP: 172.30.2.174 | Job: linux-uniform0 | Sim running: False
--------------------------------------------------------------------------------
Summary
--------------------------------------------------------------------------------
1/1 instances are still running.
0/1 simulations are still running.
--------------------------------------------------------------------------------
FireSim Simulation Exited Successfully. See results in:
.../firesim/deploy/results-workload/2018-05-19--00-38-52-linux-uniform/
The full log of this run is:
.../firesim/deploy/logs/2018-05-19--00-38-52-runworkload-JS5IGTV166X169DZ.log
If you take a look at the workload output directory given in the manager output (in this case, ``.../firesim/deploy/results-workload/2018-05-19--00-38-52-linux-uniform/``), you'll see the following:
.. code-block:: bash
$ ls -la firesim/deploy/results-workload/2018-05-19--00-38-52-linux-uniform/*/*
-rw-rw-r-- 1 centos centos 797 May 19 00:46 linux-uniform0/memory_stats.csv
-rw-rw-r-- 1 centos centos 125 May 19 00:46 linux-uniform0/os-release
-rw-rw-r-- 1 centos centos 7316 May 19 00:46 linux-uniform0/uartlog
What are these files? They are specified to the manager in a configuration file
(:gh-file-ref:`deploy/workloads/linux-uniform.json`) as files that we want
automatically copied back from the temporary simulation directory into the ``results-workload`` directory (on our manager machine - which is also ``localhost`` for this tutorial) after we run a simulation, which is
useful for running benchmarks automatically. The
:ref:`defining-custom-workloads` section describes this process in detail.
For now, let's wrap up our tutorial by terminating the Run Farm that we launched.
To do so, run:
.. code-block:: bash
firesim terminaterunfarm
Which should present you with the following:
.. code-block:: bash
$ firesim terminaterunfarm
FireSim Manager. Docs: https://docs.fires.im
Running: terminaterunfarm
WARNING: Skipping terminaterunfarm since run hosts are externally provisioned.
The full log of this run is:
.../firesim/deploy/logs/2023-03-06--01-34-45-terminaterunfarm-YFXAJCRGF8KF4LQ3.log
Since we are re-using an existing machine that is already booted, this command should do nothing and be quick.
Congratulations on running your first FireSim simulation! At this point, you can
check-out some of the advanced features of FireSim in the sidebar to the left
(for example, we expect that many people will be interested in the ability to
automatically run the SPEC17 benchmarks: :ref:`spec-2017`).

View File

@ -1,21 +0,0 @@
#. **FPGA Software Setup**: Installing the relevant FPGA software.
#. **On-Premises Machine Setup**
#. Setting up a "Manager Machine" from which you will coordinate building
and deploying simulations locally.
#. **FPGA Board Setup**: Finish initial programming/setting up FPGA boards.
#. **Single-node simulation tutorial**: This tutorial guides you through the
process of running one simulation locally consisting of a single
|fpga_name|, using our pre-built public FireSim |bit_type| bitstream.
#. **Building your own hardware designs tutorial (Chisel to FPGA Image)**:
This tutorial guides you through the full process of taking Rocket Chip RTL
and any custom RTL plugged into Rocket Chip and producing a FireSim bitstream
to plug into your simulations. This automatically runs Chisel elaboration,
FAME-1 Transformation, and the |build_type| FPGA flow.
Generally speaking, you only need to follow Step 5 if you're modifying Chisel
RTL or changing non-runtime configurable hardware parameters.

View File

@ -1,16 +0,0 @@
.. |fpga_name| replace:: Xilinx Alveo U250
.. |bit_type| replace:: ``bitstream_tar``
.. |build_type| replace:: Xilinx Vivado
.. include:: Intro-Template.rst
.. include:: Xilinx-Alveo-Outline-Template.rst
.. toctree::
:maxdepth: 3
FPGA-Setup/Xilinx-Alveo-U250
Initial-Setup/Setting-Up-Xilinx-Alveo-U250
FPGA-Setup/Xilinx-Alveo-U250-Part2
Running-Simulations/Running-Single-Node-Simulation-Xilinx-Alveo-U250
Building-a-FireSim-Bitstream/Xilinx-Alveo-U250

View File

@ -1,16 +0,0 @@
.. |fpga_name| replace:: Xilinx Alveo U280
.. |bit_type| replace:: ``bitstream_tar``
.. |build_type| replace:: Xilinx Vivado
.. include:: Intro-Template.rst
.. include:: Xilinx-Alveo-Outline-Template.rst
.. toctree::
:maxdepth: 3
FPGA-Setup/Xilinx-Alveo-U280
Initial-Setup/Setting-Up-Xilinx-Alveo-U280
FPGA-Setup/Xilinx-Alveo-U280-Part2
Running-Simulations/Running-Single-Node-Simulation-Xilinx-Alveo-U280
Building-a-FireSim-Bitstream/Xilinx-Alveo-U280

View File

@ -1,33 +0,0 @@
.. |fpga_name| replace:: Xilinx Vitis-enabled U250
.. |bit_type| replace:: ``xclbin``
.. |build_type| replace:: Xilinx Vitis
.. include:: Intro-Template.rst
#. **FPGA Setup**: Installing the FPGA board and relevant software.
#. **On-Premises Machine Setup**
#. Setting up a "Manager Machine" from which you will coordinate building
and deploying simulations locally.
#. **Single-node simulation tutorial**: This tutorial guides you through the
process of running one simulation locally consisting of a single
|fpga_name|, using our pre-built public FireSim |bit_type| bitstream.
#. **Building your own hardware designs tutorial (Chisel to FPGA Image)**:
This tutorial guides you through the full process of taking Rocket Chip RTL
and any custom RTL plugged into Rocket Chip and producing a FireSim bitstream
to plug into your simulations. This automatically runs Chisel elaboration,
FAME-1 Transformation, and the |build_type| FPGA flow.
Generally speaking, you only need to follow Step 4 if you're modifying Chisel
RTL or changing non-runtime configurable hardware parameters.
.. toctree::
:maxdepth: 3
FPGA-Setup/Xilinx-Vitis-FPGAs
Initial-Setup/Setting-Up-Xilinx-Vitis
Running-Simulations/Running-Single-Node-Simulation-Xilinx-Vitis
Building-a-FireSim-Bitstream/Xilinx-Vitis

View File

@ -0,0 +1,63 @@
Before we jump into setting up FireSim, it is important to clarify several terms
that we will use throughout the rest of this documentation.
First, to disambiguate between the hardware being simulated and the computers doing
the simulating, we define:
**Target**
The design and environment being simulated. Commonly, a
group of one or more RISC-V SoCs with or without a network between them.
**Host**
The computers/FPGAs executing the FireSim simulation -- the **Run Farm** below.
We frequently prefix words with these terms. For example, software can run
on the simulated RISC-V system (*target*-software) or on a host x86 machine (*host*-software).
.. figure:: ../../../img/firesim_env.png
:alt: FireSim Infrastructure Setup
FireSim Infrastructure Diagram
**FireSim Manager** (``firesim``)
This program (available on your path as ``firesim``
once we source necessary scripts) automates the work required to launch FPGA
builds and run simulations. Most users will only have to interact with the
manager most of the time. If you're familiar with tools like Vagrant or Docker, the ``firesim``
command is just like the ``vagrant`` and ``docker`` commands, but for FPGA simulators
instead of VMs/containers.
Machines used to build and run FireSim simulations are broadly classified into
three groups:
|manager_machine|
This is the main host machine (e.g., |mach_details|) that you will "do work"
on. This is where you'll clone your copy of FireSim and use the FireSim
Manager to deploy builds/simulations from.
|build_farm_machine|
These are a collection of |mach_or_inst2| ("build farm |mach_or_inst_l|")
that are used by the FireSim manager to run FPGA bitstream builds. The
manager will automatically ship all sources necessary to run builds to these
|mach_or_inst_l| and will run the Verilog to FPGA bitstream build process on
them.
|run_farm_machine|
These are a collection of |mach_or_inst2| ("run farm |mach_or_inst_l|")
with FPGAs attached that the manager manages and deploys simulations onto.
You can use multiple Run Farms in parallel to run multiple separate
simulations in parallel.
|simple_setup|
One final piece of terminology will also be referenced throughout these
docs:
**Golden Gate**
The FIRRTL compiler in FireSim that converts target RTL into a decoupled
simulator. Formerly named MIDAS.

View File

@ -10,6 +10,7 @@ import shutil
import os
import subprocess
import sys
import time
from sphinx.util import logging
logger = logging.getLogger(__name__)
@ -27,8 +28,11 @@ logger = logging.getLogger(__name__)
# -- Project information -----------------------------------------------------
project = u'FireSim'
copyright = u'2018, Sagar Karandikar, Howard Mao, Donggyu Kim, David Biancolin, Alon Amid, and Berkeley Architecture Research'
author = u'Sagar Karandikar, Howard Mao, Donggyu Kim, David Biancolin, Alon Amid, and Berkeley Architecture Research'
this_year = time.strftime("%Y")
copyright = u'2018-' + this_year + ' Sagar Karandikar, David Biancolin, Abraham Gonzalez, Howard Mao, Donggyu Kim, Alon Amid, and Berkeley Architecture Research'
author = u'Sagar Karandikar, David Biancolin, Abraham Gonzalez, Howard Mao, Donggyu Kim, Alon Amid, and Berkeley Architecture Research'
on_rtd = os.environ.get("READTHEDOCS") == "True"
on_gha = os.environ.get("GITHUB_ACTIONS") == "true"
@ -117,7 +121,7 @@ language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store']
exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store', '**/*-Template.rst']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
@ -195,7 +199,7 @@ latex_elements = {
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'FireSim.tex', u'FireSim Documentation',
u'Sagar Karandikar, Howard Mao, \\\\ Donggyu Kim, David Biancolin, \\\\ Alon Amid, \\\\ Berkeley Architecture Research', 'manual'),
u'Sagar Karandikar, David Biancolin, \\\\ Abraham Gonzalez, Howard Mao, \\\\ Donggyu Kim, Alon Amid, \\\\ Berkeley Architecture Research', 'manual'),
]

View File

@ -1,7 +1,7 @@
Welcome to FireSim's documentation (version "|version|")!
===========================================================
New to FireSim? Jump to the :ref:`firesim-basics` page for more info.
New to FireSim? Jump to the :doc:`/FireSim-Basics` page for more info.
.. toctree::
@ -9,10 +9,12 @@ New to FireSim? Jump to the :ref:`firesim-basics` page for more info.
:caption: Getting Started:
FireSim-Basics
Getting-Started-Guides/AWS-EC2-F1-Tutorial/index
Getting-Started-Guides/On-Premises-FPGA-Tutorial/Xilinx-Alveo-U250-FPGAs
Getting-Started-Guides/On-Premises-FPGA-Tutorial/Xilinx-Alveo-U280-FPGAs
Getting-Started-Guides/On-Premises-FPGA-Tutorial/Xilinx-Vitis-FPGAs
Getting-Started-Guides/AWS-EC2-F1-Getting-Started/index
Getting-Started-Guides/On-Premises-FPGA-Getting-Started/Xilinx-Alveo-U250-FPGAs
Getting-Started-Guides/On-Premises-FPGA-Getting-Started/Xilinx-Alveo-U280-FPGAs
Getting-Started-Guides/On-Premises-FPGA-Getting-Started/Xilinx-VCU118-FPGAs
Getting-Started-Guides/On-Premises-FPGA-Getting-Started/RHS-Research-Nitefury-II-FPGAs
Getting-Started-Guides/On-Premises-FPGA-Getting-Started/Xilinx-Vitis-FPGAs
.. toctree::
:maxdepth: 3

View File

@ -177,15 +177,16 @@ set -o pipefail
$SUDO bash ./install_conda.sh -b -p "$CONDA_INSTALL_PREFIX" $conda_install_extra
rm ./install_conda.sh
# get most up-to-date conda version
"${DRY_RUN_ECHO[@]}" $SUDO "$CONDA_EXE" update $DRY_RUN_OPTION -y -n base -c conda-forge conda
# see https://conda-forge.org/docs/user/tipsandtricks.html#multiple-channels
# for more information on strict channel_priority
"${DRY_RUN_ECHO[@]}" $SUDO "$CONDA_EXE" config --system --set channel_priority flexible
# By default, don't mess with people's PS1, I personally find it annoying
# by default, don't mess with people's PS1, I personally find it annoying
"${DRY_RUN_ECHO[@]}" $SUDO "$CONDA_EXE" config --system --set changeps1 false
# don't automatically activate the 'base' environment when intializing shells
# don't automatically activate the 'base' environment when initializing shells
"${DRY_RUN_ECHO[@]}" $SUDO "$CONDA_EXE" config --system --set auto_activate_base false
# don't automatically update conda to avoid https://github.com/conda-forge/conda-libmamba-solver-feedstock/issues/2
"${DRY_RUN_ECHO[@]}" $SUDO "$CONDA_EXE" config --system --set auto_update_conda false
# automatically use the ucb-bar channel for specific packages https://anaconda.org/ucb-bar/repo
"${DRY_RUN_ECHO[@]}" $SUDO "$CONDA_EXE" config --system --add channels ucb-bar
@ -204,7 +205,7 @@ set -o pipefail
# initialize conda in the system-wide rcfiles
conda_init_extra_args=(--no-user --system)
fi
# run conda-init and look at it's output to insert 'conda activate $CONDA_ENV_NAME' into the
# run conda-init and look at its output to insert 'conda activate $CONDA_ENV_NAME' into the
# block that conda-init will update if ever conda is installed to a different prefix and
# this is rerun.
$SUDO "${CONDA_EXE}" init $DRY_RUN_OPTION "${conda_init_extra_args[@]}" $CONDA_SHELL_TYPE 2>&1 | \
@ -292,11 +293,13 @@ set -o pipefail
argcomplete_extra_args=( --dest "${BASH_COMPLETION_COMPAT_DIR}" )
else
# if we're aren't installing into a system directory, then initialize argcomplete
# if we aren't installing into a system directory, then initialize argcomplete
# with --user so that it goes into the home directory
argcomplete_extra_args=( --user )
fi
"${DRY_RUN_ECHO[@]}" $SUDO "${CONDA_ENV_BIN}/activate-global-python-argcomplete" "${argcomplete_extra_args[@]}"
set +o pipefail
"${DRY_RUN_ECHO[@]}" yes | $SUDO "${CONDA_ENV_BIN}/activate-global-python-argcomplete" "${argcomplete_extra_args[@]}"
set -o pipefail
# emergency fix for buildroot open files limit issue:
if [[ "$INSTALL_TYPE" == system ]]; then

@ -1 +1 @@
Subproject commit 9cfeec034ab36def11ac23fb3315605f93272e2b
Subproject commit 336f225143590f9060ebe7101e96f235f96db985