core_ibex dv build system refactor

As well as completely removing the existing non-cosim flow, this commit
significantly refactors the build system to be less reliant on the Makefile.

While we still use the Makefile, it is relegated to only providing scheduling
and dependency calculations between the different build steps.
This is possible by moving all of the build metadata into a file on-disk, which
is populated at the start of a new regression, then read from and written to by the
different scripts executing the build. Each build step only needs to be passed
the location of this metadata at the top-level, and it can then import all
the information it requires to calculate and perform the next build stage.

This allows better observability into the build, as it is trivial to add new
data to this file, which is also provided as a human-readable yaml version.
It should also allow easier integration into different build systems, as the
dependency on Make is much weaker.

The file metadata.py and test_run_result.py contain the definitions for
these metadata objects. metadata.py defines an object for the whole
regression, while test_run_result.py defines objects for each individual test
performed.

The file riscvdv_interface.py has been created to better isolate the interface
with that project.
The file setup_imports.py has been created to centralize the importing of
python modules from other projects (riscvdv/ot_lowrisc_ip etc.).
Existing python code has been tidied to better conform to PEP8 standard
formatting, and to be more Pythonic in general, such as using pathlib.Path.
This commit is contained in:
Harry Callahan 2022-07-06 22:54:59 +01:00 committed by hcallahan-lowrisc
parent 2f8dfa9dfe
commit 111d84f549
35 changed files with 2510 additions and 1904 deletions

0
__init__.py Normal file
View File

View File

@ -4,13 +4,11 @@
.SUFFIXES:
GEN_DIR := $(realpath ../../../vendor/google_riscv-dv)
TOOLCHAIN := ${RISCV_TOOLCHAIN}
export IBEX_ROOT := $(realpath ../../../)
EXT_DIR := riscv_dv_extension
# Explicitly ask for the bash shell
SHELL := bash
SHELL := bash
###############################################################################
# CONFIGURATION KNOBS
# Seed for instruction generator and RTL simulation
#
@ -21,169 +19,209 @@ SHELL := bash
# start passing again without fixing the bug).
SEED := $(shell echo $$RANDOM)
# This is the top-level output directory. Everything we generate goes in
# here.
OUT := out
# Needed for tcl files that are used with Cadence tools.
export dv_root := $(realpath ../../../vendor/lowrisc_ip/dv)
export DUT_TOP := dut
# Enable waveform dumping
WAVES := 0
# Enable coverage dump
COV := 0
# Enable cosimulation flow
COSIM := 1
# RTL simulator
# RTL simulator (xlm, vcs, questa, dsim, )
SIMULATOR := xlm
# ISS (spike, ovpsim)
ISS := spike
# Test name (default: full regression)
TEST := all
TESTLIST := $(EXT_DIR)/testlist.yaml
TESTLIST := riscv_dv_extension/testlist.yaml
# Verbose logging
VERBOSE :=
VERBOSE := 0
# Number of iterations for each test, assign a non-empty value to override the
# iteration count in the test list
ITERATIONS :=
# Generator timeout limit in seconds
TIMEOUT := 1800
# Pass/fail signature address at the end of test
# Pass/fail signature address at the end of test (see riscv_dv handshake documentation)
SIGNATURE_ADDR := 8ffffffc
### Ibex top level parameters ###
IBEX_CONFIG := opentitan
###############################################################################
all: collect_results $(if $(filter 1,$(COV)),merge_cov,)
# Build Stages
.PHONY: instr_gen_build
.PHONY: instr_gen_run
.PHONY: instr_gen_compile
.PHONY: rtl_tb_compile
.PHONY: rtl_sim_run
.PHONY: check_logs
.PHONY: riscv_dv_fcov
.PHONY: merge_cov
.PHONY: collect_results
###############################################################################
# This is the top-level output directory. Everything we generate goes in
# here.
OUT := out
# Derived directories from $(OUT), used for stuff that's built once or
# stuff that gets run for each seed, respectively. Using OUT-DIR on
# the way avoids ugly double slashes if $(OUT) happens to end in a /.
OUT-DIR := $(dir $(OUT)/)
BUILD-DIR := $(OUT-DIR)build
RUN-DIR := $(OUT-DIR)run
METADATA-DIR = $(OUT-DIR)metadata
# This expands to '@' if VERBOSE is 0 or not set, and to the empty
# string otherwise. Prefix commands with it in order that they only
# get printed when VERBOSE.
verb = $(if $(filter-out 0,$(VERBOSE)),,@)
# This is a list of directories that are automatically generated by some
# targets. To ensure the directory has been built, add an order-only dependency
# (with the pipe symbol before it) on the directory name and add the directory
# to this list.
gen-dirs := $(BUILD-DIR)
$(gen-dirs): %:
mkdir -p $@
SHELL=/bin/bash
###############################################################################
# Environment variables
GEN_DIR := $(realpath ../../../vendor/google_riscv-dv)
TOOLCHAIN := ${RISCV_TOOLCHAIN}
EXT_DIR := riscv_dv_extension
export IBEX_ROOT := $(realpath ../../../)
export PRJ_DIR := $(realpath ../../..)
export LOWRISC_IP_DIR := $(realpath ${PRJ_DIR}/vendor/lowrisc_ip)
all: core_config sim
# Needed for tcl files that are used with Cadence tools.
export dv_root := $(realpath ../../../vendor/lowrisc_ip/dv)
export DUT_TOP := dut
core_config:
cp $(EXT_DIR)/riscv_core_setting.sv.default $(EXT_DIR)/riscv_core_setting.sv
if [[ "$(IBEX_CONFIG)" == "small" ]] || [[ "$(IBEX_CONFIG)" == "experimental-branch-predictor" ]]; then \
patch -u $(EXT_DIR)/riscv_core_setting.sv -i $(EXT_DIR)/riscv_core_setting.nopmp.sv.patch; \
fi
cd ..
# Setup the necessary paths for all python scripts to find all other relevant modules.
PYTHONPATH := $(shell python3 -c 'from scripts.setup_imports import get_pythonpath; get_pythonpath()')
# export PYTHONPATH := $(PYTHONPATH) ## Why doesn't this work?
instr: iss_sim
sim: post_compare $(if $(filter 1,$(COV)),merge_cov,)
###############################################################################
.PHONY: clean
clean:
rm -f $(EXT_DIR)/riscv_core_setting.sv
rm -rf $(OUT-DIR)
# This is a list of directories that are automatically generated by some
# targets. To ensure the directory has been built, add an order-only dependency
# (with the pipe symbol before it) on the directory name and add the directory
# to this list.
gen-dirs := $(BUILD-DIR)
$(gen-dirs): %:
mkdir -p $@
.PHONY: core_config
core_config:
@cp $(EXT_DIR)/riscv_core_setting.sv.default $(EXT_DIR)/riscv_core_setting.sv
@if [[ "$(IBEX_CONFIG)" == "small" ]] || \
[[ "$(IBEX_CONFIG)" == "experimental-branch-predictor" ]]; \
then \
patch -u $(EXT_DIR)/riscv_core_setting.sv -i $(EXT_DIR)/riscv_core_setting.nopmp.sv.patch; \
fi
###############################################################################
# Utility functions.
#
# If VS is a list of variable names, P is a path and X is a string, then $(call
# dump-vars,P,X,VS) will expand to a list of 'file' commands that write each
# variable to P in Makefile syntax, but with "last-X-" prepended. At the start
# of the file, we also define last-X-vars-loaded to 1. You can use this to
# check whether there was a dump file at all.
#
# Note that this doesn't work by expanding to a command. Instead, *evaluating*
# dump-vars causes the variables to be dumped.
dump-var = $(file >>$(1),last-$(2)-$(3) := $($(3)))
dump-vars = $(file >$(1),last-$(2)-vars-loaded := .) \
$(foreach name,$(3),$(call dump-var,$(1),$(2),$(name)))
# Setup the metadata for the regression, which can then be accessed by
# all python scripts and testcases
# It needs to run before anything else.
new-metadata-file := $(shell env PYTHONPATH=$(PYTHONPATH) python3 ./scripts/metadata.py \
--op "create_metadata" \
--dir-metadata $(METADATA-DIR) \
--dir-out $(OUT-DIR) \
--args-list "\
SEED=$(SEED) WAVES=$(WAVES) COV=$(COV) COSIM=$(COSIM) SIMULATOR=$(SIMULATOR) \
ISS=$(ISS) TEST=$(TEST) VERBOSE=$(VERBOSE) ITERATIONS=$(ITERATIONS) \
SIGNATURE_ADDR=$(SIGNATURE_ADDR) IBEX_CONFIG=$(IBEX_CONFIG)")
# equal checks whether two strings are equal, evaluating to '.' if they are and
# '' otherwise.
both-empty = $(if $(1),,$(if $(2),,.))
find-find = $(if $(and $(findstring $(1),$(2)),$(findstring $(2),$(1))),.,)
equal = $(or $(call both-empty,$(1),$(2)),$(call find-find,$(1),$(2)))
# var-differs is used to check whether a variable has changed since it was
# dumped. If it has changed, the function evaluates to '.' (with some
# whitespace) and prints a message to the console; if not, it evaluates to ''.
#
# Call it as $(call var-differs,X,TGT,V).
var-differs = \
$(if $(call equal,$(strip $($(3))),$(strip $(last-$(1)-$(3)))),,\
.$(info Repeating $(2) because variable $(3) has changed value.))
# vars-differ is used to check whether several variables have the same value as
# they had when they were dumped. If we haven't loaded the dumpfile, it
# silently evaluates to '!'. Otherwise, if all the variables match, it
# evaluates to '.'. If not, it evaluates to '.' and prints some messages to the
# console explaining why a rebuild is happening.
#
# Call it as $(call vars-differ,X,TGT,VS).
vars-differ-lst = $(foreach v,$(3),$(call var-differs,$(1),$(2),$(v)))
vars-differ-sp = \
$(if $(last-$(1)-vars-loaded),\
$(if $(strip $(call vars-differ-lst,$(1),$(2),$(3))),.,),\
!)
vars-differ = $(strip $(call vars-differ-sp,$(1),$(2),$(3)))
# A phony target which can be used to force recompilation.
.PHONY: FORCE
FORCE:
# vars-prereq is empty if every variable in VS matches the last run (loaded
# with tag X), otherwise it is set to FORCE (which will force a recompile and
# might print a message to the console explaining why we're rebuilding TGT).
#
# Call it as $(call vars-prereq,X,TGT,VS)
vars-prereq = $(if $(call vars-differ,$(call strip,$(1)),$(2),$(3)),FORCE,)
# Convert VERBOSE, COV, WAVE and COSIM to "store_true" arguments
verb-arg := $(if $(filter-out 0,$(VERBOSE)),--verbose,)
cov-arg := $(if $(filter 1,$(COV)),--en_cov,)
wave-arg := $(if $(filter 1,$(WAVES)),--en_wave,)
cosim-arg := $(if $(filter 1,$(COSIM)),--en_cosim,)
### TODO ##
# Evaluate input variables to more-cleverly schedule partial-rebuilds
# This allows us to use Make to handle build scheduling and to calculate rebuilds,
# while keeping all the structured-data in the land of Python.
define get-metadata-variable
env PYTHONPATH=$(PYTHONPATH) python3 ./scripts/metadata.py \
--op "print_field" \
--dir-metadata $(METADATA-DIR) \
--field $(1)
endef
define get-meta
$(shell $(call get-metadata-variable, $(1)))
endef
# This is how you can get variables from the python metadata easily...
testvar := $(call get-meta,"ibex_root")
### TODO ###
###############################################################################
# Get a list of tests and seeds
#
# Run list_tests.py to list the things we need to run in the format
# TESTNAME.SEED and store it in a variable.
tests-and-seeds := \
$(shell ./list_tests.py \
--start_seed $(SEED) \
--test "$(TEST)" \
$(if $(ITERATIONS),--iterations $(ITERATIONS),) \
--ibex-config $(IBEX_CONFIG))
# Here we express the different build artifacts that the Makefile uses to
# establish the dependency tree, as well as which jobs depend on which
# top-level configuration knobs when deciding what to rebuild.
# Use build artifacts as targets where appropriate, otherwise use stamp-files.
tests-and-seeds := $(shell env PYTHONPATH=$(PYTHONPATH) python3 ./scripts/metadata.py \
--op "tests_and_seeds" \
--dir-metadata $(METADATA-DIR) $(new-metadata-file))
ts-dirs = $(foreach ts,$(tests-and-seeds),$(RUN-DIR)/$(ts)/)
test-asms = $(addsuffix test.S,$(ts-dirs))
test-bins = $(addsuffix test.bin,$(ts-dirs))
rtl-sim-logs = $(addsuffix $(rtl-sim-logfile),$(ts-dirs))
comp-results = $(addsuffix trr.yaml,$(ts-dirs))
rtl-sim-logfile := rtl_sim.log
###
INSTR-GEN-BUILD-STAMP = $(METADATA-DIR)/instr.gen.build.stamp
instr_gen_build: $(METADATA-DIR)/instr.gen.build.stamp
instr-gen-build-var-deps := SIMULATOR SIGNATURE_ADDR # Rebuild if these change
instr_gen_run: $(test-asms)
instr_gen_compile: $(test-bins)
TB-COMPILE-STAMP = $(METADATA-DIR)/tb.compile.stamp
rtl_tb_compile: $(METADATA-DIR)/tb.compile.stamp
rtl-tb-compile-var-deps := SIMULATOR COV WAVES COSIM # Rebuild if these change
rtl_sim_run: $(rtl-sim-logs)
check_logs: $(comp-results)
FCOV-STAMP = $(METADATA-DIR)/fcov.stamp
riscv_dv_fcov: $(METADATA-DIR)/fcov.stamp
MERGE-COV-STAMP = $(METADATA-DIR)/merge.cov.stamp
merge_cov: $(METADATA-DIR)/merge.cov.stamp
REGR-LOG-STAMP = $(METADATA-DIR)/regr.log.stamp
collect_results: $(METADATA-DIR)/regr.log.stamp
###############################################################################
# Other groups of files we may depend on are...
riscv-dv-files := $(shell find $(GEN_DIR) -type f)
# A variable containing a file list for the riscv-dv vendored-in module.
# Depending on these files gives a safe over-approximation that will ensure we
# rebuild things if that module changes.
all-verilog = \
$(shell find ../../../rtl -name '*.v' -o -name '*.sv' -o -name '*.svh') \
$(shell find ../.. -name '*.v' -o -name '*.sv' -o -name '*.svh')
all-cpp = \
$(shell find ../.. -name '*.cc' -o -name '*.h')
# The compiled ibex testbench (obviously!) also depends on the design and the
# DV code. The clever way of doing this would be to look at a dependency
# listing generated by the simulator as a side-effect of doing the compile (a
# bit like using the -M flags with a C compiler). Unfortunately, that doesn't
# look like it's particularly easy, so we'll just depend on every .v, .sv or
# .svh file in the dv or rtl directories. Note that this variable is set with
# '=', rather than ':='. This means that we don't bother running the find
# commands unless we need the compiled testbench.
# Define a variable that contains the output directories for all the
# test/seed combinations
ts-dirs := $(foreach ts,$(tests-and-seeds),$(RUN-DIR)/$(ts)/)
###############################################################################
###############################################################################
# Build the Random Instruction Generator
#
# This depends on the vendored in code in $(GEN_DIR). It also depends on the
###############################################################################
include util.mk # VARIABLE DUMPING UTILS
###############################################################################
######## EXAMPLE OF VARIABLE DUMPING ############
# This target depends on the vendored in code in $(GEN_DIR). It also depends on the
# values of the following Makefile variables (we want to regenerate things if,
# for example, the simulator changes).
instr-gen-build-var-deps := SIMULATOR SIGNATURE_ADDR
# To achieve this variable tracking, we dump each of the variables to a Makefile
# fragment and try to load it up the next time around. This done with the
# utility function "dump-vars" at the end of the recipe.
@ -194,7 +232,7 @@ instr-gen-build-var-deps := SIMULATOR SIGNATURE_ADDR
# First, load up the saved variable values from the last time around. If this
# fails, it's no problem: we'll assume that the previous run either doesn't
# exist or something went wrong.
ig-build-vars-path := $(BUILD-DIR)/.instr-gen.vars.mk
ig-build-vars-path := $(BUILD-DIR)/.instr_gen.vars.mk
-include $(ig-build-vars-path)
# Next, compare the current variables to those we just loaded. This uses the
@ -210,52 +248,38 @@ instr-gen-build-vars-prereq = \
$(instr-gen-build-var-deps))
# Finally, $(instr-gen-build-vars-prereq) becomes a dependency of our target.
################## END EXAMPLE ###################
riscv-dv-files := $(shell find $(GEN_DIR) -type f)
# A variable containing a file list for the riscv-dv vendored-in module.
# Depending on these files gives a safe over-approximation that will ensure we
# rebuild things if that module changes.
# Note that this is defined with ":=". As a result, we'll always run the find
# command exactly once. Wasteful if we're trying to make clean, but much better
# than running it for every target otherwise.
$(BUILD-DIR)/instr-gen/.compile.stamp: \
$(instr-gen-build-vars-prereq) \
$(riscv-dv-files) scripts/build-instr-gen.py | $(BUILD-DIR)
$(verb)scripts/build-instr-gen.py \
$(verb-arg) \
--simulator $(SIMULATOR) \
--ibex-config $(IBEX_CONFIG) \
--end-signature-addr $(SIGNATURE_ADDR) \
--output $(BUILD-DIR)/instr-gen
$(METADATA-DIR)/instr.gen.build.stamp: \
$(instr-gen-build-vars-prereq) $(riscv-dv-files) core_config \
scripts/build_instr_gen.py \
| $(BUILD-DIR)
@echo Building randomized test generator
$(verb)env PYTHONPATH=$(PYTHONPATH) \
scripts/build_instr_gen.py \
--dir-metadata $(METADATA-DIR)
$(call dump-vars,$(ig-build-vars-path),gen,$(instr-gen-build-var-deps))
@touch $@
.PHONY: instr_gen_build
instr_gen_build: $(BUILD-DIR)/instr-gen/.compile.stamp
###############################################################################
# Run the random instruction generator
#
test-asms := $(addsuffix test.S,$(ts-dirs))
# Make use of static-pattern rules
# https://www.gnu.org/software/make/manual/html_node/Static-Usage.html#Static-Usage
#
# targets …: target-pattern: prereq-patterns …
# recipe
# …
$(test-asms): \
$(RUN-DIR)/%/test.S: \
$(BUILD-DIR)/instr-gen/.compile.stamp \
$(TESTLIST) \
scripts/run-instr-gen.py
$(verb)scripts/run-instr-gen.py \
$(verb-arg) \
--simulator $(SIMULATOR) \
--end-signature-addr $(SIGNATURE_ADDR) \
--output-dir $(@D) \
--gen-build-dir $(BUILD-DIR)/instr-gen \
--ibex-config $(IBEX_CONFIG) \
$(INSTR-GEN-BUILD-STAMP) $(TESTLIST) scripts/run_instr_gen.py
@echo Running randomized test generator to create assembly file $@
$(verb)env PYTHONPATH=$(PYTHONPATH) \
scripts/run_instr_gen.py \
--dir-metadata $(METADATA-DIR) \
--test-dot-seed $*
.PHONY: instr_gen_run
instr_gen_run: $(test-asms)
###############################################################################
# Compile the generated assembly programs
#
@ -267,180 +291,102 @@ instr_gen_run: $(test-asms)
# uses the .bin. In the Makefile, we just track the .bin to represent
# both.
test-bins := $(addsuffix test.bin,$(ts-dirs))
$(test-bins): \
$(RUN-DIR)/%/test.bin: \
$(RUN-DIR)/%/test.S scripts/compile-generated-test.py
$(verb)scripts/compile-generated-test.py \
$(verb-arg) \
--input $(RUN-DIR)/$*/test.S \
--output $@ \
--ibex-config $(IBEX_CONFIG) \
$(RUN-DIR)/%/test.bin: $(RUN-DIR)/%/test.S \
scripts/compile_generated_test.py
@echo Compiling generated test assembly to create binary at $@
$(verb)env PYTHONPATH=$(PYTHONPATH) \
scripts/compile_generated_test.py \
--dir-metadata $(METADATA-DIR) \
--test-dot-seed $*
.PHONY: instr_gen_compile
instr_gen_compile: $(test-bins)
###############################################################################
# Run the instruction set simulator
#
# This (obviously) depends on having compiled the generated programs, so we
# don't have to worry about variables that affect the 'gen' stage. The only
# other variable that's going to affect things is the actual choice of ISS. We
# cheat and include it in the path.
iss-sim-logs := $(addsuffix $(ISS).log,$(ts-dirs))
$(iss-sim-logs): \
$(RUN-DIR)/%/$(ISS).log: \
$(RUN-DIR)/%/test.bin scripts/run-iss.py
$(verb)scripts/run-iss.py \
$(verb-arg) \
--ibex-config $(IBEX_CONFIG) \
--iss=$(ISS) \
--input=$(RUN-DIR)/$*/test.o \
--output=$@
.PHONY: iss_run
iss_run: $(iss-sim-logs)
###############################################################################
# Compile ibex core TB
#
# Note that this doesn't depend on the seed: the DUT doesn't depend on which
# test we're running!
#
# It does, however, depend on various variables. These are listed in
# tb-compile-var-deps. See the 'gen' stage for more verbose explanations of how
# the variable dumping works.
#
# The compiled ibex testbench (obviously!) also depends on the design and the
# DV code. The clever way of doing this would be to look at a dependency
# listing generated by the simulator as a side-effect of doing the compile (a
# bit like using the -M flags with a C compiler). Unfortunately, that doesn't
# look like it's particularly easy, so we'll just depend on every .v, .sv or
# .svh file in the dv or rtl directories. Note that this variable is set with
# '=', rather than ':='. This means that we don't bother running the find
# commands unless we need the compiled testbench.
all-verilog = \
$(shell find ../../../rtl -name '*.v' -o -name '*.sv' -o -name '*.svh') \
$(shell find ../.. -name '*.v' -o -name '*.sv' -o -name '*.svh')
tb-compile-var-deps := SIMULATOR COV WAVES COSIM
tb-compile-vars-path := $(BUILD-DIR)/.tb.vars.mk
-include $(tb-compile-vars-path)
tb-compile-vars-prereq = $(call vars-prereq,comp,compiling TB,$(tb-compile-var-deps))
tb-compile-vars-prereq = $(call vars-prereq,comp,compiling TB,$(rtl-tb-compile-var-deps))
$(BUILD-DIR)/tb/.compile.stamp: \
$(tb-compile-vars-prereq) $(all-verilog) $(risc-dv-files) \
scripts/compile-tb.py yaml/rtl_simulation.yaml \
$(METADATA-DIR)/tb.compile.stamp: \
$(tb-compile-vars-prereq) $(all-verilog) $(all-cpp) $(risc-dv-files) \
scripts/compile_tb.py yaml/rtl_simulation.yaml \
| $(BUILD-DIR)
$(verb)scripts/compile-tb.py \
$(verb-arg) \
--ibex-config $(IBEX_CONFIG) \
--output=$(BUILD-DIR)/tb \
--shared-cov-dir=$(RUN-DIR)/shared_cov \
--simulator=$(SIMULATOR) \
$(cov-arg) $(wave-arg) $(cosim-arg)
$(call dump-vars,$(tb-compile-vars-path),comp,$(tb-compile-var-deps))
@echo Building RTL testbench
$(verb)env PYTHONPATH=$(PYTHONPATH) \
scripts/compile_tb.py \
--dir-metadata $(METADATA-DIR)
$(call dump-vars,$(tb-compile-vars-path),comp,$(rtl-tb-compile-var-deps))
@touch $@
.PHONY: rtl_tb_compile
rtl_tb_compile: $(BUILD-DIR)/tb/.compile.stamp
###############################################################################
# Run ibex RTL simulation with generated programs
rtl-sim-logs := $(addsuffix rtl.log,$(ts-dirs))
# Run ibex RTL simulation with randomly-generated program and uvm stimulus
$(rtl-sim-logs): \
$(RUN-DIR)/%/rtl.log: \
$(BUILD-DIR)/tb/.compile.stamp $(RUN-DIR)/%/test.bin scripts/run-rtl.py
@echo Running RTL simulation at $@
$(verb)scripts/run-rtl.py \
--ibex-config $(IBEX_CONFIG) \
--simulator $(SIMULATOR) \
--shared-cov-dir=$(RUN-DIR)/shared_cov \
$(cov-arg) $(wave-arg) \
--signature-addr $(SIGNATURE_ADDR) \
--test-dot-seed $* \
--binary $(RUN-DIR)/$*/test.bin \
--rtl-sim-dir $(BUILD-DIR)/tb \
--out-dir $(@D)
.PHONY: rtl_sim_run
rtl_sim_run: $(rtl-sim-logs)
$(RUN-DIR)/%/$(rtl-sim-logfile): \
$(TB-COMPILE-STAMP) $(RUN-DIR)/%/test.bin scripts/run_rtl.py
@echo Running RTL simulation at $(@D)
$(verb)env PYTHONPATH=$(PYTHONPATH) \
scripts/run_rtl.py \
--dir-metadata $(METADATA-DIR) \
--test-dot-seed $*
###############################################################################
# Compare ISS and RTL sim results
#
# For a given TEST/SEED pair, the ISS and RTL logs appear at:
#
# $(RUN-DIR)/$(TEST).$(SEED)/$(ISS).log
# $(RUN-DIR)/$(TEST).$(SEED)/trace_core_00000000.log
#
# The comparison script compares these and writes to a result file at
#
# $(RUN-DIR)/$(TEST).$(SEED)/test-result.yml
comp-results := $(addsuffix test-result.yml,$(ts-dirs))
# Gather RTL sim results, and parse logs for errors
$(comp-results): \
$(RUN-DIR)/%/test-result.yml: \
$(RUN-DIR)/%/rtl.log compare.py
@echo Comparing traces for $*
$(verb)./compare.py \
--test-dot-seed $* \
--iss $(ISS) \
--iss-trace $(@D)/$(ISS).log \
--rtl-log $(@D)/rtl.log \
--rtl-trace $(@D)/trace_core_00000000.log \
$(cosim-arg) --cosim-trace $(@D)/spike_cosim.log \
--binary $(@D)/test.o \
--compare-log $(@D)/compare.log \
--output $@
$(RUN-DIR)/regr.log: collect_results.py $(comp-results)
@echo "Collecting up results (report at $@)"
$(verb)./collect_results.py -o $(@D) $(comp-results)
.PHONY: post_compare
post_compare: $(RUN-DIR)/regr.log
$(RUN-DIR)/%/trr.yaml: \
$(RUN-DIR)/%/$(rtl-sim-logfile) scripts/check_logs.py
@echo Collecting simulation results and checking logs of testcase at $@
$(verb)env PYTHONPATH=$(PYTHONPATH) \
scripts/check_logs.py \
--dir-metadata $(METADATA-DIR) \
--test-dot-seed $*
###############################################################################
# Generate RISCV-DV functional coverage
# TODO(udi) - add B extension
$(RUN-DIR)/fcov/.fcov.stamp: $(comp-results)
$(verb)python3 ${GEN_DIR}/cov.py \
--core ibex \
--dir $(RUN-DIR) \
-o $(RUN-DIR)/fcov \
--simulator $(SIMULATOR) \
--opts "--gen_timeout 1000" \
--isa rv32imcb \
--custom_target $(EXT_DIR)
@ # Bookkeeping
@touch $@
.PHONY: riscv_dv_fcov
riscv_dv_fcov: $(RUN-DIR)/fcov/.fcov.stamp
$(METADATA-DIR)/fcov.stamp: $(comp-results) \
scripts/get_fcov.py
@echo Generating RISCV_DV functional coverage
$(verb)env PYTHONPATH=$(PYTHONPATH) \
scripts/get_fcov.py \
--dir-metadata $(METADATA-DIR)
@touch $@
###############################################################################
# Merge all output coverage directories into the <out>/rtl_sim directory
#
# Merge all output coverage directories
# Any coverage databases generated from the riscv_dv_fcov target will be merged
# as well.
$(RUN-DIR)/coverage/.merge.stamp: \
$(RUN-DIR)/fcov/.fcov.stamp \
scripts/merge-cov.py
$(verb)scripts/merge-cov.py \
$(verb-arg) \
--working-dir=$(RUN-DIR) \
--simulator=$(SIMULATOR)
@ # Bookkeeping
$(METADATA-DIR)/merge.cov.stamp: $(FCOV-STAMP) \
scripts/merge_cov.py
@echo Merging all recorded coverage data into a single report
$(verb)env PYTHONPATH=$(PYTHONPATH) \
scripts/merge_cov.py \
--dir-metadata $(METADATA-DIR)
@touch $@
.PHONY: merge_cov
merge_cov: $(RUN-DIR)/coverage/.merge.stamp
###############################################################################
# Generate the summarized regression log
$(METADATA-DIR)/regr.log.stamp: scripts/collect_results.py $(comp-results)
@echo Collecting up results of tests into report regr.log
$(verb)env PYTHONPATH=$(PYTHONPATH) \
./scripts/collect_results.py \
--dir-metadata $(METADATA-DIR)
@touch $@
###############################################################################
# Extras (for convenience)
.PHONY: prettify
prettify:
@./scripts/prettify.sh
.PHONY: dump
dump:
@./scripts/objdump.sh

View File

View File

@ -1,223 +0,0 @@
#!/usr/bin/env python3
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
import argparse
import junit_xml
import os.path
import sys
import yaml
from scripts.test_run_result import (TestRunResult, test_run_result_fields,
check_test_run_result)
from typing import List, TextIO
def parse_test_run_result(path: str) -> TestRunResult:
    '''Load a TestRunResult from the YAML file at `path`.

    Raises RuntimeError if the file cannot be read, is not valid YAML, does
    not parse to a dictionary, does not contain exactly the expected set of
    fields, or contains fields of the wrong type.
    '''
    try:
        with open(path) as yaml_file:
            test_run_result_dict = yaml.load(yaml_file, Loader=yaml.SafeLoader)
        # A YAML file that parses to something other than a mapping (e.g. a
        # bare list or scalar) previously crashed with AttributeError on
        # .keys(); report it as a load error like the other failure modes.
        if not isinstance(test_run_result_dict, dict):
            raise RuntimeError(f'Error loading YAML at {path}: top-level '
                               'structure is not a dictionary')
        loaded_fields = test_run_result_dict.keys()
        if set(loaded_fields) != set(test_run_result_fields):
            raise RuntimeError(f'Error loading YAML at {path}: does not '
                               'contain the correct set of fields')

        trr = TestRunResult(**test_run_result_dict)
        try:
            check_test_run_result(trr)
        except AssertionError:
            raise RuntimeError(f'Error loading YAML at path {path}: '
                               'field types were incorrect')
        return trr
    except (IOError, yaml.YAMLError) as e:
        # Chain the underlying error so the original cause is preserved in
        # the traceback.
        raise RuntimeError(f'Error loading YAML at path {path}: {e}') from e
def build_broken_test_run_result(err: str) -> TestRunResult:
    '''Build a placeholder TestRunResult for a result file that could not be
    loaded at all, recording `err` as the failure message.

    Used by main() so that an unreadable/corrupt result YAML still shows up
    as a failing test in the regression report rather than crashing the
    collection step.
    '''
    return TestRunResult(
        name='unknown',
        idx=0,
        seed=0,
        binary=None,
        uvm_log=None,
        rtl_trace=None,
        rtl_trace_csv=None,
        iss_trace=None,
        iss_trace_csv=None,
        # cosim_trace was previously not populated here, but
        # gen_test_run_result_text reads it from every TestRunResult, so the
        # broken placeholder must set it too.
        cosim_trace=None,
        comparison_log=None,
        passed=False,
        failure_message=err
    )
def box_comment(line: str) -> str:
    '''Wrap `line` in a comment "box": a full-width rule of '#' characters
    above and below, with the line itself prefixed by '# '.'''
    rule = 80 * '#'
    return '{0}\n# {1}\n{0}'.format(rule, line)
def gen_summary_line(passing_tests: List[TestRunResult], failing_tests:
                     List[TestRunResult]) -> str:
    '''Generate a one-line string summarising test results.

    Reports the pass percentage and the absolute pass/fail counts.
    '''
    total_tests = len(passing_tests) + len(failing_tests)
    # Guard against an empty regression: the unconditional division raised
    # ZeroDivisionError when no results were collected at all.
    if total_tests == 0:
        pass_pct = 0.0
    else:
        pass_pct = (len(passing_tests) / total_tests) * 100
    return f'{pass_pct:0.2f}% PASS {len(passing_tests)} PASSED, ' \
           f'{len(failing_tests)} FAILED'
def gen_test_run_result_text(test_run_result: TestRunResult) -> str:
    '''Generate a string describing a TestRunResult.

    The string includes details of logs, binary run and the failure message
    if the test did not pass.'''
    trr = test_run_result
    header = f'{trr.name}.{trr.seed}'
    lines = [header, '-' * len(header)]

    # Optional artefacts: only the ones actually recorded are listed.
    artefacts = [
        ('Test binary', trr.binary),
        ('UVM log', trr.uvm_log),
        ('RTL trace', trr.rtl_trace),
        ('ISS trace', trr.iss_trace),
        ('cosim trace', trr.cosim_trace),
        ('Comparison log', trr.comparison_log),
    ]
    for label, value in artefacts:
        if value:
            lines.append(f'{label}: {value}')

    # Blank separator line, then either the pass marker or the failure text.
    lines.append('')
    lines.append('[PASSED]' if trr.passed else trr.failure_message)
    return '\n'.join(lines) + '\n'
def output_results_text(passing_tests: List[TestRunResult], failing_tests:
                        List[TestRunResult], dest: TextIO):
    '''Write results in text form to dest.

    Failing tests are detailed first, then passing tests, then the one-line
    summary.'''
    sections = [('Details of failing tests', failing_tests),
                ('Details of passing tests', passing_tests)]
    for title, tests in sections:
        if not tests:
            # Skip the boxed heading entirely when a section is empty.
            continue
        print(box_comment(title), file=dest)
        for trr in tests:
            print(gen_test_run_result_text(trr), file=dest)

    dest.write('\n')
    print(gen_summary_line(passing_tests, failing_tests), file=dest)
def output_run_results_junit_xml(passing_tests: List[TestRunResult],
                                 failing_tests: List[TestRunResult],
                                 junit_dest: TextIO,
                                 junit_merged_dest: TextIO):
    '''Write results to JUnit XML

    Two versions are produced: a normal version and a merged version. In the
    normal version there is a test suite per unique test name with a different
    test case per seed run. In the merged version there is a single test case
    under the test suite with information for the individual runs merged
    together. This is to aid use of the Azure Pipelines JUnit dashboard, which
    doesn't neatly handle the test suite/test case hierarchy
    '''
    # Map from test name to (cases, merged): `cases` is a list of
    # junit_xml.TestCase objects (one per seed run with that name) and
    # `merged` accumulates the stdout/failure text of all those runs.
    test_suite_info = {}
    for trr in passing_tests + failing_tests:
        fresh_merged = {'stdout': '', 'failures': ''}
        unmerged, merged = test_suite_info.setdefault(trr.name,
                                                      ([], fresh_merged))
        result_text = gen_test_run_result_text(trr)

        # One TestCase per run: stdout holds the run description, and a
        # failure entry is added when the run did not pass.
        case = junit_xml.TestCase(f'{trr.name}.{trr.seed}')
        case.stdout = result_text
        merged['stdout'] += result_text + '\n'
        if not trr.passed:
            case.add_failure_info(output=result_text)
            merged['failures'] += result_text
        unmerged.append(case)

    # The normal report: a suite per test name, a case per run.
    suites = [junit_xml.TestSuite(name, cases)
              for name, (cases, _) in test_suite_info.items()]
    junit_dest.write(junit_xml.to_xml_report_string(suites))

    # The merged report: a suite per test name with a single combined case.
    merged_suites = []
    for name, (_, merged_info) in test_suite_info.items():
        case = junit_xml.TestCase(name)
        case.stdout = merged_info['stdout']
        case.add_failure_info(output=merged_info['failures'])
        merged_suites.append(junit_xml.TestSuite(name, [case]))
    junit_merged_dest.write(junit_xml.to_xml_report_string(merged_suites))
def main() -> int:
    """Parse per-test result files and emit regression summary reports.

    Writes regr.log plus normal and merged JUnit XML files into the
    requested output directory, prints a one-line summary, and exits
    non-zero iff any test failed.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--output_dir', '-o', required=True)
    arg_parser.add_argument('test_run_result', nargs='*')
    args = arg_parser.parse_args()

    passing_tests = []
    failing_tests = []
    for path in args.test_run_result:
        try:
            result = parse_test_run_result(path)
            # Sort each parsed result into the pass or fail bucket.
            (passing_tests if result.passed else failing_tests).append(result)
        except RuntimeError as err:
            # A result file we couldn't parse counts as a failure.
            failing_tests.append(build_broken_test_run_result(str(err)))

    out_dir = args.output_dir
    regr_log_path = os.path.join(out_dir, 'regr.log')
    junit_xml_path = os.path.join(out_dir, 'regr_junit.xml')
    junit_xml_merged_path = os.path.join(out_dir, 'regr_junit_merged.xml')

    with open(regr_log_path, 'w', encoding='UTF-8') as outfile:
        output_results_text(passing_tests, failing_tests, outfile)

    with open(junit_xml_path, 'w', encoding='UTF-8') as junit_xml, \
         open(junit_xml_merged_path, 'w', encoding='UTF-8') as \
            junit_merged_xml:
        output_run_results_junit_xml(passing_tests, failing_tests, junit_xml,
                                     junit_merged_xml)

    print(gen_summary_line(passing_tests, failing_tests))

    # Exit status: 0 (success) only when no test failed.
    return 1 if failing_tests else 0
if __name__ == '__main__':
sys.exit(main())

View File

@ -1,278 +0,0 @@
#!/usr/bin/env python3
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
'''
A script to compare an ISS and RTL run to make sure nothing has diverged.
'''
import argparse
import os
import sys
from typing import Dict, Optional, TextIO, Tuple, Union
from scripts.scripts_lib import read_test_dot_seed
from scripts.test_entry import TestEntry, get_test_entry
from scripts.test_run_result import TestRunResult
_CORE_IBEX = os.path.normpath(os.path.join(os.path.dirname(__file__)))
_IBEX_ROOT = os.path.normpath(os.path.join(_CORE_IBEX, '../../..'))
_RISCV_DV_ROOT = os.path.join(_IBEX_ROOT, 'vendor/google_riscv-dv')
_OLD_SYS_PATH = sys.path
# Import riscv_trace_csv and lib from _DV_SCRIPTS before putting sys.path back
# as it started.
try:
sys.path = ([os.path.join(_CORE_IBEX, 'riscv_dv_extension'),
os.path.join(_RISCV_DV_ROOT, 'scripts')] +
sys.path)
from spike_log_to_trace_csv import process_spike_sim_log # type: ignore
from ovpsim_log_to_trace_csv import process_ovpsim_sim_log # type: ignore
from instr_trace_compare import compare_trace_csv # type: ignore
from ibex_log_to_trace_csv import (process_ibex_sim_log, # type: ignore
check_ibex_uvm_log)
finally:
sys.path = _OLD_SYS_PATH
_CompareResult = Tuple[bool, Optional[str], Dict[str, str]]
def compare_test_run(test: TestEntry,
                     seed: int,
                     iss: str,
                     rtl_log: str,
                     rtl_trace: str,
                     iss_trace: str,
                     en_cosim: bool,
                     cosim_trace: str,
                     binary: str,
                     compare_log: str) -> TestRunResult:
    '''Compare results for a single run of a single test

    Here, test is a dictionary describing the test (read from the testlist
    YAML file). seed is the seed that was run. iss is the chosen instruction
    set simulator (currently supported: spike and ovpsim).

    rtl_log is the log file generated by the RTL simulation. rtl_trace and
    iss_trace are the traces of instructions executed generated by the RTL
    simulation and ISS, respectively. This function generates CSV files at
    rtl_trace + '.csv' and iss_trace + '.csv'.

    binary is the path to an ELF file with the code that was executed.

    compare_log is the path where we should write a log describing the
    comparison operation.

    Returns a TestRunResult with a pass/fail flag, together with some
    information about the run (to be written to the log file).
    '''
    test_name = test['test']
    assert isinstance(test_name, str)

    # All the fields that end up in the returned TestRunResult. Fields only
    # known on some code paths start out as None and are filled in below.
    kv_data = {
        'name': test_name,
        'seed': seed,
        'binary': binary,
        'uvm_log': rtl_log,
        'rtl_trace': rtl_trace,
        'rtl_trace_csv': rtl_trace + '.csv',
        'iss_trace': None,
        'iss_trace_csv': None,
        'en_cosim': en_cosim,
        'cosim_trace': None,
        'cosim_trace_csv': None,
        'comparison_log': None,
        'passed': False,
        'failure_message': None
    }

    # Have a look at the UVM log.
    # Report a failure if an issue is seen in the log.
    try:
        uvm_pass, uvm_log_lines = check_ibex_uvm_log(rtl_log)
    except IOError as e:
        kv_data['failure_message'] = str(e)
        kv_data['failure_message'] += \
            '\n[FAILED] Could not open simulation log'
        return TestRunResult(**kv_data)

    if not uvm_pass:
        kv_data['failure_message'] = '\n'.join(uvm_log_lines)
        kv_data['failure_message'] += '\n[FAILED]: sim error seen'
        return TestRunResult(**kv_data)

    # Both the cosim and non-cosim flows produce a trace from the
    # ibex_tracer, so process that file for errors.
    try:
        # Convert the RTL log file to a trace CSV.
        process_ibex_sim_log(kv_data['rtl_trace'],
                             kv_data['rtl_trace_csv'])
    except (OSError, RuntimeError) as e:
        kv_data['failure_message'] = \
            '[FAILED]: Log processing failed: {}'.format(e)
        return TestRunResult(**kv_data)

    if en_cosim and not test.get('ignore_cosim_log', False):
        # Process the cosim logfile to check for errors
        kv_data['cosim_trace'] = cosim_trace
        kv_data['cosim_trace_csv'] = cosim_trace + '.csv'
        try:
            if iss == "spike":
                process_spike_sim_log(kv_data['cosim_trace'],
                                      kv_data['cosim_trace_csv'])
            else:
                raise RuntimeError('Unsupported simulator for cosim')
        except (OSError, RuntimeError) as e:
            kv_data['failure_message'] = \
                '[FAILED]: Log processing failed: {}'.format(e)
            return TestRunResult(**kv_data)
        # The comparison has already passed, since we passed the simulation
        # (the cosim flow compares traces at simulation time).
        kv_data['passed'] = True
        return TestRunResult(**kv_data)
    else:
        # no_post_compare skips the final ISS v RTL log check, so if we've
        # reached here we're done when no_post_compare is set.
        no_post_compare = test.get('no_post_compare', False)
        assert isinstance(no_post_compare, bool)
        if no_post_compare:
            kv_data['passed'] = True
            return TestRunResult(**kv_data)

        # There were no UVM errors. Process the log file from the ISS. Note
        # that the filename is a bit odd-looking ('-csv' rather than
        # '.csv'). This is silly, but it ensures that riscv-dv's cov.py
        # script won't pick it up for architectural coverage.
        kv_data['iss_trace'] = iss_trace
        kv_data['iss_trace_csv'] = iss_trace + '-csv'
        try:
            if iss == "spike":
                process_spike_sim_log(kv_data['iss_trace'],
                                      kv_data['iss_trace_csv'])
            else:
                assert iss == 'ovpsim'  # (should be checked by argparse)
                process_ovpsim_sim_log(kv_data['iss_trace'],
                                       kv_data['iss_trace_csv'])
        except (OSError, RuntimeError) as e:
            kv_data['failure_message'] = \
                '[FAILED]: Log processing failed: {}'.format(e)
            return TestRunResult(**kv_data)

        kv_data['comparison_log'] = compare_log

        # Delete any existing file at compare_log (the compare_trace_csv
        # function would append to it, which is rather confusing).
        try:
            os.remove(compare_log)
        except FileNotFoundError:
            pass

        compare_result = \
            compare_trace_csv(kv_data['rtl_trace_csv'],
                              kv_data['iss_trace_csv'],
                              "ibex", iss, compare_log,
                              **test.get('compare_opts', {}))

        # Read back the comparison log so it can be embedded in the failure
        # message if the comparison failed.
        try:
            compare_log_file = open(compare_log)
            compare_log_contents = compare_log_file.read()
            compare_log_file.close()
        except IOError as e:
            kv_data['failure_message'] = \
                '[FAILED]: Could not read compare log: {}'.format(e)
            return TestRunResult(**kv_data)

        # Rather oddly, compare_result is a string. The comparison passed if
        # it starts with '[PASSED]: ' and failed otherwise.
        compare_passed = compare_result.startswith('[PASSED]: ')
        kv_data['passed'] = compare_passed
        if not compare_passed:
            assert compare_result.startswith('[FAILED]: ')
            kv_data['failure_message'] = ('RTL / ISS trace comparison failed\n' +
                                          compare_log_contents)
            return TestRunResult(**kv_data)

        return TestRunResult(**kv_data)
# If a string value contains any of these characters, emit it in multi-line
# mode: either it holds real newlines, or characters that would otherwise
# need escaping in yaml.
_YAML_MULTILINE_CHARS = ['[', ']', ':', "'", '"', '\n']


def yaml_format(val: Union[int, str, bool]) -> str:
    '''Format a value for yaml output.

    int, str and bool values are simply converted with str(). None becomes
    the empty string. Strings containing newlines or yaml-special
    characters are rendered as a '|-' multi-line block scalar.
    '''
    if isinstance(val, str) and any(c in val for c in _YAML_MULTILINE_CHARS):
        # '|-' block scalar: each line of the value indented by two spaces.
        body = '\n'.join('  ' + part for part in val.split('\n'))
        return '|-\n' + body
    return '' if val is None else str(val)
def on_result(result: TestRunResult, output: TextIO) -> None:
    """Write every field of `result` to `output` as aligned 'key: value' lines."""
    fields = result._asdict()
    # Width of the widest key (at least 1) so the values line up in a column.
    width = max([1] + [len(key) for key in fields])
    for key, value in fields.items():
        padding = ' ' * (width - len(key))
        output.write(f'{key}:{padding} {yaml_format(value)}\n')
def main() -> int:
    """Compare one RTL run against its ISS run and write the result file."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--test-dot-seed',
                        type=read_test_dot_seed,
                        required=True)
    parser.add_argument('--iss', required=True, choices=['spike', 'ovpsim'])
    parser.add_argument('--iss-trace', required=True)
    parser.add_argument('--rtl-log', required=True)
    parser.add_argument('--rtl-trace', required=True)
    parser.add_argument('--en_cosim', required=False, action='store_true')
    parser.add_argument('--cosim-trace', required=True)
    parser.add_argument('--binary', required=True)
    parser.add_argument('--compare-log', required=True)
    parser.add_argument('--output', required=True)
    args = parser.parse_args()

    test_name, seed = args.test_dot_seed
    test_entry = get_test_entry(test_name)

    outcome = compare_test_run(test_entry, seed, args.iss,
                               args.rtl_log,
                               args.rtl_trace, args.iss_trace,
                               args.en_cosim, args.cosim_trace,
                               args.binary, args.compare_log)

    with open(args.output, 'w', encoding='UTF-8') as result_file:
        on_result(outcome, result_file)

    # Return success unconditionally, even if the test failed: a comparison
    # log was produced either way, and a non-zero status would stop Make
    # from gathering them all up for us.
    return 0
if __name__ == '__main__':
sys.exit(main())

View File

@ -1,164 +0,0 @@
#!/usr/bin/env python3
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
import argparse
import os
import sys
from typing import Dict, List, Optional, Tuple
_CORE_IBEX = os.path.normpath(os.path.join(os.path.dirname(__file__)))
_IBEX_ROOT = os.path.normpath(os.path.join(_CORE_IBEX, '../../..'))
_RISCV_DV_ROOT = os.path.join(_IBEX_ROOT, 'vendor/google_riscv-dv')
_OLD_SYS_PATH = sys.path
# Import riscv_trace_csv and lib from _DV_SCRIPTS before putting sys.path back
# as it started.
try:
sys.path = ([os.path.join(_CORE_IBEX, 'riscv_dv_extension'),
os.path.join(_IBEX_ROOT, 'util'),
os.path.join(_RISCV_DV_ROOT, 'scripts')] +
sys.path)
from lib import process_regression_list # type: ignore
from ibex_config import parse_config # type: ignore
finally:
sys.path = _OLD_SYS_PATH
_TestEntry = Dict[str, object]
_TestEntries = List[_TestEntry]
def filter_tests_by_config(cfg: str, test_list: _TestEntries) -> _TestEntries:
    '''Drop tests whose required RTL parameters the config doesn't provide.

    Each test entry may carry an 'rtl_params' dict of parameter
    requirements. A test is kept only if every required parameter matches
    the value (or one of the listed values, for enum-style parameters) in
    the chosen core config. This lets regressions run sensibly against
    different configs (useful for CI flows).

    Arguments:
        cfg:        name of the ibex config being tested; must match a
                    config name from ibex_configs.yaml.
        test_list:  test entry objects parsed from the YAML testlist.

    Returns:
        The subset of test_list compatible with the config. E.g. for the
        "small" config, tests needing B-extension or PMP parameters are
        removed.

    Raises:
        ValueError: if a test names a parameter the config doesn't define
        (typos, wrong parameter names, etc.).
    '''
    config = parse_config(cfg, os.path.join(_IBEX_ROOT, "ibex_configs.yaml"))

    def params_match(param_dict: dict) -> bool:
        # True iff every required parameter is satisfied by the config.
        for p, p_val in param_dict.items():
            config_val = config.params.get(p, None)
            if config_val is None:
                raise ValueError('Parameter {} not found in config {}'
                                 .format(p, cfg))
            # Enum-style parameters may list several legal values (e.g.
            # bitmanipulation tests); a scalar must match exactly.
            if isinstance(p_val, list):
                if config_val not in p_val:
                    return False
            elif p_val != config_val:
                return False
        return True

    kept = []
    for entry in test_list:
        if "rtl_params" in entry:
            required = entry['rtl_params']
            assert isinstance(required, dict)
            if not params_match(required):
                continue
        kept.append(entry)
    return kept
def get_tests_and_counts(ibex_config: str,
                         test: Optional[str],
                         iterations: Optional[int]) -> List[Tuple[str, int]]:
    '''Return (test name, iteration count) pairs to run.

    ibex_config is the name of the Ibex configuration to be tested.

    test, if given, narrows to a test or tests (comma separated); the
    special name "all" selects every test. iterations, if given, must be
    positive and overrides each test's own iteration count.

    Raises ValueError for a non-positive iterations argument and
    RuntimeError if no test in the testlist matches.
    '''
    if iterations is not None and iterations <= 0:
        raise ValueError('iterations should be positive if set')

    rv_test = 'all' if test is None else test
    rv_iterations = iterations or 0

    # Ask riscv-dv for every test matching the test argument, scaled by the
    # iterations argument where given.
    matched_list: _TestEntries = []
    testlist = os.path.join(_CORE_IBEX, 'riscv_dv_extension', 'testlist.yaml')
    process_regression_list(testlist, rv_test, rv_iterations,
                            matched_list, _RISCV_DV_ROOT)
    if not matched_list:
        raise RuntimeError("Cannot find {} in {}".format(test, testlist))

    # Drop tests the chosen configuration can't support.
    matched_list = filter_tests_by_config(ibex_config, matched_list)

    # Convert to the output format, checking well-formedness as we go.
    pairs = []
    for entry in matched_list:
        name = entry['test']
        count = entry['iterations']
        assert isinstance(name, str) and isinstance(count, int)
        assert count > 0
        pairs.append((name, count))
    return pairs
def main() -> int:
    """Print one 'testname.seed' line per planned test run to stdout."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--start_seed', type=int, default=1)
    parser.add_argument('--test', required=True)
    parser.add_argument('--iterations', type=int)
    parser.add_argument('--ibex-config', required=True)
    args = parser.parse_args()

    # Validate numeric arguments before doing any work.
    if args.iterations is not None and args.iterations <= 0:
        raise RuntimeError('Bad --iterations argument: must be positive')
    if args.start_seed < 0:
        raise RuntimeError('Bad --start_seed argument: must be non-negative')

    for name, count in get_tests_and_counts(args.ibex_config,
                                            args.test,
                                            args.iterations):
        # Seeds are consecutive, starting at --start_seed.
        for idx in range(count):
            print('{}.{}'.format(name, args.start_seed + idx))

    return 0
if __name__ == '__main__':
sys.exit(main())

View File

@ -24,6 +24,7 @@ try:
get_imm_hex_val)
from lib import RET_FATAL, gpr_to_abi, sint_to_hex, convert_pseudo_instr
import logging
logger = logging.getLogger(__name__)
finally:
sys.path = _OLD_SYS_PATH
@ -193,6 +194,7 @@ def check_ibex_uvm_log(uvm_log):
passed = False
failed = False
error_linenum = None
log_out = []
with open(uvm_log, "r") as log:
@ -204,10 +206,12 @@ def check_ibex_uvm_log(uvm_log):
# (erronously) repeated multiple times with different results.
test_result_seen = False
for line in log:
if ('UVM_ERROR' in line or 'UVM_FATAL' in line or 'Error' in line) \
for linenum, line in enumerate(log, 1):
if ('UVM_ERROR' in line or
'UVM_FATAL' in line or
'Error' in line) \
and not test_result_seen:
log_out.append(line.strip())
error_linenum = linenum
failed = True
if 'RISC-V UVM TEST PASSED' in line:
@ -219,10 +223,20 @@ def check_ibex_uvm_log(uvm_log):
failed = True
break
# If we saw PASSED and FAILED, that's a bit odd. But we should treat the
# test as having failed.
if failed:
passed = False
if failed:
# If we saw PASSED and FAILED, that's a bit odd. But we should treat the
# test as having failed.
passed = False
# If we know where the line marking the error is ... :
# - Extract a useful subset of log lines for a short summary of the error
# (-5, +5 lines around the detected error line above)
if error_linenum is not None:
log.seek(0) # Needed to enumerate( over) the log a second time.
log_out = ["{0}{1}: {2}".format("[E] " if (linenum == error_linenum) else
" ",
linenum, line.strip())
for linenum, line in enumerate(log, 1)
if linenum in range(error_linenum-5, error_linenum+5)]
return (passed, log_out)

View File

@ -1,51 +0,0 @@
#!/usr/bin/env python3
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
import argparse
import os
import shutil
import sys
from scripts_lib import (run_one, start_riscv_dv_run_cmd,
get_config, get_isas_for_config)
def main() -> int:
    """Build the riscv-dv instruction generator for the chosen simulator."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--verbose', action='store_true')
    parser.add_argument('--simulator', required=True)
    parser.add_argument('--end-signature-addr', required=True)
    parser.add_argument('--output', required=True)
    parser.add_argument('--ibex-config', required=True)
    args = parser.parse_args()

    # Start from a clean output directory to ensure a fresh build, then
    # recreate it so that the log file can be written there from the outset.
    try:
        shutil.rmtree(args.output)
    except FileNotFoundError:
        pass
    os.makedirs(args.output, exist_ok=True)

    cfg = get_config(args.ibex_config)
    isa, iss_isa = get_isas_for_config(cfg)

    build_cmd = (start_riscv_dv_run_cmd(args.verbose) +
                 ['--co', '--steps=gen',
                  '--simulator', args.simulator,
                  '--output', args.output,
                  '--isa', isa,
                  '--end_signature_addr', args.end_signature_addr])

    log_path = os.path.join(args.output, 'build.log')
    return run_one(args.verbose, build_cmd, redirect_stdstreams=log_path)
if __name__ == '__main__':
sys.exit(main())

View File

@ -0,0 +1,53 @@
#!/usr/bin/env python3
"""Build the random instruction generator, if it requires building."""
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
import argparse
import os
import shutil
import sys
import pathlib
from scripts_lib import run_one, format_to_cmd
import riscvdv_interface
from metadata import RegressionMetadata, LockedMetadata
import logging
logger = logging.getLogger(__name__)
def _main() -> int:
    """Build the instruction generator, recording the build in the metadata."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--dir-metadata', type=pathlib.Path, required=True)
    args = parser.parse_args()

    with LockedMetadata(args.dir_metadata, __file__) as md:
        # Start from a clean output directory to ensure a fresh build, then
        # recreate it so the log file can be written there from the outset.
        try:
            shutil.rmtree(md.dir_instruction_generator)
        except FileNotFoundError:
            pass
        md.dir_instruction_generator.mkdir(exist_ok=True, parents=True)

        md.riscvdv_build_stdout = \
            md.dir_instruction_generator / 'build_stdout.log'
        md.riscvdv_build_cmds = [format_to_cmd(
            riscvdv_interface.get_run_cmd(md.verbose) +
            ['--co', '--steps=gen',
             '--simulator', md.simulator,
             '--output', md.dir_instruction_generator,
             '--isa', md.isa_ibex,
             '--end_signature_addr', md.signature_addr])]

    # Run the (single) build command, teeing its output to the logfile
    # recorded in the metadata above.
    return run_one(md.verbose,
                   md.riscvdv_build_cmds[0],
                   redirect_stdstreams=md.riscvdv_build_stdout)
if __name__ == '__main__':
sys.exit(_main())

View File

@ -0,0 +1,99 @@
#!/usr/bin/env python3
"""
Collect all the logfiles for a single test, and check for errors.
Pass/fail criteria is determined by any errors found.
"""
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
import argparse
import sys
import pathlib
from test_entry import read_test_dot_seed
from test_run_result import TestRunResult
from spike_log_to_trace_csv import process_spike_sim_log # type: ignore
from ibex_log_to_trace_csv import (process_ibex_sim_log, # type: ignore
check_ibex_uvm_log)
import logging
logger = logging.getLogger(__name__)
def compare_test_run(trr: TestRunResult) -> TestRunResult:
    """Check the logs of one test run and record its pass/fail status.

    Runs every applicable log-processing script over the run's logfiles;
    any error found marks the run as failed. The (possibly mutated) trr is
    returned.
    """
    def fail(message: str) -> TestRunResult:
        # Mark the run failed with the given message and hand it back.
        trr.passed = False
        trr.failure_message = message
        return trr

    # Step 1: look for errors reported in the UVM simulation log.
    try:
        logger.debug(f"About to do Log processing: {trr.rtl_log}")
        uvm_pass, uvm_log_lines = check_ibex_uvm_log(trr.rtl_log)
    except IOError as e:
        return fail(f"[FAILED] Could not open simulation log: {e}\n")

    if not uvm_pass:
        # NOTE: matches the prior behaviour -- only failure_message is set
        # here; trr.passed keeps whatever value it already had.
        trr.failure_message = f"\n[FAILURE]: sim error seen in '{trr.rtl_log.name}'\n"
        if uvm_log_lines:
            trr.failure_message += \
                "---------------*LOG-EXTRACT*----------------\n" + \
                "\n".join(uvm_log_lines) + "\n" + \
                "--------------------------------------------\n"
        return trr

    # Step 2: the ibex_tracer trace exists in both the cosim and non-cosim
    # flows, so convert it to a CSV (which also checks it for errors).
    try:
        logger.debug(f"About to do Log processing: {trr.rtl_trace}")
        process_ibex_sim_log(trr.rtl_trace, trr.dir_test/'rtl_trace.csv')
    except (OSError, RuntimeError) as e:
        return fail(f"[FAILED]: Log processing failed: {e}")

    # Step 3: process the cosim logfile to check for errors.
    try:
        if trr.iss_cosim == "spike":
            process_spike_sim_log(trr.iss_cosim_trace,
                                  trr.dir_test/'cosim_trace.csv')
        else:
            raise RuntimeError('Unsupported simulator for cosim')
    except (OSError, RuntimeError) as e:
        return fail(f"[FAILED]: Log processing failed: {e}")

    # Trace mismatches trigger a uvm_fatal() at runtime, so getting this
    # far means the test has passed.
    trr.passed = True
    return trr
def _main() -> int:
    """Load one test's metadata, check its logs, and save the verdict."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--dir-metadata',
                        type=pathlib.Path, required=True)
    parser.add_argument('--test-dot-seed',
                        type=read_test_dot_seed, required=True)
    args = parser.parse_args()
    testname, seed = args.test_dot_seed

    trr = TestRunResult.construct_from_metadata_dir(args.dir_metadata,
                                                    f"{testname}.{seed}")
    trr = compare_test_run(trr)
    trr.export(write_yaml=True)

    # Always exit successfully, even for a failing test: a comparison log
    # was generated either way, and we don't want to stop Make from
    # gathering them all up for us.
    return 0
if __name__ == '__main__':
sys.exit(_main())

View File

@ -0,0 +1,212 @@
#!/usr/bin/env python3
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
import argparse
import junit_xml
import sys
import io
import pathlib
from pathlib import Path
import dataclasses
from metadata import RegressionMetadata, LockedMetadata
from test_run_result import TestRunResult
import scripts_lib as ibex_lib
from typing import List, TextIO
import logging
logger = logging.getLogger(__name__)
def box_comment(line: str) -> str:
    """Return `line` as a comment framed by 80-character '#' rules."""
    rule = 80 * '#'
    return '\n'.join([rule, '# ' + line, rule])
def gen_summary_line(passing_tests: List['TestRunResult'], failing_tests:
                     List['TestRunResult']) -> str:
    '''Generate a string summarising test results.

    Only the lengths of the two lists are used. Returns a line like
    '75.00% PASS 3 PASSED, 1 FAILED'.
    '''
    total_tests = len(passing_tests) + len(failing_tests)
    # Fix: guard against an empty regression -- the previous expression
    # divided by zero when no tests ran at all. With zero tests nothing
    # failed, so report 100%.
    pass_pct = 100.0 if total_tests == 0 \
        else (len(passing_tests) / total_tests) * 100
    return f'{pass_pct:0.2f}% PASS {len(passing_tests)} PASSED, ' \
           f'{len(failing_tests)} FAILED'
def gen_test_run_result_text(trr: TestRunResult) -> str:
    '''Render one TestRunResult as a human-readable text section.

    The section lists the key artefact paths (shown relative to the test
    directory for readability) and ends with '[PASSED]' or the failure
    message.
    '''
    header = f'{trr.testname}.{trr.seed}'
    info_lines: List[str] = [header, '-' * len(header)]

    # Pick out the most useful fields and shorten their paths.
    # NOTE(review): assumes each of these fields is a path under
    # trr.dir_test by this point -- confirm for tests that failed very
    # early in the flow.
    interesting = ('binary', 'rtl_log', 'rtl_trace', 'iss_cosim_trace')
    shortpaths = {field: str(value.relative_to(trr.dir_test))
                  for field, value in dataclasses.asdict(trr).items()
                  if field in interesting}

    printable = ibex_lib.format_dict_to_printable_dict(shortpaths)
    buf = io.StringIO()
    ibex_lib.pprint_dict(printable, buf)
    buf.seek(0)
    info_lines.extend(line.strip('\n') for line in buf.readlines())

    if trr.passed:
        info_lines.append('\n[PASSED]')
    else:
        info_lines.append(str(trr.failure_message))

    return '\n' + '\n'.join(info_lines) + '\n'
def output_results_text(passing_tests: List[TestRunResult],
                        failing_tests: List[TestRunResult],
                        dest: TextIO):
    '''Write a text report to dest: failing tests first, then passing.'''
    sections = [('Details of failing tests', failing_tests,
                 "No failing tests. Nice job!"),
                ('Details of passing tests', passing_tests,
                 "No passing tests. Hmmmm...")]
    for title, tests, empty_message in sections:
        print(box_comment(title), file=dest)
        if not tests:
            print(empty_message, file=dest)
        for trr in tests:
            print(gen_test_run_result_text(trr), file=dest)
def output_run_results_junit_xml(passing_tests: List[TestRunResult],
                                 failing_tests: List[TestRunResult],
                                 junit_dest: TextIO,
                                 junit_merged_dest: TextIO):
    '''Write results to JUnit XML.

    Two reports are produced. The normal one has a test suite per unique
    test name with one test case per seed. The merged one collapses each
    suite into a single test case whose output concatenates the individual
    runs -- this helps the Azure Pipelines JUnit dashboard, which doesn't
    neatly handle the test suite/test case hierarchy.
    '''
    # Map test name -> (per-seed TestCase list, merged stdout/failure text).
    suites = {}
    for trr in passing_tests + failing_tests:
        cases, merged = suites.setdefault(trr.testname,
                                          ([], {'stdout': '',
                                                'failures': ''}))
        text = gen_test_run_result_text(trr)

        # One test case per seed; its stdout describes the run, and the
        # same text is recorded as a failure when the run failed.
        case = junit_xml.TestCase(f'{trr.testname}.{trr.seed}')
        case.stdout = text
        merged['stdout'] += text + '\n'
        if not trr.passed:
            case.add_failure_info(output=text)
            merged['failures'] += text
        cases.append(case)

    # Normal report: suite per test name, case per seed.
    junit_dest.write(junit_xml.to_xml_report_string(
        [junit_xml.TestSuite(name, cases)
         for name, (cases, _) in suites.items()]))

    # Merged report: a single combined case per suite.
    merged_suites = []
    for name, (_, merged) in suites.items():
        case = junit_xml.TestCase(name)
        case.stdout = merged['stdout']
        case.add_failure_info(output=merged['failures'])
        merged_suites.append(junit_xml.TestSuite(name, [case]))
    junit_merged_dest.write(junit_xml.to_xml_report_string(merged_suites))
def main() -> int:
    """Collect all test results into summary files.

    Locate all the individual test results, and combine them into higher
    level summaries (JUnit XML and a custom regr.log), while parsing for
    errors and other salient information. Returns non-zero iff any test
    failed.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--dir-metadata',
                        type=pathlib.Path, required=True)
    args = parser.parse_args()

    with LockedMetadata(args.dir_metadata, __file__) as md:
        summary_dict = {}
        passing_tests = []
        failing_tests = []
        for f in md.tests_pickle_files:
            try:
                trr = TestRunResult.construct_from_pickle(f)
                summary_dict[f"{trr.testname}.{trr.seed}"] = \
                    ('PASS' if trr.passed else 'FAILED')
                if trr.passed:
                    passing_tests.append(trr)
                else:
                    failing_tests.append(trr)
            except RuntimeError as e:
                # Record a placeholder result for a pickle we couldn't read.
                # Fix: the field is called 'testname' (see the accesses
                # above), not 'name' -- the old keyword raised a TypeError
                # here and masked the original error.
                failing_tests.append(
                    TestRunResult(
                        testname='broken_test',
                        failure_message=str(e)
                    ))

        md.regr_log = md.dir_run/'regr.log'
        md.regr_log_junit = md.dir_run/'regr_junit.xml'
        md.regr_log_junit_merged = md.dir_run/'regr_junit_merged.xml'

        # Write results as junit_xml. (Handles renamed from 'junit_xml' /
        # 'junit_merged_xml' so they no longer shadow the junit_xml module.)
        with open(md.regr_log_junit,
                  'w',
                  encoding='UTF-8') as junit_f, \
             open(md.regr_log_junit_merged,
                  'w',
                  encoding='UTF-8') as junit_merged_f:
            output_run_results_junit_xml(passing_tests, failing_tests,
                                         junit_f,
                                         junit_merged_f)

        # Write results as regr.log (custom logfile format)
        with open(md.regr_log, 'w', encoding='UTF-8') as outfile:
            # Print a summary line right at the top of the file
            outfile.write(gen_summary_line(passing_tests, failing_tests))
            outfile.write('\n')
            # Print a short TEST.SEED PASS/FAILED summary
            summary_yaml = io.StringIO()
            ibex_lib.pprint_dict(summary_dict, summary_yaml)
            outfile.write(summary_yaml.getvalue())
            outfile.write('\n')
            # Print a longer summary with some more information
            output_results_text(passing_tests, failing_tests, outfile)

    # Print a summary line to the terminal
    print(gen_summary_line(passing_tests, failing_tests))

    # Succeed (exit 0) only if no tests failed
    return 1 if failing_tests else 0
if __name__ == '__main__':
sys.exit(main())

View File

@ -1,101 +0,0 @@
#!/usr/bin/env python3
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
import argparse
import os
import shlex
import sys
import tempfile
from scripts_lib import (read_test_dot_seed, start_riscv_dv_run_cmd,
get_config, get_isas_for_config, run_one)
def main() -> int:
    """Compile one generated test program into a .bin via riscv-dv's commands.

    Asks riscv-dv for the gcc_compile commands it would run (against a
    placeholder path), rewrites their paths to point at our input/output
    files, then executes the rewritten commands.

    Returns 0 on success, or the exit code of the first failing command.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--verbose', action='store_true')
    parser.add_argument('--input', required=True)
    parser.add_argument('--output', required=True)
    parser.add_argument('--ibex-config', required=True)
    parser.add_argument('--test-dot-seed',
                        type=read_test_dot_seed, required=True)
    args = parser.parse_args()

    cfg = get_config(args.ibex_config)
    isa, iss_isa = get_isas_for_config(cfg)

    testname, seed = args.test_dot_seed

    if not args.output.endswith('.bin'):
        raise RuntimeError("Output argument must end with .bin: "
                           f"got {args.output!r}")
    out_base = args.output[:-4]
    out_riscv_dv_path = os.path.join(os.path.dirname(args.output),
                                     'compile.riscv-dv.log')
    out_obj_path = out_base + '.o'

    # Run riscv-dv to get a list of commands that it would run to try to
    # compile and convert the files in question. These will need some
    # massaging to match our paths, but we can't generate the commands by
    # hand because there are several test-specific options that might appear.
    with tempfile.TemporaryDirectory() as td:
        placeholder = os.path.join(td, '@@PLACEHOLDER@@')
        orig_list = os.path.join(td, 'orig-cmds.list')

        dv_ret = run_one(False,
                         start_riscv_dv_run_cmd(args.verbose) +
                         ['--verbose',
                          '--output', placeholder,
                          '--steps=gcc_compile',
                          '--test', testname,
                          '--start_seed', str(seed),
                          '--iterations', '1',
                          '--isa', isa,
                          '--debug', orig_list],
                         redirect_stdstreams=out_riscv_dv_path)
        if dv_ret:
            return dv_ret

        orig_cmds = []
        with open(orig_list) as orig_file:
            for line in orig_file:
                line = line.strip()
                if not line:
                    continue
                orig_cmds.append(shlex.split(line))

        # Do the massaging. We intentionally used "@@PLACEHOLDER@@" as a
        # path in our call to riscv-dv, which should let us find all the
        # things that matter easily.
        rewrites = [
            (f"{placeholder}/asm_test/{testname}_0.S", args.input),
            (f"{placeholder}/asm_test/{testname}_0.o", out_obj_path),
            (f"{placeholder}/asm_test/{testname}_0.bin", args.output)
        ]

        new_cmds = []
        for cmd in orig_cmds:
            new_cmd = []
            for word in cmd:
                for old, new in rewrites:
                    word = word.replace(old, new)
                if placeholder in word:
                    raise RuntimeError("Couldn't replace every copy of "
                                       f"placeholder in {cmd}")
                new_cmd.append(word)
            new_cmds.append(new_cmd)

        # Finally, run all the commands
        for cmd in new_cmds:
            ret = run_one(args.verbose, cmd)
            if ret != 0:
                return ret

    # Fix: the function previously fell off the end and implicitly returned
    # None on success, despite the '-> int' annotation (sys.exit(None)
    # happened to mean success). Be explicit.
    return 0
if __name__ == '__main__':
sys.exit(main())

View File

@ -1,104 +0,0 @@
#!/usr/bin/env python3
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
import argparse
import os
import sys
import subprocess
from ibex_cmd import get_compile_opts
from scripts_lib import THIS_DIR, run_one, subst_vars
from sim_cmd import get_simulator_cmd
def _get_iss_pkgconfig_flags(specifiers, iss_pc, simulator):
    """Query pkg-config and return the flags needed to build against the ISS.

    specifiers: pkg-config query flags (e.g. ['--cflags'] or ['--libs-only-l'])
    iss_pc:     pkg-config package names describing the ISS
    simulator:  RTL simulator name; 'xlm' needs its linker flags rewritten
    """
    query = ['pkg-config'] + specifiers + iss_pc
    flags = subprocess.check_output(args=query, universal_newlines=True).strip()
    # See xcelium documentation for the -Wld syntax for passing flags to the
    # linker. Passing -rpath,<path> options is tricky because commas are
    # parsed strangely between xrun and the xmsc tool, and it's easy for the
    # options to arrive malformed. Rewrite to a quoted -Xlinker form to get
    # them through. ('-Wl' should only ever appear in the LDFLAGS query.)
    if simulator == 'xlm' and '-Wl' in flags:
        flags = "'-Xlinker {}'".format(flags.replace('-Wl,', ''))
    return flags
def main() -> int:
    """Compile the RTL testbench for the selected simulator.

    Builds a substitution dictionary (output dir, compile options and --
    when co-simulating -- ISS compiler/linker flags from pkg-config), then
    runs each templated compile command for the chosen simulator.

    Returns the first non-zero command exit code, or 0 on success.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--verbose', action='store_true')
    parser.add_argument('--ibex-config', required=True)
    parser.add_argument('--output', required=True)
    parser.add_argument('--shared-cov-dir', required=True)
    parser.add_argument('--simulator', required=True)
    parser.add_argument('--en_cov', action='store_true')
    parser.add_argument('--en_wave', action='store_true')
    parser.add_argument('--en_cosim', action='store_true')
    args = parser.parse_args()

    # Fail fast with a clear message if required environment is missing
    # (these are normally exported by the surrounding build system).
    expected_env_vars = ['PRJ_DIR', 'LOWRISC_IP_DIR']
    for var in expected_env_vars:
        if os.getenv(var) is None:
            raise RuntimeError(f'The environment variable {var!r} is not set.')

    core_ibex = os.path.normpath(os.path.join(THIS_DIR, '..'))

    os.makedirs(args.output, exist_ok=True)

    subst_vars_dict = {
        'core_ibex': core_ibex,
        'out': args.output,
        'cmp_opts': get_compile_opts(args.ibex_config,
                                     args.simulator)
    }

    # Find the correct flags for the tb to link against the compiled ISS
    spike_iss_pc = ['riscv-riscv', 'riscv-disasm', 'riscv-fdt']
    iss_pkgconfig_dict = {
        'ISS_CFLAGS' : ['--cflags'],
        'ISS_LDFLAGS' : ['--libs-only-other'],
        'ISS_LIBS' : ['--libs-only-l', '--libs-only-L'],
    }
    if args.en_cosim:
        # Probe for the packages first to give a friendlier error than the
        # raw pkg-config failure would.
        try:
            subprocess.check_output(['pkg-config', '--exists'] + spike_iss_pc)
        except subprocess.CalledProcessError as err:
            raise RuntimeError(
                f'Failed to find {spike_iss_pc} pkg-config packages. '
                f'Did you set the PKG_CONFIG_PATH correctly?') from err
        subst_vars_dict.update(
            {k: _get_iss_pkgconfig_flags(v,
                                         spike_iss_pc,
                                         args.simulator)
             for k, v in iss_pkgconfig_dict.items()})

    if args.en_cov:
        subst_vars_dict.update({'shared_cov_dir': args.shared_cov_dir})

    # Feature switches selecting which option groups are expanded into the
    # compile command templates.
    enables = {
        'cov_opts': args.en_cov,
        'wave_opts': args.en_wave,
        'cosim_opts': args.en_cosim
    }

    compile_cmds, _ = get_simulator_cmd(args.simulator, enables)

    for pre_cmd in compile_cmds:
        cmd = subst_vars(pre_cmd, subst_vars_dict)
        # Run via 'sh -c' so the yaml command strings may use shell syntax.
        retcode = run_one(args.verbose, ['sh', '-c', cmd],
                          redirect_stdstreams='/dev/null')
        if retcode:
            return retcode

    return 0


if __name__ == '__main__':
    sys.exit(main())

View File

@ -0,0 +1,101 @@
#!/usr/bin/env python3
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
import argparse
import os
import shlex
import sys
import tempfile
import pathlib
from scripts_lib import run_one, format_to_cmd
import riscvdv_interface
from test_entry import read_test_dot_seed
from metadata import RegressionMetadata
from test_run_result import TestRunResult
def _main() -> int:
    """Compile the generated assembly for one TEST.SEED into an object/binary.

    Loads the regression and per-test metadata from --dir-metadata, asks
    riscv-dv for the gcc_compile commands it would run, rewrites the paths in
    those commands to our own locations, then executes them. Records the
    commands and log paths back into the per-test metadata.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--dir-metadata', type=pathlib.Path, required=True)
    parser.add_argument('--test-dot-seed', type=read_test_dot_seed, required=True)
    args = parser.parse_args()
    tds = args.test_dot_seed  # (testname, seed) tuple
    md = RegressionMetadata.construct_from_metadata_dir(args.dir_metadata)
    trr = TestRunResult.construct_from_metadata_dir(args.dir_metadata, f"{tds[0]}.{tds[1]}")

    # Run riscv-dv to get a list of commands that it would run to try to
    # compile and convert the files in question. These will need some massaging
    # to match our paths, but we can't generate the commands by hand because
    # there are several test-specific options that might appear.
    with tempfile.TemporaryDirectory() as td:
        placeholder = os.path.join(td, '@@PLACEHOLDER@@')
        orig_list = os.path.join(td, 'orig-cmds.list')

        cmd = (riscvdv_interface.get_run_cmd(bool(md.verbose)) +
               ['--verbose',
                '--output', placeholder,
                '--steps=gcc_compile',
                '--test', trr.testname,
                '--start_seed', str(trr.seed),
                '--iterations', '1',
                '--isa', md.isa_ibex,
                '--debug', orig_list])
        trr.compile_asm_gen_log = trr.dir_test / 'compile_gen.riscv-dv.log'
        trr.compile_asm_gen_cmds = [format_to_cmd(cmd)]

        dv_ret = run_one(md.verbose, trr.compile_asm_gen_cmds[0],
                         redirect_stdstreams=trr.compile_asm_gen_log)
        if dv_ret:
            return dv_ret

        orig_cmds = []
        with open(orig_list) as orig_file:
            for line in orig_file:
                line = line.strip()
                if not line:
                    continue
                orig_cmds.append(shlex.split(line))

    # Do the massaging. We intentionally used "@@PLACEHOLDER@@" as a path in
    # our call to riscv-dv, which should let us find all the things that matter
    # easily.
    trr.objectfile = trr.dir_test / 'test.o'
    trr.binary = trr.dir_test / 'test.bin'
    # NOTE(review): assumes trr.assembly was populated by an earlier
    # generation step -- confirm against the test-generation script.
    rewrites = [
        (f"{placeholder}/asm_test/{trr.testname}_0.S", str(trr.assembly)),
        (f"{placeholder}/asm_test/{trr.testname}_0.o", str(trr.objectfile)),
        (f"{placeholder}/asm_test/{trr.testname}_0.bin", str(trr.binary))
    ]
    new_cmds = []
    for cmd in orig_cmds:
        new_cmd = []
        for word in cmd:
            for old, new in rewrites:
                word = word.replace(old, new)
            # Any placeholder left over means a path we didn't anticipate.
            if placeholder in word:
                raise RuntimeError("Couldn't replace every copy of "
                                   f"placeholder in {cmd}")
            new_cmd.append(word)
        new_cmds.append(new_cmd)

    # Finally, run all the commands
    trr.compile_asm_log = trr.dir_test / 'compile.riscv-dv.log'
    trr.compile_asm_cmds = [format_to_cmd(cmd) for cmd in new_cmds]
    # Commit the updated per-test metadata to disk before running.
    trr.export(write_yaml=True)
    for cmd in trr.compile_asm_cmds:
        ret = run_one(md.verbose, cmd)
        if ret != 0:
            return ret
    # Falls through returning None on success; sys.exit(None) exits with 0.


if __name__ == '__main__':
    sys.exit(_main())

View File

@ -0,0 +1,108 @@
#!/usr/bin/env python3
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
import argparse
import os
import sys
import subprocess
import pathlib
from metadata import RegressionMetadata, LockedMetadata
from ibex_cmd import get_compile_opts
from scripts_lib import run_one
import riscvdv_interface
import logging
logger = logging.getLogger(__name__)
def _get_iss_pkgconfig_flags(specifiers, iss_pc, simulator):
    """Query pkg-config for the ISS packages and return the resulting flags.

    specifiers: pkg-config query flags (e.g. ['--cflags'])
    iss_pc:     pkg-config package names for the ISS
    simulator:  RTL simulator name; 'xlm' needs linker-flag rewriting
    """
    _flags = subprocess.check_output(
        args=(['pkg-config'] + specifiers + iss_pc),
        universal_newlines=True,
    ).strip()
    if simulator == 'xlm':
        # See xcelium documentation for the -Wld syntax for passing
        # flags to the linker. Passing -rpath,<path> options is tricky
        # because commas are parsed strangely between xrun and the xmsc
        # tool, and its easy for the options to arrive malformed. Use
        # the following hack to get it through.
        if '-Wl' in _flags:  # This should be in LDFLAGS only
            _flags = "'-Xlinker {}'".format(_flags.replace('-Wl,', ''))
    return _flags
def _main() -> int:
    """Compile the RTL testbench, recording commands/logs in the metadata.

    Holds the regression-metadata lock only while computing the compile
    commands; the (slow) compile itself runs after the lock is released.

    Returns the first non-zero compile command exit code, or 0 on success.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--dir-metadata', type=pathlib.Path, required=True)
    args = parser.parse_args()

    # We have some required environment variables down in various scripts
    # that are easier to set here at a high level part of the build.
    # It would be nice to be more explicit somehow...
    expected_env_vars = ['PRJ_DIR', 'LOWRISC_IP_DIR']
    for var in expected_env_vars:
        if os.getenv(var) is None:
            raise RuntimeError(f'The environment variable {var!r} is not set.')

    with LockedMetadata(args.dir_metadata, __file__) as md:
        md.dir_tb.mkdir(exist_ok=True, parents=True)
        md.tb_build_log = md.dir_tb/'compile_tb.log'

        subst_vars_dict = {
            'core_ibex': md.ibex_dv_root,
            'tb_dir': md.dir_tb,
            'tb_build_log': md.tb_build_log,
            'cmp_opts': get_compile_opts(md.ibex_config,
                                         md.simulator),
            'dir_shared_cov': (md.dir_shared_cov if md.cov else ''),
        }

        # Locate the spike .pc files to allow us to link against it when building
        spike_iss_pc = ['riscv-riscv', 'riscv-disasm', 'riscv-fdt']
        iss_pkgconfig_dict = {
            'ISS_CFLAGS' : ['--cflags'],
            'ISS_LDFLAGS' : ['--libs-only-other'],
            'ISS_LIBS' : ['--libs-only-l', '--libs-only-L'],
        }
        # Record the search path used, for later debugging of build failures.
        md.envvar_PKG_CONFIG_PATH = dict(os.environ).get('PKG_CONFIG_PATH')
        try:
            subprocess.check_output(['pkg-config', '--exists'] + spike_iss_pc)
        except subprocess.CalledProcessError as err:
            raise RuntimeError(
                f'Failed to find {spike_iss_pc} pkg-config packages. '
                f'Did you set the PKG_CONFIG_PATH correctly?') from err
        subst_vars_dict.update(
            {k: _get_iss_pkgconfig_flags(v,
                                         spike_iss_pc,
                                         md.simulator)
             for k, v in iss_pkgconfig_dict.items()})

        md.tb_build_stdout = md.dir_tb/'compile_tb_stdstreams.log'
        md.tb_build_cmds = riscvdv_interface.get_tool_cmds(
            yaml_path=md.ibex_riscvdv_simulator_yaml,
            simulator=md.simulator,
            cmd_type='compile',
            user_enables={
                'cov_opts': md.cov,
                'wave_opts': md.waves,
                'cosim_opts': True  # Always enable now post_compare is deprecated
            },
            user_subst_options=subst_vars_dict)

    # Write all compile-tb output into a single logfile
    with md.tb_build_stdout.open('wb') as compile_fd:
        for cmd in md.tb_build_cmds:
            compile_fd.write(f"Running compile_tb command :\n{' '.join(cmd)}\n".encode())
            retcode = run_one(md.verbose, cmd, redirect_stdstreams=compile_fd)
            if retcode:
                return retcode

    return 0


if __name__ == '__main__':
    sys.exit(_main())

View File

@ -0,0 +1,37 @@
#!/usr/bin/env python3
"""Get the riscv_dv functional coverage results."""
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
import sys
import argparse
import pathlib
import shutil
from metadata import RegressionMetadata, LockedMetadata
from scripts_lib import run_one
import riscvdv_interface
import logging
logger = logging.getLogger(__name__)
def _main():
    """Run the riscv-dv functional-coverage step; return its exit code."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--dir-metadata', type=pathlib.Path, required=True)
    parsed = arg_parser.parse_args()

    # Record the fcov command and its logfile in the locked metadata, then
    # drop the lock before actually executing the command.
    with LockedMetadata(parsed.dir_metadata, __file__) as md:
        md.dir_fcov.mkdir(exist_ok=True, parents=True)
        md.riscvdv_fcov_cmds = [riscvdv_interface.get_cov_cmd(md)]
        md.riscvdv_fcov_stdout = md.dir_fcov / 'riscvdv_fcov_stdout.log'

    return run_one(md.verbose, md.riscvdv_fcov_cmds[0], md.riscvdv_fcov_stdout)


if __name__ == '__main__':
    sys.exit(_main())

View File

@ -2,11 +2,15 @@
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
import os
import subprocess
from typeguard import typechecked
_THIS_DIR = os.path.normpath(os.path.join(os.path.dirname(__file__)))
_IBEX_ROOT = os.path.normpath(os.path.join(_THIS_DIR, 4 * '../'))
from setup_imports import _IBEX_ROOT
import ibex_config
from ibex_config import Config, parse_config
import logging
logger = logging.getLogger(__name__)
# For each simulator, a tuple
#
@ -25,13 +29,13 @@ class GenError(Exception):
pass
def run_ibex_config(config_name: str, output_type: str) -> str:
script_path = os.path.join(_IBEX_ROOT, 'util', 'ibex_config.py')
yaml_path = os.path.join(_IBEX_ROOT, 'ibex_configs.yaml')
def _run_ibex_config(config_name: str, output_type: str) -> str:
script_path = _IBEX_ROOT/'util'/'ibex_config.py'
yaml_path = _IBEX_ROOT/'ibex_configs.yaml'
ibex_config_cmd = [
script_path,
'--config_filename', yaml_path,
str(script_path),
'--config_filename', str(yaml_path),
config_name,
output_type,
'--ins_hier_path', 'core_ibex_tb_top',
@ -49,7 +53,7 @@ def run_ibex_config(config_name: str, output_type: str) -> str:
return proc.stdout.strip()
def get_x_opts(config_name: str, simulator: str, stage: str) -> str:
def _get_x_opts(config_name: str, simulator: str, stage: str) -> str:
try:
needs_compile_opts, needs_sim_opts = SIM_CFGS[simulator]
except KeyError:
@ -70,12 +74,110 @@ def get_x_opts(config_name: str, simulator: str, stage: str) -> str:
output_type = (f'{simulator}_{stage}_opts'
if specify_which_opts else f'{simulator}_opts')
return run_ibex_config(config_name, output_type)
return _run_ibex_config(config_name, output_type)
def get_compile_opts(config_name: str, simulator: str) -> str:
return get_x_opts(config_name, simulator, 'compile')
return _get_x_opts(config_name, simulator, 'compile')
def get_sim_opts(config_name: str, simulator: str) -> str:
return get_x_opts(config_name, simulator, 'sim')
return _get_x_opts(config_name, simulator, 'sim')
def get_config(cfg_name: str) -> Config:
    """Look up the named Ibex configuration in the top-level ibex_configs.yaml."""
    return parse_config(cfg_name, _IBEX_ROOT / "ibex_configs.yaml")
def get_isas_for_config(cfg: Config) -> tuple[str, str]:
    """Get ISA and ISS_ISA keys for the given Ibex config.

    Returns (toolchain ISA, ISS ISA): the toolchain takes a bare 'b'
    suffix, while the ISS wants the underscore-joined sub-extension list.
    """
    # NOTE: This logic should match the code in the get_isa_string() function
    # in core_ibex/tests/core_ibex_base_test.sv: keep them in sync!
    reg_file = 'e' if cfg.rv32e else 'i'
    multiplier = 'm' if cfg.rv32m != 'ibex_pkg::RV32MNone' else ''
    base_isa = 'rv32{}{}c'.format(reg_file, multiplier)

    # Map the RTL RV32B enum onto the Z*/X* sub-extension names.
    bitmanip_mapping = {
        'ibex_pkg::RV32BNone': [],
        'ibex_pkg::RV32BBalanced': ['Zba', 'Zbb', 'Zbs', 'XZbf', 'XZbt'],
        'ibex_pkg::RV32BOTEarlGrey': ['Zba', 'Zbb', 'Zbc', 'Zbs',
                                      'XZbf', 'XZbp', 'XZbr', 'XZbt'],
        'ibex_pkg::RV32BFull': ['Zba', 'Zbb', 'Zbc', 'Zbs',
                                'XZbe', 'XZbf', 'XZbp', 'XZbr', 'XZbt']
    }
    bitmanip_isa = bitmanip_mapping.get(cfg.rv32b)
    if bitmanip_isa is None:
        raise ValueError(f'Unknown RV32B value ({cfg.rv32b}) in config YAML')

    toolchain_isa = base_isa + ('b' if cfg.rv32b != 'ibex_pkg::RV32BNone' else '')
    iss_isa = '_'.join([base_isa] + bitmanip_isa)
    return (toolchain_isa, iss_isa)
_TestEntry = dict[str, object]
_TestEntries = list[_TestEntry]


@typechecked
def filter_tests_by_config(cfg: ibex_config.Config,
                           test_list: _TestEntries) -> _TestEntries:
    """Filter out any unsupported tests from being executed.

    e.g. if the "small" config has been specified, this function will filter
    out all tests that require B-extension and PMP parameters

    This function will parse the set of RTL parameters required by a given
    test (if any) and ensure that those parameters are supported by the
    selected core config.

    Doing this allows the run flow to be smarter about running regressions
    with different configs (useful for CI flows).

    Arguments:
        cfg: ibex_config.Config object of built system
        test_list: list of test entry objects parsed from the YAML testlist

    Returns:
        filtered_test_list: a list of test entry objects, filtered such that
                            all tests incompatible with the specified ibex
                            config have been removed.
    """
    filtered_test_list = []
    for test in test_list:
        if "rtl_params" not in test:
            # We currently only exclude tests by mismatching 'rtl_params', so if
            # that key is missing then the test is accepted by default.
            filtered_test_list.append(test)
        else:
            param_dict = test['rtl_params']
            assert isinstance(param_dict, dict)
            for p, p_val in param_dict.items():
                config_val = cfg.params.get(p, None)

                # Throw an error if required RTL parameters in the testlist
                # have been formatted incorrectly (typos, wrong parameters,
                # etc)
                if config_val is None:
                    raise ValueError('Parameter {} not found in config {}'
                                     .format(p, cfg))

                # Ibex has some enum parameters, so as a result some tests are
                # able to run with several of these parameter values (like
                # bitmanipulation tests). If this is the case, the testlist
                # will specify all legal enum values, check if any of them
                # match the config.
                if ((isinstance(p_val, list) and (config_val not in p_val)) or
                        (isinstance(p_val, str) and (config_val != p_val))):
                    logger.warning(
                        f"Rejecting test {test['test']}, 'rtl_params' specified "
                        " not compatible with ibex_config")
                    break
            else:
                # BUGFIX: previously the append was unconditional, so a test
                # that hit the "Rejecting" branch above was still returned.
                # The for/else only accepts the test when no parameter
                # mismatched (i.e. the loop completed without break).
                filtered_test_list.append(test)
    return filtered_test_list

View File

@ -1,22 +1,23 @@
#!/usr/bin/env python3
"""Helper-scripts to merge coverage databases across multiple tests."""
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
"""Regression script for running the Spike UVM testbench"""
import argparse
import logging
import os
import shutil
import sys
import pathlib
from typing import Set
from metadata import RegressionMetadata
from setup_imports import _IBEX_ROOT
from scripts_lib import run_one
_THIS_DIR = os.path.normpath(os.path.join(os.path.dirname(__file__)))
_IBEX_ROOT = os.path.normpath(os.path.join(_THIS_DIR, 4 * '../'))
def find_cov_dirs(start_dir: str, simulator: str) -> Set[str]:
assert simulator in ['xlm', 'vcs']
@ -35,6 +36,10 @@ def find_cov_dirs(start_dir: str, simulator: str) -> Set[str]:
logging.info("Found coverage database (vdb) at %s" % vdb_path)
cov_dirs.add(vdb_path)
if not cov_dirs:
logging.info(f"No coverage found for {simulator}")
return 1
return cov_dirs
@ -51,8 +56,7 @@ def merge_cov_vcs(cov_dir: str, verbose: bool, cov_dirs: Set[str]) -> int:
def merge_cov_xlm(cov_dir: str, verbose: bool, cov_dirs: Set[str]) -> int:
xcelium_scripts = os.path.join(_IBEX_ROOT,
'vendor/lowrisc_ip/dv/tools/xcelium')
xcelium_scripts = _IBEX_ROOT/'vendor/lowrisc_ip/dv/tools/xcelium'
# The merge TCL code uses a glob to find all available scopes and previous
# runs. In order to actually get the databases we need to go up once so
@ -106,39 +110,23 @@ def merge_cov_xlm(cov_dir: str, verbose: bool, cov_dirs: Set[str]) -> int:
def main():
'''Entry point when run as a script'''
parser = argparse.ArgumentParser()
parser.add_argument("--working-dir")
parser.add_argument("--simulator")
parser.add_argument("--verbose", action="store_true")
parser.add_argument('--dir-metadata', type=pathlib.Path, required=True)
args = parser.parse_args()
md = RegressionMetadata.construct_from_metadata_dir(args.dir_metadata)
if args.simulator not in ['xlm', 'vcs']:
raise ValueError(f'Unsupported simulator: {args.simulator}.')
if md.simulator not in ['xlm', 'vcs']:
raise ValueError(f'Unsupported simulator for merging coverage: {args.simulator}')
output_dir = os.path.join(args.working_dir, 'coverage')
# If output_dir exists, delete it: we'll re-generate its contents in a sec
# and we don't want to accidentally pick them up as part of the merge.
try:
shutil.rmtree(output_dir)
except FileNotFoundError:
pass
# Now make output_dir again (but empty)
os.makedirs(output_dir)
md.dir_cov.mkdir(exist_ok=True, parents=True)
# Compile a list of all directories that contain coverage databases
cov_dirs = find_cov_dirs(args.working_dir, args.simulator)
if not cov_dirs:
logging.info(f"No coverage found for {args.simulator}.")
return 1
cov_dirs = find_cov_dirs(str(md.dir_run), md.simulator)
merge_funs = {
'vcs': merge_cov_vcs,
'xlm': merge_cov_xlm
}
return merge_funs[args.simulator](output_dir, args.verbose, cov_dirs)
return merge_funs[md.simulator](str(md.dir_cov), md.verbose, cov_dirs)
if __name__ == '__main__':

View File

@ -0,0 +1,463 @@
#!/usr/bin/env python3
"""Hold build metadata/configuration in a central location."""
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
import sys
import os
from types import *
import pathlib
import pickle
import typing
from typing import Optional, Union
from enum import Enum
import argparse
import shlex
import dataclasses
from dataclasses import field
from typeguard import typechecked
import portalocker
import signal
import setup_imports
import scripts_lib
import ibex_cmd
import ibex_config
import lib as riscvdv_lib
from test_run_result import TestRunResult
import logging
logger = logging.getLogger(__name__)
@typechecked
@dataclasses.dataclass
class RegressionMetadata(scripts_lib.testdata_cls):
    """Holds metadata about the current builds.

    Optional fields mean that they haven't yet been populated.
    """

    # Top-level locations: where the build happens (dir_out) and where this
    # object is persisted on disk, as both a pickle and a readable yaml.
    dir_out: pathlib.Path = pathlib.Path()
    dir_metadata: pathlib.Path = pathlib.Path()
    pickle_file : pathlib.Path = field(init=False, compare=False, default_factory=pathlib.Path)
    yaml_file : pathlib.Path = field(init=False, compare=False, default_factory=pathlib.Path)

    raw_args_str: str = ' '  # The arguments passed in to the constructor
    raw_args_dict: dict = field(default_factory=dict)

    # User-facing regression knobs (populated from the KEY=VALUE --args-list).
    seed: int = 1  # starting seed
    waves: bool = False
    cov: bool = False
    cosim: bool = True
    simulator: str = ' '
    iss: str = ' '
    test: str = ' '
    verbose: bool = False
    iterations: Optional[int] = None
    signature_addr: str = ' '
    ibex_config: str = ' '
    tests_and_counts: list[tuple[str, int]] = field(default_factory=list)
    isa_ibex: Optional[str] = None
    isa_iss: Optional[str] = None

    # Files that control the regression, specify configurations, tests, etc
    ibex_configs : pathlib.Path = field(init=False, compare=False, default_factory=pathlib.Path)
    ibex_riscvdv_simulator_yaml : pathlib.Path = field(init=False, compare=False, default_factory=pathlib.Path)
    ibex_riscvdv_customtarget : pathlib.Path = field(init=False, compare=False, default_factory=pathlib.Path)
    ibex_riscvdv_testlist : pathlib.Path = field(init=False, compare=False, default_factory=pathlib.Path)
    ibex_riscvdv_csr : pathlib.Path = field(init=False, compare=False, default_factory=pathlib.Path)

    # Build logs and commands
    riscvdv_build_log : Optional[pathlib.Path] = None
    riscvdv_build_stdout : Optional[pathlib.Path] = None
    riscvdv_build_cmds : Optional[list[list[str]]] = None
    tb_build_log : Optional[pathlib.Path] = None
    tb_build_stdout : Optional[pathlib.Path] = None
    tb_build_cmds : Optional[list[list[str]]] = None
    riscvdv_fcov_log : Optional[pathlib.Path] = None
    riscvdv_fcov_stdout : Optional[pathlib.Path] = None
    riscvdv_fcov_cmds : Optional[list[list[str]]] = None
    regr_log : Optional[pathlib.Path] = None
    regr_log_junit : Optional[pathlib.Path] = None
    regr_log_junit_merged : Optional[pathlib.Path] = None
    environment_variables : dict = field(init=False, compare=False, default_factory=dict)

    # Project directories (which contain above files and results)
    ibex_root : pathlib.Path = field(init=False, compare=False, default_factory=pathlib.Path)
    riscvdv_root : pathlib.Path = field(init=False, compare=False, default_factory=pathlib.Path)
    ot_lowrisc_ip : pathlib.Path = field(init=False, compare=False, default_factory=pathlib.Path)
    ot_xcelium_cov_scripts : pathlib.Path = field(init=False, compare=False, default_factory=pathlib.Path)
    ibex_dv_root : pathlib.Path = field(init=False, compare=False, default_factory=pathlib.Path)
    dir_build : pathlib.Path = field(init=False, compare=False, default_factory=pathlib.Path)
    dir_instruction_generator : pathlib.Path = field(init=False, compare=False, default_factory=pathlib.Path)
    dir_tb : pathlib.Path = field(init=False, compare=False, default_factory=pathlib.Path)
    dir_run : pathlib.Path = field(init=False, compare=False, default_factory=pathlib.Path)
    dir_cov : pathlib.Path = field(init=False, compare=False, default_factory=pathlib.Path)
    dir_fcov : pathlib.Path = field(init=False, compare=False, default_factory=pathlib.Path)
    dir_shared_cov : pathlib.Path = field(init=False, compare=False, default_factory=pathlib.Path)
    dir_cov_merged : pathlib.Path = field(init=False, compare=False, default_factory=pathlib.Path)
    dir_cov_report : pathlib.Path = field(init=False, compare=False, default_factory=pathlib.Path)

    # Paths to the per-test TestRunResult pickles created at CREATE time.
    tests_pickle_files: Optional[list[pathlib.Path]] = None

    def __post_init__(self):
        """Construct all the dependent metadata."""
        self._setup_directories()
        self.pickle_file = self.dir_metadata/'metadata.pickle'
        self.yaml_file = self.dir_metadata/'metadata.yaml'
        self.ibex_configs = self.ibex_root/'ibex_configs.yaml'
        self.ot_xcelium_cov_scripts = self.ot_lowrisc_ip/'dv'/'tools'/'xcelium'
        self.ibex_riscvdv_simulator_yaml = self.ibex_dv_root/'yaml'/'rtl_simulation.yaml'
        self.ibex_riscvdv_customtarget = self.ibex_dv_root/'riscv_dv_extension'
        self.ibex_riscvdv_testlist = self.ibex_riscvdv_customtarget/'testlist.yaml'
        self.ibex_riscvdv_csr = self.ibex_riscvdv_customtarget/'csr_description.yaml'
        # Snapshot the environment of the process that created the metadata.
        self.environment_variables = dict(os.environ)

    def _get_ibex_metadata(self):
        """Get the desired ibex_config parameters.

        Any extra derivative data can be setup here. Validates the user
        seed/iterations knobs, then derives the ISA strings and the final
        (test, count) list from the chosen Ibex config.
        """
        if self.iterations is not None and self.iterations <= 0:
            raise RuntimeError('Bad --iterations argument: must be positive')
        if self.seed < 0:
            raise RuntimeError('Bad --start_seed argument: must be non-negative')

        cfg = ibex_cmd.get_config(self.ibex_config)
        self.isa_ibex, self.isa_iss = ibex_cmd.get_isas_for_config(cfg)
        self.tests_and_counts = self.get_tests_and_counts()

    def _setup_directories(self):
        """Set the directory variables which contain all other build factors."""
        self.ibex_root = setup_imports._IBEX_ROOT
        self.riscvdv_root = setup_imports._RISCV_DV
        self.ot_lowrisc_ip = setup_imports._OT_LOWRISC_IP
        self.ibex_dv_root = setup_imports._CORE_IBEX
        self.dir_build = self.dir_out/'build'
        self.dir_instruction_generator = self.dir_build/'instr_gen'
        self.dir_tb = self.dir_build/'tb'
        self.dir_run = self.dir_out/'run'
        self.dir_cov = self.dir_run/'coverage'
        self.dir_fcov = self.dir_cov/'fcov'
        self.dir_shared_cov = self.dir_cov/'shared_cov'
        self.dir_cov_merged = self.dir_cov/'merged'
        self.dir_cov_report = self.dir_cov/'report'

    @classmethod
    def arg_list_initializer(cls,
                             dir_metadata: pathlib.Path,
                             dir_out: pathlib.Path,
                             args_list: str):
        """Initialize fields from an input str of 'KEY=VALUE KEY2=VALUE2' form.

        Using args_list: str is convenient for constructing from a higher level,
        such as a makefile.
        dir_metadata/dir_out are always required.
        dir_metadata -> Where build metadata is stored and reconstructed from.
        dir_out -> Where the build takes place.
        dir_metadata can be outside of dir_out, but placing it inside of dir_out
        makes cleanup for a new build easy. ('rm -rf dir_out/')

        Returns a constructed RegressionMetadata object.
        """
        # BUGFIX: these guards previously used 'is pathlib.Path()', which
        # compares identity against a freshly-constructed object and so was
        # always False (the checks never fired). Use equality instead. The
        # first message also wrongly referred to dir_metadata.
        if dir_out == pathlib.Path():
            raise RuntimeError("dir_out must be initialized")
        if dir_metadata == pathlib.Path():
            raise RuntimeError("dir_metadata must be initialized")

        dummy_obj = RegressionMetadata()
        dummy = dataclasses.asdict(dummy_obj)
        logger.debug(dummy)  # Useful to see types of all the k,v pairs

        # Any fields declared in the class initialization (see above) can be
        # populated by constructing a dict with keys matching the fields, and
        # then passing **dict to the construction of the class. We do this
        # here to populate from 'args_list'.
        args_dict = {}
        args_dict['raw_args_str'] = args_list
        args_dict['raw_args_dict'] = {k: v for k, v in
                                      (pair.split('=', maxsplit=1)
                                       for pair in shlex.split(args_list))}

        kv_tuples = (pair.split('=', maxsplit=1) for pair in shlex.split(args_list))
        kv_dict = {k.lower(): v for k, v in kv_tuples}

        for f in dataclasses.fields(dummy_obj):
            if f.name in kv_dict:
                key = f.name
                val = kv_dict[f.name]
                logger.debug(f"Attempting to set {key} in metadata object")
                logger.debug(f"Type of key '{key}' is {f.type}, value is {type(val)}")
                # There should be a better way to do typecasting...
                # i.e. how to check that the value of any k:v pair passed to
                # --args-list can be typecast from str to the typehint of
                # the matching class variable defined above.
                # Eg. args_dict[key] = cast(f.type, val)
                if f.type is str:
                    args_dict[key] = str(val)
                elif f.type is int:
                    args_dict[key] = int(val)
                elif f.type is bool:
                    # "0"/"1" on the command line -> False/True
                    args_dict[key] = bool(int(val))
                elif f.type is pathlib.Path:
                    args_dict[key] = pathlib.Path(val)
                elif f.type is typing.Optional[int]:
                    if val:
                        args_dict[key] = int(val)
                    else:
                        args_dict[key] = None
                elif f.type is NoneType:
                    args_dict[key] = None
                else:
                    raise RuntimeError(f"Couldn't set key '{key}' in metadata object! "
                                       f"Expected type : {type(dummy[key])}")

        # Finally construct the metadata object
        md = cls(
            dir_out=dir_out.resolve(),
            dir_metadata=dir_metadata.resolve(),
            **args_dict)

        # Fetch/set more derivative metadata specific to the ibex
        md._get_ibex_metadata()

        return md

    @classmethod
    @typechecked
    def construct_from_metadata_dir(cls, dir_metadata: pathlib.Path):
        """Construct metadata object from exported object using default filenames."""
        md_pickle = pathlib.Path(dir_metadata)/'metadata.pickle'
        md = cls.construct_from_pickle(md_pickle)
        return md

    def get_tests_and_counts(self) -> list[tuple[str, int]]:
        """Get a list of tests and the number of iterations to run of each.

        ibex_config should be the name of the Ibex configuration to be tested.

        If test is provided, it gives the test or tests (as a comma separated
        string) to narrow to. Use the special name "all" to run all the tests.

        If iterations is provided, it should be a positive number and overrides
        the number of iterations for each test.
        """
        rv_testlist = self.ibex_riscvdv_testlist
        rv_test = self.test if self.test is not None else 'all'
        rv_iterations = self.iterations or 0

        # Get all the tests that match the test argument, scaling as necessary
        # with the iterations argument.
        matched_list = []  # type: _TestEntries
        riscvdv_lib.process_regression_list(
            testlist=rv_testlist,
            test=rv_test,
            iterations=rv_iterations,
            matched_list=matched_list,
            riscv_dv_root=self.riscvdv_root)
        if not matched_list:
            # BUGFIX: previously formatted with non-existent self.testlist,
            # which raised AttributeError instead of the intended error.
            raise RuntimeError("Cannot find {} in {}".format(
                self.test, self.ibex_riscvdv_testlist))

        # Filter tests by the chosen ibex configuration
        filtered_list = ibex_cmd.filter_tests_by_config(
            ibex_config.parse_config(self.ibex_config, str(self.ibex_configs)),
            matched_list)

        # Convert to desired output format (and check for well-formedness)
        ret = []
        for test in filtered_list:
            name = test['test']
            iterations = test['iterations']
            assert isinstance(name, str) and isinstance(iterations, int)
            assert iterations > 0
            ret.append((name, iterations))
        return ret

    def tds(self, give_tuple: bool = False) -> Union[list[str],
                                                     list[tuple[str, int]]]:
        """Return the TEST.SEED strings for all the tests configured in the regression.

        By default returns a list of strs which are TEST.SEED, but can return
        a list of tuples as (TEST, SEED). Seeds count up from self.seed for
        each iteration of a test.
        """
        if not self.tests_and_counts:
            raise RuntimeError("self.tests_and_counts is empty, cant get TEST.SEED strings.")

        tds_list = []
        for test, count in self.tests_and_counts:
            for i in range(count):
                if give_tuple:
                    tds = (test, self.seed + i)
                else:
                    tds = f"{test}.{self.seed + i}"
                tds_list.append(tds)
        return tds_list
class Ops(Enum):
    """Type of operations that can be specified by an argparse arg."""

    # Each value is the literal string accepted on the command line.
    CREATE = 'create_metadata'
    PRINT_FIELD = 'print_field'
    TESTS_AND_SEEDS = 'tests_and_seeds'

    def __str__(self):
        # argparse uses str() when rendering choices/usage text.
        return self.value
def _main():
    """Command-line entry point: create or query the regression metadata."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--op', type=Ops, choices=Ops, required=True)
    parser.add_argument('--dir-metadata', type=pathlib.Path, required=True)
    parser.add_argument('--dir-out', type=pathlib.Path, required=False)
    parser.add_argument('--args-list', type=str, required=False)
    parser.add_argument('--field', type=str, required=False)
    args = parser.parse_args()

    # Parse all variables from the argument string, and then add them
    # to the metadata object
    if args.op == Ops.CREATE:
        """
        Use the --args-list input, a string of 'KEY=VALUE KEY2=VALUE2',
        to create a new metadata object.
        --dir-metadata specifies the directory of the test metadata
        --dir-out specifies the directory for the regression build and test to take place
        """
        # An existing pickle means a regression is already set up; do not
        # clobber it.
        if (pathlib.Path(args.dir_metadata)/'metadata.pickle').exists():
            logger.error("Build metadata already exists, not recreating from scratch.")
            return
        md = RegressionMetadata.arg_list_initializer(dir_metadata=pathlib.Path(args.dir_metadata),
                                                     dir_out=pathlib.Path(args.dir_out),
                                                     args_list=args.args_list)

        # Setup metadata objects for each of the tests to be run. Construct a list of these
        # objects inside the regression_metadata object constructed above, so we can easily
        # find and import them later, and give each test object a link back to this top-level
        # object that defines the wider regression.
        md.tests_pickle_files = []
        for test, seed in md.tds(give_tuple=True):
            tds_str = f"{test}.{seed}"
            trr_pickle_file = md.dir_metadata / (tds_str + ".pickle")
            # Initialize TestRunResult object
            trr = TestRunResult(
                passed=None,
                failure_message=None,
                testdotseed=tds_str,
                testname=test,
                seed=seed,
                rtl_simulator=md.simulator,
                iss_cosim=md.iss,
                dir_test=md.dir_run/tds_str,
                metadata_pickle_file=md.pickle_file,
                pickle_file=trr_pickle_file,
                yaml_file=(md.dir_run / tds_str / 'trr.yaml'))
            # Save the path into a list in the regression metadata object for later.
            md.tests_pickle_files.append(trr.pickle_file)
            # Export the trr structure to disk.
            trr.export(write_yaml=True)
        # Export here to commit new RegressionMetadata object to disk.
        md.export(write_yaml=True)

    if args.op == Ops.PRINT_FIELD:
        md = RegressionMetadata.construct_from_metadata_dir(args.dir_metadata)
        value = getattr(md, args.field)
        if value is None:
            raise RuntimeError("Field requested is not present or not set in the regression metadata object")
        logger.debug(f"Returning value of field {args.field} as {value}")
        print(str(value))  # Captured into Makefile variable

    if args.op == Ops.TESTS_AND_SEEDS:
        """Return a list of TEST.SEED for all the valid tests"""
        md = RegressionMetadata.construct_from_metadata_dir(args.dir_metadata)
        for tds in md.tds():
            print(tds)
class LockedMetadata():
    """Construct an instance of RegressionMetadata while locking the on-disk file.

    Holding an exclusive lock means we do not need to worry about multiple
    processes racing to write into the file. This could have performance
    implications if there are strict dependencies between steps, so aim to
    hold this lock for as short a time as possible.

    N.B. When used as follows....
    '''
    with LockedMetadata(args.dir_metadata, __file__) as md:
        print(md.simulator)
        # etc...
    print(md.ibex_config)  # !!!!!!
    '''
    ... after the with-context is over, the file is closed and we have committed any
    changes made to disk, but the object 'md' in memory is still around and useable.
    Therefore, it is still valid to reference it after the scope has ended.
    """

    def _handler(self, signum, frame):
        """SIGALRM handler: give up if we waited too long for the file lock.

        Note: this must take 'self' — it is registered as a bound method, and
        Python invokes signal handlers with exactly (signum, frame). The
        previous signature '(signum, frame, other)' silently received the
        instance in 'signum', so the logged signal number was wrong.
        """
        logger.error(f"Timed-out [5s] waiting to open the regression metadata file! (signal = {signum})")
        raise OSError("Couldn't open regression metadata file before we were timed out!")

    def __init__(self, dir_metadata: pathlib.Path, script: pathlib.Path):
        """Construct object corresponding to the on-disk file.

        Args:
            dir_metadata: Directory containing the regression metadata
            script: Name of the file locking the metadata. Only used for logging.
        """
        self.pickle_file = pathlib.Path(dir_metadata)/'metadata.pickle'
        self.file_name = pathlib.Path(script).name

    def __enter__(self):
        """Provide a way to access the in-filesystem object safely (holds a lock)."""
        # Set the signal handler and a 5-second alarm.
        # Since other scripts may lock this file, better implement a timeout
        # to report what went wrong. Though we should never be racing/locking
        # for all that long, this is just a backup.
        signal.signal(signal.SIGALRM, self._handler)
        signal.alarm(5)  # 5s
        self.handle = self.pickle_file.open('rb')
        portalocker.lock(self.handle, portalocker.LockFlags.EXCLUSIVE)
        logger.info(f"Locking metadata file for {self.file_name}...")
        self.md = pickle.load(self.handle)
        signal.alarm(0)  # Disable the alarm
        return self.md

    def __exit__(self, type, value, traceback):
        """Close our exclusive access to the file, committing any changes to disk."""
        self.md.export(write_yaml=True)
        logger.info(f"Unlocked in {self.file_name}.")
        portalocker.unlock(self.handle)
        self.handle.close()
if __name__ == '__main__':
    # Propagate _main()'s return code as the process exit status.
    sys.exit(_main())

View File

@ -0,0 +1,4 @@
# Disassemble every compiled test object found under ./out/run, writing a
# test.dump file next to each one.
# Stream find's output into a read loop and quote every expansion so that
# paths containing whitespace are handled safely (the previous unquoted
# for-loop word-split the file list).
find ./out/run -type f -iregex '.*test\.o' | while IFS= read -r obj; do
    "$RISCV_TOOLCHAIN"/bin/riscv32-unknown-elf-objdump -d "$obj" > "$(dirname "$obj")/test.dump"
done

View File

@ -0,0 +1,4 @@
# Pretty-print every trace_core*.log under the current tree by aligning its
# tab-separated columns, writing trace_pretty.log alongside each trace.
# Stream find's output into a read loop and quote every expansion so that
# paths containing whitespace are handled safely (the previous unquoted
# for-loop word-split the file list). Note $'\t' and 'column -R' assume
# bash + util-linux column, as before.
find . -type f -iregex '.*trace_core.*\.log' | while IFS= read -r trace; do
    column -t -s $'\t' -o ' ' -R 1,2,3,4,5 "$trace" > "$(dirname "$trace")/trace_pretty.log"
done

View File

@ -0,0 +1,190 @@
"""Defines the interface to riscvdv features for random instruction generation and compilation.
riscv-dv provides both
- a runnable instruction-generator
(the sv/UVM program that actually generates .S assembly files)
- formatting guidelines for specifying simulators, test commands, optional arguments, etc.
(testlist.yaml / simulator.yaml)
Provide an interface to get runnable commands from data/configuration specified in
the riscv-dv way.
"""
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
import re
import shlex
import pathlib
from typing import Union
from typeguard import typechecked
from metadata import RegressionMetadata
# ibex
from setup_imports import _RISCV_DV, _CORE_IBEX_RISCV_DV_EXTENSION
from scripts_lib import subst_dict, subst_env_vars
# riscv-dv
from lib import read_yaml
import logging
logger = logging.getLogger(__name__)
parameter_format = '<{}>'
parameter_regex = r'(<[\w]+>)' # Find matches to the above format
@typechecked
def get_run_cmd(verbose: bool) -> list[Union[str, pathlib.Path]]:
    """Return the command parts of a call to riscv-dv's run.py."""
    extension_dir = _CORE_IBEX_RISCV_DV_EXTENSION
    cmd = [
        'python3',
        _RISCV_DV/'run.py',
        '--testlist', extension_dir/'testlist.yaml',
        '--gcc_opts=-mno-strict-align',
        '--custom_target', extension_dir,
        # '--simulator_yaml', _CORE_IBEX_YAML/'rtl_simulation.yaml',
        '--csr_yaml', extension_dir/'csr_description.yaml',
        '--mabi=ilp32',
    ]
    if verbose:
        cmd += ['--verbose']
    return cmd
def get_cov_cmd(md: RegressionMetadata) -> list[str]:
    """Return the command to generate riscv-dv's functional coverage."""
    cov_cmd = [
        'python3', str(_RISCV_DV/'cov.py'),
        '--core', 'ibex',
        '--dir', str(md.dir_run),
        '-o', str(md.dir_fcov),
        '--simulator', md.simulator,
        '--opts', '--gen_timeout 1000',
        '--isa', md.isa_ibex,
        '--custom_target', str(md.ibex_riscvdv_customtarget),
    ]
    if md.verbose:
        cov_cmd += ['--verbose']
    return cov_cmd
@typechecked
def get_tool_cmds(yaml_path: pathlib.Path,
                  simulator: str,
                  cmd_type: str,  # compile/sim
                  user_enables: dict[str, bool],
                  user_subst_options: dict[str, Union[str, pathlib.Path]]) -> list[list[str]]:
    """Substitute options and environment variables to construct a final command.

    Args:
        yaml_path: the yaml file (rtl_simulation.yaml) defining the simulators.
        simulator: the name of the simulator to use.
        cmd_type: which command set to build, 'compile' or 'sim'.
        user_enables: dict mapping option names to bools. For each key N, if
            user_enables[N] is falsy (or N is absent), all occurrences of <N>
            in the cmd are replaced with ''. If truthy, <N> is replaced with
            the (stripped) value from the simulator's yaml entry. If N does
            not appear in the cmd, nothing happens.
        user_subst_options: dict of templated variables <T> in the yaml
            commands that are substituted as <T> = user_subst_options[T].

    riscv-dv allows both compile and sim keys in the yaml to hold multiple
    commands, so a list of commands is returned (each already shlex-split).

    Populate the riscv-dv rtl_simulation.yaml templated parameters <T> with
    the following algorithm...
    (1) If the yaml key 'tool':'compile/sim' contains K:V pairs with keys other
        than 'cmd', for each of those keys K check if <K> exists in the cmd, and
        if it does, substitute for the value V. Gate each substitution with a
        user-specified enable.
    (2) For any remaining templated values <_> in the cmd, take a user-defined
        dict {K:V} and if <K> matches the templated value, replace <K> by V.
    (3) If the yaml key 'tool' set contains a K:V pair 'env_var':[str],
        then for each str in [str], check if it exists as a templated value <V>
        in the cmd, and if it does, substitute with the environment variable of
        the same name (see subst_env_vars; an unset variable is an error).

    Raises:
        RuntimeError: if any templated parameter <...> is left unsubstituted.
    """
    simulator_entry = _get_yaml_for_simulator(yaml_path, simulator)

    # (1) Collect all k:v pairs of the compile/sim entry which are not 'cmd'.
    # A parameter disabled by a user_enable is substituted with ''.
    # This mapping is identical for every command, so build it once outside
    # the loop (it was previously rebuilt per-iteration).
    cmd_opts_dict = {k: (v.strip() if user_enables.get(k) else '')
                     for k, v in simulator_entry[cmd_type].items()
                     if k != 'cmd'}

    # (3) 'env_var' is a comma-separated string of environment variable names;
    # also loop-invariant, so parse it once.
    env_var_names = (simulator_entry['env_var'].replace(' ', '').split(',')
                     if 'env_var' in simulator_entry else [])

    cmds = []
    for cmd in simulator_entry[cmd_type]['cmd']:
        assert type(cmd) == str
        formatted_cmd = cmd
        logger.debug("Unformatted command :")
        logger.debug(formatted_cmd)

        # (1) #
        if cmd_opts_dict != {}:
            formatted_cmd = subst_dict(formatted_cmd, cmd_opts_dict)
        logger.debug("After #1 :")
        logger.debug(formatted_cmd)

        # (2) #
        if user_subst_options != {}:
            formatted_cmd = subst_dict(formatted_cmd, user_subst_options)
        logger.debug("After #2 :")
        logger.debug(formatted_cmd)

        # (3) #
        if env_var_names:
            formatted_cmd = subst_env_vars(formatted_cmd, env_var_names)
        logger.debug("After #3 :")
        logger.debug(formatted_cmd)

        # Finally, check if we have any parameters left which were not filled.
        match = re.findall(parameter_regex, formatted_cmd)
        if match:
            logger.error("Parameters in riscvdv command not substituted!\n"
                         f"Parameters : {match}\n"
                         f"Command : {formatted_cmd}\n")
            raise RuntimeError
        logger.info(formatted_cmd)
        cmds.append(shlex.split(formatted_cmd))

    return cmds
@typechecked
def _get_yaml_for_simulator(yaml_path: pathlib.Path, simulator: str) -> dict:
    """Fetch the entry matching 'simulator' from the RTL simulation yaml.

    riscv-dv specifies a yaml-schema for defining simulators and the commands
    needed for building and running testbenches; the ibex build system reuses
    that schema with its own commands in 'rtl_simulation.yaml'.

    Raise a RuntimeError if no entry matches.
    """
    logger.info(f"Processing simulator setup file : {yaml_path}")
    entry = next((e for e in read_yaml(yaml_path)
                  if e.get('tool') == simulator),
                 None)
    if entry is None:
        raise RuntimeError("Cannot find RTL simulator {}".format(simulator))
    logger.debug(f"Got the following yaml for simulator '{simulator}' "
                 f"from {str(yaml_path.resolve())} :\n{entry}")
    return entry

View File

@ -1,53 +0,0 @@
#!/usr/bin/env python3
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
import argparse
import os
import sys
from scripts_lib import get_config, get_isas_for_config, run_one
def main() -> int:
    """Run a test's binary through an ISS (Spike only), logging its commits.

    Invokes spike with '--log-commits' on --input (presumably the compiled
    test binary — TODO confirm against caller), redirecting stdout/stderr to
    --output. Returns spike's return code (via run_one).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--verbose', action='store_true')
    parser.add_argument('--iss', required=True)
    parser.add_argument('--input', required=True)
    parser.add_argument('--output', required=True)
    parser.add_argument('--ibex-config', required=True)
    args = parser.parse_args()

    cfg = get_config(args.ibex_config)
    # NOTE(review): 'isa' is unused here; only the ISS ISA string is needed.
    isa, iss_isa = get_isas_for_config(cfg)

    # riscv-dv knows how to run an ISS simulation (see yaml/iss.yaml in the
    # vendored directory), but it has definite (and inconvenient!) opinions
    # about where files should end up. Rather than fight with it, let's just
    # generate the simple ISS command ourselves.
    #
    # NOTE: This only supports Spike, mainly because it's the only simulator we
    # care about at the moment and this whole script is going to go away anyway
    # very soon once we've switched across to using cosimulation.
    if args.iss != 'spike':
        raise RuntimeError(f'Unsupported ISS: {args.iss}')

    # Prefer an explicit install via $SPIKE_PATH; otherwise rely on $PATH.
    spike_dir = os.getenv('SPIKE_PATH')
    if spike_dir is not None:
        spike = os.path.join(spike_dir, 'spike')
    else:
        spike = 'spike'

    cmd = [spike, '--log-commits', '--isa', iss_isa, '-l', args.input]
    return run_one(args.verbose,
                   cmd,
                   redirect_stdstreams=args.output,
                   timeout_s=30)  # Spike can run indefinitely in some cases
if __name__ == '__main__':
sys.exit(main())

View File

@ -1,113 +0,0 @@
#!/usr/bin/env python3
import argparse
import os
import subprocess
import sys
from ibex_cmd import get_sim_opts
from sim_cmd import get_simulator_cmd
from scripts_lib import read_test_dot_seed, subst_vars, run_one
from test_entry import get_test_entry
_CORE_IBEX = os.path.normpath(os.path.join(os.path.dirname(__file__), '..'))
def get_test_sim_cmd(base_cmd, test, binary, seed, sim_dir):
    """Generate the command that runs a test iteration in the simulator.

    base_cmd is the command to use before any test-specific substitutions.
    test is a dictionary describing the test (originally read from the
    testlist YAML file). binary is the path to the binary for the test.
    seed is the seed to use. sim_dir is the directory to which the test
    results will be written.

    Returns the command to run.
    """
    cmd = subst_vars(base_cmd, {'seed': str(seed)})

    # Append any per-test simulator options from the testlist entry,
    # collapsing multi-line option strings onto one line.
    if 'sim_opts' in test:
        cmd = cmd + ' ' + test['sim_opts'].replace('\n', ' ')

    test_name = test['test']

    # Do final interpolation into the test command for variables that depend
    # on the test name or iteration number.
    cmd = subst_vars(cmd,
                     {
                         'sim_dir': sim_dir,
                         'rtl_test': test['rtl_test'],
                         'binary': binary,
                         'test_name': test_name,
                     })

    if not os.path.exists(binary):
        raise RuntimeError('When computing simulation command for running '
                           'seed {} of test {}, cannot find the '
                           'expected binary at {!r}.'
                           .format(seed, test_name, binary))

    return cmd
def main() -> int:
    """Run one TEST.SEED iteration in the RTL simulator.

    Builds the simulator command from the generic template (get_simulator_cmd),
    specializes it for this test, runs it with output captured to
    <out-dir>/rtl.log, and always returns 0 — a log is produced either way.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--ibex-config', required=True)
    parser.add_argument('--shared-cov-dir', required=True)
    parser.add_argument('--simulator', required=True)
    parser.add_argument("--en_cov", action='store_true')
    parser.add_argument("--en_wave", action='store_true')
    parser.add_argument('--signature-addr', required=True)
    parser.add_argument('--test-dot-seed',
                        type=read_test_dot_seed,
                        required=True)
    parser.add_argument('--binary', required=True)
    parser.add_argument('--rtl-sim-dir', required=True)
    parser.add_argument('--out-dir', required=True)
    args = parser.parse_args()

    testname, seed = args.test_dot_seed
    entry = get_test_entry(testname)

    # Look up how to run the simulator in general
    enables = {
        'cov_opts': args.en_cov,
        'wave_opts': args.en_wave
    }
    _, base_cmd = get_simulator_cmd(args.simulator, enables)

    sim_opts = (f'+signature_addr={args.signature_addr} ' +
                get_sim_opts(args.ibex_config, args.simulator))

    # Specialize base_cmd with the right directories and simulator options
    sim_cmd = subst_vars(base_cmd,
                         {
                             'out': args.rtl_sim_dir,
                             'shared_cov_dir': args.shared_cov_dir,
                             'sim_opts': sim_opts,
                             'cwd': _CORE_IBEX,
                         })

    # Specialize base_cmd for this specific test
    test_cmd = get_test_sim_cmd(sim_cmd, entry,
                                args.binary, seed, args.out_dir)

    # Run test_cmd (it's a string, so we have to call out to the shell to do
    # so). Note that we don't capture the success or failure of the subprocess:
    # if something goes horribly wrong, we assume we won't have a matching
    # trace.
    sim_log = os.path.join(args.out_dir, 'rtl.log')
    os.makedirs(args.out_dir, exist_ok=True)
    with open(sim_log, 'wb') as sim_fd:
        run_one(False, test_cmd, redirect_stdstreams=sim_fd, timeout_s=900, shell=True)

    # Always return 0 (success), even if the test failed. We've successfully
    # generated a log either way.
    return 0
if __name__ == '__main__':
sys.exit(main())

View File

@ -11,115 +11,18 @@ import shlex
import shutil
import sys
import tempfile
import pathlib
from typing import List
from scripts_lib import (read_test_dot_seed, start_riscv_dv_run_cmd,
get_config, get_isas_for_config, run_one)
from test_entry import read_test_dot_seed
import riscvdv_interface
from scripts_lib import run_one, format_to_cmd
from ibex_cmd import get_config
from metadata import RegressionMetadata
from test_run_result import TestRunResult
def main() -> int:
parser = argparse.ArgumentParser()
parser.add_argument('--verbose', action='store_true')
parser.add_argument('--simulator', required=True)
parser.add_argument('--end-signature-addr', required=True)
parser.add_argument('--output-dir', required=True)
parser.add_argument('--gen-build-dir', required=True)
parser.add_argument('--ibex-config', required=True)
parser.add_argument('--test-dot-seed',
type=read_test_dot_seed, required=True)
args = parser.parse_args()
cfg = get_config(args.ibex_config)
isa, iss_isa = get_isas_for_config(cfg)
testname, seed = args.test_dot_seed
inst_overrides = [
'riscv_asm_program_gen',
'ibex_asm_program_gen',
'uvm_test_top.asm_gen'
]
# Special-case for riscv_csr_test -> fixup the handshake addr.
# Currently use (signature_addr - 0x4) for test_done channel.
sig = ((args.end_signature_addr) if ('riscv_csr_test' not in testname)
else
f'{(int(args.end_signature_addr, 16) - 4):x}') # (signature_addr - 0x4)
sim_opts_dict = {
'uvm_set_inst_override': ','.join(inst_overrides),
'require_signature_addr': '1',
'signature_addr': sig,
'pmp_num_regions': str(cfg.pmp_num_regions),
'pmp_granularity': str(cfg.pmp_granularity),
'tvec_alignment': '8'
}
sim_opts_str = ' '.join('+{}={}'.format(k, v)
for k, v in sim_opts_dict.items())
# Ensure that the output directory actually exists
os.makedirs(args.output_dir, exist_ok=True)
riscv_dv_log = os.path.join(args.output_dir, f'riscv-dv.log')
gen_log = os.path.join(args.output_dir, f'gen-cmds.log')
with tempfile.TemporaryDirectory() as td:
orig_list = os.path.join(td, 'cmds.list')
placeholder = os.path.join(td, '@@PLACEHOLDER@@')
cmd = (start_riscv_dv_run_cmd(args.verbose) +
['--so', '--steps=gen',
'--output', placeholder,
'--simulator', args.simulator,
'--isa', isa,
'--test', testname,
'--start_seed', str(seed),
'--iterations', '1',
'--end_signature_addr', sig,
'--sim_opts', sim_opts_str,
'--debug', orig_list])
# Run riscv-dv to generate commands. This is rather chatty, so redirect
# its output to a log file.
gen_retcode = run_one(args.verbose, cmd,
redirect_stdstreams=riscv_dv_log)
if gen_retcode:
return gen_retcode
# Those commands assume the riscv-dv directory layout, where the build
# and run directories are the same. Transform each of the commands as
# necessary to point at the built generator
cmds = reloc_commands(placeholder,
args.gen_build_dir,
td,
args.simulator,
testname,
orig_list)
# Open up a file to take output from running the commands
with open(gen_log, 'w') as log_fd:
# Run the commands in sequence to create outputs in the temporary
# directory. Redirect stdout and stderr to gen_log
ret = 0
for cmd in cmds:
ret = run_one(args.verbose, cmd, redirect_stdstreams=log_fd)
if ret != 0:
break
test_file_copies = {
'riscv_csr_test': [('riscv_csr_test_0.S', 'test.S', False)]
}
default_file_copies = [('gen.log', 'gen.log', True),
('test_0.S', 'test.S', False)]
file_copies = test_file_copies.get(testname, default_file_copies)
do_file_copies(td, args.output_dir, file_copies, ret != 0)
return 0
import logging
logger = logging.getLogger(__name__)
def reloc_commands(placeholder_dir: str,
@ -128,12 +31,12 @@ def reloc_commands(placeholder_dir: str,
simulator: str,
testname: str,
src: str) -> List[List[str]]:
'''Reads the (one) line in src and apply relocations to it
"""Read (one) line in src and apply relocations to it.
The result should be a series of commands that build a single test into
scratch_dir/test_0.S, dumping a log into scratch_dir/gen.log.
'''
"""
ret = []
with open(src) as src_file:
for line in src_file:
@ -151,7 +54,7 @@ def reloc_commands(placeholder_dir: str,
def reloc_word(simulator: str,
placeholder_dir: str, build_dir: str, scratch_dir: str,
testname: str, word: str) -> str:
'''Helper function for reloc_commands that relocates just one word'''
"""Helper function for reloc_commands that relocates just one word."""
sim_relocs = {
'vcs': [
# The VCS-generated binary
@ -239,9 +142,104 @@ def do_file_copies(src_dir, dst_dir, copy_rules, run_failed):
f'but left no {src_name} in scratch directory.')
def _main() -> int:
    """Generate the assembly (test.S) for one TEST.SEED with riscv-dv.

    Loads the regression-wide (RegressionMetadata) and per-test
    (TestRunResult) objects from --dir-metadata, asks riscv-dv's run.py to
    emit the generator commands (--steps=gen --debug), relocates those
    commands to the prebuilt generator, runs them in a temporary directory,
    and copies the resulting files into the test directory. The updated
    TestRunResult is exported back to disk before returning.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--dir-metadata', type=pathlib.Path, required=True)
    parser.add_argument('--test-dot-seed', type=read_test_dot_seed, required=True)
    args = parser.parse_args()
    tds = args.test_dot_seed  # (testname, seed) tuple

    md = RegressionMetadata.construct_from_metadata_dir(args.dir_metadata)
    trr = TestRunResult.construct_from_metadata_dir(args.dir_metadata, f"{tds[0]}.{tds[1]}")
    cfg = get_config(md.ibex_config)

    # UVM instance overrides passed to the generator via +uvm_set_inst_override.
    inst_overrides = [
        'riscv_asm_program_gen',
        'ibex_asm_program_gen',
        'uvm_test_top.asm_gen'
    ]
    # Special-case for riscv_csr_test -> fixup the handshake addr.
    # Currently use (signature_addr - 0x4) for test_done channel.
    sig = ((md.signature_addr) if ('riscv_csr_test' not in trr.testname)
           else
           f'{(int(md.signature_addr, 16) - 4):x}')  # (signature_addr - 0x4)
    sim_opts_dict = {
        'uvm_set_inst_override': ','.join(inst_overrides),
        'require_signature_addr': '1',
        'signature_addr': sig,
        'pmp_num_regions': str(cfg.pmp_num_regions),
        'pmp_granularity': str(cfg.pmp_granularity),
        'tvec_alignment': '8'
    }

    with tempfile.TemporaryDirectory() as td:
        orig_list = pathlib.Path(td)/'cmds.list'
        # Placeholder output path; reloc_commands rewrites references to it.
        placeholder = pathlib.Path(td)/'@@PLACEHOLDER@@'
        cmd = (riscvdv_interface.get_run_cmd(md.verbose) +
               ['--so', '--steps=gen',
                '--output', str(placeholder),
                '--simulator', md.simulator,
                '--isa', md.isa_ibex,
                '--test', trr.testname,
                '--start_seed', str(trr.seed),
                '--iterations', '1',
                '--end_signature_addr', sig,
                '--debug', str(orig_list),
                '--sim_opts', ' '.join('+{}={}'.format(k, v)
                                       for k, v in sim_opts_dict.items())
                ])

        # Ensure that the output directory actually exists
        trr.dir_test.mkdir(parents=True, exist_ok=True)
        trr.riscvdv_run_gen_stdout = md.dir_instruction_generator/'riscvdv_cmds.log'
        trr.riscvdv_run_gen_cmds = [format_to_cmd(cmd)]

        # Run riscv-dv to generate commands. This is rather chatty, so redirect
        # its output to a log file.
        gen_retcode = run_one(md.verbose, trr.riscvdv_run_gen_cmds[0],
                              redirect_stdstreams=trr.riscvdv_run_gen_stdout)
        if gen_retcode:
            return gen_retcode

        # Those commands assume the riscv-dv directory layout, where the build
        # and run directories are the same. Transform each of the commands as
        # necessary to point at the built generator
        cmds = reloc_commands(str(placeholder),
                              str(md.dir_instruction_generator.resolve()),
                              td,
                              md.simulator,
                              trr.testname,
                              str(orig_list))
        trr.riscvdv_run_cmds = [format_to_cmd(cmd) for cmd in cmds]
        trr.riscvdv_run_stdout = md.dir_instruction_generator/'riscvdv_run.log'
        trr.assembly = trr.dir_test / 'test.S'

        # Open up a file to take output from running the commands
        with trr.riscvdv_run_stdout.open('w') as log_fd:
            # Run the commands in sequence to create outputs in the temporary
            # directory. Redirect stdout and stderr to gen_log
            ret = 0
            for cmd in trr.riscvdv_run_cmds:
                ret = run_one(md.verbose, cmd, redirect_stdstreams=log_fd)
                if ret != 0:
                    break

        # riscv_csr_test names its output differently from the other tests.
        test_file_copies = {
            'riscv_csr_test': [('riscv_csr_test_0.S', 'test.S', False)]
        }
        default_file_copies = [('gen.log', 'gen.log', True),
                               ('test_0.S', 'test.S', False)]
        file_copies = test_file_copies.get(trr.testname, default_file_copies)
        do_file_copies(td, trr.dir_test, file_copies, ret != 0)

    # Commit the updated per-test metadata to disk.
    trr.export(write_yaml=True)
    return 0
if __name__ == '__main__':
try:
sys.exit(main())
except Exception as e:
print(f'ERROR: {e}', file=sys.stderr)
sys.exit(1)
sys.exit(_main())

View File

@ -0,0 +1,100 @@
#!/usr/bin/env python3
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
import argparse
import os
import sys
import pathlib
from ibex_cmd import get_sim_opts
import riscvdv_interface
from scripts_lib import run_one, format_to_cmd
from test_entry import read_test_dot_seed, get_test_entry
from metadata import RegressionMetadata
from test_run_result import TestRunResult
import logging
logger = logging.getLogger(__name__)
def _main() -> int:
    """Generate and run rtl simulation commands for one TEST.SEED.

    Loads the regression-wide (RegressionMetadata) and per-test
    (TestRunResult) objects from --dir-metadata, constructs the simulator
    command(s) via riscvdv_interface.get_tool_cmds, runs them with all output
    captured to rtl_sim_stdstreams.log, and always returns 0 — a log is
    produced either way.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--dir-metadata', type=pathlib.Path, required=True)
    parser.add_argument('--test-dot-seed', type=read_test_dot_seed, required=True)
    args = parser.parse_args()
    tds = args.test_dot_seed

    md = RegressionMetadata.construct_from_metadata_dir(args.dir_metadata)
    trr = TestRunResult.construct_from_metadata_dir(args.dir_metadata, f"{tds[0]}.{tds[1]}")
    testopts = get_test_entry(trr.testname)  # From testlist.yaml

    if not os.path.exists(trr.binary):
        raise RuntimeError(
            "When computing simulation command for running "
            f"seed {trr.seed} of test {trr.testname}, cannot find the "
            f"expected binary at {trr.binary!r}.")

    # Each test in testlist.yaml can (optionally) specify 'sim_opts'
    # which are to be passed to the simulator when running the test.
    sim_opts = ''
    sim_opts_raw = testopts.get('sim_opts')
    if sim_opts_raw:
        # Join multi-line options with a space — replacing newlines with ''
        # (as previously written) fused options on separate lines into a
        # single unparseable token. The pre-refactor code used ' ' too.
        sim_opts += sim_opts_raw.replace('\n', ' ')

    trr.rtl_log = trr.dir_test / 'rtl_sim.log'
    trr.rtl_trace = trr.dir_test / 'trace_core_00000000.log'
    trr.iss_cosim_trace = trr.dir_test / f'{md.iss}_cosim_trace_core_00000000.log'

    # Values substituted for the <templated> parameters in the simulator yaml.
    subst_vars_dict = {
        'cwd': md.ibex_root,
        'test_name': testopts['test'],
        'rtl_test': testopts['rtl_test'],
        'seed': str(trr.seed),
        'binary': trr.binary,
        'test_dir': trr.dir_test,
        'tb_dir': md.dir_tb,
        'dir_shared_cov': md.dir_shared_cov,
        'rtl_sim_log': trr.rtl_log,
        'rtl_trace': trr.rtl_trace.parent/'trace_core',
        'iss_cosim_trace': trr.iss_cosim_trace,
        'sim_opts': (f"+signature_addr={md.signature_addr}\n" +
                     f"{get_sim_opts(md.ibex_config, md.simulator)}\n" +
                     sim_opts)
    }

    # Look up how to run the simulator
    sim_cmds = riscvdv_interface.get_tool_cmds(
        yaml_path=md.ibex_riscvdv_simulator_yaml,
        simulator=md.simulator,
        cmd_type='sim',
        user_enables={
            'cov_opts': md.cov,
            'wave_opts': md.waves,
        },
        user_subst_options=subst_vars_dict)
    logger.info(sim_cmds)

    trr.dir_test.mkdir(exist_ok=True, parents=True)
    trr.rtl_cmds = [format_to_cmd(cmd) for cmd in sim_cmds]
    trr.rtl_stdout = trr.dir_test / 'rtl_sim_stdstreams.log'
    trr.export(write_yaml=True)

    # Write all sim_cmd output into a single logfile
    with open(trr.rtl_stdout, 'wb') as sim_fd:
        for cmd in trr.rtl_cmds:
            # Note that we don't capture the success or failure of the subprocess:
            sim_fd.write(f"Running run-rtl command :\n{' '.join(cmd)}\n".encode())
            run_one(md.verbose, cmd, redirect_stdstreams=sim_fd, timeout_s=900)

    # Always return 0 (success), even if the test failed. We've successfully
    # generated a log either way.
    return 0
if __name__ == '__main__':
sys.exit(_main())

View File

@ -3,54 +3,53 @@
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
import argparse
import os
import re
import shlex
import subprocess
import sys
from typing import Dict, IO, List, Optional, Tuple, Union
THIS_DIR = os.path.dirname(__file__)
IBEX_ROOT = os.path.normpath(os.path.join(THIS_DIR, 4 * '../'))
RISCV_DV_ROOT = os.path.normpath(os.path.join(IBEX_ROOT,
'vendor/google_riscv-dv'))
_OLD_SYS_PATH = sys.path
try:
sys.path = [os.path.join(IBEX_ROOT, 'util')] + sys.path
from ibex_config import Config, parse_config
finally:
sys.path = _OLD_SYS_PATH
TestAndSeed = Tuple[str, int]
import pickle
import pathlib
from io import IOBase
from typing import Dict, TextIO, Optional, Union, List
import dataclasses
from typeguard import typechecked
import logging
logger = logging.getLogger(__name__)
@typechecked
def run_one(verbose: bool,
cmd: List[str],
redirect_stdstreams: Optional[Union[str, IO]] = None,
redirect_stdstreams: Optional[Union[str, pathlib.Path, IOBase]] = None,
timeout_s: Optional[int] = None,
shell: Optional[bool] = False,
env: Dict[str, str] = None) -> int:
'''Run a command, returning its return code
"""Run a command, returning its retcode.
If verbose is true, print the command to stderr first (a bit like bash -x).
If redirect_stdstreams is true, redirect the stdout and stderr of the
subprocess to the given file object or path.
The cmd argument must be formatted the idiomatic pythonic way, as list[str].
'''
If redirect_stdstreams is true, redirect the stdout and stderr of the
subprocess to the given file object or path. Be flexible here to different
possible destinations.
"""
stdstream_dest = None
needs_closing = False
if redirect_stdstreams is not None:
if redirect_stdstreams == '/dev/null':
stdstream_dest = subprocess.DEVNULL
elif isinstance(redirect_stdstreams, str):
elif isinstance(redirect_stdstreams, pathlib.Path):
stdstream_dest = open(redirect_stdstreams, 'wb')
needs_closing = True
else:
elif isinstance(redirect_stdstreams, IOBase):
stdstream_dest = redirect_stdstreams
else:
raise RuntimeError(
f"redirect_stdstream called as {redirect_stdstreams} "
f"but that argument is invalid.")
if verbose:
# The equivalent of bash -x
@ -82,12 +81,15 @@ def run_one(verbose: bool,
stderr=stdstream_dest,
close_fds=False,
timeout=timeout_s,
shell=shell,
env=env)
return ps.returncode
except subprocess.CalledProcessError:
print(ps.communicate()[0])
return(1)
except OSError as e:
print(e)
# print(ps.communicate()[0])
return(1)
except subprocess.TimeoutExpired:
print("Error: Timeout[{}s]: {}".format(timeout_s, cmd))
return(1)
@ -96,74 +98,209 @@ def run_one(verbose: bool,
stdstream_dest.close()
def start_riscv_dv_run_cmd(verbose: bool):
'''Return the command parts of a call to riscv-dv's run.py'''
riscv_dv_extension = os.path.join(THIS_DIR, '../riscv_dv_extension')
@typechecked
def format_to_cmd(input_arg: Union[str, list[any]]) -> list[str]:
"""Format useful compound-lists into list[str], suitable for subprocess.
csr_desc = os.path.join(riscv_dv_extension, 'csr_description.yaml')
testlist = os.path.join(riscv_dv_extension, 'testlist.yaml')
Can be a list of [str, int, bool, pathlib.Path]
"""
cmd_list = []
for item in input_arg:
try:
cmd_list.append(format_to_str(item))
except TypeError as e:
raise RuntimeError(f"Can't format item to str when constructing a cmd: {e}")
cmd = ['python3',
os.path.join(RISCV_DV_ROOT, 'run.py'),
'--testlist', testlist,
'--gcc_opts=-mno-strict-align',
'--custom_target', riscv_dv_extension,
'--csr_yaml', csr_desc,
'--mabi=ilp32']
if verbose:
cmd.append('--verbose')
return cmd
return cmd_list
def subst_vars(string: str, var_dict: Dict[str, str]) -> str:
'''Apply substitutions in var_dict to string
@typechecked
def subst_opt(string: str, name: str, replacement: str) -> str:
    """Substitute the <name> option in string with 'replacement'."""
    from riscvdv_interface import parameter_format
    token = parameter_format.format(name)
    if token not in string:
        logger.debug(f"Tried to substitute for <{name}> in cmd but it was not found.")
        return string
    logger.debug(f"Substituting <{name}> with {replacement}")
    return string.replace(token, replacement)
If var_dict[K] = V, then <K> will be replaced with V in string.'''
@typechecked
def subst_dict(string: str, var_dict: Dict[str, Union[str, pathlib.Path]]) -> str:
    """Apply substitutions in var_dict to string.

    If <K> in string, substitute <K> for var_dict[K].
    Path values are resolved to absolute paths first.
    """
    for key, value in var_dict.items():
        replacement = (str(value.resolve())
                       if isinstance(value, pathlib.Path)
                       else value)
        string = subst_opt(string, key, replacement)
    return string
def read_test_dot_seed(arg: str) -> TestAndSeed:
'''Read a value for --test-dot-seed'''
@typechecked
def subst_env_vars(string: str, env_vars: list[str]) -> str:
"""Substitute environment variables in string.
match = re.match(r'([^.]+)\.([0-9]+)$', arg)
if match is None:
raise argparse.ArgumentTypeError('Bad --test-dot-seed ({}): '
'should be of the form TEST.SEED.'
.format(arg))
For each environment variable, V, in the list, any
occurrence of <V> in string will be replaced by the value of the
environment variable with that name. If <V> occurs in the string but $V is
not set in the environment, an error is raised.
return (match.group(1), int(match.group(2), 10))
"""
for var in env_vars:
value = os.environ.get(var)
if value is None:
raise RuntimeError('Cannot substitute {} in command because '
'the environment variable ${} is not set.'
.format(var, var))
string = subst_opt(string, var, value)
return string
def get_config(cfg_name: str) -> Config:
yaml_path = os.path.join(IBEX_ROOT, "ibex_configs.yaml")
return parse_config(cfg_name, yaml_path)
# If any of these characters are present in a string output it in multi-line
# mode. This will either be because the string contains newlines or other
# characters that would otherwise need escaping
_YAML_MULTILINE_CHARS = ['[', ']', ':', "'", '"', '\n']
_YAML_PRINTABLE_TYPES = Union[int, str, bool]
def get_isas_for_config(cfg: Config) -> Tuple[str, str]:
'''Get ISA and ISS_ISA keys for the given Ibex config'''
# NOTE: This logic should match the code in the get_isa_string() function
# in core_ibex/tests/core_ibex_base_test.sv: keep them in sync!
has_multiplier = cfg.rv32m != 'ibex_pkg::RV32MNone'
base_isa = 'rv32{}{}c'.format('e' if cfg.rv32e else 'i',
'm' if has_multiplier else '')
@typechecked
def _yaml_value_format(val: _YAML_PRINTABLE_TYPES) -> str:
"""Format a value for yaml output.
bitmanip_mapping = {
'ibex_pkg::RV32BNone': [],
'ibex_pkg::RV32BBalanced': ['Zba', 'Zbb', 'Zbs', 'XZbf', 'XZbt'],
'ibex_pkg::RV32BOTEarlGrey': ['Zba', 'Zbb', 'Zbc', 'Zbs',
'XZbf', 'XZbp', 'XZbr', 'XZbt'],
'ibex_pkg::RV32BFull': ['Zba', 'Zbb', 'Zbc', 'Zbs',
'XZbe', 'XZbf', 'XZbp', 'XZbr', 'XZbt']
}
For int, str and bool value can just be converted to str with special
handling for some strings
"""
# If val is a multi-line string
if isinstance(val, str) and any(c in val for c in _YAML_MULTILINE_CHARS):
# Split into individual lines and output them after a suitable yaml
# multi-line string indicator ('|-') indenting each line.
lines = val.split('\n')
return '|-\n' + '\n'.join([f' {line}' for line in lines])
bitmanip_isa = bitmanip_mapping.get(cfg.rv32b)
if bitmanip_isa is None:
raise ValueError(f'Unknown RV32B value ({cfg.rv32b}) in config YAML')
if val is None:
return ''
has_bitmanip = cfg.rv32b != 'ibex_pkg::RV32BNone'
toolchain_isa = base_isa + ('b' if has_bitmanip else '')
return str(val)
return (toolchain_isa, '_'.join([base_isa] + bitmanip_isa))
@typechecked
def pprint_dict(d: dict, output: TextIO) -> None:
    """Pretty-Print a python dictionary as valid yaml.

    All values are aligned to a common column, e.g.

      spam      eggs
      turkey    gravy
      yorkshire pudding
    """
    # Column to align values at: width of the longest key (minimum 1).
    width = max([1] + [len(key) for key in d])
    for key, value in d.items():
        padding = ' ' * (width - len(key))
        output.write('{}:{} {}\n'.format(key, padding,
                                         _yaml_value_format(value)))
@typechecked
def format_dict_to_printable_dict(arg: dict) -> dict:
    """Return a copy of arg with every value converted to a string.

    Nested dicts are stringified wholesale, lists are joined into a single
    space-separated string, and everything else goes through format_to_str().
    Values that format_to_str() cannot handle fall back to str().
    """
    clean_dict = {}
    for k, v in arg.items():
        try:
            if isinstance(v, dict):
                clean_dict[k] = str(v)
            elif isinstance(v, list):
                clean_dict[k] = ' '.join(format_to_str(item) for item in v)
            else:
                clean_dict[k] = format_to_str(v)
        except TypeError:
            # Best-effort fallback for values format_to_str() rejects.
            clean_dict[k] = str(v)
    return clean_dict
def format_to_str(arg: object) -> str:
    """Format single arg to str or raise TypeError if unable.

    Printable scalars are stringified directly, Paths are resolved to an
    absolute path, and None becomes the empty string.
    """
    # int/str/bool are the members of _YAML_PRINTABLE_TYPES. isinstance()
    # needs them as a plain tuple: passing the typing.Union alias is only
    # supported on Python >= 3.10. (The annotation previously used the
    # builtin `any` function, which is not a type.)
    if isinstance(arg, (int, str, bool)):
        return str(arg)
    elif isinstance(arg, pathlib.Path):
        return str(arg.resolve())
    elif arg is None:
        # Maybe remove?
        return ''
    else:
        raise TypeError("Couldn't format element to str!")
class testdata_cls():
    """Baseclass for testdata to hold common methods.

    Objects inheriting from this can easily import/export
    themselves to files, allowing data to gain continuity between
    different phases of the regression and testing process.
    """

    @classmethod
    @typechecked
    def construct_from_pickle(cls, metadata_pickle: pathlib.Path):
        """Allow easy construction of the data-structure from a file."""
        logger.debug(f"Constructing object from data in {metadata_pickle}")
        with metadata_pickle.open('rb') as handle:
            # The pickle holds the complete serialized object, so there is
            # no need to instantiate cls() first.
            return pickle.load(handle)

    @typechecked
    def export(self, write_yaml: bool = False):
        """Write the object to disk.

        Simultaneously write a pickle file and a yaml-file
        for easy human consumption.

        This should only be called from contexts where there
        won't be races to write into the file. So, only
        export if you are sure only one process has opened
        the file. (eg. use LockedMetadata())
        """
        # TODO redesign to try and remove the above restriction
        # with better API design
        if not self.pickle_file:
            msg = (f"Tried to export {type(self).__name__} "
                   "but self.pickle_file has no path set!")
            logger.error(msg)
            raise RuntimeError(msg)
        if not self.yaml_file:
            msg = (f"Tried to export {type(self).__name__} "
                   "but self.yaml_file has no path set!")
            logger.error(msg)
            raise RuntimeError(msg)

        logger.info(f"Dumping object to {self.pickle_file}")
        self.pickle_file.parent.mkdir(parents=True, exist_ok=True)
        with self.pickle_file.open('wb') as handle:
            pickle.dump(self, handle)

        if not write_yaml:
            return
        self.yaml_file.parent.mkdir(parents=True, exist_ok=True)
        with self.yaml_file.open('w') as handle:
            pprint_dict(self.format_to_printable_dict(), handle)

    def format_to_printable_dict(self) -> dict:
        """Return a printable dict of the object with all-str fields.

        Recommended for human-consumption.
        """
        return format_dict_to_printable_dict(dataclasses.asdict(self))

View File

@ -0,0 +1,51 @@
#!/usr/bin/env python3
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
import sys
import git
import pathlib
from pathlib import Path
def get_project_root() -> pathlib.Path:
    """Locate the repository root by asking git from the current directory."""
    repo = git.Repo('.', search_parent_directories=True)
    return Path(repo.working_tree_dir)
root = get_project_root()
# Locations of python modules within this repository and its vendored
# projects. These are the building blocks for the PYTHONPATH printed by
# get_pythonpath() below.
_IBEX_ROOT = root
_IBEX_UTIL = root/'util'
_CORE_IBEX = root/'dv'/'uvm'/'core_ibex'
_CORE_IBEX_SCRIPTS = _CORE_IBEX/'scripts'
_CORE_IBEX_RISCV_DV_EXTENSION = _CORE_IBEX/'riscv_dv_extension'
_CORE_IBEX_YAML = _CORE_IBEX/'yaml'
_RISCV_DV = root/'vendor'/'google_riscv-dv'
_RISCV_DV_SCRIPTS = _RISCV_DV/'scripts'
_OT_LOWRISC_IP = root/'vendor'/'lowrisc_ip'
def get_pythonpath() -> None:
    """Print a PYTHONPATH value covering all ibex python module locations.

    Setting this environment variable appropriately
    (from the top of the buildsystem) will then allow all
    python scripts to import all modules as needed.
    """
    module_dirs = [
        _IBEX_ROOT,
        _IBEX_UTIL,
        # _CORE_IBEX,
        _CORE_IBEX_SCRIPTS,
        _CORE_IBEX_RISCV_DV_EXTENSION,
        _CORE_IBEX_YAML,
        # _RISCV_DV,
        _RISCV_DV_SCRIPTS,
    ]
    # This stdout is the output, captured by the shell.
    print(':'.join(str(d) for d in module_dirs))
if __name__ == '__main__':
    # get_pythonpath() prints the path list and returns None, so this
    # exits with status 0.
    sys.exit(get_pythonpath())

View File

@ -1,118 +0,0 @@
import logging
import os
import sys
_THIS_DIR = os.path.normpath(os.path.join(os.path.dirname(__file__)))
_IBEX_ROOT = os.path.normpath(os.path.join(_THIS_DIR, '../../../..'))
_RISCV_DV_ROOT = os.path.join(_IBEX_ROOT, 'vendor/google_riscv-dv')
_OLD_SYS_PATH = sys.path
# Import lib from _DV_SCRIPTS before putting sys.path back as it started.
try:
sys.path = [os.path.join(_RISCV_DV_ROOT, 'scripts')] + sys.path
from lib import read_yaml
finally:
sys.path = _OLD_SYS_PATH
def subst_opt(string, name, enable, replacement):
    '''Substitute the <name> option in string

    When enable is False, every occurrence of <name> in string is deleted.
    When enable is True, <name> is replaced by replacement, which should be
    a string or None. A replacement of None is only an error if <name>
    actually occurs in string, in which case a RuntimeError is raised.
    '''
    placeholder = '<{}>'.format(name)
    if not enable:
        return string.replace(placeholder, '')
    if replacement is not None:
        return string.replace(placeholder, replacement)
    if placeholder in string:
        raise RuntimeError('No replacement defined for {} '
                           '(used in string: {!r}).'
                           .format(placeholder, string))
    return string
def subst_env_vars(string, env_vars):
    '''Substitute environment variables in string

    env_vars is a comma-separated list of environment variable names. For
    each name V in the list, every occurrence of <V> in string is replaced
    by the value of the environment variable V. If <V> appears in string
    but $V is not set in the environment, a RuntimeError is raised.
    '''
    names = env_vars.strip()
    if not names:
        return string
    for name in names.split(','):
        name = name.strip()
        placeholder = '<{}>'.format(name)
        if placeholder not in string:
            continue
        value = os.environ.get(name)
        if value is None:
            raise RuntimeError('Cannot substitute {} in command because '
                               'the environment variable ${} is not set.'
                               .format(placeholder, name))
        string = string.replace(placeholder, value)
    return string
def subst_cmd(cmd, enable_dict, opts_dict, env_vars):
    '''Substitute options and environment variables in cmd

    For each name N in enable_dict: when enable_dict[N] is False, every
    occurrence of <N> in cmd is deleted; when it is True, <N> is replaced
    by opts_dict[N]. A missing opts_dict entry is only a problem if <N>
    actually appears in cmd, in which case a RuntimeError is raised.

    Environment variables are then substituted as described in
    subst_env_vars, and any newlines are flattened to spaces.
    '''
    for name, enabled in enable_dict.items():
        cmd = subst_opt(cmd, name, enabled, opts_dict.get(name))
    expanded = subst_env_vars(cmd, env_vars)
    return expanded.replace('\n', ' ')
def get_yaml_for_simulator(simulator):
    '''Get the entry for the simulator in RTL simulation yaml

    Returns the dictionary for the matching 'tool' entry in
    rtl_simulation.yaml, raising a RuntimeError if there is none.
    '''
    yaml_dir = os.path.normpath(os.path.join(_THIS_DIR, '../yaml'))
    yaml_path = os.path.join(yaml_dir, 'rtl_simulation.yaml')
    # Pass yaml_path as a logging argument rather than %-formatting the
    # message eagerly: it is then only built if INFO logging is enabled.
    logging.info("Processing simulator setup file : %s", yaml_path)
    for entry in read_yaml(yaml_path):
        if entry.get('tool') == simulator:
            return entry
    raise RuntimeError("Cannot find RTL simulator {}".format(simulator))
def get_simulator_cmd(simulator, enables):
    '''Get compile and run commands for the testbench

    simulator is the name of the simulator to use. enables is a dictionary
    keyed by option names with boolean values: true if the option is enabled.

    Returns (compile_cmds, sim_cmd): the list of compile commands and the
    single simulation command, with all options and environment variables
    substituted.
    '''
    entry = get_yaml_for_simulator(simulator)
    env_vars = entry.get('env_var', '')
    compile_cmds = [subst_cmd(cmd, enables, entry['compile'], env_vars)
                    for cmd in entry['compile']['cmd']]
    sim_cmd = subst_cmd(entry['sim']['cmd'], enables, entry['sim'], env_vars)
    return (compile_cmds, sim_cmd)

View File

@ -1,31 +1,40 @@
import os
import sys
from typing import Dict, List
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
_CORE_IBEX = os.path.normpath(os.path.join(os.path.dirname(__file__), '..'))
_IBEX_ROOT = os.path.normpath(os.path.join(_CORE_IBEX, 3 * '../'))
_RISCV_DV_ROOT = os.path.join(_IBEX_ROOT, 'vendor/google_riscv-dv')
_OLD_SYS_PATH = sys.path
import argparse
import re
import logging
from typing import Dict, List
# Import riscv_trace_csv and lib from _DV_SCRIPTS before putting sys.path back
# as it started.
try:
sys.path = ([os.path.join(_CORE_IBEX, 'riscv_dv_extension'),
os.path.join(_RISCV_DV_ROOT, 'scripts')] +
sys.path)
from lib import process_regression_list # type: ignore
finally:
sys.path = _OLD_SYS_PATH
from setup_imports import _CORE_IBEX_RISCV_DV_EXTENSION, _RISCV_DV
import lib as riscvdv_lib # type: ignore
TestEntry = Dict[str, object]
TestEntries = List[TestEntry]
TestAndSeed = tuple[str, int]


def read_test_dot_seed(arg: str) -> TestAndSeed:
    '''Parse a --test-dot-seed argument of the form TEST.SEED'''
    m = re.match(r'([^.]+)\.([0-9]+)$', arg)
    if m is None:
        raise argparse.ArgumentTypeError('Bad --test-dot-seed ({}): '
                                         'should be of the form TEST.SEED.'
                                         .format(arg))
    test_name = m.group(1)
    seed = int(m.group(2), 10)
    return (test_name, seed)
def get_test_entry(testname: str) -> TestEntry:
matched_list = [] # type: TestEntries
testlist = os.path.join(_CORE_IBEX, 'riscv_dv_extension', 'testlist.yaml')
process_regression_list(testlist, 'all', 0, matched_list, _RISCV_DV_ROOT)
testlist = _CORE_IBEX_RISCV_DV_EXTENSION/'testlist.yaml'
riscvdv_lib.process_regression_list(testlist, 'all', 0, matched_list, _RISCV_DV)
for entry in matched_list:
if entry['test'] == testname:

View File

@ -1,45 +1,95 @@
"""Helper to aggregate all metadata from a test in one place"""
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
import collections
import pathlib
from typing import Optional
import dataclasses
from typeguard import typechecked
# test_name, test_idx, seed and passed must never be None. Other fields can be
# None.
test_run_result_fields = [
'name', # Name of test
'seed', # Seed of test
'binary', # Path to test binary
'uvm_log', # Path to UVM DV simulation log
'rtl_trace', # Path to RTL ibex trace output
'rtl_trace_csv', # Path to RTL ibex trace CSV
'iss_trace', # Path to spike trace
'iss_trace_csv', # Path to spike trac.
'en_cosim', # Is cosim enabled?
'cosim_trace', # Path to cosim_trace logfile
'cosim_trace_csv', # Path to cosim_trace CSV
'comparison_log', # Path to trace comparison log
'passed', # True if test passed
'failure_message' # Message describing failure, includes a
# '[FAILED]: XXXX' line at the end. Must not be
# None if passed is False
]
import scripts_lib
TestRunResult = collections.namedtuple('TestRunResult', test_run_result_fields)
import logging
logger = logging.getLogger(__name__)
def check_test_run_result(trr: TestRunResult):
    """Sanity-check the field types of a TestRunResult tuple."""
    # Mandatory fields (isinstance already rejects None).
    assert isinstance(trr.name, str)
    assert isinstance(trr.seed, int)
    # Path/log fields may be absent, but must be strings when present.
    for field in ('binary', 'uvm_log', 'rtl_trace', 'rtl_trace_csv',
                  'iss_trace', 'iss_trace_csv', 'cosim_trace',
                  'cosim_trace_csv', 'comparison_log'):
        value = getattr(trr, field)
        assert value is None or isinstance(value, str)
    assert trr.en_cosim is None or isinstance(trr.en_cosim, bool)
    assert isinstance(trr.passed, bool)
    # A failing test must carry a failure message.
    assert trr.passed or isinstance(trr.failure_message, str)
@typechecked
@dataclasses.dataclass
class TestRunResult(scripts_lib.testdata_cls):
    """Holds metadata about a single test and its results.

    Most of the fields aren't actually optional to running
    the simulations, but they may be optional in that we haven't yet
    populated the field or generated the item yet.
    """
    passed: Optional[bool] = None  # True if test passed
    # Message describing failure, includes a '[FAILED]: XXXX' line at the end.
    failure_message: Optional[str] = None

    testdotseed: Optional[str] = None

    testname: Optional[str] = None  # Name of test
    seed: Optional[int] = None  # Seed of test
    binary: Optional[pathlib.Path] = None  # Path to test binary
    rtl_simulator: Optional[str] = None  # Which simulator is used
    iss_cosim: Optional[str] = None  # Which ISS are we cosimulating with?

    # RISCV_DV specific test parameters
    gen_test: Optional[str] = None
    gen_opts: Optional[str] = None
    rtl_test: Optional[str] = None
    sim_opts: Optional[str] = None

    dir_test: Optional[pathlib.Path] = None
    assembly: Optional[pathlib.Path] = None  # Path to assembly file
    objectfile: Optional[pathlib.Path] = None

    riscvdv_run_gen_log: Optional[pathlib.Path] = None
    riscvdv_run_gen_stdout: Optional[pathlib.Path] = None
    riscvdv_run_log: Optional[pathlib.Path] = None
    riscvdv_run_stdout: Optional[pathlib.Path] = None
    compile_asm_gen_log: Optional[pathlib.Path] = None
    compile_asm_log: Optional[pathlib.Path] = None
    rtl_log: Optional[pathlib.Path] = None  # Path to UVM DV simulation log
    rtl_stdout: Optional[pathlib.Path] = None
    rtl_trace: Optional[pathlib.Path] = None  # Path to RTL ibex trace output
    iss_cosim_log: Optional[pathlib.Path] = None
    iss_cosim_trace: Optional[pathlib.Path] = None  # Path to cosim_trace logfile
    dir_fcov: Optional[pathlib.Path] = None

    riscvdv_run_gen_cmds: Optional[list[list[str]]] = None
    riscvdv_run_cmds: Optional[list[list[str]]] = None
    compile_asm_gen_cmds: Optional[list[str]] = None
    compile_asm_cmds: Optional[list[list[str]]] = None
    rtl_cmds: Optional[list[list[str]]] = None

    # Was annotated as a bare pathlib.Path: a default of None needs
    # Optional, especially under the @typechecked decorator.
    metadata_pickle_file: Optional[pathlib.Path] = None
    pickle_file: Optional[pathlib.Path] = None
    yaml_file: Optional[pathlib.Path] = None

    @classmethod
    @typechecked
    def construct_from_metadata_dir(cls, dir_metadata: pathlib.Path, tds: str):
        """Construct metadata object from exported object using default filenames.

        dir_metadata is the directory holding the exported pickles and tds
        is the 'test.seed' string identifying the test run.
        """
        trr_pickle = dir_metadata / f"{tds}.pickle"
        return cls.construct_from_pickle(trr_pickle)

    def format_to_printable_dict(self) -> dict:
        """Overwrite the default method in scripts_lib.testdata_cls.

        Format to a printable dict, but for any pathlib.Path strings, print
        them as relative to the test directory. More useful for human
        scanning.
        """
        relative_dict = {}
        for k, v in dataclasses.asdict(self).items():
            # Guard on dir_test being populated: Path.is_relative_to()
            # raises a TypeError when handed None.
            if (self.dir_test is not None and
                    isinstance(v, pathlib.Path) and
                    v.is_relative_to(self.dir_test)):
                relative_dict[k] = str(v.relative_to(self.dir_test))
            else:
                relative_dict[k] = v
        return scripts_lib.format_dict_to_printable_dict(relative_dict)

59
dv/uvm/core_ibex/util.mk Normal file
View File

@ -0,0 +1,59 @@
###############################################################################
# Utility functions.
#
# If VS is a list of variable names, P is a path and X is a string, then $(call
# dump-vars,P,X,VS) will expand to a list of 'file' commands that write each
# variable to P in Makefile syntax, but with "last-X-" prepended. At the start
# of the file, we also define last-X-vars-loaded to '.'. You can use this to
# check whether there was a dump file at all.
#
# Note that this doesn't work by expanding to a command. Instead, *evaluating*
# dump-vars causes the variables to be dumped.
dump-var = $(file >>$(1),last-$(2)-$(3) := $($(3)))
dump-vars = $(file >$(1),last-$(2)-vars-loaded := .) \
            $(foreach name,$(3),$(call dump-var,$(1),$(2),$(name)))

# equal checks whether two strings are equal, evaluating to '.' if they are and
# '' otherwise.
#
# both-empty is '.' iff both arguments are empty. find-find is '.' iff each
# argument is a substring of the other, which for plain strings means they are
# identical. The two cases are needed because $(findstring) cannot tell an
# empty needle apart from a failed match.
both-empty = $(if $(1),,$(if $(2),,.))
find-find = $(if $(and $(findstring $(1),$(2)),$(findstring $(2),$(1))),.,)
equal = $(or $(call both-empty,$(1),$(2)),$(call find-find,$(1),$(2)))

# var-differs is used to check whether a variable has changed since it was
# dumped. If it has changed, the function evaluates to '.' (with some
# whitespace) and prints a message to the console; if not, it evaluates to ''.
#
# Call it as $(call var-differs,X,TGT,V).
var-differs = \
  $(if $(call equal,$(strip $($(3))),$(strip $(last-$(1)-$(3)))),,\
    .$(info Repeating $(2) because variable $(3) has changed value.))

# vars-differ is used to check whether several variables have the same value as
# they had when they were dumped. If we haven't loaded the dumpfile, it
# silently evaluates to '!'. Otherwise, if all the variables match, it
# evaluates to ''. If not, it evaluates to '.' and prints some messages to the
# console explaining why a rebuild is happening.
#
# Call it as $(call vars-differ,X,TGT,VS).
vars-differ-lst = $(foreach v,$(3),$(call var-differs,$(1),$(2),$(v)))
vars-differ-sp = \
  $(if $(last-$(1)-vars-loaded),\
    $(if $(strip $(call vars-differ-lst,$(1),$(2),$(3))),.,),\
    !)
vars-differ = $(strip $(call vars-differ-sp,$(1),$(2),$(3)))

# A phony target which can be used to force recompilation.
.PHONY: FORCE
FORCE:

# vars-prereq is empty if every variable in VS matches the last run (loaded
# with tag X), otherwise it is set to FORCE (which will force a recompile and
# might print a message to the console explaining why we're rebuilding TGT).
#
# Call it as $(call vars-prereq,X,TGT,VS)
#
# Note: $(call) on a built-in function name invokes that function, so
# $(call strip,$(1)) is equivalent to $(strip $(1)).
vars-prereq = $(if $(call vars-differ,$(call strip,$(1)),$(2),$(3)),FORCE,)

# This expands to '@' if VERBOSE is 0 or not set, and to the empty
# string otherwise. Prefix commands with it in order that they only
# get printed when VERBOSE.
verb = $(if $(filter-out 0,$(VERBOSE)),,@)

View File

@ -1,5 +1,5 @@
# Copyright Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
############################################################
# The -CFLAGS option is required as some VCS DPI code contains smart quotes
# around some preprocessor macros, making G++ throw errors during compilation.
# As a result, passing -fno-extended-identifiers tells G++ to pretend that
@ -20,30 +21,39 @@
env_var: IBEX_ROOT
compile:
cmd:
- "vcs -f <core_ibex>/ibex_dv.f -full64
-l <out>/compile.log
-sverilog -ntb_opts uvm-1.2
+define+UVM
+define+UVM_REGEX_NO_DPI -timescale=1ns/10ps -licqueue
-LDFLAGS '-Wl,--no-as-needed'
-CFLAGS '--std=c99 -fno-extended-identifiers'
-Mdir=<out>/vcs_simv.csrc
-o <out>/vcs_simv
-debug_access+pp
-xlrm uniq_prior_final
-CFLAGS '--std=c99 -fno-extended-identifiers'
-lca -kdb <cmp_opts> <wave_opts> <cov_opts> <cosim_opts>"
cov_opts: >
- >-
vcs
-full64
-f <core_ibex>/ibex_dv.f
-l <tb_dir>/<rtl_log>
-sverilog
-ntb_opts uvm-1.2
+define+UVM
+define+UVM_REGEX_NO_DPI
-timescale=1ns/10ps
-licqueue
-LDFLAGS '-Wl,--no-as-needed'
-CFLAGS '--std=c99 -fno-extended-identifiers'
-Mdir=<tb_dir>/vcs_simv.csrc
-o <tb_dir>/vcs_simv
-debug_access+pp
-xlrm uniq_prior_final
-CFLAGS '--std=c99 -fno-extended-identifiers'
-lca -kdb
<cmp_opts> <wave_opts> <cov_opts> <cosim_opts>
cov_opts: >-
-cm line+tgl+assert+fsm+branch
-cm_tgl portsonly
-cm_tgl structarr
-cm_report noinitial
-cm_seqnoconst
-cm_dir <shared_cov_dir>/test.vdb
-cm_dir <dir_shared_cov>/test.vdb
-cm_hier cover.cfg
wave_opts: >
-debug_access+all -ucli -do vcs.tcl
cosim_opts: >
wave_opts: >-
-debug_access+all
-ucli
-do vcs.tcl
cosim_opts: >-
-f <core_ibex>/ibex_dv_cosim_dpi.f
+define+INC_IBEX_COSIM
-LDFLAGS '<ISS_LDFLAGS>'
@ -52,115 +62,187 @@
<ISS_LIBS>
-lstdc++
sim:
cmd: >
env SIM_DIR=<sim_dir>
<out>/vcs_simv +vcs+lic+wait <sim_opts> <wave_opts> <cov_opts>
+ntb_random_seed=<seed> +UVM_TESTNAME=<rtl_test>
+UVM_VERBOSITY=UVM_LOW +bin=<binary>
+ibex_tracer_file_base=<sim_dir>/trace_core
+cosim_log_file=<sim_dir>/spike_cosim.log
cmd:
- >-
env SIM_DIR=<test_dir>
<tb_dir>/vcs_simv
+vcs+lic+wait
+ntb_random_seed=<seed>
+UVM_TESTNAME=<rtl_test>
+UVM_VERBOSITY=UVM_LOW
+bin=<binary>
+ibex_tracer_file_base=<rtl_trace>
+cosim_log_file=<test_dir>/<iss_cosim_trace>
<sim_opts> <wave_opts> <cov_opts>
cov_opts: >
-cm line+tgl+assert+fsm+branch
-cm_dir <shared_cov_dir>/test.vdb
-cm_dir <dir_shared_cov>/test.vdb
-cm_log /dev/null
-assert nopostproc
-cm_name test_<test_name>_<seed>
+enable_ibex_fcov=1
wave_opts: >
-ucli -do <cwd>/vcs.tcl
-ucli
-do <cwd>/vcs.tcl
############################################################
- tool: questa
compile:
cmd:
- "vmap mtiUvm $QUESTA_HOME/questasim/uvm-1.2"
- "vlog -64
-f <core_ibex>/ibex_dv.f
-sv
-mfcu -cuname design_cuname
+define+UVM_REGEX_NO_DPI
+define+UVM
-timescale \"1 ns / 1 ps \"
-writetoplevels <out>/top.list
-l <out>/compile.log <cmp_opts>"
- >
vmap
mtiUvm $QUESTA_HOME/questasim/uvm-1.2
- >
vlog
-64
-f <core_ibex>/ibex_dv.f
-sv
-mfcu -cuname design_cuname
+define+UVM_REGEX_NO_DPI
+define+UVM
-timescale \"1 ns / 1 ps \"
-writetoplevels <tb_dir>/top.list
-l <tb_dir>/<rtl_log>
<cmp_opts>
sim:
cmd: >
vsim -64 -c <cov_opts> -do "run -a; quit -f" +designfile -f <out>/top.list <sim_opts> -sv_seed <seed> +access +r+w +UVM_TESTNAME=<rtl_test> +UVM_VERBOSITY=UVM_LOW +bin=<binary> +ibex_tracer_file_base="<sim_dir>/trace_core" -l <sim_dir>/sim.log
cov_opts: >
-do "coverage save -onexit <out>/cov.ucdb;"
cmd:
- >-
vsim
-64
-c
-do "run -a; quit -f"
+designfile -f <tb_dir>/top.list
<sim_opts>
-sv_seed <seed>
+access +r+w
+UVM_TESTNAME=<rtl_test>
+UVM_VERBOSITY=UVM_LOW
+bin=<binary>
+ibex_tracer_file_base=<rtl_trace>
-l <test_dir>/sim.log
<cov_opts>
cov_opts: >-
-do "coverage save -onexit <tb_dir>/cov.ucdb;"
############################################################
- tool: dsim
env_var: DSIM,DSIM_LIB_PATH
compile:
cmd:
- "mkdir -p <out>/dsim"
- "<DSIM> -sv -work <out>/dsim
-genimage image
-timescale 1ns/1ps
+incdir+$UVM_HOME/src
$UVM_HOME/src/uvm_pkg.sv
+define+UVM
+define+DSIM
+acc+rwb
-f <core_ibex>/ibex_dv.f
-l <out>/dsim/compile.log
-suppress EnumMustBePositive"
# NOTE(review): dropped the stray trailing '"' after EnumMustBePositive;
# it was residue from the old double-quoted string style and would have
# been passed to the tool as part of the argument.
- >-
<DSIM>
-sv
-work <tb_dir>
-genimage image
-timescale 1ns/1ps
+incdir+$UVM_HOME/src
$UVM_HOME/src/uvm_pkg.sv
+define+UVM
+define+DSIM
+acc+rwb
-f <core_ibex>/ibex_dv.f
-l <tb_build_log>
-suppress EnumMustBePositive
sim:
cmd: >
<DSIM> <sim_opts> -sv_seed <seed> -pli_lib <DSIM_LIB_PATH>/libuvm_dpi.so +acc+rwb -image image -work <out>/dsim <wave_opts> +UVM_TESTNAME=<rtl_test> +UVM_VERBOSITY=UVM_LOW +bin=<binary> +ibex_tracer_file_base=<sim_dir>/trace_core -l <sim_dir>/sim.log
cmd:
- >-
<DSIM>
-sv_seed <seed>
-pli_lib <DSIM_LIB_PATH>/libuvm_dpi.so
+acc+rwb
-image image
-work <tb_dir>
+UVM_TESTNAME=<rtl_test>
+UVM_VERBOSITY=UVM_LOW
+bin=<binary>
+ibex_tracer_file_base=<rtl_trace>
-l <rtl_sim_log>
<sim_opts> <wave_opts>
wave_opts: >
-waves waves.vcd
############################################################
- tool: riviera
env_var: ALDEC_PATH
compile:
cmd:
- "vlib <out>/work"
- "vlog -work <out>/work
<cmp_opts>
-uvmver 1.2
+define+UVM
-f <core_ibex>/ibex_dv.f"
- >-
vlib
<tb_dir>/work
- >-
vlog
-work <tb_dir>/work
-uvmver 1.2
+define+UVM
-f <core_ibex>/ibex_dv.f
<cmp_opts>
sim:
cmd: >
vsim -c <sim_opts> <cov_opts> -sv_seed <seed> -lib <out>/work +UVM_TESTNAME=<rtl_test> +UVM_VERBOSITY=UVM_LOW +bin=<binary> +ibex_tracer_file_base="<sim_dir>/trace_core" -l <sim_dir>/sim.log -do "run -all; endsim; quit -force"
cov_opts: >
-acdb_file <out>/cov.acdb
cmd:
- >-
vsim
-c
-sv_seed <seed>
-lib <tb_dir>/work
+UVM_TESTNAME=<rtl_test>
+UVM_VERBOSITY=UVM_LOW
+bin=<binary>
+ibex_tracer_file_base=<rtl_trace>
-l <rtl_sim_log>
-do "run -all; endsim; quit -force"
<sim_opts> <cov_opts>
cov_opts: >-
-acdb_file <tb_dir>/cov.acdb
############################################################
- tool: qrun
compile:
cmd:
- "qrun -f <core_ibex>/ibex_dv.f -uvmhome uvm-1.2
+define+UVM
-svinputport=net
-access=rw+/. -optimize
-suppress 2583
-mfcu -cuname design_cuname
-sv -o design_opt
-l <out>/qrun_compile_optimize.log
-outdir <out>/qrun.out"
# NOTE(review): fixed the '<tb_build_logj>' typo; the substitution key is
# 'tb_build_log', as used by the dsim and xlm entries.
- >-
qrun
-f <core_ibex>/ibex_dv.f
-uvmhome uvm-1.2
+define+UVM
-svinputport=net
-access=rw+/. -optimize
-suppress 2583
-mfcu -cuname design_cuname
-sv -o design_opt
-l <tb_build_log>
-outdir <tb_dir>/qrun.out
sim:
cmd: >
qrun -simulate -snapshot design_opt <cov_opts> <sim_opts> -sv_seed <seed> -outdir <out>/qrun.out
cov_opts: >
-coverage -ucdb <out>/cov.ucdb
cmd:
- >-
qrun
-simulate
-snapshot design_opt
-sv_seed <seed>
-outdir <tb_dir>/qrun.out
<sim_opts> <cov_opts>
cov_opts: >-
-coverage
-ucdb <tb_dir>/cov.ucdb
############################################################
- tool: xlm
env_var: dv_root, DUT_TOP, IBEX_ROOT
compile:
cmd:
- "xrun -64bit
-q
-f <core_ibex>/ibex_dv.f
-sv
-licqueue
-uvm
-uvmhome CDNS-1.2
-define UVM_REGEX_NO_DPI
-elaborate
-l <out>/compile.log
-xmlibdirpath <out>
<cmp_opts> <cov_opts> <wave_opts> <cosim_opts>"
- >-
xrun
-64bit
-q
-f <core_ibex>/ibex_dv.f
-sv
-licqueue
-uvm
-uvmhome CDNS-1.2
-define UVM_REGEX_NO_DPI
-elaborate
-l <tb_build_log>
-xmlibdirpath <tb_dir>
<cmp_opts> <cov_opts> <wave_opts> <cosim_opts>
cov_opts: >
-coverage all
-nowarn COVDEF
@ -175,30 +257,30 @@
+define+INC_IBEX_COSIM
-lstdc++
sim:
cmd: >
xrun -64bit
-R
-xmlibdirpath <out>
-licqueue
-svseed <seed>
-svrnc rand_struct
+UVM_TESTNAME=<rtl_test>
+UVM_VERBOSITY=UVM_LOW
+bin=<binary>
+ibex_tracer_file_base=<sim_dir>/trace_core
+cosim_log_file=<sim_dir>/spike_cosim.log
-l <sim_dir>/xrun.log
-nokey
<sim_opts>
<cov_opts>
<wave_opts>
cmd:
- >-
xrun
-64bit
-R
-xmlibdirpath <tb_dir>
-licqueue
-svseed <seed>
-svrnc rand_struct
-nokey
-l <rtl_sim_log>
+UVM_TESTNAME=<rtl_test>
+UVM_VERBOSITY=UVM_LOW
+bin=<binary>
+ibex_tracer_file_base=<rtl_trace>
+cosim_log_file=<iss_cosim_trace>
<cov_opts> <wave_opts> <sim_opts>
cov_opts: >
-covmodeldir <sim_dir>/coverage
-covworkdir <sim_dir>
-covmodeldir <test_dir>/coverage
-covworkdir <test_dir>
-covscope coverage
-covtest <test_name>.<seed>
+enable_ibex_fcov=1
wave_opts: >
-input @"database -open <sim_dir>/waves -shm -default"
-input @"database -open <test_dir>/waves -shm -default"
-input @"probe -create core_ibex_tb_top -all -memories -depth all"
-input @"run"

View File

@ -11,6 +11,10 @@ git+https://github.com/lowRISC/fusesoc.git@ot
pyyaml
mako
junit-xml
# dataclasses backport (PyPI "dataclasses") — only needed on Python < 3.7;
# confirm the minimum supported interpreter before enabling this.
gitpython
typeguard
portalocker
# Needed by dvsim.py (not actually used in Ibex)
hjson

0
util/__init__.py Normal file
View File