Enabled scalafmt on more sources (#1429)

Nandor Licker 2023-02-09 19:25:39 +02:00 committed by GitHub
parent 30fd72bc7f
commit d194593ece
6 changed files with 464 additions and 445 deletions


@@ -29,7 +29,6 @@ project {
"glob:**midas/src/main/scala/midas/SynthUnitTests.scala",
"glob:**midas/src/main/scala/midas/core/CPUManagedStreamEngine.scala",
"glob:**midas/src/main/scala/midas/core/Channel.scala",
"glob:**midas/src/main/scala/midas/core/FPGATop.scala",
"glob:**midas/src/main/scala/midas/core/Interfaces.scala",
"glob:**midas/src/main/scala/midas/core/LIBDNUnitTest.scala",
"glob:**midas/src/main/scala/midas/core/SimUtils.scala",
@@ -105,8 +104,6 @@ project {
"glob:**midas/src/main/scala/midas/passes/fame/package.scala",
"glob:**midas/src/main/scala/midas/passes/package.scala",
"glob:**midas/src/main/scala/midas/passes/xilinx/package.scala",
"glob:**midas/src/main/scala/midas/platform/F1Shim.scala",
"glob:**midas/src/main/scala/midas/platform/PlatformShim.scala",
"glob:**midas/src/main/scala/midas/stage/AddDerivedAnnotations.scala",
"glob:**midas/src/main/scala/midas/stage/Annotations.scala",
"glob:**midas/src/main/scala/midas/stage/Checks.scala",
@@ -130,7 +127,6 @@ project {
"glob:**midas/src/main/scala/midas/widgets/CppGeneration.scala",
"glob:**midas/src/main/scala/midas/widgets/FuzzingUIntSource.scala",
"glob:**midas/src/main/scala/midas/widgets/HostPort.scala",
"glob:**midas/src/main/scala/midas/widgets/Lib.scala",
"glob:**midas/src/main/scala/midas/widgets/LoadMem.scala",
"glob:**midas/src/main/scala/midas/widgets/Master.scala",
"glob:**midas/src/main/scala/midas/widgets/PeekPokeIO.scala",
@@ -140,7 +136,6 @@ project {
"glob:**midas/src/main/scala/midas/widgets/TerminationBridge.scala",
"glob:**midas/src/main/scala/midas/widgets/UsesBridgeStreams.scala",
"glob:**midas/src/main/scala/midas/widgets/UsesHostDRAM.scala",
"glob:**midas/src/main/scala/midas/widgets/Widget.scala",
"glob:**midas/src/test/scala/firrtl/testutils/FirrtlSpec.scala",
"glob:**midas/src/test/scala/firrtl/testutils/LeanTransformSpec.scala",
"glob:**midas/src/test/scala/firrtl/testutils/PassTests.scala",


@@ -5,59 +5,52 @@ package core
import junctions._
import midas.widgets._
import midas.passes.{HostClockSource}
import midas.passes.HostClockSource
import chisel3._
import chisel3.util._
import freechips.rocketchip.amba.axi4._
import freechips.rocketchip.config.{Parameters, Field}
import freechips.rocketchip.config.{Field, Parameters}
import freechips.rocketchip.diplomacy._
import scala.collection.immutable.ListMap
/**
* The following [[Field]]s capture the parameters of the four AXI4 bus types
* presented to a simulator (in [[FPGATop]]). A [[PlatformShim]] is free to
* adapt these widths, apply address offsets, etc..., but the values set here
* define what is used in metasimulation, since it treats
* [[FPGATop]] as the root of the module hierarchy.
/** The following [[Field]] s capture the parameters of the four AXI4 bus types presented to a simulator (in
* [[FPGATop]]). A [[PlatformShim]] is free to adapt these widths, apply address offsets, etc..., but the values set
* here define what is used in metasimulation, since it treats [[FPGATop]] as the root of the module hierarchy.
*/
/** CPU-managed AXI4, aka "pcis" on EC2 F1. Used by the CPU to do DMA into fabric-controlled memories.
* This could include in-fabric RAMs/FIFOs (for bridge streams) or (in the future) FPGA-attached DRAM channels.
/** CPU-managed AXI4, aka "pcis" on EC2 F1. Used by the CPU to do DMA into fabric-controlled memories. This could
* include in-fabric RAMs/FIFOs (for bridge streams) or (in the future) FPGA-attached DRAM channels.
*/
case object CPUManagedAXI4Key extends Field[Option[CPUManagedAXI4Params]]
/** FPGA-managed AXI4, aka "pcim" on F1. Used by the fabric to do DMA into
* the host-CPU's memory. Used to implement bridge streams on platforms that lack a CPU-managed AXI4 interface.
* Set this to None if this interface is not present on the host.
/** FPGA-managed AXI4, aka "pcim" on F1. Used by the fabric to do DMA into the host-CPU's memory. Used to implement
* bridge streams on platforms that lack a CPU-managed AXI4 interface. Set this to None if this interface is not
* present on the host.
*/
case object FPGAManagedAXI4Key extends Field[Option[FPGAManagedAXI4Params]]
// The AXI4 widths for a single host-DRAM channel
case object HostMemChannelKey extends Field[HostMemChannelParams]
case object HostMemChannelKey extends Field[HostMemChannelParams]
// The number of host-DRAM channels -> all channels must have the same AXI4 widths
case object HostMemNumChannels extends Field[Int]
// See widgets/Widget.scala for CtrlNastiKey -> Configures the simulation control bus
/**
* DRAM Allocation Knobs
/** DRAM Allocation Knobs
*
* Constrains how much of memory controller's id space is used. If no
* constraint is provided, the unified id space of all masters is presented
* directly to each memory controller. If this id width exceeds that of the
* controller, Golden Gate will throw an get an elaboration-time error
* requesting a constraint. See [[AXI4IdSpaceConstraint]].
* Constrains how much of memory controller's id space is used. If no constraint is provided, the unified id space of
* all masters is presented directly to each memory controller. If this id width exceeds that of the controller, Golden
* Gate will throw an get an elaboration-time error requesting a constraint. See [[AXI4IdSpaceConstraint]].
*/
case object HostMemIdSpaceKey extends Field[Option[AXI4IdSpaceConstraint]](None)
case object HostMemIdSpaceKey extends Field[Option[AXI4IdSpaceConstraint]](None)
/** Constrains how many id bits of the host memory channel are used, as well
* as how many requests are issued per id. This generates hardware
* proportional to (2^idBits) * maxFlight.
*
* @param idBits The number of lower idBits of the host memory channel to use.
* @param maxFlight A bound on the number of requests the simulator will make per id.
/** Constrains how many id bits of the host memory channel are used, as well as how many requests are issued per id.
* This generates hardware proportional to (2^idBits) * maxFlight.
*
* @param idBits
* The number of lower idBits of the host memory channel to use.
* @param maxFlight
* A bound on the number of requests the simulator will make per id.
*/
case class AXI4IdSpaceConstraint(idBits: Int = 4, maxFlight: Int = 8)
@@ -66,70 +59,73 @@ case object MemNastiKey extends Field[NastiParameters]
/** Specifies the size and width of external memory ports */
case class HostMemChannelParams(
size: BigInt,
beatBytes: Int,
idBits: Int,
maxXferBytes: Int = 256) {
def axi4BundleParams = AXI4BundleParameters(
addrBits = log2Ceil(size),
dataBits = 8 * beatBytes,
idBits = idBits)
size: BigInt,
beatBytes: Int,
idBits: Int,
maxXferBytes: Int = 256,
) {
def axi4BundleParams = AXI4BundleParameters(addrBits = log2Ceil(size), dataBits = 8 * beatBytes, idBits = idBits)
}
/**
* Specifies the AXI4 interface for FPGA-driven DMA
/** Specifies the AXI4 interface for FPGA-driven DMA
*
* @param size The size, in bytes, of the addressable region on the host CPU.
* The addressable region is assumed to span [0, size). Host-specific offsets
* should be handled by the FPGAShim.
* @param dataBits The width of the interface in bits.
* @param idBits The number of ID bits supported by the interface.
* @param writeTransferSizes Supported write transfer sizes in bytes
* @param readTransferSizes Supported read transfer sizes in bytes
* @param interleavedId Set to indicate DMA responses may be interleaved.
* @param size
* The size, in bytes, of the addressable region on the host CPU. The addressable region is assumed to span [0,
* size). Host-specific offsets should be handled by the FPGAShim.
* @param dataBits
* The width of the interface in bits.
* @param idBits
* The number of ID bits supported by the interface.
* @param writeTransferSizes
* Supported write transfer sizes in bytes
* @param readTransferSizes
* Supported read transfer sizes in bytes
* @param interleavedId
* Set to indicate DMA responses may be interleaved.
*/
case class FPGAManagedAXI4Params(
size: BigInt,
dataBits: Int,
idBits: Int,
writeTransferSizes: TransferSizes,
readTransferSizes: TransferSizes,
interleavedId: Option[Int] = Some(0),
) {
size: BigInt,
dataBits: Int,
idBits: Int,
writeTransferSizes: TransferSizes,
readTransferSizes: TransferSizes,
interleavedId: Option[Int] = Some(0),
) {
require(interleavedId == Some(0), "IdDeinterleaver not currently instantiated in FPGATop")
require((isPow2(size)) && (size % 4096 == 0),
"The size of the FPGA-managed DMA regions must be a power of 2, and larger than a page.")
require(
(isPow2(size)) && (size % 4096 == 0),
"The size of the FPGA-managed DMA regions must be a power of 2, and larger than a page.",
)
def axi4BundleParams = AXI4BundleParameters(
addrBits = log2Ceil(size),
dataBits = dataBits,
idBits = idBits,
idBits = idBits,
)
}
case class CPUManagedAXI4Params(
addrBits: Int,
dataBits: Int,
idBits: Int,
maxFlight: Option[Int] = None,
) {
addrBits: Int,
dataBits: Int,
idBits: Int,
maxFlight: Option[Int] = None,
) {
def axi4BundleParams = AXI4BundleParameters(
addrBits = addrBits,
dataBits = dataBits,
idBits = idBits,
idBits = idBits,
)
}
// Platform agnostic wrapper of the simulation models for FPGA
class FPGATop(implicit p: Parameters) extends LazyModule with HasWidgets {
require(p(HostMemNumChannels) <= 4, "Midas-level simulation harnesses support up to 4 channels")
require(p(CtrlNastiKey).dataBits == 32,
"Simulation control bus must be 32-bits wide per AXI4-lite specification")
require(p(CtrlNastiKey).dataBits == 32, "Simulation control bus must be 32-bits wide per AXI4-lite specification")
val master = addWidget(new SimulationMaster)
val bridgeAnnos = p(SimWrapperKey).annotations collect { case ba: BridgeIOAnnotation => ba }
val bridgeModuleMap: ListMap[BridgeIOAnnotation, BridgeModule[_ <: Record with HasChannels]] =
ListMap((bridgeAnnos.map(anno => anno -> addWidget(anno.elaborateWidget))):_*)
val bridgeAnnos = p(SimWrapperKey).annotations.collect { case ba: BridgeIOAnnotation => ba }
val bridgeModuleMap: ListMap[BridgeIOAnnotation, BridgeModule[_ <: Record with HasChannels]] =
ListMap((bridgeAnnos.map(anno => anno -> addWidget(anno.elaborateWidget))): _*)
// Find all bridges that wish to be allocated FPGA DRAM, and group them
// according to their memoryRegionName. Requested addresses will be unified
@@ -139,49 +135,57 @@ class FPGATop(implicit p: Parameters) extends LazyModule with HasWidgets {
// When cacheline-striping a target's memory system across multiple FASED
// memory channels, it's useful to ee a single contiguous region of host
// memory that corresponds to the target's memory space.
val bridgesRequiringDRAM = bridgeModuleMap.values.collect({ case b: UsesHostDRAM => b})
val combinedRegions = bridgesRequiringDRAM.groupBy(_.memoryRegionName)
val regionTuples = combinedRegions.values.map { bridgeSeq =>
val bridgesRequiringDRAM = bridgeModuleMap.values.collect({ case b: UsesHostDRAM => b })
val combinedRegions = bridgesRequiringDRAM.groupBy(_.memoryRegionName)
val regionTuples = combinedRegions.values.map { bridgeSeq =>
val unifiedAS = AddressSet.unify(bridgeSeq.flatMap(_.memorySlaveConstraints.address).toSeq)
(bridgeSeq, unifiedAS)
}
// Tie-break with the name of the region.
val sortedRegionTuples = regionTuples.toSeq.sortBy(r => (BytesOfDRAMRequired(r._2), r._1.head.memoryRegionName)).reverse
val sortedRegionTuples =
regionTuples.toSeq.sortBy(r => (BytesOfDRAMRequired(r._2), r._1.head.memoryRegionName)).reverse
// Allocate memory regions using a base-and-bounds scheme
val dramOffsetsRev = sortedRegionTuples.foldLeft(Seq(BigInt(0)))({
case (offsets, (bridgeSeq, addresses)) =>
val requestedCapacity = BytesOfDRAMRequired(addresses)
val pageAligned4k = ((requestedCapacity + 4095) >> 12) << 12
(offsets.head + pageAligned4k) +: offsets
val dramOffsetsRev = sortedRegionTuples.foldLeft(Seq(BigInt(0)))({ case (offsets, (bridgeSeq, addresses)) =>
val requestedCapacity = BytesOfDRAMRequired(addresses)
val pageAligned4k = ((requestedCapacity + 4095) >> 12) << 12
(offsets.head + pageAligned4k) +: offsets
})
val totalDRAMAllocated = dramOffsetsRev.head
val dramOffsets = dramOffsetsRev.tail.reverse
val availableDRAM = p(HostMemNumChannels) * p(HostMemChannelKey).size
require(totalDRAMAllocated <= availableDRAM,
s"Total requested DRAM of ${totalDRAMAllocated}B, exceeds host capacity of ${availableDRAM}B")
val dramOffsets = dramOffsetsRev.tail.reverse
val availableDRAM = p(HostMemNumChannels) * p(HostMemChannelKey).size
require(
totalDRAMAllocated <= availableDRAM,
s"Total requested DRAM of ${totalDRAMAllocated}B, exceeds host capacity of ${availableDRAM}B",
)
val loadMem = addWidget(new LoadMemWidget(totalDRAMAllocated))
val loadMem = addWidget(new LoadMemWidget(totalDRAMAllocated))
// Host DRAM handling
val memChannelParams = p(HostMemChannelKey)
// Define multiple single-channel nodes, instead of one multichannel node to more easily
// Define multiple single-channel nodes, instead of one multichannel node to more easily
// bind a subset to the XBAR.
val memAXI4Nodes = Seq.tabulate(p(HostMemNumChannels)) { channel =>
val memAXI4Nodes = Seq.tabulate(p(HostMemNumChannels)) { channel =>
val device = new MemoryDevice
val base = channel * memChannelParams.size
val base = channel * memChannelParams.size
AXI4SlaveNode(
Seq(AXI4SlavePortParameters(
slaves = Seq(AXI4SlaveParameters(
address = Seq(AddressSet(base, memChannelParams.size - 1)),
resources = device.reg,
regionType = RegionType.UNCACHED, // cacheable
executable = false,
supportsWrite = TransferSizes(1, memChannelParams.maxXferBytes),
supportsRead = TransferSizes(1, memChannelParams.maxXferBytes),
interleavedId = Some(0))), // slave does not interleave read responses
beatBytes = memChannelParams.beatBytes)
))
Seq(
AXI4SlavePortParameters(
slaves = Seq(
AXI4SlaveParameters(
address = Seq(AddressSet(base, memChannelParams.size - 1)),
resources = device.reg,
regionType = RegionType.UNCACHED, // cacheable
executable = false,
supportsWrite = TransferSizes(1, memChannelParams.maxXferBytes),
supportsRead = TransferSizes(1, memChannelParams.maxXferBytes),
interleavedId = Some(0),
)
), // slave does not interleave read responses
beatBytes = memChannelParams.beatBytes,
)
)
)
}
// In keeping with the Nasti implementation, we put all channels on a single XBar.
@@ -190,13 +194,13 @@ class FPGATop(implicit p: Parameters) extends LazyModule with HasWidgets {
private def bindActiveHostChannel(channelNode: AXI4SlaveNode): Unit = p(HostMemIdSpaceKey) match {
case Some(AXI4IdSpaceConstraint(idBits, maxFlight)) =>
(channelNode := AXI4Buffer()
:= AXI4UserYanker(Some(maxFlight))
:= AXI4IdIndexer(idBits)
:= AXI4Buffer()
:= xbar)
case None =>
:= AXI4UserYanker(Some(maxFlight))
:= AXI4IdIndexer(idBits)
:= AXI4Buffer()
:= xbar)
case None =>
(channelNode := AXI4Buffer()
:= xbar)
:= xbar)
}
// Connect only as many channels as needed by bridges requesting host DRAM.
@@ -204,7 +208,8 @@ class FPGATop(implicit p: Parameters) extends LazyModule with HasWidgets {
// 1) It is still assumed in some places, see loadmem
// 2) Almost all simulators we've built to date require at least one channel
// 3) In F1, the first DRAM channel cannot be omitted.
val dramChannelsRequired = math.max(1, math.ceil(totalDRAMAllocated.toDouble / p(HostMemChannelKey).size.toLong).toInt)
val dramChannelsRequired =
math.max(1, math.ceil(totalDRAMAllocated.toDouble / p(HostMemChannelKey).size.toLong).toInt)
for ((node, idx) <- memAXI4Nodes.zipWithIndex) {
if (idx < dramChannelsRequired) {
bindActiveHostChannel(node)
@@ -214,19 +219,23 @@ class FPGATop(implicit p: Parameters) extends LazyModule with HasWidgets {
}
xbar := loadMem.toHostMemory
val targetMemoryRegions = Map(sortedRegionTuples.zip(dramOffsets).map({ case ((bridgeSeq, addresses), hostBaseAddr) =>
val regionName = bridgeSeq.head.memoryRegionName
val virtualBaseAddr = addresses.map(_.base).min
val offset = hostBaseAddr - virtualBaseAddr
val preTranslationPort = (xbar
:=* AXI4Buffer()
:=* AXI4AddressTranslation(offset, addresses, regionName))
bridgeSeq.foreach { bridge =>
(preTranslationPort := AXI4Deinterleaver(bridge.memorySlaveConstraints.supportsRead.max)
:= bridge.memoryMasterNode)
}
regionName -> offset
}):_*)
val targetMemoryRegions = Map(
sortedRegionTuples
.zip(dramOffsets)
.map({ case ((bridgeSeq, addresses), hostBaseAddr) =>
val regionName = bridgeSeq.head.memoryRegionName
val virtualBaseAddr = addresses.map(_.base).min
val offset = hostBaseAddr - virtualBaseAddr
val preTranslationPort = (xbar
:=* AXI4Buffer()
:=* AXI4AddressTranslation(offset, addresses, regionName))
bridgeSeq.foreach { bridge =>
(preTranslationPort := AXI4Deinterleaver(bridge.memorySlaveConstraints.supportsRead.max)
:= bridge.memoryMasterNode)
}
regionName -> offset
}): _*
)
def printHostDRAMSummary(): Unit = {
def toIECString(value: BigInt): String = {
@@ -239,61 +248,72 @@ class FPGATop(implicit p: Parameters) extends LazyModule with HasWidgets {
f"${dv / 1024}%.3f KiB"
}
}
println(s"Total Host-FPGA DRAM Allocated: ${toIECString(totalDRAMAllocated)} of ${toIECString(availableDRAM)} available.")
println(
s"Total Host-FPGA DRAM Allocated: ${toIECString(totalDRAMAllocated)} of ${toIECString(availableDRAM)} available."
)
if (sortedRegionTuples.nonEmpty) {
println("Host-FPGA DRAM Allocation Map:")
}
sortedRegionTuples.zip(dramOffsets).foreach({ case ((bridgeSeq, addresses), offset) =>
val regionName = bridgeSeq.head.memoryRegionName
val bridgeNames = bridgeSeq.map(_.getWName).mkString(", ")
println(f" ${regionName} -> [0x${offset}%X, 0x${offset + BytesOfDRAMRequired(addresses) - 1}%X]")
println(f" Associated bridges: ${bridgeNames}")
})
sortedRegionTuples
.zip(dramOffsets)
.foreach({ case ((bridgeSeq, addresses), offset) =>
val regionName = bridgeSeq.head.memoryRegionName
val bridgeNames = bridgeSeq.map(_.getWName).mkString(", ")
println(f" ${regionName} -> [0x${offset}%X, 0x${offset + BytesOfDRAMRequired(addresses) - 1}%X]")
println(f" Associated bridges: ${bridgeNames}")
})
}
val bridgesWithToHostCPUStreams = bridgeModuleMap.values
.collect { case b: StreamToHostCPU => b }
val hasToHostStreams = bridgesWithToHostCPUStreams.nonEmpty
val hasToHostStreams = bridgesWithToHostCPUStreams.nonEmpty
val bridgesWithFromHostCPUStreams = bridgeModuleMap.values
.collect { case b: StreamFromHostCPU => b }
val hasFromHostCPUStreams = bridgesWithFromHostCPUStreams.nonEmpty
val hasFromHostCPUStreams = bridgesWithFromHostCPUStreams.nonEmpty
def printStreamSummary(streams: Iterable[StreamParameters], header: String): Unit = {
val summaries = streams.toList match {
case Nil => "None" :: Nil
case o => o.map { _.summaryString }
case o => o.map { _.summaryString }
}
println((header +: summaries).mkString("\n "))
}
val toCPUStreamParams = bridgesWithToHostCPUStreams.map { _.streamSourceParams }
val toCPUStreamParams = bridgesWithToHostCPUStreams.map { _.streamSourceParams }
val fromCPUStreamParams = bridgesWithFromHostCPUStreams.map { _.streamSinkParams }
val streamingEngine = addWidget(p(StreamEngineInstantiatorKey)(
StreamEngineParameters(toCPUStreamParams.toSeq, fromCPUStreamParams.toSeq), p)
val streamingEngine = addWidget(
p(StreamEngineInstantiatorKey)(StreamEngineParameters(toCPUStreamParams.toSeq, fromCPUStreamParams.toSeq), p)
)
require(streamingEngine.fpgaManagedAXI4NodeOpt.isEmpty || p(FPGAManagedAXI4Key).nonEmpty,
"Selected StreamEngine uses the FPGA-managed AXI4 interface but it is not available on this platform."
require(
streamingEngine.fpgaManagedAXI4NodeOpt.isEmpty || p(FPGAManagedAXI4Key).nonEmpty,
"Selected StreamEngine uses the FPGA-managed AXI4 interface but it is not available on this platform.",
)
require(streamingEngine.cpuManagedAXI4NodeOpt.isEmpty || p(CPUManagedAXI4Key).nonEmpty,
"Selected StreamEngine uses the CPU-managed AXI4 interface, but it is not available on this platform."
require(
streamingEngine.cpuManagedAXI4NodeOpt.isEmpty || p(CPUManagedAXI4Key).nonEmpty,
"Selected StreamEngine uses the CPU-managed AXI4 interface, but it is not available on this platform.",
)
val cpuManagedAXI4NodeTuple = p(CPUManagedAXI4Key).map { params =>
val node = AXI4MasterNode(Seq(AXI4MasterPortParameters(
masters = Seq(AXI4MasterParameters(
name = "cpu-managed-axi4",
id = IdRange(0, 1 << params.idBits),
aligned = false,
maxFlight = params.maxFlight, // None = infinite, else is a per-ID cap
))
val cpuManagedAXI4NodeTuple = p(CPUManagedAXI4Key).map { params =>
val node = AXI4MasterNode(
Seq(
AXI4MasterPortParameters(
masters = Seq(
AXI4MasterParameters(
name = "cpu-managed-axi4",
id = IdRange(0, 1 << params.idBits),
aligned = false,
maxFlight = params.maxFlight,// None = infinite, else is a per-ID cap
)
)
)
)
))
)
streamingEngine.cpuManagedAXI4NodeOpt.foreach {
_ := AXI4Buffer() := node
}
@@ -302,22 +322,28 @@ class FPGATop(implicit p: Parameters) extends LazyModule with HasWidgets {
val fpgaManagedAXI4NodeTuple = p(FPGAManagedAXI4Key).map { params =>
val node = AXI4SlaveNode(
Seq(AXI4SlavePortParameters(
slaves = Seq(AXI4SlaveParameters(
address = Seq(AddressSet(0, params.size - 1)),
resources = (new MemoryDevice).reg,
regionType = RegionType.UNCACHED, // cacheable
executable = false,
supportsWrite = params.writeTransferSizes,
supportsRead = params.readTransferSizes,
interleavedId = params.interleavedId)),
beatBytes = params.dataBits / 8)
))
Seq(
AXI4SlavePortParameters(
slaves = Seq(
AXI4SlaveParameters(
address = Seq(AddressSet(0, params.size - 1)),
resources = (new MemoryDevice).reg,
regionType = RegionType.UNCACHED, // cacheable
executable = false,
supportsWrite = params.writeTransferSizes,
supportsRead = params.readTransferSizes,
interleavedId = params.interleavedId,
)
),
beatBytes = params.dataBits / 8,
)
)
)
streamingEngine.fpgaManagedAXI4NodeOpt match {
case Some(engineNode) =>
node := AXI4IdIndexer(params.idBits) := AXI4Buffer() := engineNode
case None =>
case None =>
node := AXI4TieOff()
}
(node, params)
@@ -336,9 +362,9 @@ class FPGATopImp(outer: FPGATop)(implicit p: Parameters) extends LazyModuleImp(o
HostClockSource.annotate(clock)
val ctrl = IO(Flipped(WidgetMMIO()))
val mem = IO(Vec(p(HostMemNumChannels), AXI4Bundle(p(HostMemChannelKey).axi4BundleParams)))
val mem = IO(Vec(p(HostMemNumChannels), AXI4Bundle(p(HostMemChannelKey).axi4BundleParams)))
val cpu_managed_axi4 = outer.cpuManagedAXI4NodeTuple.map { case (node, params) =>
val cpu_managed_axi4 = outer.cpuManagedAXI4NodeTuple.map { case (node, params) =>
val port = IO(Flipped(AXI4Bundle(params.axi4BundleParams)))
node.out.head._1 <> port
port
@@ -356,14 +382,16 @@ class FPGATopImp(outer: FPGATop)(implicit p: Parameters) extends LazyModuleImp(o
fpga_managed_axi4.foreach(dontTouch(_))
(mem zip outer.memAXI4Nodes.map(_.in.head)).foreach { case (io, (bundle, _)) =>
require(bundle.params.idBits <= p(HostMemChannelKey).idBits,
require(
bundle.params.idBits <= p(HostMemChannelKey).idBits,
s"""| Required memory channel ID bits exceeds that present on host.
| Required: ${bundle.params.idBits} Available: ${p(HostMemChannelKey).idBits}
| Enable host ID reuse with the HostMemIdSpaceKey""".stripMargin)
| Enable host ID reuse with the HostMemIdSpaceKey""".stripMargin,
)
io <> bundle
}
val sim = Module(new SimWrapper(p(SimWrapperKey)))
val sim = Module(new SimWrapper(p(SimWrapperKey)))
val simIo = sim.channelPorts
// Instantiate bridge widgets.
@@ -372,20 +400,28 @@ class FPGATopImp(outer: FPGATop)(implicit p: Parameters) extends LazyModuleImp(o
bridgeMod.module.hPort.connectChannels2Port(bridgeAnno, simIo)
})
outer.printStreamSummary(outer.toCPUStreamParams, "Bridge Streams To CPU:")
outer.printStreamSummary(outer.toCPUStreamParams, "Bridge Streams To CPU:")
outer.printStreamSummary(outer.fromCPUStreamParams, "Bridge Streams From CPU:")
for (((sink, src), idx) <- outer.streamingEngine.streamsToHostCPU.zip(outer.bridgesWithToHostCPUStreams).zipWithIndex) {
for (
((sink, src), idx) <- outer.streamingEngine.streamsToHostCPU.zip(outer.bridgesWithToHostCPUStreams).zipWithIndex
) {
val allocatedIdx = src.toHostStreamIdx
require(allocatedIdx == idx,
s"Allocated to-host stream index ${allocatedIdx} does not match stream vector index ${idx}.")
require(
allocatedIdx == idx,
s"Allocated to-host stream index ${allocatedIdx} does not match stream vector index ${idx}.",
)
sink <> src.streamEnq
}
for (((sink, src), idx) <- outer.bridgesWithFromHostCPUStreams.zip(outer.streamingEngine.streamsFromHostCPU).zipWithIndex) {
for (
((sink, src), idx) <- outer.bridgesWithFromHostCPUStreams.zip(outer.streamingEngine.streamsFromHostCPU).zipWithIndex
) {
val allocatedIdx = sink.fromHostStreamIdx
require(allocatedIdx == idx,
s"Allocated from-host stream index ${allocatedIdx} does not match stream vector index ${idx}.")
require(
allocatedIdx == idx,
s"Allocated from-host stream index ${allocatedIdx} does not match stream vector index ${idx}.",
)
sink.streamDeq <> src
}
@@ -393,10 +429,10 @@ class FPGATopImp(outer: FPGATop)(implicit p: Parameters) extends LazyModuleImp(o
outer.printMemoryMapSummary()
outer.printHostDRAMSummary()
val confCtrl = (ctrl.nastiXIdBits, ctrl.nastiXAddrBits, ctrl.nastiXDataBits)
val memParams = p(HostMemChannelKey).axi4BundleParams
val confMem = (memParams.idBits, memParams.addrBits, memParams.dataBits)
val confCPUManaged = cpu_managed_axi4.map(m => (m.params.idBits, m.params.addrBits, m.params.dataBits))
val confCtrl = (ctrl.nastiXIdBits, ctrl.nastiXAddrBits, ctrl.nastiXDataBits)
val memParams = p(HostMemChannelKey).axi4BundleParams
val confMem = (memParams.idBits, memParams.addrBits, memParams.dataBits)
val confCPUManaged = cpu_managed_axi4.map(m => (m.params.idBits, m.params.addrBits, m.params.dataBits))
val confFPGAManaged = fpga_managed_axi4.map(m => (m.params.idBits, m.params.addrBits, m.params.dataBits))
def genHeader(sb: StringBuilder, target: String)(implicit p: Parameters) = {
@@ -404,7 +440,7 @@ class FPGATopImp(outer: FPGATop)(implicit p: Parameters) extends LazyModuleImp(o
sb.append("#ifdef GET_METASIM_INTERFACE_CONFIG\n")
def printConfig(conf: (Int, Int, Int)) : Unit = {
def printConfig(conf: (Int, Int, Int)): Unit = {
val (idBits, addrBits, dataBits) = conf
sb.append("AXI4Config{")
sb.append(s"${idBits}, ${addrBits}, ${dataBits}")
@@ -422,12 +458,12 @@ class FPGATopImp(outer: FPGATop)(implicit p: Parameters) extends LazyModuleImp(o
sb.append(",\n.cpu_managed = ")
confCPUManaged match {
case None => sb.append("std::nullopt")
case None => sb.append("std::nullopt")
case Some(conf) => printConfig(conf)
}
sb.append(",\n.fpga_managed = ")
confFPGAManaged match {
case None => sb.append("std::nullopt")
case None => sb.append("std::nullopt")
case Some(conf) => printConfig(conf)
}
sb.append(s",\n.target_name = ${CStrLit(target).toC}")


@@ -4,11 +4,11 @@ package platform
import chisel3._
import chisel3.util._
import junctions._
import freechips.rocketchip.config.{Parameters, Field}
import freechips.rocketchip.config.{Field, Parameters}
import freechips.rocketchip.diplomacy.LazyModuleImp
import freechips.rocketchip.util.HeterogeneousBag
import midas.core.{CPUManagedAXI4Key}
import midas.core.CPUManagedAXI4Key
import midas.widgets.{AXI4Printf, CtrlNastiKey}
import midas.stage.GoldenGateOutputFileAnnotation
@@ -16,18 +16,19 @@ case object AXIDebugPrint extends Field[Boolean]
class F1Shim(implicit p: Parameters) extends PlatformShim {
lazy val module = new LazyModuleImp(this) {
val io_master = IO(Flipped(new NastiIO()(p alterPartial { case NastiKey => p(CtrlNastiKey) })))
val io_dma = IO(Flipped(new NastiIO()(p alterPartial {
case NastiKey => NastiParameters(p(CPUManagedAXI4Key).get.axi4BundleParams) })))
val io_slave = IO(HeterogeneousBag(top.module.mem.map(x => x.cloneType)))
val io_master = IO(Flipped(new NastiIO()(p.alterPartial { case NastiKey => p(CtrlNastiKey) })))
val io_dma = IO(Flipped(new NastiIO()(p.alterPartial { case NastiKey =>
NastiParameters(p(CPUManagedAXI4Key).get.axi4BundleParams)
})))
val io_slave = IO(HeterogeneousBag(top.module.mem.map(x => x.cloneType)))
if (p(AXIDebugPrint)) {
AXI4Printf(io_master, "master")
AXI4Printf(io_dma, "dma")
io_slave.zipWithIndex foreach { case (io, idx) => AXI4Printf(io, s"slave_${idx}") }
AXI4Printf(io_dma, "dma")
io_slave.zipWithIndex.foreach { case (io, idx) => AXI4Printf(io, s"slave_${idx}") }
}
top.module.ctrl <> io_master
top.module.ctrl <> io_master
AXI4NastiAssigner.toAXI4(top.module.cpu_managed_axi4.get, io_dma)
io_slave.zip(top.module.mem).foreach({ case (io, bundle) => io <> bundle })
@@ -48,6 +49,7 @@ class F1Shim(implicit p: Parameters) extends PlatformShim {
|`define USE_DDR_CHANNEL_B ${channelInUse(2)}
|`define USE_DDR_CHANNEL_D ${channelInUse(3)}
|""".stripMargin,
fileSuffix = ".defines.vh")
fileSuffix = ".defines.vh",
)
}
}


@@ -2,22 +2,18 @@
package midas.platform
import freechips.rocketchip.config.{Parameters}
import freechips.rocketchip.diplomacy.{LazyModule}
import freechips.rocketchip.config.Parameters
import freechips.rocketchip.diplomacy.LazyModule
import midas.Platform
import midas.core._
import midas.targetutils.xdc.SpecifyXDCCircuitPaths
import midas.{PreLinkCircuitPath, PostLinkCircuitPath}
import midas.{PostLinkCircuitPath, PreLinkCircuitPath}
/**
* Generates the platform wrapper (which includes most of the chisel-generated
* RTL that constitutes the simulator, including BridgeModules) using
* parameters instance and the required annotations from the transformed
* target design.
*
/** Generates the platform wrapper (which includes most of the chisel-generated RTL that constitutes the simulator,
* including BridgeModules) using parameters instance and the required annotations from the transformed target design.
*/
private [midas] object PlatformShim {
private[midas] object PlatformShim {
def apply(config: SimWrapperConfig)(implicit p: Parameters): PlatformShim = {
p(Platform)(p.alterPartial({ case SimWrapperKey => config }))
}


@@ -11,41 +11,39 @@ import chisel3.experimental.DataMirror
import freechips.rocketchip.config.Parameters
import scala.collection.mutable.{ArrayBuffer, LinkedHashMap}
/** Takes an arbtirary Data type, and flattens it (akin to .flatten()).
* Returns a Seq of the leaf nodes with their absolute direction.
/** Takes an arbtirary Data type, and flattens it (akin to .flatten()). Returns a Seq of the leaf nodes with their
* absolute direction.
*/
object FlattenData {
object FlattenData {
def apply[T <: Data](gen: T): Seq[(Data, ActualDirection)] = {
gen match {
case a : Aggregate => a.getElements flatMap(e => this(e))
case e : Element => Seq((e, DataMirror.directionOf(e)))
case _ => throw new RuntimeException("Cannot handle this type")
case a: Aggregate => a.getElements.flatMap(e => this(e))
case e: Element => Seq((e, DataMirror.directionOf(e)))
case _ => throw new RuntimeException("Cannot handle this type")
}
}
}
/** An object that is useful for measuring the QoR of a module on FPGA
* CAD tools; achieves two goals
* 1) Registers all inputs/outputs to properly measure intra-module timing
* 2) Inserts a scan chain across the elements - this reduces the total module
* I/O, and prevents the FPGA CAD tools from optimizing I/O driven paths
/** An object that is useful for measuring the QoR of a module on FPGA CAD tools; achieves two goals 1) Registers all
* inputs/outputs to properly measure intra-module timing 2) Inserts a scan chain across the elements - this reduces
* the total module I/O, and prevents the FPGA CAD tools from optimizing I/O driven paths
*/
object ScanRegister {
def apply(data: Seq[Data], scanEnable: Bool, scanIn: Bool): Bool = {
val leaves = data flatMap FlattenData.apply
val leaves = data.flatMap(FlattenData.apply)
leaves.foldLeft(scanIn)((in: Bool, leaf: (Data, ActualDirection)) => {
val r = Reg(VecInit(leaf._1.asUInt.asBools).cloneType)
(leaf._2) match {
case ActualDirection.Output =>
r := VecInit(leaf._1.asUInt.asBools)
case ActualDirection.Input =>
case ActualDirection.Input =>
leaf._1 := r.asUInt
case _ => throw new RuntimeException("Directions on all elements must be specified")
case _ => throw new RuntimeException("Directions on all elements must be specified")
}
val out = WireInit(false.B)
when (scanEnable) {
out := r.foldLeft(in)((in: Bool, r: Bool) => {r := in; r })
when(scanEnable) {
out := r.foldLeft(in)((in: Bool, r: Bool) => { r := in; r })
}
out
})
@@ -53,31 +51,33 @@ object ScanRegister {
}
class SatUpDownCounterIO(val n: Int) extends Bundle {
val inc = Input(Bool())
val dec = Input(Bool())
val set = Input(Valid(UInt(log2Up(n+1).W)))
val max = Input(UInt(log2Up(n+1).W))
val inc = Input(Bool())
val dec = Input(Bool())
val set = Input(Valid(UInt(log2Up(n + 1).W)))
val max = Input(UInt(log2Up(n + 1).W))
val value = Output(UInt())
val full = Output(Bool())
val full = Output(Bool())
val empty = Output(Bool())
}
/** A saturating up down counter.
*
* @param n The maximum value at which the counter will saturate.
* @param n
* The maximum value at which the counter will saturate.
*/
class SatUpDownCounter(val n: Int) extends Module {
require(n >= 1)
val io = IO(new SatUpDownCounterIO(n))
val value = RegInit(0.U(log2Up(n + 1).W))
val io = IO(new SatUpDownCounterIO(n))
val value = RegInit(0.U(log2Up(n + 1).W))
io.value := value
io.full := value >= io.max
io.full := value >= io.max
io.empty := value === 0.U
when (io.set.valid) {
when(io.set.valid) {
io.value := io.set.bits
}.elsewhen (io.inc && ~io.dec && ~io.full) {
}.elsewhen(io.inc && ~io.dec && ~io.full) {
value := value + 1.U
}.elsewhen(~io.inc && io.dec && ~io.empty){
}.elsewhen(~io.inc && io.dec && ~io.empty) {
value := value - 1.U
}
}
@@ -85,99 +85,96 @@ class SatUpDownCounter(val n: Int) extends Module {
object SatUpDownCounter {
def apply(n: Int): SatUpDownCounterIO = {
val c = (Module(new SatUpDownCounter(n))).io
c.max := n.U
c.inc := false.B
c.max := n.U
c.inc := false.B
c.set.valid := false.B
c.dec := false.B
c.set.bits := DontCare
c.dec := false.B
c.set.bits := DontCare
c
}
}
class MultiQueueIO[T <: Data](private val gen: T, val numQueues: Int, entries: Int) extends
QueueIO(gen, entries) {
class MultiQueueIO[T <: Data](private val gen: T, val numQueues: Int, entries: Int) extends QueueIO(gen, entries) {
val enqAddr = Input(UInt(log2Up(numQueues).W))
val deqAddr = Input(UInt(log2Up(numQueues).W))
val empty = Output(Bool())
val empty = Output(Bool())
}
/** An extension of queue that co locates a set of Queues at a single mem.
* Key assumptions:
* 1) A writer to a queue dumps a complete transaction into a single queue
* before it proceeds to enq to another queue.
* 2) A reader consumes the contents of a queue entirely before reading from another
* This way we require only a single set of read and write pointers
/** An extension of queue that co locates a set of Queues at a single mem. Key assumptions: 1) A writer to a queue dumps
* a complete transaction into a single queue before it proceeds to enq to another queue. 2) A reader consumes the
* contents of a queue entirely before reading from another This way we require only a single set of read and write
* pointers
*/
class MultiQueue[T <: Data](
gen: T,
val numQueues: Int,
requestedEntries: Int
) extends Module {
gen: T,
val numQueues: Int,
requestedEntries: Int,
) extends Module {
val entries = 1 << log2Ceil(requestedEntries)
val io = IO(new MultiQueueIO(gen, numQueues, entries))
val io = IO(new MultiQueueIO(gen, numQueues, entries))
io.count := DontCare
// Rely on the ROB & freelist to ensure we are always enq-ing to an available
// slot
val ram = SyncReadMem(entries * numQueues, gen)
val enqPtrs = RegInit(VecInit(Seq.fill(numQueues)(0.U(log2Up(entries).W))))
val deqPtrs = RegInit(VecInit(Seq.fill(numQueues)(0.U(log2Up(entries).W))))
val maybe_full = RegInit(VecInit(Seq.fill(numQueues)(false.B)))
val ram = SyncReadMem(entries * numQueues, gen)
val enqPtrs = RegInit(VecInit(Seq.fill(numQueues)(0.U(log2Up(entries).W))))
val deqPtrs = RegInit(VecInit(Seq.fill(numQueues)(0.U(log2Up(entries).W))))
val maybe_full = RegInit(VecInit(Seq.fill(numQueues)(false.B)))
val ptr_matches = VecInit.tabulate(numQueues)(i => enqPtrs(i) === deqPtrs(i))
val empty = Wire(Bool())
val full = ptr_matches(io.enqAddr) && maybe_full(io.enqAddr)
val do_enq = WireInit(io.enq.fire)
val do_deq = WireInit(io.deq.fire)
val empty = Wire(Bool())
val full = ptr_matches(io.enqAddr) && maybe_full(io.enqAddr)
val do_enq = WireInit(io.enq.fire)
val do_deq = WireInit(io.deq.fire)
val deqAddrReg = RegNext(io.deqAddr)
when (do_enq) {
when(do_enq) {
ram(Cat(io.enqAddr, enqPtrs(io.enqAddr))) := io.enq.bits
enqPtrs(io.enqAddr) := enqPtrs(io.enqAddr) + 1.U
enqPtrs(io.enqAddr) := enqPtrs(io.enqAddr) + 1.U
}
when (do_deq) {
when(do_deq) {
deqPtrs(deqAddrReg) := deqPtrs(deqAddrReg) + 1.U
}
when (io.enqAddr === deqAddrReg) {
when(io.enqAddr === deqAddrReg) {
when(do_enq =/= do_deq) {
maybe_full(io.enqAddr) := do_enq
maybe_full(io.enqAddr) := do_enq
}
}.otherwise {
when(do_enq) {
maybe_full(io.enqAddr) := true.B
}
when (do_deq) {
when(do_deq) {
maybe_full(deqAddrReg) := false.B
}
}
val deqPtr = Wire(UInt())
val deqPtr = Wire(UInt())
when(do_deq && (deqAddrReg === io.deqAddr)) {
deqPtr := deqPtrs(io.deqAddr) + 1.U
empty := (deqPtrs(io.deqAddr) + 1.U) === enqPtrs(io.deqAddr)
empty := (deqPtrs(io.deqAddr) + 1.U) === enqPtrs(io.deqAddr)
}.otherwise {
deqPtr := deqPtrs(io.deqAddr)
empty := ptr_matches(io.deqAddr) && !maybe_full(io.deqAddr)
empty := ptr_matches(io.deqAddr) && !maybe_full(io.deqAddr)
}
val deqValid = RegNext(!empty, false.B)
io.empty := empty
io.empty := empty
io.deq.valid := deqValid
io.enq.ready := !full
io.deq.bits := ram.read(Cat(io.deqAddr, deqPtr))
io.deq.bits := ram.read(Cat(io.deqAddr, deqPtr))
}
case class Permissions(readable: Boolean, writeable: Boolean)
object ReadOnly extends Permissions(true, false)
object ReadOnly extends Permissions(true, false)
object WriteOnly extends Permissions(false, true)
object ReadWrite extends Permissions(true, true)
abstract class MCRMapEntry {
def name: String
def name: String
def permissions: Permissions
def substruct: Boolean
def substruct: Boolean
}
case class DecoupledSinkEntry(node: DecoupledIO[UInt], name: String, substruct: Boolean) extends MCRMapEntry {
val permissions = WriteOnly
}
@@ -186,22 +183,20 @@ case class DecoupledSourceEntry(node: DecoupledIO[UInt], name: String, substruct
}
case class RegisterEntry(node: Data, name: String, permissions: Permissions, substruct: Boolean) extends MCRMapEntry
/**
* Manages the metadata associated with a widget's configuration registers
* (exposed via the control bus). Registers are incrementally allocated, which
* each register consuming a fixed number of bytes of the address space.
/** Manages the metadata associated with a widget's configuration registers (exposed via the control bus). Registers are
* incrementally allocated, which each register consuming a fixed number of bytes of the address space.
*
* This derives from a very early form of CSR handling in Rocket Chip which
* has since been replaced with diplomacy and its regmapper utilities.
* This derives from a very early form of CSR handling in Rocket Chip which has since been replaced with diplomacy and
* its regmapper utilities.
*
* @param bytesPerAddress The number of bytes of address space consumed by each bound register.
* @param bytesPerAddress
* The number of bytes of address space consumed by each bound register.
*
* Historical: MCR -> Midas Configuration Register
*
*/
class MCRFileMap(bytesPerAddress: Int) {
private val name2addr = LinkedHashMap[String, Int]()
private val regList = ArrayBuffer[MCRMapEntry]()
private val regList = ArrayBuffer[MCRMapEntry]()
def allocate(entry: MCRMapEntry): Int = {
Predef.assert(!name2addr.contains(entry.name), s"name already allocated '${entry.name}'")
@@ -215,31 +210,28 @@ class MCRFileMap(bytesPerAddress: Int) {
def numRegs: Int = regList.size
/**
* Return the name-address mapping of registers included in the substruct.
/** Return the name-address mapping of registers included in the substruct.
*/
def getSubstructRegs: Seq[(String, Int)] =
regList.toSeq.filter(_.substruct).map(entry => entry.name -> name2addr(entry.name))
/**
* Return the name-address mapping of all registers.
/** Return the name-address mapping of all registers.
*/
def getAllRegs: Seq[(String, Int)] =
def getAllRegs: Seq[(String, Int)] =
regList.toSeq.map(entry => entry.name -> name2addr(entry.name))
def bindRegs(mcrIO: MCRIO): Unit = {
// Distinct configuration registers are assigned to new word addresses.
// The assumption that is an AXI4 lite bus implies they are 32b apart
require((mcrIO.nastiXDataBits / 8) == bytesPerAddress)
regList.zipWithIndex foreach {
case (e: DecoupledSinkEntry, index) => mcrIO.bindDecoupledSink(e, index)
regList.zipWithIndex.foreach {
case (e: DecoupledSinkEntry, index) => mcrIO.bindDecoupledSink(e, index)
case (e: DecoupledSourceEntry, index) => mcrIO.bindDecoupledSource(e, index)
case (e: RegisterEntry, index) => mcrIO.bindReg(e, index)
case (e: RegisterEntry, index) => mcrIO.bindReg(e, index)
}
}
/**
* Append the C++ representation of the address map to a string builder.
/** Append the C++ representation of the address map to a string builder.
*
* @param base
* Base address of the widget MMIO registers.
@@ -248,14 +240,14 @@ class MCRFileMap(bytesPerAddress: Int) {
*/
def genAddressMap(base: BigInt, sb: StringBuilder): Unit = {
def emitArrays(regs: Seq[(MCRMapEntry, BigInt)]): Unit = {
regs foreach { case (reg, addr) =>
regs.foreach { case (reg, addr) =>
sb.append(s" { ${CStrLit(reg.name).toC}, ${addr} },\\\n")
}
}
val regAddrs = regList map (reg => reg -> (base + lookupAddress(reg.name).get))
val readRegs = regAddrs filter (_._1.permissions.readable)
val writeRegs = regAddrs filter (_._1.permissions.writeable)
val regAddrs = regList.map(reg => reg -> (base + lookupAddress(reg.name).get))
val readRegs = regAddrs.filter(_._1.permissions.readable)
val writeRegs = regAddrs.filter(_._1.permissions.writeable)
sb.append(s" AddressMap{\n")
sb.append(s" std::vector<std::pair<std::string, uint32_t>>{\n")
@@ -268,12 +260,12 @@ class MCRFileMap(bytesPerAddress: Int) {
}
def printCRs: Unit = {
regList.zipWithIndex foreach { case (entry, i) => println(s"Name: ${entry.name}, Addr: $i") }
regList.zipWithIndex.foreach { case (entry, i) => println(s"Name: ${entry.name}, Addr: $i") }
}
}
class MCRIO(numCRs: Int)(implicit p: Parameters) extends NastiBundle()(p) {
val read = Vec(numCRs, Flipped(Decoupled(UInt(nastiXDataBits.W))))
val read = Vec(numCRs, Flipped(Decoupled(UInt(nastiXDataBits.W))))
val write = Vec(numCRs, Decoupled(UInt(nastiXDataBits.W)))
val wstrb = Output(UInt(nastiWStrobeBits.W))
@@ -285,7 +277,7 @@ class MCRIO(numCRs: Int)(implicit p: Parameters) extends NastiBundle()(p) {
def bindReg(reg: RegisterEntry, index: Int): Unit = {
if (reg.permissions.writeable) {
when(write(index).valid){
when(write(index).valid) {
reg.node := write(index).bits
}
} else {
@@ -298,7 +290,7 @@ class MCRIO(numCRs: Int)(implicit p: Parameters) extends NastiBundle()(p) {
assert(read(index).ready === false.B, "Register ${reg.name} is write only")
}
read(index).valid := true.B
read(index).valid := true.B
write(index).ready := true.B
}
@@ -317,40 +309,40 @@ class MCRIO(numCRs: Int)(implicit p: Parameters) extends NastiBundle()(p) {
class MCRFile(numRegs: Int)(implicit p: Parameters) extends NastiModule()(p) {
val io = IO(new Bundle {
val nasti = Flipped(new NastiIO)
val mcr = new MCRIO(numRegs)
val mcr = new MCRIO(numRegs)
})
//TODO: Just use a damn state machine.
val rValid = RegInit(false.B)
val arFired = RegInit(false.B)
val awFired = RegInit(false.B)
val wFired = RegInit(false.B)
val rValid = RegInit(false.B)
val arFired = RegInit(false.B)
val awFired = RegInit(false.B)
val wFired = RegInit(false.B)
val wCommited = RegInit(false.B)
val bId = Reg(UInt(p(NastiKey).idBits.W))
val rId = Reg(UInt(p(NastiKey).idBits.W))
val rData = Reg(UInt(nastiXDataBits.W))
val wData = Reg(UInt(nastiXDataBits.W))
val wIndex = Reg(UInt(log2Up(numRegs).W))
val rIndex = Reg(UInt(log2Up(numRegs).W))
val wStrb = Reg(UInt(nastiWStrobeBits.W))
val bId = Reg(UInt(p(NastiKey).idBits.W))
val rId = Reg(UInt(p(NastiKey).idBits.W))
val rData = Reg(UInt(nastiXDataBits.W))
val wData = Reg(UInt(nastiXDataBits.W))
val wIndex = Reg(UInt(log2Up(numRegs).W))
val rIndex = Reg(UInt(log2Up(numRegs).W))
val wStrb = Reg(UInt(nastiWStrobeBits.W))
when(io.nasti.aw.fire){
when(io.nasti.aw.fire) {
awFired := true.B
wIndex := io.nasti.aw.bits.addr >> log2Up(nastiWStrobeBits)
bId := io.nasti.aw.bits.id
wIndex := io.nasti.aw.bits.addr >> log2Up(nastiWStrobeBits)
bId := io.nasti.aw.bits.id
assert(io.nasti.aw.bits.len === 0.U)
}
when(io.nasti.w.fire){
when(io.nasti.w.fire) {
wFired := true.B
wData := io.nasti.w.bits.data
wStrb := io.nasti.w.bits.strb
wData := io.nasti.w.bits.data
wStrb := io.nasti.w.bits.strb
}
when(io.nasti.ar.fire) {
arFired := true.B
rIndex := (io.nasti.ar.bits.addr >> log2Up(nastiWStrobeBits))(log2Up(numRegs)-1,0)
rId := io.nasti.ar.bits.id
rIndex := (io.nasti.ar.bits.addr >> log2Up(nastiWStrobeBits))(log2Up(numRegs) - 1, 0)
rId := io.nasti.ar.bits.id
assert(io.nasti.ar.bits.len === 0.U, "MCRFile only support single beat reads")
}
@@ -359,35 +351,35 @@ class MCRFile(numRegs: Int)(implicit p: Parameters) extends NastiModule()(p) {
}
when(io.nasti.b.fire) {
awFired := false.B
wFired := false.B
awFired := false.B
wFired := false.B
wCommited := false.B
}
when(io.mcr.write(wIndex).fire){
when(io.mcr.write(wIndex).fire) {
wCommited := true.B
}
io.mcr.write foreach { w => w.valid := false.B; w.bits := wData }
io.mcr.write.foreach { w => w.valid := false.B; w.bits := wData }
io.mcr.write(wIndex).valid := awFired && wFired && ~wCommited
io.mcr.read.zipWithIndex foreach { case (decoupled, idx: Int) =>
io.mcr.read.zipWithIndex.foreach { case (decoupled, idx: Int) =>
decoupled.ready := (rIndex === idx.U) && arFired && io.nasti.r.ready
}
io.nasti.r.bits := NastiReadDataChannel(rId, io.mcr.read(rIndex).bits)
io.nasti.r.bits := NastiReadDataChannel(rId, io.mcr.read(rIndex).bits)
io.nasti.r.valid := arFired && io.mcr.read(rIndex).valid
io.nasti.b.bits := NastiWriteResponseChannel(bId)
io.nasti.b.bits := NastiWriteResponseChannel(bId)
io.nasti.b.valid := awFired && wFired && wCommited
io.nasti.ar.ready := ~arFired
io.nasti.aw.ready := ~awFired
io.nasti.w.ready := ~wFired
io.nasti.w.ready := ~wFired
}
class CRIO(direction: ActualDirection, width: Int, val default: Int) extends Bundle {
val value = (direction: @unchecked) match {
case ActualDirection.Input => Input(UInt(width.W))
case ActualDirection.Input => Input(UInt(width.W))
case ActualDirection.Output => Output(UInt(width.W))
}
def apply(dummy: Int = 0) = value
@@ -404,10 +396,10 @@ object DecoupledCRIO {
}
// I need the right name for this
object D2V {
object D2V {
def apply[T <: Data](in: DecoupledIO[T]): ValidIO[T] = {
val v = Wire(Valid(in.bits.cloneType))
v.bits := in.bits
v.bits := in.bits
v.valid := in.valid
v
}
@@ -416,24 +408,22 @@ object D2V {
object V2D {
def apply[T <: Data](in: ValidIO[T]): DecoupledIO[T] = {
val d = Wire(Decoupled(in.bits.cloneType))
d.bits := in.bits
d.bits := in.bits
d.valid := in.valid
d
}
}
class IdentityModule[T <: Data](gen: T) extends Module
{
class IdentityModule[T <: Data](gen: T) extends Module {
val io = IO(new Bundle {
val in = Flipped(gen.cloneType)
val in = Flipped(gen.cloneType)
val out = gen.cloneType
})
io.out <> io.in
}
object IdentityModule
{
object IdentityModule {
def apply[T <: Data](x: T): T = {
val identity = Module(new IdentityModule(x))
identity.io.in := x
@@ -449,30 +439,30 @@ class BRAMFlowQueue[T <: Data](val entries: Int)(data: => T) extends Module {
io.count := 0.U
val do_flow = Wire(Bool())
val do_enq = io.enq.fire && !do_flow
val do_deq = io.deq.fire && !do_flow
val do_enq = io.enq.fire && !do_flow
val do_deq = io.deq.fire && !do_flow
val maybe_full = RegInit(false.B)
val enq_ptr = Counter(do_enq, entries)._1
val maybe_full = RegInit(false.B)
val enq_ptr = Counter(do_enq, entries)._1
val (deq_ptr, deq_done) = Counter(do_deq, entries)
when (do_enq =/= do_deq) { maybe_full := do_enq }
when(do_enq =/= do_deq) { maybe_full := do_enq }
val ptr_match = enq_ptr === deq_ptr
val empty = ptr_match && !maybe_full
val full = ptr_match && maybe_full
val atLeastTwo = full || enq_ptr - deq_ptr >= 2.U
val ptr_match = enq_ptr === deq_ptr
val empty = ptr_match && !maybe_full
val full = ptr_match && maybe_full
val atLeastTwo = full || enq_ptr - deq_ptr >= 2.U
do_flow := empty && io.deq.ready
val ram = Mem(entries, data)
when (do_enq) { ram.write(enq_ptr, io.enq.bits) }
when(do_enq) { ram.write(enq_ptr, io.enq.bits) }
val ren = io.deq.ready && (atLeastTwo || !io.deq.valid && !empty)
val raddr = Mux(io.deq.valid, Mux(deq_done, 0.U, deq_ptr + 1.U), deq_ptr)
val ren = io.deq.ready && (atLeastTwo || !io.deq.valid && !empty)
val raddr = Mux(io.deq.valid, Mux(deq_done, 0.U, deq_ptr + 1.U), deq_ptr)
val ram_out_valid = RegNext(ren)
io.deq.valid := Mux(empty, io.enq.valid, ram_out_valid)
io.enq.ready := !full
io.deq.bits := Mux(empty, io.enq.bits, RegEnable(ram.read(raddr), ren))
io.deq.bits := Mux(empty, io.enq.bits, RegEnable(ram.read(raddr), ren))
}
class BRAMQueue[T <: Data](val entries: Int)(data: => T) extends Module {
@@ -480,23 +470,22 @@ class BRAMQueue[T <: Data](val entries: Int)(data: => T) extends Module {
val count = RegInit(0.U(log2Ceil(entries + 1).W))
when (io.enq.fire ^ io.deq.fire) {
when(io.enq.fire ^ io.deq.fire) {
count := Mux(io.enq.fire, count + 1.U, count - 1.U)
}
io.count := count
val fq = Module(new BRAMFlowQueue(entries)(data))
fq.io.enq <> io.enq
io.deq <> Queue(fq.io.deq, 1, pipe = true)
io.deq <> Queue(fq.io.deq, 1, pipe = true)
}
object BRAMQueue {
def apply[T <: Data](enq: DecoupledIO[T], entries: Int) = {
val q = Module((new BRAMQueue(entries)) { enq.bits.cloneType })
q.io.enq.valid := enq.valid // not using <> so that override is allowed
q.io.enq.bits := enq.bits
enq.ready := q.io.enq.ready
q.io.enq.bits := enq.bits
enq.ready := q.io.enq.ready
q.io.deq
}
}


@@ -7,7 +7,7 @@ import chisel3._
import chisel3.util._
import chisel3.experimental.DataMirror
import junctions._
import freechips.rocketchip.config.{Parameters, Field}
import freechips.rocketchip.config.{Field, Parameters}
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.util.ParameterizedBundle
@@ -17,19 +17,18 @@ import scala.collection.mutable
case object CtrlNastiKey extends Field[NastiParameters]
// Just NASTI, but pointing at the right key.
class WidgetMMIO(implicit p: Parameters) extends NastiIO()(p)
with HasNastiParameters
class WidgetMMIO(implicit p: Parameters) extends NastiIO()(p) with HasNastiParameters
object WidgetMMIO {
def apply()(implicit p: Parameters): WidgetMMIO = {
new WidgetMMIO()(p alterPartial ({ case NastiKey => p(CtrlNastiKey) }))
new WidgetMMIO()(p.alterPartial({ case NastiKey => p(CtrlNastiKey) }))
}
}
// All widgets must implement this interface
// NOTE: Changing ParameterizedBundle -> Bundle breaks PeekPokeWidgetIO when
// outNum = 0
class WidgetIO(implicit p: Parameters) extends ParameterizedBundle()(p){
class WidgetIO(implicit p: Parameters) extends ParameterizedBundle()(p) {
val ctrl = Flipped(WidgetMMIO())
}
abstract class Widget()(implicit p: Parameters) extends LazyModule()(p) {
@@ -40,24 +39,25 @@ abstract class Widget()(implicit p: Parameters) extends LazyModule()(p) {
this.suggestName(wName)
def getWName = wName
def getWId = wId
def getWId = wId
// Returns widget-relative word address
def getCRAddr(name: String): Int = {
module.crRegistry.lookupAddress(name).getOrElse(
throw new RuntimeException(s"Could not find CR:${name} in widget: $wName"))
module.crRegistry
.lookupAddress(name)
.getOrElse(throw new RuntimeException(s"Could not find CR:${name} in widget: $wName"))
}
val customSize: Option[BigInt] = None
def memRegionSize = customSize.getOrElse(BigInt(1 << log2Up(module.numRegs * (module.ctrlWidth/8))))
def memRegionSize = customSize.getOrElse(BigInt(1 << log2Up(module.numRegs * (module.ctrlWidth / 8))))
def printCRs = module.crRegistry.printCRs
}
abstract class WidgetImp(wrapper: Widget) extends LazyModuleImp(wrapper) {
val ctrlWidth = p(CtrlNastiKey).dataBits
val ctrlWidth = p(CtrlNastiKey).dataBits
val crRegistry = new MCRFileMap(ctrlWidth / 8)
def numRegs = crRegistry.numRegs
def numRegs = crRegistry.numRegs
def io: WidgetIO
@@ -72,34 +72,33 @@ abstract class WidgetImp(wrapper: Widget) extends LazyModuleImp(wrapper) {
// For outputs, direct binds the wire to the map
def attachIO(io: Record, prefix: String = ""): Unit = {
/**
* For FASED memory timing models, initalize programmable registers to defaults if provided.
* See [[midas.models.HasProgrammableRegisters]] for more detail.
/** For FASED memory timing models, initalize programmable registers to defaults if provided. See
* [[midas.models.HasProgrammableRegisters]] for more detail.
*/
def getInitValue(field: Bits, parent: Data): Option[UInt] = parent match {
case p: midas.models.HasProgrammableRegisters if p.regMap.isDefinedAt(field) =>
Some(p.regMap(field).default.U)
case _ => None
case _ => None
}
def innerAttachIO(node: Data, parent: Data, name: String): Unit = node match {
case (b: Bits) => (DataMirror.directionOf(b): @unchecked) match {
case ActualDirection.Output => attach(b, s"${name}", ReadOnly, substruct = false)
case ActualDirection.Input =>
genAndAttachReg(b, name, getInitValue(b, parent), substruct = false)
}
case (b: Bits) =>
(DataMirror.directionOf(b): @unchecked) match {
case ActualDirection.Output => attach(b, s"${name}", ReadOnly, substruct = false)
case ActualDirection.Input =>
genAndAttachReg(b, name, getInitValue(b, parent), substruct = false)
}
case (v: Vec[_]) => {
(v.zipWithIndex).foreach({ case (elm, idx) => innerAttachIO(elm, node, s"${name}_$idx")})
(v.zipWithIndex).foreach({ case (elm, idx) => innerAttachIO(elm, node, s"${name}_$idx") })
}
case (r: Record) => {
r.elements.foreach({ case (subName, elm) => innerAttachIO(elm, node, s"${name}_${subName}")})
r.elements.foreach({ case (subName, elm) => innerAttachIO(elm, node, s"${name}_${subName}") })
}
case _ => new RuntimeException("Cannot bind to this sort of node...")
case _ => new RuntimeException("Cannot bind to this sort of node...")
}
io.elements.foreach({ case (name, elm) => innerAttachIO(elm, io, s"${prefix}${name}")})
io.elements.foreach({ case (name, elm) => innerAttachIO(elm, io, s"${prefix}${name}") })
}
def attachDecoupledSink(channel: DecoupledIO[UInt], name: String, substruct: Boolean = true): Int = {
crRegistry.allocate(DecoupledSinkEntry(channel, name, substruct))
}
@@ -116,102 +115,103 @@ abstract class WidgetImp(wrapper: Widget) extends LazyModuleImp(wrapper) {
}
def genAndAttachReg[T <: Data](
wire: T,
name: String,
default: Option[T] = None,
masterDriven: Boolean = true,
substruct: Boolean = true): T = {
wire: T,
name: String,
default: Option[T] = None,
masterDriven: Boolean = true,
substruct: Boolean = true,
): T = {
require(wire.getWidth <= ctrlWidth)
val reg = default match {
case None => Reg(wire.cloneType)
case None => Reg(wire.cloneType)
case Some(init) => RegInit(init)
}
if (masterDriven) wire := reg else reg := wire
attach(reg, name, substruct = substruct)
reg suggestName name
reg.suggestName(name)
reg
}
def genWOReg[T <: Data](wire: T, name: String, substruct: Boolean = true): T =
def genWOReg[T <: Data](wire: T, name: String, substruct: Boolean = true): T =
genAndAttachReg(wire, name, substruct = substruct)
def genROReg[T <: Data](wire: T, name: String, substruct: Boolean = true): T =
def genROReg[T <: Data](wire: T, name: String, substruct: Boolean = true): T =
genAndAttachReg(wire, name, masterDriven = false, substruct = substruct)
def genWORegInit[T <: Data](wire: T, name: String, default: T, substruct: Boolean = true): T =
def genWORegInit[T <: Data](wire: T, name: String, default: T, substruct: Boolean = true): T =
genAndAttachReg(wire, name, Some(default), true, substruct = substruct)
def genRORegInit[T <: Data](wire: T, name: String, default: T, substruct: Boolean = true): T =
def genRORegInit[T <: Data](wire: T, name: String, default: T, substruct: Boolean = true): T =
genAndAttachReg(wire, name, Some(default), false, substruct = substruct)
def genWideRORegInit[T <: Bits](default: T, name: String, substruct: Boolean = true): T = {
val reg = RegInit(default)
val shadowReg = Reg(default.cloneType)
val reg = RegInit(default)
val shadowReg = Reg(default.cloneType)
shadowReg.suggestName(s"${name}_mmreg")
val baseAddr = Seq.tabulate((default.getWidth + ctrlWidth - 1) / ctrlWidth)({ i =>
val msb = math.min(ctrlWidth * (i + 1) - 1, default.getWidth - 1)
val slice = shadowReg(msb, ctrlWidth * i)
attach(slice, s"${name}_$i", ReadOnly, substruct)
}).head
val baseAddr = Seq
.tabulate((default.getWidth + ctrlWidth - 1) / ctrlWidth)({ i =>
val msb = math.min(ctrlWidth * (i + 1) - 1, default.getWidth - 1)
val slice = shadowReg(msb, ctrlWidth * i)
attach(slice, s"${name}_$i", ReadOnly, substruct)
})
.head
// When a read request is made of the low-order address, snapshot the entire register
val latchEnable = WireInit(false.B).suggestName(s"${name}_latchEnable")
attach(latchEnable, s"${name}_latch", WriteOnly, substruct)
when (latchEnable) {
when(latchEnable) {
shadowReg := reg
}
reg
}
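A short sketch of how the wide read-only helper might be used (illustrative, not from the commit; the 64-bit cycle counter and the 32-bit control-bus width are assumptions). Restating the protocol implemented above: the driver writes "<name>_latch" to snapshot the register, then reads the slices "<name>_0", "<name>_1", ...

  // Inside a hypothetical WidgetImp body, assuming ctrlWidth == 32.
  // genWideRORegInit returns the backing register, which the widget then drives.
  val cycles = genWideRORegInit(0.U(64.W), "cycles")
  cycles := cycles + 1.U
  // MMIO surface created by the call above for a 64-bit value on a 32-bit bus:
  //   "cycles_latch" (write-only): snapshots the register into the shadow copy
  //   "cycles_0", "cycles_1" (read-only): low and high 32-bit slices of the snapshot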
def genCRFile(): MCRFile = {
val crFile = Module(new MCRFile(numRegs)(p alterPartial ({ case NastiKey => p(CtrlNastiKey) })))
crFile.io.mcr := DontCare
val crFile = Module(new MCRFile(numRegs)(p.alterPartial({ case NastiKey => p(CtrlNastiKey) })))
crFile.io.mcr := DontCare
crFile.io.nasti <> io.ctrl
crRegistry.bindRegs(crFile.io.mcr)
crFile
}
/** Emits a header snippet for this widget.
* @param base
* The base address of the MMIO region allocated to the widget.
* The base address of the MMIO region allocated to the widget.
* @param memoryRegions
* A mapping of names to allocated FPGA-DRAM regions. This is one mechanism
* for establishing side-channels between two otherwise unconnected bridges or widgets.
* A mapping of names to allocated FPGA-DRAM regions. This is one mechanism for establishing side-channels between
* two otherwise unconnected bridges or widgets.
*/
def genHeader(base: BigInt, memoryRegions: Map[String, BigInt], sb: StringBuilder): Unit
/** Emits a call to the constructor into the generated header.
* @param base
* The base address of the MMIO region allocated to the widget.
* The base address of the MMIO region allocated to the widget.
* @param sb
* The string builder to append to.
* The string builder to append to.
* @param bridgeDriverClassName
* Name of the bridge driver class.
* Name of the bridge driver class.
* @param bridgeDriverHeaderName
* Name of the header to get the driver from.
* Name of the header to get the driver from.
* @param args
* List of C++ literals to pass as arguments.
* List of C++ literals to pass as arguments.
* @param guard
* Name of the header guard, used to order constructor calls.
* Name of the header guard, used to order constructor calls.
* @param hasStreams
* Flag indicating that a stream engine reference should be provided.
* Flag indicating that a stream engine reference should be provided.
* @param hasLoadMem
* Flag indicating that a loadmem widget reference should be provided.
* Flag indicating that a loadmem widget reference should be provided.
* @param hasMMIOAddrMap
* Flag indicating that an address map should be provided.
* Flag indicating that an address map should be provided.
*/
def genConstructor(
base: BigInt,
sb: StringBuilder,
bridgeDriverClassName: String,
bridgeDriverHeaderName: String,
args: Seq[CPPLiteral] = Seq(),
guard: String = "GET_BRIDGE_CONSTRUCTOR",
hasStreams: Boolean = false,
hasLoadMem: Boolean = false,
hasMMIOAddrMap: Boolean = false,
base: BigInt,
sb: StringBuilder,
bridgeDriverClassName: String,
bridgeDriverHeaderName: String,
args: Seq[CPPLiteral] = Seq(),
guard: String = "GET_BRIDGE_CONSTRUCTOR",
hasStreams: Boolean = false,
hasLoadMem: Boolean = false,
hasMMIOAddrMap: Boolean = false,
): Unit = {
val mmioName = wrapper.getWName.toUpperCase.split("_").head
val regs = crRegistry.getSubstructRegs
val regs = crRegistry.getSubstructRegs
sb.append(s"""|#ifdef GET_INCLUDES
|#include "bridges/${bridgeDriverHeaderName}.h"
@ -222,7 +222,7 @@ abstract class WidgetImp(wrapper: Widget) extends LazyModuleImp(wrapper) {
// that the structure defined in C++ matches the widget registers.
if (regs.nonEmpty) {
sb.append("#ifdef GET_SUBSTRUCT_CHECKS\n")
regs.zipWithIndex foreach { case ((regName, _), i) =>
regs.zipWithIndex.foreach { case ((regName, _), i) =>
sb.append(s"static_assert(")
sb.append(s"offsetof(${mmioName}_struct, ${regName}) == ${i} * sizeof(uint64_t), ")
sb.append(s"${'\"'}invalid ${regName}${'\"'});\\\n")
@ -273,7 +273,7 @@ object Widget {
// 2) We currently rely on having fixed widget names based on the class
// name, in the simulation driver.
val widgetBasename = m.getClass.getSimpleName
val idx = widgetInstCount(widgetBasename)
val idx = widgetInstCount(widgetBasename)
widgetInstCount(widgetBasename) = idx + 1
(widgetBasename + "_" + idx, idx)
}
@ -287,15 +287,14 @@ object WidgetRegion {
}
trait HasWidgets {
private var _finalized = false
private val widgets = mutable.ArrayBuffer[Widget]()
private val name2inst = mutable.HashMap[String, Widget]()
private var _finalized = false
private val widgets = mutable.ArrayBuffer[Widget]()
private val name2inst = mutable.HashMap[String, Widget]()
private lazy val addrMap = new AddrMap({
val (_, entries) = (sortedWidgets foldLeft (BigInt(0), Seq[AddrMapEntry]())){
case ((start, es), w) =>
val name = w.getWName
val size = w.memRegionSize
(start + size, es :+ AddrMapEntry(name, WidgetRegion(start, size)))
val (_, entries) = (sortedWidgets.foldLeft(BigInt(0), Seq[AddrMapEntry]())) { case ((start, es), w) =>
val name = w.getWName
val size = w.memRegionSize
(start + size, es :+ AddrMapEntry(name, WidgetRegion(start, size)))
}
entries
})
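As a standalone rendition of what the fold computes (names and sizes below are invented), each widget's region starts at the running sum of the sizes of the widgets sorted before it, so the regions are laid out back-to-back:

  val sizes = Seq("w0" -> BigInt(1024), "w1" -> BigInt(4096), "w2" -> BigInt(1024))
  val (_, regions) = sizes.foldLeft((BigInt(0), Seq.empty[(String, BigInt, BigInt)])) {
    case ((start, acc), (name, size)) => (start + size, acc :+ ((name, start, size)))
  }
  // regions: w0 starts at 0x000, w1 at 0x400, w2 at 0x1400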
@ -314,19 +313,23 @@ trait HasWidgets {
val lastWidgetRegion = addrMap.entries.last.region
val widgetAddressMax = lastWidgetRegion.start + lastWidgetRegion.size
require(log2Up(widgetAddressMax) <= p(CtrlNastiKey).addrBits,
require(
log2Up(widgetAddressMax) <= p(CtrlNastiKey).addrBits,
s"""| Widgets have allocated ${widgetAddressMax >> 2} MMIO Registers, requiring
| ${widgetAddressMax} bytes of addressable register space. The ctrl bus
| is configured to only have ${p(CtrlNastiKey).addrBits} bits of address,
| not the required ${log2Up(widgetAddressMax)} bits.""".stripMargin)
| not the required ${log2Up(widgetAddressMax)} bits.""".stripMargin,
)
val ctrlInterconnect = Module(new NastiRecursiveInterconnect(
nMasters = 1,
addrMap = addrMap
)(p alterPartial ({ case NastiKey => p(CtrlNastiKey) })))
val ctrlInterconnect = Module(
new NastiRecursiveInterconnect(
nMasters = 1,
addrMap = addrMap,
)(p.alterPartial({ case NastiKey => p(CtrlNastiKey) }))
)
ctrlInterconnect.io.masters(0) <> master
sortedWidgets.zip(ctrlInterconnect.io.slaves) foreach {
case (w: Widget, m) => w.module.io.ctrl <> m
sortedWidgets.zip(ctrlInterconnect.io.slaves).foreach { case (w: Widget, m) =>
w.module.io.ctrl <> m
}
}
@ -337,27 +340,25 @@ trait HasWidgets {
}
}
/**
* Iterates through each bridge, generating the header fragment. Must be
* called after bridge address assignment is complete.
/** Iterates through each bridge, generating the header fragment. Must be called after bridge address assignment is
* complete.
*/
def genWidgetHeaders(sb: StringBuilder, memoryRegions: Map[String, BigInt]): Unit = {
widgets foreach ((w: Widget) => w.module.genHeader(addrMap(w.getWName).start, memoryRegions, sb))
widgets.foreach((w: Widget) => w.module.genHeader(addrMap(w.getWName).start, memoryRegions, sb))
}
def printWidgets: Unit = {
widgets foreach ((w: Widget) => println(w.getWName))
widgets.foreach((w: Widget) => println(w.getWName))
}
def getCRAddr(wName: String, crName: String)(implicit channelWidth: Int): BigInt = {
val widget = name2inst.get(wName).getOrElse(
throw new RuntimeException("Could not find Widget: $wName"))
val widget = name2inst.get(wName).getOrElse(throw new RuntimeException("Could not find Widget: $wName"))
getCRAddr(widget, crName)
}
def getCRAddr(w: Widget, crName: String)(implicit channelWidth: Int): BigInt = {
// TODO: Deal with byte vs word addresses && don't use a name in the hash?
val base = (addrMap(w.getWName).start >> log2Up(channelWidth/8))
val base = (addrMap(w.getWName).start >> log2Up(channelWidth / 8))
base + w.getCRAddr(crName)
}
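A worked example of the shift above (all values invented): with a 32-bit control channel, the byte-aligned region start is converted to a word address before the register's word offset within the widget is added.

  import chisel3.util.log2Up
  // Invented values: a 32-bit control channel and a widget region at byte 0x1000.
  val channelWidth   = 32
  val regionStart    = BigInt(0x1000)
  val wordOffsetOfCR = BigInt(2)                           // e.g. the third register in the widget
  val base   = regionStart >> log2Up(channelWidth / 8)     // 0x1000 >> 2 == 0x400
  val crAddr = base + wordOffsetOfCR                       // == 0x402, a word address on the control bus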
}