ARM: Added SMMU.

commit 52cb33b982 (parent c8b5691776)
Author: Udo Steinberg
Date:   2019-10-08 19:11:06 +02:00

7 changed files with 569 additions and 2 deletions

inc/aarch64/smmu.hpp (new file)

@@ -0,0 +1,265 @@
/*
* System Memory Management Unit (ARM SMMUv2)
*
* Copyright (C) 2019-2023 Udo Steinberg, BedRock Systems, Inc.
*
* This file is part of the NOVA microhypervisor.
*
* NOVA is free software: you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* NOVA is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License version 2 for more details.
*/
#pragma once
#include "list.hpp"
#include "memory.hpp"
#include "ptab_hpt.hpp"
#include "sdid.hpp"
#include "slab.hpp"
#include "spinlock.hpp"
#include "std.hpp"
#include "types.hpp"
class Space_dma;
class Smmu final : public List<Smmu>
{
private:
struct Config
{
struct Entry
{
Space_dma * dma { nullptr };
uint16_t sid { 0 };
uint16_t msk { 0 };
uint8_t ctx { 0 };
};
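// 256 entries of 16 bytes each (8-byte pointer + 2+2+1 bytes, padded) fill exactly one 4KiB page, per the static_assert in operator new below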
Entry entry[256];
[[nodiscard]] static void *operator new (size_t) noexcept
{
static_assert (sizeof (Config) == PAGE_SIZE (0));
return Buddy::alloc (0);
}
static void operator delete (void *ptr)
{
Buddy::free (ptr);
}
};
enum class Mode : unsigned
{
STREAM_MATCHING, // Stream Matching
STREAM_MATCHING_EXT, // Extended Stream Matching
STREAM_INDEXING, // Stream Indexing
STREAM_INDEXING_CMP, // Compressed Stream Indexing
};
enum class GR0_Reg32 : unsigned
{
CR0 = 0x000, // rw Configuration Register 0
CR1 = 0x004, // rw Configuration Register 1
CR2 = 0x008, // rw Configuration Register 2
ACR = 0x010, // rw Auxiliary Configuration Register
IDR0 = 0x020, // r- Identification Register 0
IDR1 = 0x024, // r- Identification Register 1
IDR2 = 0x028, // r- Identification Register 2
IDR3 = 0x02c, // r- Identification Register 3
IDR4 = 0x030, // r- Identification Register 4
IDR5 = 0x034, // r- Identification Register 5
IDR6 = 0x038, // r- Identification Register 6
IDR7 = 0x03c, // r- Identification Register 7
GFSR = 0x048, // rw Global Fault Status Register
GFSRRESTORE = 0x04c, // -w Global Fault Status Restore Register
GFSYNR0 = 0x050, // rw Global Fault Syndrome Register 0
GFSYNR1 = 0x054, // rw Global Fault Syndrome Register 1
GFSYNR2 = 0x058, // rw Global Fault Syndrome Register 2
TLBIALL = 0x060, // -w TLB Invalidate All
TLBIVMID = 0x064, // -w TLB Invalidate by VMID
TLBIALLNSNH = 0x068, // -w TLB Invalidate All, Non-Secure, Non-Hyp
TLBIALLH = 0x06c, // -w TLB Invalidate All, Hyp
TLBGSYNC = 0x070, // -w Global Synchronize TLB Invalidate
TLBGSTATUS = 0x074, // r- Global TLB Status Register
TLBIVAH = 0x078, // -w TLB Invalidate by VA, Hyp
};
enum class GR0_Reg64 : unsigned
{
GFAR = 0x040, // rw Global Fault Address Register
};
enum class GR0_Arr32 : unsigned
{
SMR = 0x800, // rw Stream Match Register
S2CR = 0xc00, // rw Stream-to-Context Register
};
enum class GR1_Arr32 : unsigned
{
CBAR = 0x000, // rw Context Bank Attribute Register
CBFRSYNRA = 0x400, // rw Context Bank Fault Restricted Syndrome Register A
CBA2R = 0x800, // rw Context Bank Attribute Register 2
};
enum class Ctx_Arr32 : unsigned
{
SCTLR = 0x000, // s1 s2 rw System Control Register
ACTLR = 0x004, // s1 s2 rw Auxiliary Control Register
RESUME = 0x008, // s1 s2 -w Transaction Resume Register
TCR2 = 0x010, // s1 -- rw Translation Control Register 2
TCR = 0x030, // s1 s2 rw Translation Control Register
CONTEXTIDR = 0x034, // s1 -- rw Context Identification Register
MAIR0 = 0x038, // s1 -- rw Memory Attribute Indirection Register 0
MAIR1 = 0x03c, // s1 -- rw Memory Attribute Indirection Register 1
FSR = 0x058, // s1 s2 rw Fault Status Register
FSRRESTORE = 0x05c, // s1 s2 -w Fault Status Restore Register
FSYNR0 = 0x068, // s1 s2 rw Fault Syndrome Register 0
FSYNR1 = 0x06c, // s1 s2 rw Fault Syndrome Register 1
TLBIASID = 0x610, // s1 -- -w TLB Invalidate by ASID
TLBIALL = 0x618, // s1 -- -w TLB Invalidate All
TLBSYNC = 0x7f0, // s1 s2 -w TLB Synchronize Invalidate
TLBSTATUS = 0x7f4, // s1 s2 r- TLB Status
ATSR = 0x8f0, // s1 -- r- Address Translation Status Register
};
enum class Ctx_Arr64 : unsigned
{
TTBR0 = 0x020, // s1 s2 rw Translation Table Base Register 0
TTBR1 = 0x028, // s1 -- rw Translation Table Base Register 1
PAR = 0x050, // s1 -- rw Physical Address Register
FAR = 0x060, // s1 s2 rw Fault Address Register
IPAFAR = 0x070, // -- s2 rw IPA Fault Address Register
TLBIVA = 0x600, // s1 -- -w TLB Invalidate by VA
TLBIVAA = 0x608, // s1 -- -w TLB Invalidate by VA, All ASIDs
TLBIVAL = 0x620, // s1 -- -w TLB Invalidate by VA, Last Level
TLBIVAAL = 0x628, // s1 -- -w TLB Invalidate by VA, All ASIDs, Last Level
TLBIIPAS2 = 0x630, // -- s2 -w TLB Invalidate by IPA
TLBIIPAS2L = 0x638, // -- s2 -w TLB Invalidate by IPA, Last Level
ATS1PR = 0x800, // s1 -- -w Address Translation Stage 1, Privileged Read
ATS1PW = 0x808, // s1 -- -w Address Translation Stage 1, Privileged Write
ATS1UR = 0x810, // s1 -- -w Address Translation Stage 1, Unprivileged Read
ATS1UW = 0x818, // s1 -- -w Address Translation Stage 1, Unprivileged Write
};
uintptr_t mmio_base_gr0 { 0 }; // Global Register Space 0
uintptr_t mmio_base_gr1 { 0 }; // Global Register Space 1
uintptr_t mmio_base_ctx { 0 }; // Translation Context Bank Space
unsigned page_size { 0 }; // 4KiB or 64KiB
unsigned sidx_bits { 0 }; // Stream ID Bits
uint8_t num_smg { 0 }; // Stream Mapping Groups
uint8_t num_ctx { 0 }; // Translation Context Banks
uint8_t ias { 0 }; // IAddr Size
uint8_t oas { 0 }; // OAddr Size
Mode mode { 0 }; // SMMU Mode
Config * config { nullptr }; // Configuration Table Pointer
Board::Smmu const & board; // SMMU Board Setup
Spinlock cfg_lock; // SMMU CFG Lock
Spinlock inv_lock; // SMMU INV Lock
static Slab_cache cache; // SMMU Slab Cache
static inline Smmu * list { nullptr }; // SMMU List
static inline uintptr_t mmap { MMAP_GLB_SMMU }; // SMMU Memory Map Pointer
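// MMIO accessors: GR0/GR1 registers live at fixed offsets from their base;
// the per-SMG and per-CTX attribute registers (SMR/S2CR, CBAR/CBFRSYNRA/CBA2R)
// stride by 4 bytes per index, while the context bank spaces stride by page_size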
inline auto read (GR0_Reg32 r) { return *reinterpret_cast<uint32_t volatile *>(mmio_base_gr0 + std::to_underlying (r)); }
inline auto read (GR0_Reg64 r) { return *reinterpret_cast<uint64_t volatile *>(mmio_base_gr0 + std::to_underlying (r)); }
inline auto read (unsigned smg, GR0_Arr32 r) { return *reinterpret_cast<uint32_t volatile *>(mmio_base_gr0 + smg * sizeof (uint32_t) + std::to_underlying (r)); }
inline auto read (unsigned ctx, GR1_Arr32 r) { return *reinterpret_cast<uint32_t volatile *>(mmio_base_gr1 + ctx * sizeof (uint32_t) + std::to_underlying (r)); }
inline auto read (unsigned ctx, Ctx_Arr32 r) { return *reinterpret_cast<uint32_t volatile *>(mmio_base_ctx + ctx * page_size + std::to_underlying (r)); }
inline auto read (unsigned ctx, Ctx_Arr64 r) { return *reinterpret_cast<uint64_t volatile *>(mmio_base_ctx + ctx * page_size + std::to_underlying (r)); }
inline void write (GR0_Reg32 r, uint32_t v) { *reinterpret_cast<uint32_t volatile *>(mmio_base_gr0 + std::to_underlying (r)) = v; }
inline void write (GR0_Reg64 r, uint64_t v) { *reinterpret_cast<uint64_t volatile *>(mmio_base_gr0 + std::to_underlying (r)) = v; }
inline void write (unsigned smg, GR0_Arr32 r, uint32_t v) { *reinterpret_cast<uint32_t volatile *>(mmio_base_gr0 + smg * sizeof (uint32_t) + std::to_underlying (r)) = v; }
inline void write (unsigned ctx, GR1_Arr32 r, uint32_t v) { *reinterpret_cast<uint32_t volatile *>(mmio_base_gr1 + ctx * sizeof (uint32_t) + std::to_underlying (r)) = v; }
inline void write (unsigned ctx, Ctx_Arr32 r, uint32_t v) { *reinterpret_cast<uint32_t volatile *>(mmio_base_ctx + ctx * page_size + std::to_underlying (r)) = v; }
inline void write (unsigned ctx, Ctx_Arr64 r, uint64_t v) { *reinterpret_cast<uint64_t volatile *>(mmio_base_ctx + ctx * page_size + std::to_underlying (r)) = v; }
inline bool glb_spi (unsigned spi) const
{
for (unsigned i { 0 }; i < sizeof (board.glb) / sizeof (*board.glb); i++)
if (board.glb[i].flg && board.glb[i].spi == spi)
return true;
return false;
}
inline bool ctx_spi (unsigned spi) const
{
for (unsigned i { 0 }; i < sizeof (board.ctx) / sizeof (*board.ctx); i++)
if (board.ctx[i].flg && board.ctx[i].spi == spi)
return true;
return false;
}
void init();
void fault();
void tlb_invalidate (unsigned, uint64_t);
void tlb_invalidate (Sdid);
void tlb_sync_ctx (unsigned);
void tlb_sync_glb();
public:
explicit Smmu (Board::Smmu const &);
bool conf_smg (uint8_t);
bool configure (Space_dma *, uintptr_t);
// FIXME: Reports first SMMU only
static inline uint8_t avail_smg() { return list ? list->num_smg : 0; }
static inline uint8_t avail_ctx() { return list ? list->num_ctx : 0; }
static inline void initialize()
{
for (auto smmu { list }; smmu; smmu = smmu->next)
smmu->init();
}
static inline void tlb_invalidate_all (Sdid s)
{
for (auto smmu { list }; smmu; smmu = smmu->next)
smmu->tlb_invalidate (s);
}
static inline Smmu *lookup (Hpt::OAddr p)
{
for (auto smmu { list }; smmu; smmu = smmu->next)
if (smmu->board.mmio == p)
return smmu;
return nullptr;
}
static inline bool using_spi (unsigned spi)
{
for (auto smmu { list }; smmu; smmu = smmu->next)
if (smmu->glb_spi (spi) || smmu->ctx_spi (spi))
return true;
return false;
}
static inline void interrupt (unsigned spi)
{
for (auto smmu { list }; smmu; smmu = smmu->next)
if (smmu->glb_spi (spi) || smmu->ctx_spi (spi))
smmu->fault();
}
[[nodiscard]]
static inline void *operator new (size_t) noexcept
{
return cache.alloc();
}
};


@@ -18,6 +18,7 @@
#pragma once
#include "ptab_dpt.hpp"
#include "smmu.hpp"
#include "space_mem.hpp"
class Space_dma final : public Space_mem<Space_dma>
@@ -75,7 +76,7 @@ class Space_dma final : public Space_mem<Space_dma>
auto update (uint64_t v, uint64_t p, unsigned o, Paging::Permissions pm, Memattr ma) { return dptp.update (v, p, o, pm, ma); }
- void sync() {}
+ void sync() { Smmu::tlb_invalidate_all (sdid); }
auto get_sdid() const { return sdid; }
};
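The effect of this one-line change: after rewriting DMA page tables, sync() now flushes stale SMMU TLB entries tagged with this space's SDID on every SMMU. A hypothetical caller would pair the two operations like this (the remap wrapper is illustrative, not NOVA code):

// Illustrative pairing of update() and sync(); "remap" is not from this commit
void remap (Space_dma *dma, uint64_t v, uint64_t p, unsigned o, Paging::Permissions pm, Memattr ma)
{
    dma->update (v, p, o, pm, ma);  // rewrite the DMA page table (DPT)
    dma->sync();                    // Smmu::tlb_invalidate_all (sdid) on all SMMUs
}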


@@ -19,6 +19,7 @@
#include "cpu.hpp"
#include "interrupt.hpp"
#include "lowlevel.hpp"
#include "smmu.hpp"
extern "C" [[noreturn]]
void bootstrap (cpu_t c, unsigned e)
@@ -28,6 +29,10 @@ void bootstrap (cpu_t c, unsigned e)
// Once initialized, each core can handle its assigned interrupts
Interrupt::init();
// Before cores leave the barrier into userland, the SMMU must be active
if (Cpu::bsp)
Smmu::initialize();
// Barrier: wait for all CPUs to arrive here
for (Cpu::online++; Cpu::online != Cpu::count; pause()) ;


@@ -16,7 +16,14 @@
*/
#include "hip_arch.hpp"
#include "smmu.hpp"
#include "stdio.hpp"
void Hip_arch::build()
{
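// Export the SMMU resource counts to userland via the HIP; per the FIXME in smmu.hpp, these reflect the first SMMU only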
num_smg = Smmu::avail_smg();
num_ctx = Smmu::avail_ctx();
trace (TRACE_ROOT, "INFO: SMG#: %3u", num_smg);
trace (TRACE_ROOT, "INFO: CTX#: %3u", num_ctx);
}


@@ -22,6 +22,7 @@
#include "extern.hpp"
#include "fdt.hpp"
#include "ptab_hpt.hpp"
#include "smmu.hpp"
extern "C" Hpt::OAddr kern_ptab_setup (unsigned cpu)
{
@@ -76,5 +77,11 @@ extern "C" unsigned init()
Acpi::init() || Fdt::init();
// If SMMUs were not enumerated by firmware, then enumerate them based on board knowledge
if (!Smmu::avail_smg() && !Smmu::avail_ctx())
for (unsigned i = 0; i < sizeof (Board::smmu) / sizeof (*Board::smmu); i++)
if (Board::smmu[i].mmio)
new Smmu (Board::smmu[i]);
return Cpu::boot_cpu;
}


@@ -23,10 +23,10 @@
#include "gicr.hpp"
#include "hazard.hpp"
#include "interrupt.hpp"
#include "smmu.hpp"
#include "stdio.hpp"
#include "timeout.hpp"
#include "timer.hpp"
#include "util.hpp"
Interrupt Interrupt::int_table[NUM_SPI];
@@ -90,6 +90,13 @@ Event::Selector Interrupt::handle_spi (uint32_t val, bool)
Gicc::eoi (val);
if (Smmu::using_spi (spi)) {
Smmu::interrupt (spi);
Gicc::dir (val);
}
return Event::Selector::NONE;
}
@@ -172,6 +179,10 @@ void Interrupt::init()
for (unsigned spi { 0 }; spi < num_pin(); spi++) {
// Don't touch SMMU interrupts
if (Smmu::using_spi (spi))
continue;
Config cfg { int_table[spi].config };
// Interrupt is not assigned to this CPU

src/aarch64/smmu.cpp (new file)

@@ -0,0 +1,271 @@
/*
* System Memory Management Unit (ARM SMMUv2)
*
* Copyright (C) 2019-2023 Udo Steinberg, BedRock Systems, Inc.
*
* This file is part of the NOVA microhypervisor.
*
* NOVA is free software: you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* NOVA is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License version 2 for more details.
*/
#include "bits.hpp"
#include "hip.hpp"
#include "interrupt.hpp"
#include "lock_guard.hpp"
#include "lowlevel.hpp"
#include "smmu.hpp"
#include "space_dma.hpp"
#include "space_hst.hpp"
#include "stdio.hpp"
INIT_PRIORITY (PRIO_SLAB) Slab_cache Smmu::cache { sizeof (Smmu), alignof (Smmu) };
Smmu::Smmu (Board::Smmu const &brd) : List (list), board (brd)
{
// Map first SMMU page
Hptp::master_map (mmap, board.mmio, 0,
Paging::Permissions (Paging::G | Paging::W | Paging::R), Memattr::dev());
// This facilitates access to the GR0 register space only
mmio_base_gr0 = mmap;
auto const idr0 { read (GR0_Reg32::IDR0) };
auto const idr1 { read (GR0_Reg32::IDR1) };
auto const idr2 { read (GR0_Reg32::IDR2) };
auto const idr7 { read (GR0_Reg32::IDR7) };
// Determine SMMU capabilities
mode = idr0 & BIT (27) ? Mode::STREAM_MATCHING : Mode::STREAM_INDEXING;
sidx_bits = idr0 & BIT (8) ? 16 : idr0 >> 9 & BIT_RANGE (3, 0);
page_size = idr1 & BIT (31) ? BIT (16) : BIT (12);
num_smg = BIT_RANGE (7, 0) & idr0;
num_ctx = BIT_RANGE (7, 0) & idr1;
ias = BIT_RANGE (3, 0) & idr2;
oas = BIT_RANGE (3, 0) & idr2 >> 4;
// Treat DPT as noncoherent if at least one SMMU requires it
Dpt::noncoherent |= !(idr0 & BIT (14));
// Determine total size of the SMMU
auto const smmu_pnum { BIT ((idr1 >> 28 & BIT_RANGE (2, 0)) + 1) };
auto const smmu_size { page_size * smmu_pnum * 2 };
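// Example: with 4KiB pages and IDR1.NUMPAGENDXB = 3, smmu_pnum = BIT (4) = 16
// and the region is 2 * 16 * 4KiB = 128KiB (the global half plus the context bank half)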
// Map all SMMU pages
Hptp::master_map (mmap, board.mmio, static_cast<unsigned>(bit_scan_reverse (smmu_size)) - PAGE_BITS,
Paging::Permissions (Paging::G | Paging::W | Paging::R), Memattr::dev());
// This facilitates access to the GR1 and CTX register spaces
mmio_base_gr1 = mmio_base_gr0 + page_size;
mmio_base_ctx = mmio_base_gr0 + page_size * smmu_pnum;
// Allocate configuration table
config = new Config;
trace (TRACE_SMMU, "SMMU: %#010lx %#x r%up%u S1:%u S2:%u N:%u C:%u SMG:%u CTX:%u SID:%u-bit Mode:%u",
board.mmio, smmu_size, idr7 >> 4 & BIT_RANGE (3, 0), idr7 & BIT_RANGE (3, 0),
!!(idr0 & BIT (30)), !!(idr0 & BIT (29)), !!(idr0 & BIT (28)), !!(idr0 & BIT (14)),
num_smg, num_ctx, sidx_bits, std::to_underlying (mode));
// Reserve MMIO region
Space_hst::user_access (board.mmio, smmu_size, false);
// Advance memory map pointer
mmap += smmu_size;
Hip::set_feature (Hip_arch::Feature::SMMU);
}
void Smmu::init()
{
// Configure global fault interrupts
for (unsigned i { 0 }; i < sizeof (board.glb) / sizeof (*board.glb); i++)
if (board.glb[i].flg)
Interrupt::conf_spi (board.glb[i].spi, false, board.glb[i].flg & BIT_RANGE (3, 2), Cpu::id);
// Configure context fault interrupts
for (unsigned i { 0 }; i < sizeof (board.ctx) / sizeof (*board.ctx); i++)
if (board.ctx[i].flg)
Interrupt::conf_spi (board.ctx[i].spi, false, board.ctx[i].flg & BIT_RANGE (3, 2), Cpu::id);
// Configure CTXs
for (uint8_t ctx { 0 }; ctx < num_ctx; ctx++)
write (ctx, GR1_Arr32::CBAR, BIT (17)); // Generate "invalid context" fault
// Configure SMGs
for (uint8_t smg { 0 }; smg < num_smg; smg++)
if (!conf_smg (smg))
write (smg, GR0_Arr32::S2CR, BIT (17)); // Generate "invalid context" fault
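// Enable the SMMU via CR0. Our reading of these bits against the SMMUv2
// SMMU_(s)CR0 layout (not spelled out in this commit): global and
// configuration fault reporting/interrupt enables, fault-on-unmatched-stream
// behavior, and related options; bit 0 (CLIENTPD) stays clear, so client
// transactions are translated rather than bypassed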
write (GR0_Reg32::CR0, BIT (21) | BIT_RANGE (12, 11) | BIT (10) | BIT_RANGE (5, 4) | BIT_RANGE (2, 1));
}
bool Smmu::conf_smg (uint8_t smg)
{
// Obtain SMG configuration
auto const dma { config->entry[smg].dma };
auto const sid { config->entry[smg].sid };
auto const msk { config->entry[smg].msk };
auto const ctx { config->entry[smg].ctx };
if (!dma)
return false;
auto const sdid { dma->get_sdid() };
// Disable CTX during configuration
write (ctx, Ctx_Arr32::SCTLR, 0);
// Invalidate stale TLB entries for SDID
tlb_invalidate (sdid);
// Configure CTX as VA64 stage-2
write (ctx, GR1_Arr32::CBA2R, BIT (0));
write (ctx, GR1_Arr32::CBAR, sdid & BIT_RANGE (7, 0));
// Determine input size and number of levels
auto const isz { Dpt::pas (ias) };
auto const lev { Dpt::lev (isz) };
// Configure and enable CTX
write (ctx, Ctx_Arr32::TCR, oas << 16 | TCR_TG0_4K | TCR_SH0_INNER | TCR_ORGN0_WB_RW | TCR_IRGN0_WB_RW | (lev - 2) << 6 | (64 - isz));
write (ctx, Ctx_Arr64::TTBR0, Kmem::ptr_to_phys (dma->get_ptab (lev - 1)));
write (ctx, Ctx_Arr32::SCTLR, BIT_RANGE (6, 5) | BIT (0));
// Disable SMG during configuration
write (smg, GR0_Arr32::SMR, 0);
// Configure and enable SMG
write (smg, GR0_Arr32::S2CR, BIT (27) | ctx);
write (smg, GR0_Arr32::SMR, BIT (31) | msk << 16 | sid);
return true;
}
bool Smmu::configure (Space_dma *dma, uintptr_t dad)
{
auto const sid { static_cast<uint16_t>(dad) };
auto const msk { static_cast<uint16_t>(dad >> 16) };
auto smg { static_cast<uint8_t> (dad >> 32) };
auto const ctx { static_cast<uint8_t> (dad >> 40) };
// When using stream indexing, the maximum SID size is 7 bits and selects the SMG directly
if (mode == Mode::STREAM_INDEXING)
smg = static_cast<uint8_t>(sid);
if (!config || (sid | msk) >= BIT (sidx_bits) || smg >= num_smg || ctx >= num_ctx)
return false;
trace (TRACE_SMMU, "SMMU: SID:%#06x MSK:%#06x SMG:%#04x CTX:%#04x assigned to Domain %u", sid, msk, smg, ctx, static_cast<unsigned>(dma->get_sdid()));
Lock_guard <Spinlock> guard { cfg_lock };
// Remember SMG configuration for suspend/resume
config->entry[smg].dma = dma;
config->entry[smg].sid = sid;
config->entry[smg].msk = msk;
config->entry[smg].ctx = ctx;
return conf_smg (smg);
}
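For reference, the device address descriptor (dad) layout that this decoder implies: SID in bits 15:0, mask in 31:16, SMG in 39:32, CTX in 47:40. A sketch of the inverse packing helper; make_dad is illustrative, not part of this commit:

// Illustrative only: packs the fields configure() extracts at bits 0, 16, 32 and 40
constexpr uintptr_t make_dad (uint16_t sid, uint16_t msk, uint8_t smg, uint8_t ctx)
{
    return static_cast<uintptr_t>(sid)       |
           static_cast<uintptr_t>(msk) << 16 |
           static_cast<uintptr_t>(smg) << 32 |
           static_cast<uintptr_t>(ctx) << 40;
}

Note that under Mode::STREAM_INDEXING the smg field is ignored, because the SID selects the stream mapping group directly.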
void Smmu::fault()
{
auto const gfsr { read (GR0_Reg32::GFSR) };
if (gfsr & BIT_RANGE (8, 0)) {
auto const syn { read (GR0_Reg32::GFSYNR0) };
trace (TRACE_SMMU, "SMMU: GLB Fault (M:%u UUT:%u P:%u E:%u CA:%u UCI:%u UCB:%u SMC:%u US:%u IC:%u) at %#010lx (%c%c%c) SID:%#x",
!!(gfsr & BIT (31)), !!(gfsr & BIT (8)), !!(gfsr & BIT (7)), !!(gfsr & BIT (6)), !!(gfsr & BIT (5)),
!!(gfsr & BIT (4)), !!(gfsr & BIT (3)), !!(gfsr & BIT (2)), !!(gfsr & BIT (1)), !!(gfsr & BIT (0)),
read (GR0_Reg64::GFAR),
syn & BIT (3) ? 'I' : 'D', // Instruction / Data
syn & BIT (2) ? 'P' : 'U', // Privileged / Unprivileged
syn & BIT (1) ? 'W' : 'R', // Write / Read
read (GR0_Reg32::GFSYNR1) & BIT_RANGE (15, 0));
write (GR0_Reg32::GFSR, gfsr);
}
for (unsigned ctx { 0 }; ctx < num_ctx; ctx++) {
auto const fsr { read (ctx, Ctx_Arr32::FSR) };
if (fsr & BIT_RANGE (8, 1)) {
auto const syn { read (ctx, Ctx_Arr32::FSYNR0) };
trace (TRACE_SMMU, "SMMU: C%02u Fault (M:%u SS:%u UUT:%u AS:%u LK:%u MC:%u E:%u P:%u A:%u T:%u) at %#010lx (%c%c%c) LVL:%u",
ctx, !!(fsr & BIT (31)), !!(fsr & BIT (30)),
!!(fsr & BIT (8)), !!(fsr & BIT (7)), !!(fsr & BIT (6)), !!(fsr & BIT (5)),
!!(fsr & BIT (4)), !!(fsr & BIT (3)), !!(fsr & BIT (2)), !!(fsr & BIT (1)),
read (ctx, Ctx_Arr64::FAR),
syn & BIT (6) ? 'I' : 'D', // Instruction / Data
syn & BIT (5) ? 'P' : 'U', // Privileged / Unprivileged
syn & BIT (4) ? 'W' : 'R', // Write / Read
syn & BIT_RANGE (1, 0));
write (ctx, Ctx_Arr32::FSR, fsr);
}
}
}
/*
* TLB Invalidate by IPA
*/
void Smmu::tlb_invalidate (unsigned ctx, uint64_t ipa)
{
// Post TLB maintenance operation
write (ctx, Ctx_Arr64::TLBIIPAS2, ipa >> 12);
// Ensure completion
tlb_sync_ctx (ctx);
}
/*
* TLB Invalidate by VMID
*/
void Smmu::tlb_invalidate (Sdid vmid)
{
// Post TLB maintenance operation
write (GR0_Reg32::TLBIVMID, vmid & BIT_RANGE (15, 0));
// Ensure completion
tlb_sync_glb();
}
/*
* Ensure completion of one or more posted TLB invalidate operations
* accepted in the specified translation context bank only.
*/
void Smmu::tlb_sync_ctx (unsigned ctx)
{
Lock_guard <Spinlock> guard { inv_lock };
write (ctx, Ctx_Arr32::TLBSYNC, 0);
while (read (ctx, Ctx_Arr32::TLBSTATUS) & BIT (0))
pause();
}
/*
* Ensure completion of one or more posted TLB invalidate operations
* accepted in the global address space or in any translation context bank.
*/
void Smmu::tlb_sync_glb()
{
Lock_guard <Spinlock> guard { inv_lock };
write (GR0_Reg32::TLBGSYNC, 0);
while (read (GR0_Reg32::TLBGSTATUS) & BIT (0))
pause();
}
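Putting the new pieces together, a hypothetical attach path. Everything here except Smmu::lookup() and Smmu::configure() is illustrative glue, not part of this commit:

// Illustrative: attach a device stream to a DMA space on a known SMMU
bool attach (Hpt::OAddr smmu_mmio, Space_dma *dma, uint16_t sid, uint8_t smg, uint8_t ctx)
{
    Smmu *smmu { Smmu::lookup (smmu_mmio) };    // find the SMMU by MMIO base address
    if (!smmu)
        return false;

    // Exact match (msk = 0); smg is ignored under stream indexing
    auto const dad { static_cast<uintptr_t>(sid)       |
                     static_cast<uintptr_t>(smg) << 32 |
                     static_cast<uintptr_t>(ctx) << 40 };
    return smmu->configure (dma, dad);
}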