insn.spec: implement some interrupts (program, alignment, system call).

branch: main
author: Jean-François Nguyen
parent: 2e29794b7d
commit: d3546e4362

@@ -133,8 +133,7 @@ class MicrowattWrapper(Elaboratable):
end architecture behave;
""")

def __init__(self, **kwargs):
self.pfv = pfv.Interface(mem_aligned=False)
self.pfv = pfv.Interface(mem_aligned=False, illegal_insn_heai=False)
self.wb_insn = wishbone.Interface(addr_width=29, data_width=64, granularity=8,
features=("stall",))
self.wb_data = wishbone.Interface(addr_width=29, data_width=64, granularity=8,
@@ -256,6 +255,14 @@ class MicrowattWrapper(Elaboratable):
Assume(~dmi.req),
Assume(~terminated),
]
with m.If(self.pfv.stb):
m.d.comb += [
# no decrementer interrupts
Assume(self.pfv.msr.w_mask.ee.implies(~self.pfv.msr.w_data.ee)),
# no trace interrupts
Assume(self.pfv.msr.w_mask.te[0].implies(~self.pfv.msr.w_data.te[0])),
Assume(self.pfv.msr.w_mask.te[1].implies(~self.pfv.msr.w_data.te[1])),
]

return m


@@ -62,10 +62,8 @@ class InsnTestbench(Elaboratable):
Assert(dut.pfv.intr == spec.pfv.intr),
]

with m.If(t_post.zero & ~spec.pfv.intr):
m.d.comb += [
Assert(dut.pfv.nia == spec.pfv.nia),
]
with m.If(t_post.zero):
m.d.comb += Assert(dut.pfv.nia == spec.pfv.nia)

m.submodules.ra = ra = _GPRFileTest(self.check, port="ra")
m.submodules.rb = rb = _GPRFileTest(self.check, port="rb")
@@ -79,7 +77,7 @@ class InsnTestbench(Elaboratable):
spec.pfv.rt.r_data.eq(dut.pfv.rt.r_data),
]

with m.If(t_post.zero & ~spec.pfv.intr):
with m.If(t_post.zero):
m.d.comb += [
Assert(ra.valid.all()),
Assert(rb.valid.all()),
@@ -91,7 +89,7 @@ class InsnTestbench(Elaboratable):

m.d.comb += spec.pfv.mem.r_data.eq(dut.pfv.mem.r_data)

with m.If(t_post.zero & ~spec.pfv.intr):
with m.If(t_post.zero):
m.d.comb += Assert(mem.valid.all())

m.submodules.cr = cr = _SysRegTest(self.check, reg="cr" )
@@ -114,7 +112,7 @@ class InsnTestbench(Elaboratable):
spec.pfv.srr1.r_data.eq(dut.pfv.srr1.r_data),
]

with m.If(t_post.zero & ~spec.pfv.intr):
with m.If(t_post.zero):
m.d.comb += [
Assert(cr .valid.all()),
Assert(msr .valid.all()),

@@ -2,9 +2,10 @@ from amaranth import *

from power_fv import pfv
from power_fv.insn.const import *
from power_fv.intr import *

from . import InsnSpec
from .utils import iea
from .utils import iea, msr_to_srr1


__all__ = ["BranchSpec"]
@@ -20,7 +21,9 @@ class BranchSpec(InsnSpec, Elaboratable):
self.pfv.msr.r_mask.sf.eq(1),
]

# Raise an interrupt if the BO field is invalid.
# Raise an interrupt if BO is invalid

illegal_insn = Record([("bo", 1)])

if isinstance(self.insn, (
BC , BCA , BCL , BCLA ,
@@ -42,122 +45,145 @@ class BranchSpec(InsnSpec, Elaboratable):
"1-00-",
"1-01-",
]
m.d.comb += self.pfv.intr.eq(~self.insn.BO.matches(*bo_valid_patterns))

else:
m.d.comb += self.pfv.intr.eq(0)

# Is this branch taken?
m.d.comb += illegal_insn.bo.eq(~self.insn.BO.matches(*bo_valid_patterns))
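# The visible tail of bo_valid_patterns maps onto the PowerISA BO encodings
# (a gloss from Book I, not part of the commit; "-" is a don't-care bit for
# Value.matches()):
#   "001--" : branch if CR(BI) = 0        "011--" : branch if CR(BI) = 1
#   "1-00-" : decrement CTR, branch if CTR != 0
#   "1-01-" : decrement CTR, branch if CTR = 0
# Any BO value outside the valid set sets illegal_insn.bo and takes the
# Program interrupt path below.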

taken = Signal()
with m.If(illegal_insn.any()):
if self.pfv.illegal_insn_heai:
raise NotImplementedError

cond_bit = Signal()
cond_ok = Signal()
ctr_any = Signal()
ctr_ok = Signal()
m.d.comb += [
self.pfv.intr.eq(1),
self.pfv.nia .eq(INTR_PROGRAM.vector_addr),
INTR_PROGRAM.write_msr(self.pfv.msr),

self.pfv.srr0.w_mask.eq(Repl(1, 64)),
self.pfv.srr0.w_data.eq(iea(self.pfv.cia, self.pfv.msr.r_data.sf)),

self.pfv.srr1.w_mask[63-36:64-33].eq(Repl(1, 4)),
self.pfv.srr1.w_data[63-36:64-33].eq(0),
self.pfv.srr1.w_mask[63-42].eq(1),
self.pfv.srr1.w_data[63-42].eq(0),
self.pfv.srr1.w_mask[63-46:64-43].eq(Repl(1, 4)),
self.pfv.srr1.w_data[63-46:64-43].eq(0b0100), # Illegal Instruction type (deprecated)
self.pfv.srr1.w_mask[63-47].eq(1),
self.pfv.srr1.w_data[63-47].eq(0),

msr_to_srr1(self.pfv.msr, self.pfv.srr1, 0, 32),
msr_to_srr1(self.pfv.msr, self.pfv.srr1, 37, 41),
msr_to_srr1(self.pfv.msr, self.pfv.srr1, 48, 63),
]
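# Indexing note (a worked example, not from the commit): PowerISA numbers
# MSR/SRR1 bits 0..63 from the MSB, while Amaranth slices are LSB-first, so
# PowerISA bit b lives at index 63-b and the big-endian range b1:b2 becomes
# the slice [63-b2 : 64-b1]. E.g. SRR1(33:36) -> w_mask[27:31] and the lone
# bit SRR1(42) -> w_mask[21]; 0b0100 in SRR1(43:46) is the (deprecated)
# "illegal instruction" Program interrupt reason.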

if isinstance(self.insn, (B, BA, BL, BLA)):
m.d.comb += taken.eq(1)
with m.Else():
taken = Signal()
cond_bit = Signal()
cond_ok = Signal()
ctr_any = Signal()
ctr_ok = Signal()

elif isinstance(self.insn, (
BC , BCA , BCL , BCLA ,
BCLR , BCLRL , BCTAR, BCTARL,
BCCTR, BCCTRL,
)):
# Is this branch taken?

# If BO(0) = 0, test CR(BI)
with m.If(self.insn.BO[4 - 0]):
m.d.comb += cond_ok.eq(1)
with m.Else():
m.d.comb += [
self.pfv.cr.r_mask[::-1].bit_select(self.insn.BI, width=1).eq(1),
if isinstance(self.insn, (B, BA, BL, BLA)):
m.d.comb += taken.eq(1)

cond_bit.eq(self.pfv.cr.r_data[::-1].bit_select(self.insn.BI, width=1)),
cond_ok .eq(cond_bit == self.insn.BO[4 - 1]),
]
elif isinstance(self.insn, (
BC , BCA , BCL , BCLA ,
BCLR , BCLRL , BCTAR, BCTARL,
BCCTR, BCCTRL,
)):

if isinstance(self.insn, (BCCTR, BCCTRL)):
m.d.comb += taken.eq(cond_ok)
else:
# If BO(2) = 0, decrement CTR then test its value.
with m.If(self.insn.BO[4 - 2]):
m.d.comb += ctr_ok.eq(1)
with m.If(self.insn.BO[4 - 0]):
m.d.comb += cond_ok.eq(1)
with m.Else():
# BO(0) = 0, test CR(BI)
m.d.comb += self.pfv.cr.r_mask.bit_select(31-self.insn.BI, width=1).eq(1)
m.d.comb += [
self.pfv.ctr.r_mask.eq(Repl(1, 64)),
self.pfv.ctr.w_mask.eq(Repl(1, 64)),
self.pfv.ctr.w_data.eq(self.pfv.ctr.r_data - 1),

ctr_any.eq(iea(self.pfv.ctr.w_data, self.pfv.msr.r_data.sf).any()),
ctr_ok .eq(ctr_any ^ self.insn.BO[4 - 3]),
cond_bit.eq(self.pfv.cr.r_data.bit_select(31-self.insn.BI, width=1)),
cond_ok .eq(cond_bit == self.insn.BO[4 - 1]),
]
m.d.comb += taken.eq(cond_ok & ctr_ok)

else:
assert False
if isinstance(self.insn, (BCCTR, BCCTRL)):
m.d.comb += taken.eq(cond_ok)
else:
with m.If(self.insn.BO[4 - 2]):
m.d.comb += ctr_ok.eq(1)
with m.Else():
# BO(2) = 0, decrement CTR then test its value.
m.d.comb += [
self.pfv.ctr.r_mask.eq(Repl(1, 64)),
self.pfv.ctr.w_mask.eq(Repl(1, 64)),
self.pfv.ctr.w_data.eq(self.pfv.ctr.r_data - 1),
]
m.d.comb += [
ctr_any.eq(iea(self.pfv.ctr.w_data, self.pfv.msr.r_data.sf).any()),
ctr_ok .eq(ctr_any ^ self.insn.BO[4 - 3]),
]
m.d.comb += taken.eq(cond_ok & ctr_ok)

# Compute the target address
else:
assert False

target = Signal(unsigned(64))
base = Signal(unsigned(64))
offset = Signal( signed(62))
# Compute the target address

# base : CIA if AA=0, 0 otherwise
target = Signal(unsigned(64))
base = Signal(unsigned(64))
offset = Signal( signed(62))

if isinstance(self.insn, (B, BL, BC, BCL)):
m.d.comb += base.eq(self.pfv.cia)
elif isinstance(self.insn, (
BA , BLA , BCA , BCLA ,
BCLR, BCLRL, BCCTR, BCCTRL, BCTAR, BCTARL,
)):
m.d.comb += base.eq(0)
else:
assert False
# base : CIA if AA=0, 0 otherwise

# offset : LI or BD or LR>>2 or CTR>>2 or TAR>>2
if isinstance(self.insn, (B, BL, BC, BCL)):
m.d.comb += base.eq(self.pfv.cia)
elif isinstance(self.insn, (
BA , BLA , BCA , BCLA ,
BCLR, BCLRL, BCCTR, BCCTRL, BCTAR, BCTARL,
)):
m.d.comb += base.eq(0)
else:
assert False

if isinstance(self.insn, (B, BA, BL, BLA)):
m.d.comb += offset.eq(self.insn.LI)
elif isinstance(self.insn, (BC, BCA, BCL, BCLA)):
m.d.comb += offset.eq(self.insn.BD)
elif isinstance(self.insn, (BCLR, BCLRL)):
m.d.comb += [
self.pfv.lr.r_mask[2:].eq(Repl(1, 62)),
offset.eq(self.pfv.lr.r_data[2:]),
]
elif isinstance(self.insn, (BCCTR, BCCTRL)):
m.d.comb += [
self.pfv.ctr.r_mask[2:].eq(Repl(1, 62)),
offset.eq(self.pfv.ctr.r_data[2:]),
]
elif isinstance(self.insn, (BCTAR, BCTARL)):
m.d.comb += [
self.pfv.tar.r_mask[2:].eq(Repl(1, 62)),
offset.eq(self.pfv.tar.r_data[2:]),
]
else:
assert False
# offset : LI or BD or LR>>2 or CTR>>2 or TAR>>2

if isinstance(self.insn, (B, BA, BL, BLA)):
m.d.comb += offset.eq(self.insn.LI)
elif isinstance(self.insn, (BC, BCA, BCL, BCLA)):
m.d.comb += offset.eq(self.insn.BD)
elif isinstance(self.insn, (BCLR, BCLRL)):
m.d.comb += [
self.pfv.lr.r_mask[2:].eq(Repl(1, 62)),
offset.eq(self.pfv.lr.r_data[2:]),
]
elif isinstance(self.insn, (BCCTR, BCCTRL)):
m.d.comb += [
self.pfv.ctr.r_mask[2:].eq(Repl(1, 62)),
offset.eq(self.pfv.ctr.r_data[2:]),
]
elif isinstance(self.insn, (BCTAR, BCTARL)):
m.d.comb += [
self.pfv.tar.r_mask[2:].eq(Repl(1, 62)),
offset.eq(self.pfv.tar.r_data[2:]),
]
else:
assert False

# target : base + offset<<2
# target : base + offset<<2

m.d.comb += target.eq(base + Cat(Const(0, 2), offset))
m.d.comb += target.eq(base + Cat(Const(0, 2), offset))

# Update NIA
# Update NIA

with m.If(taken):
m.d.comb += self.pfv.nia.eq(iea(target, self.pfv.msr.r_data.sf))
with m.Else():
m.d.comb += self.pfv.nia.eq(iea(self.pfv.cia + 4, self.pfv.msr.r_data.sf))
with m.If(taken):
m.d.comb += self.pfv.nia.eq(iea(target, self.pfv.msr.r_data.sf))
with m.Else():
m.d.comb += self.pfv.nia.eq(iea(self.pfv.cia + 4, self.pfv.msr.r_data.sf))

# Write the return address to LR if LK=1
# Write the return address to LR if LK=1

if isinstance(self.insn, (
BL , BLA , BCL , BCLA,
BCLRL, BCCTRL, BCTARL,
)):
m.d.comb += [
self.pfv.lr.w_mask.eq(Repl(1, 64)),
self.pfv.lr.w_data.eq(iea(self.pfv.cia + 4, self.pfv.msr.r_data.sf)),
]
if isinstance(self.insn, (
BL , BLA , BCL , BCLA,
BCLRL, BCCTRL, BCTARL,
)):
m.d.comb += [
self.pfv.lr.w_mask.eq(Repl(1, 64)),
self.pfv.lr.w_data.eq(iea(self.pfv.cia + 4, self.pfv.msr.r_data.sf)),
]

return m

@@ -3,9 +3,10 @@ from amaranth.utils import log2_int

from power_fv import pfv
from power_fv.insn.const import *
from power_fv.intr import *

from . import InsnSpec
from .utils import iea, byte_reversed
from .utils import iea, byte_reversed, msr_to_srr1


__all__ = ["LoadStoreSpec"]
@@ -18,250 +19,296 @@ class LoadStoreSpec(InsnSpec, Elaboratable):
m.d.comb += [
self.pfv.stb .eq(1),
self.pfv.insn.eq(Cat(Const(0, 32), self.insn.as_value())),
self.pfv.nia .eq(iea(self.pfv.cia + 4, self.pfv.msr.r_data.sf)),
self.pfv.msr.r_mask.sf.eq(1),
]

# EA (effective address) = ea_base + ea_offset
# Raise an interrupt if RA is invalid

ea = Signal(64)
ea_base = Signal(64)
ea_offset = Signal(64)

# ea_base : (RA|0) or (RA)

m.d.comb += self.pfv.ra.index.eq(self.insn.RA)
illegal_insn = Record([
("ra_zero", 1),
("ra_rt" , 1),
])

if isinstance(self.insn, (
LBZ, LBZX, LHZ, LHZX, LHA, LHAX, LWZ, LWZX,
STB, STBX, STH, STHX, STW, STWX,
LWBRX, STHBRX, STWBRX
)):
m.d.comb += [
self.pfv.ra.r_stb.eq(self.insn.RA != 0),
ea_base.eq(Mux(self.insn.RA != 0, self.pfv.ra.r_data, 0)),
]
elif isinstance(self.insn, (
LBZU, LBZUX, LHZU, LHZUX, LHAU, LHAUX, LWZU, LWZUX,
STBU, STBUX, STHU, STHUX, STWU, STWUX
STBU, STBUX, STHU, STHUX, STWU, STWUX,
)):
m.d.comb += [
self.pfv.ra.r_stb.eq(1),
ea_base.eq(self.pfv.ra.r_data),
]
else:
assert False

# ea_offset : EXTS(D) or (RB)

m.d.comb += illegal_insn.ra_zero.eq(self.insn.RA == 0)
if isinstance(self.insn, (
LBZ, LBZU, LHZ, LHZU, LHA, LHAU, LWZ, LWZU,
STB, STBU, STH, STHU, STW, STWU,
)):
m.d.comb += ea_offset.eq(self.insn.D.as_signed())
elif isinstance(self.insn, (
LBZX, LBZUX, LHZX, LHZUX, LHAX, LHAUX, LWZX, LWZUX,
STBX, STBUX, STHX, STHUX, STWX, STWUX,
LWBRX, STHBRX, STWBRX,
LBZU, LBZUX, LHZU, LHZUX, LHAU, LHAUX, LWZU, LWZUX,
)):
m.d.comb += [
self.pfv.rb.index.eq(self.insn.RB),
self.pfv.rb.r_stb.eq(1),
ea_offset.eq(self.pfv.rb.r_data)
]
else:
assert False

m.d.comb += ea.eq(iea(ea_base + ea_offset, self.pfv.msr.r_data.sf))

# If `pfv.mem_aligned` is set, `pfv.mem.addr` points to the dword containing EA.
# If `pfv.mem_aligned` is unset, `pfv.mem.addr` is equal to EA.

byte_offset = Signal(3)
half_offset = Signal(2)
word_offset = Signal(1)
m.d.comb += illegal_insn.ra_rt.eq(self.insn.RA == self.insn.RT)

m.d.comb += self.pfv.mem.addr[3:].eq(ea[3:])
with m.If(illegal_insn.any()):
if self.pfv.illegal_insn_heai:
raise NotImplementedError

if self.pfv.mem_aligned:
m.d.comb += [
self.pfv.mem.addr[:3].eq(0),
byte_offset.eq(ea[:3]),
]
else:
m.d.comb += [
self.pfv.mem.addr[:3].eq(ea[:3]),
byte_offset.eq(0),
self.pfv.intr.eq(1),
self.pfv.nia .eq(INTR_PROGRAM.vector_addr),
INTR_PROGRAM.write_msr(self.pfv.msr),

self.pfv.srr0.w_mask.eq(Repl(1, 64)),
self.pfv.srr0.w_data.eq(iea(self.pfv.cia, self.pfv.msr.r_data.sf)),

self.pfv.srr1.w_mask[63-36:64-33].eq(Repl(1, 4)),
self.pfv.srr1.w_data[63-36:64-33].eq(0),
self.pfv.srr1.w_mask[63-42].eq(1),
self.pfv.srr1.w_data[63-42].eq(0),
self.pfv.srr1.w_mask[63-46:64-43].eq(Repl(1, 4)),
self.pfv.srr1.w_data[63-46:64-43].eq(0b0100), # Illegal Instruction type (deprecated)
self.pfv.srr1.w_mask[63-47].eq(1),
self.pfv.srr1.w_data[63-47].eq(0),

msr_to_srr1(self.pfv.msr, self.pfv.srr1, 0, 32),
msr_to_srr1(self.pfv.msr, self.pfv.srr1, 37, 41),
msr_to_srr1(self.pfv.msr, self.pfv.srr1, 48, 63),
]

m.d.comb += [
half_offset.eq(byte_offset[1:]),
word_offset.eq(byte_offset[2:]),
]
with m.Else():
# EA (effective address) = ea_base + ea_offset

msr_le = self.pfv.msr.r_data.le
m.d.comb += self.pfv.msr.r_mask.le.eq(1)
ea = Signal(64)
ea_base = Signal(64)
ea_offset = Signal(64)

# Load: read from memory, then write the result to RT.
# ea_base : (RA|0) or (RA)

if isinstance(self.insn, (
LBZ, LBZX, LBZU, LBZUX,
LHZ, LHZX, LHZU, LHZUX,
LHA, LHAX, LHAU, LHAUX,
LWZ, LWZX, LWZU, LWZUX,
LWBRX,
)):
load_byte = Signal( 8)
load_half = Signal(16)
load_word = Signal(32)
load_result = Signal(64)

m.d.comb += [
load_byte.eq(self.pfv.mem.r_data.word_select(byte_offset, width= 8)),
load_half.eq(self.pfv.mem.r_data.word_select(half_offset, width=16)),
load_word.eq(self.pfv.mem.r_data.word_select(word_offset, width=32)),
]
m.d.comb += self.pfv.ra.index.eq(self.insn.RA)

if isinstance(self.insn, (LBZ, LBZX, LBZU, LBZUX)):
if isinstance(self.insn, (
LBZ, LBZX, LHZ, LHZX, LHA, LHAX, LWZ, LWZX,
STB, STBX, STH, STHX, STW, STWX,
LWBRX, STHBRX, STWBRX
)):
m.d.comb += [
self.pfv.mem.r_mask.word_select(byte_offset, width=1).eq(0x1),
load_result.eq(load_byte.as_unsigned()),
self.pfv.ra.r_stb.eq(self.insn.RA != 0),
ea_base.eq(Mux(self.insn.RA != 0, self.pfv.ra.r_data, 0)),
]
elif isinstance(self.insn, (LHZ, LHZX, LHZU, LHZUX)):
elif isinstance(self.insn, (
LBZU, LBZUX, LHZU, LHZUX, LHAU, LHAUX, LWZU, LWZUX,
STBU, STBUX, STHU, STHUX, STWU, STWUX
)):
m.d.comb += [
self.pfv.mem.r_mask.word_select(half_offset, width=2).eq(0x3),
load_result.eq(byte_reversed(load_half, ~msr_le).as_unsigned()),
self.pfv.ra.r_stb.eq(1),
ea_base.eq(self.pfv.ra.r_data),
]
elif isinstance(self.insn, (LHA, LHAX, LHAU, LHAUX)):
m.d.comb += [
self.pfv.mem.r_mask.word_select(half_offset, width=2).eq(0x3),
load_result.eq(byte_reversed(load_half, ~msr_le).as_signed())
]
elif isinstance(self.insn, (LWZ, LWZX, LWZU, LWZUX)):
m.d.comb += [
self.pfv.mem.r_mask.word_select(word_offset, width=4).eq(0xf),
load_result.eq(byte_reversed(load_word, ~msr_le).as_unsigned()),
]
elif isinstance(self.insn, LWBRX):
else:
assert False

# ea_offset : EXTS(D) or (RB)

if isinstance(self.insn, (
LBZ, LBZU, LHZ, LHZU, LHA, LHAU, LWZ, LWZU,
STB, STBU, STH, STHU, STW, STWU,
)):
m.d.comb += ea_offset.eq(self.insn.D.as_signed())
elif isinstance(self.insn, (
LBZX, LBZUX, LHZX, LHZUX, LHAX, LHAUX, LWZX, LWZUX,
STBX, STBUX, STHX, STHUX, STWX, STWUX,
LWBRX, STHBRX, STWBRX,
)):
m.d.comb += [
self.pfv.mem.r_mask.word_select(word_offset, width=4).eq(0xf),
load_result.eq(byte_reversed(load_word, msr_le).as_unsigned()),
self.pfv.rb.index.eq(self.insn.RB),
self.pfv.rb.r_stb.eq(1),
ea_offset.eq(self.pfv.rb.r_data)
]
else:
assert False

m.d.comb += [
self.pfv.rt.index .eq(self.insn.RT),
self.pfv.rt.w_stb .eq(1),
self.pfv.rt.w_data.eq(load_result),
]
m.d.comb += ea.eq(iea(ea_base + ea_offset, self.pfv.msr.r_data.sf))

# Store: read from RS, then write the result to memory.
byte_offset = Signal(3)
half_offset = Signal(2)
word_offset = Signal(1)

elif isinstance(self.insn, (
STB, STBX, STBU, STBUX,
STH, STHX, STHU, STHUX,
STW, STWX, STWU, STWUX,
STHBRX, STWBRX,
)):
store_byte = Signal(64)
store_half = Signal(64)
store_word = Signal(64)
# If `pfv.mem_aligned` is set, `pfv.mem.addr` points to the dword containing EA.
# If `pfv.mem_aligned` is unset, `pfv.mem.addr` is equal to EA.

m.d.comb += [
self.pfv.rs.index.eq(self.insn.RS),
self.pfv.rs.r_stb.eq(1),
m.d.comb += self.pfv.mem.addr[3:].eq(ea[3:])

store_byte.eq(Repl(self.pfv.rs.r_data[: 8], count=8)),
store_half.eq(Repl(self.pfv.rs.r_data[:16], count=4)),
store_word.eq(Repl(self.pfv.rs.r_data[:32], count=2)),
]

if isinstance(self.insn, (STB, STBX, STBU, STBUX)):
m.d.comb += [
self.pfv.mem.w_mask.word_select(byte_offset, width=1).eq(0x1),
self.pfv.mem.w_data.eq(store_byte),
]
elif isinstance(self.insn, (STH, STHX, STHU, STHUX)):
m.d.comb += [
self.pfv.mem.w_mask.word_select(half_offset, width=2).eq(0x3),
self.pfv.mem.w_data.eq(byte_reversed(store_half, ~msr_le)),
]
elif isinstance(self.insn, (STW, STWX, STWU, STWUX)):
m.d.comb += [
self.pfv.mem.w_mask.word_select(word_offset, width=4).eq(0xf),
self.pfv.mem.w_data.eq(byte_reversed(store_word, ~msr_le)),
]
elif isinstance(self.insn, STHBRX):
if self.pfv.mem_aligned:
m.d.comb += [
self.pfv.mem.w_mask.word_select(half_offset, width=2).eq(0x3),
self.pfv.mem.w_data.eq(byte_reversed(store_half, msr_le)),
self.pfv.mem.addr[:3].eq(0),
byte_offset.eq(ea[:3]),
]
elif isinstance(self.insn, STWBRX):
else:
m.d.comb += [
self.pfv.mem.w_mask.word_select(word_offset, width=4).eq(0xf),
self.pfv.mem.w_data.eq(byte_reversed(store_word, msr_le)),
self.pfv.mem.addr[:3].eq(ea[:3]),
byte_offset.eq(0),
]
else:
assert False

else:
assert False

# Load/store with update: write EA to RA.

if isinstance(self.insn, (
LBZU, LBZUX, LHZU, LHZUX, LHAU, LHAUX, LWZU, LWZUX,
STBU, STBUX, STHU, STHUX, STWU, STWUX,
)):
m.d.comb += [
self.pfv.ra.w_stb .eq(1),
self.pfv.ra.w_data.eq(ea),
half_offset.eq(byte_offset[1:]),
word_offset.eq(byte_offset[2:]),
]
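# Worked example (illustrative, not from the commit): with pfv.mem_aligned
# set and EA = 0x100A, mem.addr = 0x1008 (the dword containing EA),
# byte_offset = 2, half_offset = 1 and word_offset = 0; a halfword access
# then selects bytes 2-3 of the dword via word_select().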

# Interrupt causes
# Raise an Alignment Interrupt if EA is misaligned wrt. `pfv.mem`

ea_misaligned = Signal()

if isinstance(self.insn, (
LBZ, LBZX, LBZU, LBZUX,
STB, STBX, STBU, STBUX,
)):
m.d.comb += ea_misaligned.eq(0)
elif isinstance(self.insn, (
LHZ, LHZX, LHZU, LHZUX, LHA, LHAX, LHAU, LHAUX,
STH, STHX, STHU, STHUX,
STHBRX,
)):
m.d.comb += ea_misaligned.eq(byte_offset[0])
elif isinstance(self.insn, (
LWZ, LWZX, LWZU, LWZUX,
STW, STWX, STWU, STWUX,
LWBRX, STWBRX,
)):
m.d.comb += ea_misaligned.eq(byte_offset[:2].any())
else:
assert False

intr = Record([
("misaligned", 1),
("update_zero", 1),
("update_rt", 1),
])
with m.If(ea_misaligned):
m.d.comb += [
self.pfv.intr.eq(1),
self.pfv.nia .eq(INTR_ALIGNMENT.vector_addr),
INTR_ALIGNMENT.write_msr(self.pfv.msr),

if isinstance(self.insn, (
LBZ, LBZX, LBZU, LBZUX,
STB, STBX, STBU, STBUX,
)):
m.d.comb += intr.misaligned.eq(0)
elif isinstance(self.insn, (
LHZ, LHZX, LHZU, LHZUX, LHA, LHAX, LHAU, LHAUX,
STH, STHX, STHU, STHUX,
STHBRX,
)):
m.d.comb += intr.misaligned.eq(byte_offset[0])
elif isinstance(self.insn, (
LWZ, LWZX, LWZU, LWZUX,
STW, STWX, STWU, STWUX,
LWBRX, STWBRX,
)):
m.d.comb += intr.misaligned.eq(byte_offset[:2].any())
else:
assert False
self.pfv.srr0.w_mask.eq(Repl(1, 64)),
self.pfv.srr0.w_data.eq(iea(self.pfv.cia, self.pfv.msr.r_data.sf)),

if isinstance(self.insn, (
LBZU, LBZUX, LHZU, LHZUX, LHAU, LHAUX, LWZU, LWZUX,
STBU, STBUX, STHU, STHUX, STWU, STWUX,
)):
m.d.comb += intr.update_zero.eq(self.insn.RA == 0)
else:
m.d.comb += intr.update_zero.eq(0)
self.pfv.srr1.w_mask[63-36:64-33].eq(Repl(1, 4)),
self.pfv.srr1.w_data[63-36:64-33].eq(0),
self.pfv.srr1.w_mask[63-47:64-42].eq(Repl(1, 6)),
self.pfv.srr1.w_data[63-47:64-42].eq(0),

if isinstance(self.insn, (
LBZU, LBZUX, LHZU, LHZUX, LHAU, LHAUX, LWZU, LWZUX,
)):
m.d.comb += intr.update_rt.eq(self.insn.RA == self.insn.RT)
else:
m.d.comb += intr.update_rt.eq(0)
msr_to_srr1(self.pfv.msr, self.pfv.srr1, 0, 32),
msr_to_srr1(self.pfv.msr, self.pfv.srr1, 37, 41),
msr_to_srr1(self.pfv.msr, self.pfv.srr1, 48, 63),
]

m.d.comb += self.pfv.intr.eq(intr.any())
with m.Else():
m.d.comb += self.pfv.msr.r_mask.le.eq(1)
msr_le = self.pfv.msr.r_data.le

# Load: read from memory, then write the result to RT.

if isinstance(self.insn, (
LBZ, LBZX, LBZU, LBZUX,
LHZ, LHZX, LHZU, LHZUX,
LHA, LHAX, LHAU, LHAUX,
LWZ, LWZX, LWZU, LWZUX,
LWBRX,
)):
load_byte = Signal( 8)
load_half = Signal(16)
load_word = Signal(32)
load_result = Signal(64)

m.d.comb += [
load_byte.eq(self.pfv.mem.r_data.word_select(byte_offset, width= 8)),
load_half.eq(self.pfv.mem.r_data.word_select(half_offset, width=16)),
load_word.eq(self.pfv.mem.r_data.word_select(word_offset, width=32)),
]

if isinstance(self.insn, (LBZ, LBZX, LBZU, LBZUX)):
m.d.comb += [
self.pfv.mem.r_mask.word_select(byte_offset, width=1).eq(0x1),
load_result.eq(load_byte.as_unsigned()),
]
elif isinstance(self.insn, (LHZ, LHZX, LHZU, LHZUX)):
m.d.comb += [
self.pfv.mem.r_mask.word_select(half_offset, width=2).eq(0x3),
load_result.eq(byte_reversed(load_half, ~msr_le).as_unsigned()),
]
elif isinstance(self.insn, (LHA, LHAX, LHAU, LHAUX)):
m.d.comb += [
self.pfv.mem.r_mask.word_select(half_offset, width=2).eq(0x3),
load_result.eq(byte_reversed(load_half, ~msr_le).as_signed())
]
elif isinstance(self.insn, (LWZ, LWZX, LWZU, LWZUX)):
m.d.comb += [
self.pfv.mem.r_mask.word_select(word_offset, width=4).eq(0xf),
load_result.eq(byte_reversed(load_word, ~msr_le).as_unsigned()),
]
elif isinstance(self.insn, LWBRX):
m.d.comb += [
self.pfv.mem.r_mask.word_select(word_offset, width=4).eq(0xf),
load_result.eq(byte_reversed(load_word, msr_le).as_unsigned()),
]
else:
assert False

m.d.comb += [
self.pfv.rt.index .eq(self.insn.RT),
self.pfv.rt.w_stb .eq(1),
self.pfv.rt.w_data.eq(load_result),
]

# Store: read from RS, then write the result to memory.

elif isinstance(self.insn, (
STB, STBX, STBU, STBUX,
STH, STHX, STHU, STHUX,
STW, STWX, STWU, STWUX,
STHBRX, STWBRX,
)):
store_byte = Signal(64)
store_half = Signal(64)
store_word = Signal(64)

m.d.comb += [
self.pfv.rs.index.eq(self.insn.RS),
self.pfv.rs.r_stb.eq(1),

store_byte.eq(Repl(self.pfv.rs.r_data[: 8], count=8)),
store_half.eq(Repl(self.pfv.rs.r_data[:16], count=4)),
store_word.eq(Repl(self.pfv.rs.r_data[:32], count=2)),
]

if isinstance(self.insn, (STB, STBX, STBU, STBUX)):
m.d.comb += [
self.pfv.mem.w_mask.word_select(byte_offset, width=1).eq(0x1),
self.pfv.mem.w_data.eq(store_byte),
]
elif isinstance(self.insn, (STH, STHX, STHU, STHUX)):
m.d.comb += [
self.pfv.mem.w_mask.word_select(half_offset, width=2).eq(0x3),
self.pfv.mem.w_data.eq(byte_reversed(store_half, ~msr_le)),
]
elif isinstance(self.insn, (STW, STWX, STWU, STWUX)):
m.d.comb += [
self.pfv.mem.w_mask.word_select(word_offset, width=4).eq(0xf),
self.pfv.mem.w_data.eq(byte_reversed(store_word, ~msr_le)),
]
elif isinstance(self.insn, STHBRX):
m.d.comb += [
self.pfv.mem.w_mask.word_select(half_offset, width=2).eq(0x3),
self.pfv.mem.w_data.eq(byte_reversed(store_half, msr_le)),
]
elif isinstance(self.insn, STWBRX):
m.d.comb += [
self.pfv.mem.w_mask.word_select(word_offset, width=4).eq(0xf),
self.pfv.mem.w_data.eq(byte_reversed(store_word, msr_le)),
]
else:
assert False

else:
assert False

# Load/store with update: write EA to RA.

if isinstance(self.insn, (
LBZU, LBZUX, LHZU, LHZUX, LHAU, LHAUX, LWZU, LWZUX,
STBU, STBUX, STHU, STHUX, STWU, STWUX,
)):
m.d.comb += [
self.pfv.ra.w_stb .eq(1),
self.pfv.ra.w_data.eq(ea),
]

# Update NIA

m.d.comb += self.pfv.nia.eq(iea(self.pfv.cia + 4, self.pfv.msr.r_data.sf))

return m

@@ -2,10 +2,11 @@ from amaranth import *

from power_fv import pfv
from power_fv.insn.const import *
from power_fv.intr import *
from power_fv.reg import msr_layout

from . import InsnSpec
from .utils import iea
from .utils import iea, msr_to_srr1


__all__ = ["MSRMoveSpec"]
@@ -18,92 +19,118 @@ class MSRMoveSpec(InsnSpec, Elaboratable):
m.d.comb += [
self.pfv.stb .eq(1),
self.pfv.insn.eq(Cat(Const(0, 32), self.insn.as_value())),
self.pfv.nia .eq(iea(self.pfv.cia + 4, self.pfv.msr.r_data.sf)),
self.pfv.msr.r_mask.sf.eq(1),

# mtmsr/mfmsr are privileged
self.pfv.intr.eq(self.pfv.msr.r_data.pr),
self.pfv.msr.r_mask.pr.eq(1),
self.pfv.msr.r_mask.pr.eq(1)
]

rs_as_msr = Record(msr_layout)
ultravisor = Signal()
# Raise a Program Interrupt if executing from Problem State

if isinstance(self.insn, MTMSR):
with m.If(self.pfv.msr.r_data.pr):
m.d.comb += [
self.pfv.rs.index .eq(self.insn.RS),
self.pfv.rs.r_stb .eq(1),

self.pfv.msr.r_mask.s .eq(1),
self.pfv.msr.r_mask.hv.eq(1),

rs_as_msr .eq(self.pfv.rs.r_data),
ultravisor.eq(self.pfv.msr.r_data.s & self.pfv.msr.r_data.hv & ~rs_as_msr.pr),
self.pfv.intr.eq(1),
self.pfv.nia .eq(INTR_PROGRAM.vector_addr),
INTR_PROGRAM.write_msr(self.pfv.msr),

self.pfv.srr0.w_mask.eq(Repl(1, 64)),
self.pfv.srr0.w_data.eq(iea(self.pfv.cia, self.pfv.msr.r_data.sf)),

self.pfv.srr1.w_mask[63-36:64-33].eq(Repl(1, 4)),
self.pfv.srr1.w_data[63-36:64-33].eq(0),
self.pfv.srr1.w_mask[63-42].eq(1),
self.pfv.srr1.w_data[63-42].eq(0),
self.pfv.srr1.w_mask[63-46:64-43].eq(Repl(1, 4)),
self.pfv.srr1.w_data[63-46:64-43].eq(0b0010), # Privileged Instruction type
self.pfv.srr1.w_mask[63-47].eq(1),
self.pfv.srr1.w_data[63-47].eq(0),

msr_to_srr1(self.pfv.msr, self.pfv.srr1, 0, 32),
msr_to_srr1(self.pfv.msr, self.pfv.srr1, 37, 41),
msr_to_srr1(self.pfv.msr, self.pfv.srr1, 48, 63),
]

with m.If(self.insn.L):
# Write bits 48 and 62
with m.Else():
rs_as_msr = Record(msr_layout)
ultravisor = Signal()

if isinstance(self.insn, MTMSR):
m.d.comb += [
self.pfv.msr.w_mask.ee.eq(1),
self.pfv.msr.w_data.ee.eq(rs_as_msr.ee),
self.pfv.msr.w_mask.ri.eq(1),
self.pfv.msr.w_data.ri.eq(rs_as_msr.ri),
self.pfv.rs.index .eq(self.insn.RS),
self.pfv.rs.r_stb .eq(1),

self.pfv.msr.r_mask.s .eq(1),
self.pfv.msr.r_mask.hv.eq(1),

rs_as_msr .eq(self.pfv.rs.r_data),
ultravisor.eq(self.pfv.msr.r_data.s & self.pfv.msr.r_data.hv & ~rs_as_msr.pr),
]
with m.Else():
# Write bits:

with m.If(self.insn.L):
# Write bits 48 and 62
m.d.comb += [
self.pfv.msr.w_mask.ee.eq(1),
self.pfv.msr.w_data.ee.eq(rs_as_msr.ee),
self.pfv.msr.w_mask.ri.eq(1),
self.pfv.msr.w_data.ri.eq(rs_as_msr.ri),
]
with m.Else():
# Write bits:
m.d.comb += [
# 48, 58, 59
self.pfv.msr.w_mask.ee .eq(1),
self.pfv.msr.w_data.ee .eq(rs_as_msr.ee | rs_as_msr.pr),
self.pfv.msr.w_mask.ir .eq(1),
self.pfv.msr.w_data.ir .eq((rs_as_msr.ir | rs_as_msr.pr) & ~ultravisor),
self.pfv.msr.w_mask.dr .eq(1),
self.pfv.msr.w_data.dr .eq((rs_as_msr.dr | rs_as_msr.pr) & ~ultravisor),
# 32:40
self.pfv.msr.w_mask._32.eq(Repl(1, 6)),
self.pfv.msr.w_data._32.eq(rs_as_msr._32),
self.pfv.msr.w_mask.vec.eq(1),
self.pfv.msr.w_data.vec.eq(rs_as_msr.vec),
self.pfv.msr.w_mask._39.eq(1),
self.pfv.msr.w_data._39.eq(rs_as_msr._39),
self.pfv.msr.w_mask.vsx.eq(1),
self.pfv.msr.w_data.vsx.eq(rs_as_msr.vsx),
# 42:47
self.pfv.msr.w_mask._42.eq(Repl(1, 6)),
self.pfv.msr.w_data._42.eq(rs_as_msr._42),
# 49:50
self.pfv.msr.w_mask.pr .eq(1),
self.pfv.msr.w_data.pr .eq(rs_as_msr.pr),
self.pfv.msr.w_mask.fp .eq(1),
self.pfv.msr.w_data.fp .eq(rs_as_msr.fp),
# 52:57
self.pfv.msr.w_mask.fe0.eq(1),
self.pfv.msr.w_data.fe0.eq(rs_as_msr.fe0),
self.pfv.msr.w_mask.te .eq(Repl(1, 2)),
self.pfv.msr.w_data.te .eq(rs_as_msr.te),
self.pfv.msr.w_mask.fe1.eq(1),
self.pfv.msr.w_data.fe1.eq(rs_as_msr.fe1),
self.pfv.msr.w_mask._56.eq(Repl(1, 2)),
self.pfv.msr.w_data._56.eq(rs_as_msr._56),
# 60:62
self.pfv.msr.w_mask._60.eq(1),
self.pfv.msr.w_data._60.eq(rs_as_msr._60),
self.pfv.msr.w_mask.pmm.eq(1),
self.pfv.msr.w_data.pmm.eq(rs_as_msr.pmm),
self.pfv.msr.w_mask.ri .eq(1),
self.pfv.msr.w_data.ri .eq(rs_as_msr.ri),
]

elif isinstance(self.insn, MFMSR):
m.d.comb += [
# 48, 58, 59
self.pfv.msr.w_mask.ee .eq(1),
self.pfv.msr.w_data.ee .eq(rs_as_msr.ee | rs_as_msr.pr),
self.pfv.msr.w_mask.ir .eq(1),
self.pfv.msr.w_data.ir .eq((rs_as_msr.ir | rs_as_msr.pr) & ~ultravisor),
self.pfv.msr.w_mask.dr .eq(1),
self.pfv.msr.w_data.dr .eq((rs_as_msr.dr | rs_as_msr.pr) & ~ultravisor),
# 32:40
self.pfv.msr.w_mask._32.eq(Repl(1, 6)),
self.pfv.msr.w_data._32.eq(rs_as_msr._32),
self.pfv.msr.w_mask.vec.eq(1),
self.pfv.msr.w_data.vec.eq(rs_as_msr.vec),
self.pfv.msr.w_mask._39.eq(1),
self.pfv.msr.w_data._39.eq(rs_as_msr._39),
self.pfv.msr.w_mask.vsx.eq(1),
self.pfv.msr.w_data.vsx.eq(rs_as_msr.vsx),
# 42:47
self.pfv.msr.w_mask._42.eq(Repl(1, 6)),
self.pfv.msr.w_data._42.eq(rs_as_msr._42),
# 49:50
self.pfv.msr.w_mask.pr .eq(1),
self.pfv.msr.w_data.pr .eq(rs_as_msr.pr),
self.pfv.msr.w_mask.fp .eq(1),
self.pfv.msr.w_data.fp .eq(rs_as_msr.fp),
# 52:57
self.pfv.msr.w_mask.fe0.eq(1),
self.pfv.msr.w_data.fe0.eq(rs_as_msr.fe0),
self.pfv.msr.w_mask.te .eq(Repl(1, 2)),
self.pfv.msr.w_data.te .eq(rs_as_msr.te),
self.pfv.msr.w_mask.fe1.eq(1),
self.pfv.msr.w_data.fe1.eq(rs_as_msr.fe1),
self.pfv.msr.w_mask._56.eq(Repl(1, 2)),
self.pfv.msr.w_data._56.eq(rs_as_msr._56),
# 60:62
self.pfv.msr.w_mask._60.eq(1),
self.pfv.msr.w_data._60.eq(rs_as_msr._60),
self.pfv.msr.w_mask.pmm.eq(1),
self.pfv.msr.w_data.pmm.eq(rs_as_msr.pmm),
self.pfv.msr.w_mask.ri .eq(1),
self.pfv.msr.w_data.ri .eq(rs_as_msr.ri),
self.pfv.msr.r_mask.eq(Repl(1, 64)),

self.pfv.rt.index .eq(self.insn.RT),
self.pfv.rt.w_stb .eq(1),
self.pfv.rt.w_data.eq(self.pfv.msr.r_data),
]

elif isinstance(self.insn, MFMSR):
m.d.comb += [
self.pfv.msr.r_mask.eq(Repl(1, 64)),
else:
assert False

self.pfv.rt.index .eq(self.insn.RT),
self.pfv.rt.w_stb .eq(1),
self.pfv.rt.w_data.eq(self.pfv.msr.r_data),
]
# Update NIA

else:
assert False
m.d.comb += self.pfv.nia.eq(iea(self.pfv.cia + 4, self.pfv.msr.r_data.sf))

return m

@@ -2,9 +2,10 @@ from amaranth import *

from power_fv import pfv
from power_fv.insn.const import *
from power_fv.intr import *

from . import InsnSpec
from .utils import iea
from .utils import iea, msr_to_srr1


__all__ = ["RotateShiftSpec"]
@@ -17,136 +18,161 @@ class RotateShiftSpec(InsnSpec, Elaboratable):
m.d.comb += [
self.pfv.stb .eq(1),
self.pfv.insn.eq(Cat(Const(0, 32), self.insn.as_value())),
self.pfv.intr.eq(0),
self.pfv.nia .eq(iea(self.pfv.cia + 4, self.pfv.msr.r_data.sf)),
self.pfv.msr.r_mask.sf.eq(1),
]

src = Signal(unsigned(64))
shamt = Signal(unsigned( 6))
rotl = Signal(unsigned(64))
mask = Signal(unsigned(64))
result = Signal(unsigned(64))
# Raise an interrupt if MB or ME is invalid

# Source operand : (RS)(32:63)||(RS)(32:63)
illegal_insn = Record([
("mask", 1),
])

m.d.comb += [
self.pfv.rs.index.eq(self.insn.RS),
self.pfv.rs.r_stb.eq(1),
src.eq(self.pfv.rs.r_data),
]
if isinstance(self.insn, (RLWINM, RLWINM_, RLWNM, RLWNM_, RLWIMI, RLWIMI_)):
m.d.comb += illegal_insn.mask.eq((self.insn.MB >= 32) | (self.insn.ME >= 32))

# Shift amount : SH or (RB)(59:63)
with m.If(illegal_insn.any()):
if self.pfv.illegal_insn_heai:
raise NotImplementedError

if isinstance(self.insn, (RLWINM, RLWINM_, RLWIMI, RLWIMI_, SRAWI, SRAWI_)):
m.d.comb += shamt.eq(self.insn.SH)
elif isinstance(self.insn, (RLWNM, RLWNM_, SLW, SLW_, SRW, SRW_, SRAW, SRAW_)):
m.d.comb += [
self.pfv.rb.index.eq(self.insn.RB),
self.pfv.rb.r_stb.eq(1),
shamt.eq(self.pfv.rb.r_data[:6]),
self.pfv.intr.eq(1),
self.pfv.nia .eq(INTR_PROGRAM.vector_addr),
INTR_PROGRAM.write_msr(self.pfv.msr),

self.pfv.srr0.w_mask.eq(Repl(1, 64)),
self.pfv.srr0.w_data.eq(iea(self.pfv.cia, self.pfv.msr.r_data.sf)),

self.pfv.srr1.w_mask[63-36:64-33].eq(Repl(1, 4)),
self.pfv.srr1.w_data[63-36:64-33].eq(0),
self.pfv.srr1.w_mask[63-42].eq(1),
self.pfv.srr1.w_data[63-42].eq(0),
self.pfv.srr1.w_mask[63-46:64-43].eq(Repl(1, 4)),
self.pfv.srr1.w_data[63-46:64-43].eq(0b0100), # Illegal Instruction type (deprecated)
self.pfv.srr1.w_mask[63-47].eq(1),
self.pfv.srr1.w_data[63-47].eq(0),

msr_to_srr1(self.pfv.msr, self.pfv.srr1, 0, 32),
msr_to_srr1(self.pfv.msr, self.pfv.srr1, 37, 41),
msr_to_srr1(self.pfv.msr, self.pfv.srr1, 48, 63),
]
else:
assert False

# Mask
with m.Else():
src = Signal(unsigned(64))
shamt = Signal(unsigned( 6))
rotl = Signal(unsigned(64))
mask = Signal(unsigned(64))
result = Signal(unsigned(64))

def _mask(mstart, mstop):
mask = ((1 << 64-mstart) - 1) & ~((1 << 63-mstop ) - 1)
mask_inv = ~((1 << 63-mstop ) - 1) | ((1 << 64-mstart) - 1)
return Mux(mstart <= mstop, mask, mask_inv)
# Source operand : (RS)(32:63)||(RS)(32:63)

if isinstance(self.insn, (RLWINM, RLWINM_, RLWNM, RLWNM_, RLWIMI, RLWIMI_)):
m.d.comb += mask.eq(_mask(self.insn.MB+32, self.insn.ME+32))
elif isinstance(self.insn, (SLW, SLW_)):
m.d.comb += mask.eq(Mux(shamt[5], 0, _mask(32, 63-shamt)))
elif isinstance(self.insn, (SRW, SRW_, SRAW, SRAW_)):
m.d.comb += mask.eq(Mux(shamt[5], 0, _mask(shamt+32, 63)))
elif isinstance(self.insn, (SRAWI, SRAWI_)):
m.d.comb += mask.eq(_mask(shamt+32, 63))
else:
assert False

# Rotation

def _rotl32(src, n):
v = Repl(src[:32], 2)
return ((v << n) | (v >> 64-n)) & Repl(1, 64)

if isinstance(self.insn, (
RLWINM, RLWINM_, RLWNM, RLWNM_, RLWIMI, RLWIMI_,
SLW, SLW_,
)):
m.d.comb += rotl.eq(_rotl32(src, shamt))
elif isinstance(self.insn, (SRW, SRW_, SRAWI, SRAWI_, SRAW, SRAW_)):
m.d.comb += rotl.eq(_rotl32(src, 64-shamt))
else:
assert False

# Write result to RA

m.d.comb += [
self.pfv.ra.index .eq(self.insn.RA),
self.pfv.ra.w_stb .eq(1),
self.pfv.ra.w_data.eq(result),
]
m.d.comb += [
self.pfv.rs.index.eq(self.insn.RS),
self.pfv.rs.r_stb.eq(1),
src.eq(self.pfv.rs.r_data),
]

if isinstance(self.insn, (RLWINM, RLWINM_, RLWNM, RLWNM_, SLW, SLW_, SRW, SRW_)):
m.d.comb += result.eq(rotl & mask)
elif isinstance(self.insn, (RLWIMI, RLWIMI_)):
m.d.comb += self.pfv.ra.r_stb.eq(1)
m.d.comb += result.eq(rotl & mask | self.pfv.ra.r_data & ~mask)
elif isinstance(self.insn, (SRAWI, SRAWI_, SRAW, SRAW_)):
m.d.comb += result.eq(rotl & mask | Repl(src[31], 64) & ~mask)
else:
assert False
# Shift amount : SH or (RB)(59:63)

if isinstance(self.insn, (RLWINM, RLWINM_, RLWIMI, RLWIMI_, SRAWI, SRAWI_)):
m.d.comb += shamt.eq(self.insn.SH)
elif isinstance(self.insn, (RLWNM, RLWNM_, SLW, SLW_, SRW, SRW_, SRAW, SRAW_)):
m.d.comb += [
self.pfv.rb.index.eq(self.insn.RB),
self.pfv.rb.r_stb.eq(1),
shamt.eq(self.pfv.rb.r_data[:6]),
]
else:
assert False

# Mask

def _mask(mstart, mstop):
mask = ((1 << 64-mstart) - 1) & ~((1 << 63-mstop ) - 1)
mask_inv = ~((1 << 63-mstop ) - 1) | ((1 << 64-mstart) - 1)
return Mux(mstart <= mstop, mask, mask_inv)
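# Worked example (illustrative, not from the commit): _mask(32, 63) yields
# ones in PowerISA bit positions 32..63, i.e. 0x00000000FFFFFFFF; when
# mstart > mstop the mask wraps around, matching the MB > ME case of the
# rlwinm-family mask definition.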

if isinstance(self.insn, (RLWINM, RLWINM_, RLWNM, RLWNM_, RLWIMI, RLWIMI_)):
m.d.comb += mask.eq(_mask(self.insn.MB+32, self.insn.ME+32))
elif isinstance(self.insn, (SLW, SLW_)):
m.d.comb += mask.eq(Mux(shamt[5], 0, _mask(32, 63-shamt)))
elif isinstance(self.insn, (SRW, SRW_, SRAW, SRAW_)):
m.d.comb += mask.eq(Mux(shamt[5], 0, _mask(shamt+32, 63)))
elif isinstance(self.insn, (SRAWI, SRAWI_)):
m.d.comb += mask.eq(_mask(shamt+32, 63))
else:
assert False

# Rotation

def _rotl32(src, n):
v = Repl(src[:32], 2)
return ((v << n) | (v >> 64-n)) & Repl(1, 64)
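# Worked example (illustrative): duplicating the low word makes a 64-bit
# shift act as a 32-bit rotate in both halves; for src[:32] = 0x80000001 and
# n = 1, _rotl32 returns 0x0000000300000003.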

if isinstance(self.insn, (
RLWINM, RLWINM_, RLWNM, RLWNM_, RLWIMI, RLWIMI_,
SLW, SLW_,
)):
m.d.comb += rotl.eq(_rotl32(src, shamt))
elif isinstance(self.insn, (SRW, SRW_, SRAWI, SRAWI_, SRAW, SRAW_)):
m.d.comb += rotl.eq(_rotl32(src, 64-shamt))
else:
assert False

# Write result to RA

# Write CR0
m.d.comb += [
self.pfv.ra.index .eq(self.insn.RA),
self.pfv.ra.w_stb .eq(1),
self.pfv.ra.w_data.eq(result),
]

if isinstance(self.insn, (
RLWINM_, RLWNM_, RLWIMI_, SLW_, SRW_, SRAWI_, SRAW_,
)):
cr0_w_mask = Record([("so", 1), ("eq_", 1), ("gt", 1), ("lt", 1)])
cr0_w_data = Record([("so", 1), ("eq_", 1), ("gt", 1), ("lt", 1)])
if isinstance(self.insn, (RLWINM, RLWINM_, RLWNM, RLWNM_, SLW, SLW_, SRW, SRW_)):
m.d.comb += result.eq(rotl & mask)
elif isinstance(self.insn, (RLWIMI, RLWIMI_)):
m.d.comb += self.pfv.ra.r_stb.eq(1)
m.d.comb += result.eq(rotl & mask | self.pfv.ra.r_data & ~mask)
elif isinstance(self.insn, (SRAWI, SRAWI_, SRAW, SRAW_)):
m.d.comb += result.eq(rotl & mask | Repl(src[31], 64) & ~mask)
else:
assert False

m.d.comb += [
self.pfv.xer.r_mask.so.eq(1),
# Write CR0

cr0_w_mask .eq(0b1111),
cr0_w_data.so .eq(self.pfv.xer.r_data.so),
cr0_w_data.eq_.eq(~Mux(self.pfv.msr.r_data.sf, result[:64].any(), result[:32].any())),
cr0_w_data.gt .eq(~(cr0_w_data.lt | cr0_w_data.eq_)),
cr0_w_data.lt .eq(Mux(self.pfv.msr.r_data.sf, result[63], result[31])),
if isinstance(self.insn, (
RLWINM_, RLWNM_, RLWIMI_, SLW_, SRW_, SRAWI_, SRAW_,
)):
cr0_w_mask = Record([("so", 1), ("eq_", 1), ("gt", 1), ("lt", 1)])
cr0_w_data = Record([("so", 1), ("eq_", 1), ("gt", 1), ("lt", 1)])

self.pfv.cr.w_mask.cr0.eq(cr0_w_mask),
self.pfv.cr.w_data.cr0.eq(cr0_w_data),
]
m.d.comb += [
self.pfv.xer.r_mask.so.eq(1),

# Write XER
cr0_w_mask .eq(0b1111),
cr0_w_data.so .eq(self.pfv.xer.r_data.so),
cr0_w_data.eq_.eq(~Mux(self.pfv.msr.r_data.sf, result[:64].any(), result[:32].any())),
cr0_w_data.gt .eq(~(cr0_w_data.lt | cr0_w_data.eq_)),
cr0_w_data.lt .eq(Mux(self.pfv.msr.r_data.sf, result[63], result[31])),

if isinstance(self.insn, (SRAWI, SRAWI_, SRAW, SRAW_)):
carry = Signal()
self.pfv.cr.w_mask.cr0.eq(cr0_w_mask),
self.pfv.cr.w_data.cr0.eq(cr0_w_data),
]

m.d.comb += [
carry.eq(src[31] & (rotl & ~mask)[:32].any()),
# Write XER

self.pfv.xer.w_mask.ca .eq(1),
self.pfv.xer.w_data.ca .eq(carry),
self.pfv.xer.w_mask.ca32.eq(1),
self.pfv.xer.w_data.ca32.eq(carry),
]
if isinstance(self.insn, (SRAWI, SRAWI_, SRAW, SRAW_)):
carry = Signal()

# Interrupt causes
m.d.comb += [
carry.eq(src[31] & (rotl & ~mask)[:32].any()),

intr = Record([
("rotl32_mask", 1),
])
self.pfv.xer.w_mask.ca .eq(1),
self.pfv.xer.w_data.ca .eq(carry),
self.pfv.xer.w_mask.ca32.eq(1),
self.pfv.xer.w_data.ca32.eq(carry),
]
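# Rationale (per the PowerISA sraw/srawi definition): CA is set iff the
# source is negative (src[31]) and any 1-bits are shifted out of the low
# 32 bits, which is what (rotl & ~mask)[:32].any() captures.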

if isinstance(self.insn, (RLWINM, RLWINM_, RLWNM, RLWNM_, RLWIMI, RLWIMI_)):
m.d.comb += intr.rotl32_mask.eq((self.insn.MB >= 32) | (self.insn.ME >= 32))
else:
m.d.comb += intr.rotl32_mask.eq(0)
# Update NIA

m.d.comb += self.pfv.intr.eq(intr.any())
m.d.comb += self.pfv.nia.eq(iea(self.pfv.cia + 4, self.pfv.msr.r_data.sf))

return m

@@ -2,10 +2,11 @@ from amaranth import *

from power_fv import pfv
from power_fv.insn.const import *
from power_fv.intr import *
from power_fv.reg import xer_layout

from . import InsnSpec
from .utils import iea
from .utils import iea, msr_to_srr1


__all__ = ["SPRMoveSpec"]
@@ -18,12 +19,11 @@ class SPRMoveSpec(InsnSpec, Elaboratable):
m.d.comb += [
self.pfv.stb .eq(1),
self.pfv.insn.eq(Cat(Const(0, 32), self.insn.as_value())),
self.pfv.nia .eq(iea(self.pfv.cia + 4, self.pfv.msr.r_data.sf)),
self.pfv.msr.r_mask.sf.eq(1),
self.pfv.msr.r_mask.pr.eq(1),
]

# If SPR(0) = 1, this instruction is privileged.
# If SPR(0)=1, raise a Program Interrupt if executing from Problem State

spr_privileged = Signal()
spr_access_err = Signal()
@@ -31,62 +31,89 @@ class SPRMoveSpec(InsnSpec, Elaboratable):
m.d.comb += [
spr_privileged.eq(self.insn.SPR[9 - 0]),
spr_access_err.eq(spr_privileged & self.pfv.msr.r_data.pr),
self.pfv.intr.eq(spr_access_err),
]

def mXspr_spec(pfv_spr, mtspr_cls, mfspr_cls, reserved_mask):
if isinstance(self.insn, mtspr_cls):
# Copy (RS) to SPR.
m.d.comb += [
self.pfv.rs.index.eq(self.insn.RS),
self.pfv.rs.r_stb.eq(1),
pfv_spr.w_mask.eq(~reserved_mask),
pfv_spr.w_data.eq(self.pfv.rs.r_data & ~reserved_mask),
]
with m.If(spr_access_err):
m.d.comb += [
self.pfv.intr.eq(1),
self.pfv.nia .eq(INTR_PROGRAM.vector_addr),
INTR_PROGRAM.write_msr(self.pfv.msr),

self.pfv.srr0.w_mask.eq(Repl(1, 64)),
self.pfv.srr0.w_data.eq(iea(self.pfv.cia, self.pfv.msr.r_data.sf)),

self.pfv.srr1.w_mask[63-36:64-33].eq(Repl(1, 4)),
self.pfv.srr1.w_data[63-36:64-33].eq(0),
self.pfv.srr1.w_mask[63-42].eq(1),
self.pfv.srr1.w_data[63-42].eq(0),
self.pfv.srr1.w_mask[63-46:64-43].eq(Repl(1, 4)),
self.pfv.srr1.w_data[63-46:64-43].eq(0b0010), # Privileged Instruction type
self.pfv.srr1.w_mask[63-47].eq(1),
self.pfv.srr1.w_data[63-47].eq(0),

msr_to_srr1(self.pfv.msr, self.pfv.srr1, 0, 32),
msr_to_srr1(self.pfv.msr, self.pfv.srr1, 37, 41),
msr_to_srr1(self.pfv.msr, self.pfv.srr1, 48, 63),
]

if isinstance(self.insn, mfspr_cls):
# Copy SPR to (RT).
with m.Else():
def mXspr_spec(pfv_spr, mtspr_cls, mfspr_cls, reserved_mask):
if isinstance(self.insn, mtspr_cls):
# Copy (RS) to SPR.
m.d.comb += [
self.pfv.rs.index.eq(self.insn.RS),
self.pfv.rs.r_stb.eq(1),
pfv_spr.w_mask.eq(~reserved_mask),
pfv_spr.w_data.eq(self.pfv.rs.r_data & ~reserved_mask),
]

if isinstance(self.insn, mfspr_cls):
# Copy SPR to (RT).
m.d.comb += [
self.pfv.rt.index.eq(self.insn.RT),
self.pfv.rt.w_stb.eq(1),
pfv_spr.r_mask.eq(~reserved_mask),
]
# In problem state, reading reserved bits returns 0.
with m.If(self.pfv.msr.r_data.pr):
m.d.comb += self.pfv.rt.w_data.eq(pfv_spr.r_data & ~reserved_mask)
with m.Else():
m.d.comb += self.pfv.rt.w_data.eq(pfv_spr.r_data)

if isinstance(self.insn, (MTXER, MFXER)):
xer_reserved_mask = Record(xer_layout)
m.d.comb += [
self.pfv.rt.index.eq(self.insn.RT),
self.pfv.rt.w_stb.eq(1),
pfv_spr.r_mask.eq(~reserved_mask),
xer_reserved_mask._56.eq(Repl(1, 1)),
xer_reserved_mask._46.eq(Repl(1, 2)),
xer_reserved_mask._35.eq(Repl(1, 9)),
xer_reserved_mask._0 .eq(Repl(1, 32)),
]
# In problem state, reading reserved bits returns 0.
with m.If(self.pfv.msr.r_data.pr):
m.d.comb += self.pfv.rt.w_data.eq(pfv_spr.r_data & ~reserved_mask)
with m.Else():
m.d.comb += self.pfv.rt.w_data.eq(pfv_spr.r_data)

if isinstance(self.insn, (MTXER, MFXER)):
xer_reserved_mask = Record(xer_layout)
m.d.comb += [
xer_reserved_mask._56.eq(Repl(1, 1)),
xer_reserved_mask._46.eq(Repl(1, 2)),
xer_reserved_mask._35.eq(Repl(1, 9)),
xer_reserved_mask._0 .eq(Repl(1, 32)),
]
mXspr_spec(self.pfv.xer, MTXER, MFXER, xer_reserved_mask)
mXspr_spec(self.pfv.xer, MTXER, MFXER, xer_reserved_mask)

elif isinstance(self.insn, (MTLR, MFLR)):
mXspr_spec(self.pfv.lr, MTLR, MFLR, Const(0, 64))

elif isinstance(self.insn, (MTCTR, MFCTR)):
mXspr_spec(self.pfv.ctr, MTCTR, MFCTR, Const(0, 64))

elif isinstance(self.insn, (MTLR, MFLR)):
mXspr_spec(self.pfv.lr, MTLR, MFLR, Const(0, 64))
elif isinstance(self.insn, (MTSRR0, MFSRR0)):
mXspr_spec(self.pfv.srr0, MTSRR0, MFSRR0, Const(0, 64))

elif isinstance(self.insn, (MTCTR, MFCTR)):
mXspr_spec(self.pfv.ctr, MTCTR, MFCTR, Const(0, 64))
elif isinstance(self.insn, (MTSRR1, MFSRR1)):
# SRR1 bits should be treated as reserved if their corresponding MSR bits are also
# reserved, which is implementation-specific.
# We treat all bits as defined for now, but this may cause false positives.
srr1_reserved_mask = Const(0, 64)
mXspr_spec(self.pfv.srr1, MTSRR1, MFSRR1, srr1_reserved_mask)

elif isinstance(self.insn, (MTSRR0, MFSRR0)):
mXspr_spec(self.pfv.srr0, MTSRR0, MFSRR0, Const(0, 64))
elif isinstance(self.insn, (MTTAR, MFTAR)):
mXspr_spec(self.pfv.tar, MTTAR, MFTAR, Const(0, 64))

elif isinstance(self.insn, (MTSRR1, MFSRR1)):
# SRR1 bits should be treated as reserved if their corresponding MSR bits are also
# reserved, which is implementation-specific.
# We treat all bits as defined for now, but this may cause false positives.
srr1_reserved_mask = Const(0, 64)
mXspr_spec(self.pfv.srr1, MTSRR1, MFSRR1, srr1_reserved_mask)
else:
assert False

elif isinstance(self.insn, (MTTAR, MFTAR)):
mXspr_spec(self.pfv.tar, MTTAR, MFTAR, Const(0, 64))
# Update NIA

else:
assert False
m.d.comb += self.pfv.nia.eq(iea(self.pfv.cia + 4, self.pfv.msr.r_data.sf))

return m

@@ -2,9 +2,10 @@ from amaranth import *

from power_fv import pfv
from power_fv.insn.const import *
from power_fv.intr import *

from . import InsnSpec
from .utils import iea
from .utils import iea, msr_to_srr1


__all__ = ["SystemCallSpec"]
@@ -17,19 +18,15 @@ class SystemCallSpec(InsnSpec, Elaboratable):
m.d.comb += [
self.pfv.stb .eq(1),
self.pfv.insn.eq(Cat(Const(0, 32), self.insn.as_value())),
self.pfv.msr.r_mask.sf.eq(1),
]

if isinstance(self.insn, SC):
def _msr_to_srr1(start, stop):
stmts = [
self.pfv.msr .r_mask[63-stop:64-start].eq(Repl(1, stop-start+1)),
self.pfv.srr1.w_mask[63-stop:64-start].eq(Repl(1, stop-start+1)),
self.pfv.srr1.w_data[63-stop:64-start].eq(self.pfv.msr.r_data[63-stop:64-start]),
]
return stmts

m.d.comb += [
self.pfv.intr.eq(1),
self.pfv.nia .eq(INTR_SYSTEM_CALL.vector_addr),
INTR_SYSTEM_CALL.write_msr(self.pfv.msr),

self.pfv.msr .r_mask.sf.eq(1),
self.pfv.srr0.w_mask.eq(Repl(1, 64)),
self.pfv.srr0.w_data.eq(iea(self.pfv.cia + 4, self.pfv.msr.r_data.sf)),

@@ -38,14 +35,10 @@ class SystemCallSpec(InsnSpec, Elaboratable):
self.pfv.srr1.w_mask[63-47:64-42].eq(Repl(1, 6)),
self.pfv.srr1.w_data[63-47:64-42].eq(0),

_msr_to_srr1( 0, 32),
_msr_to_srr1(37, 41),
_msr_to_srr1(48, 63),

self.pfv.intr.eq(1),
self.pfv.nia .eq(0xc00),
msr_to_srr1(self.pfv.msr, self.pfv.srr1, 0, 32),
msr_to_srr1(self.pfv.msr, self.pfv.srr1, 37, 41),
msr_to_srr1(self.pfv.msr, self.pfv.srr1, 48, 63),
]
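# Note: unlike the Program interrupt paths above, which save CIA into SRR0,
# the System Call interrupt saves CIA + 4, so execution resumes at the
# instruction following the sc.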

else:
assert False


@@ -2,9 +2,10 @@ from amaranth import *

from power_fv import pfv
from power_fv.insn.const import *
from power_fv.intr import *

from . import InsnSpec
from .utils import iea
from .utils import iea, msr_to_srr1


__all__ = ["TrapSpec"]
@@ -17,8 +18,6 @@ class TrapSpec(InsnSpec, Elaboratable):
m.d.comb += [
self.pfv.stb .eq(1),
self.pfv.insn.eq(Cat(Const(0, 32), self.insn.as_value())),
self.pfv.nia .eq(iea(self.pfv.cia + 4, self.pfv.msr.r_data.sf)),
self.pfv.msr.r_mask.sf.eq(1),
]

src_a = Signal(signed(64))
@@ -47,7 +46,7 @@ class TrapSpec(InsnSpec, Elaboratable):
else:
assert False

# Compare operands, then trap if a condition is met.
# Compare operands

m.d.comb += [
cond.eq(self.insn.TO),
@@ -57,8 +56,36 @@ class TrapSpec(InsnSpec, Elaboratable):
trap.eq_.eq(cond.eq_ & (src_a == src_b)),
trap.ltu.eq(cond.ltu & (src_a.as_unsigned() < src_b.as_unsigned())),
trap.gtu.eq(cond.gtu & (src_a.as_unsigned() > src_b.as_unsigned())),

self.pfv.intr.eq(trap.any()),
]

# Trap if a condition is met

m.d.comb += self.pfv.msr.r_mask.sf.eq(1)

with m.If(trap.any()):
m.d.comb += [
self.pfv.intr.eq(1),
self.pfv.nia .eq(INTR_PROGRAM.vector_addr),
INTR_PROGRAM.write_msr(self.pfv.msr),

self.pfv.srr0.w_mask.eq(Repl(1, 64)),
self.pfv.srr0.w_data.eq(iea(self.pfv.cia, self.pfv.msr.r_data.sf)),

self.pfv.srr1.w_mask[63-36:64-33].eq(0xf),
self.pfv.srr1.w_data[63-36:64-33].eq(0x0),

self.pfv.srr1.w_mask[63-42].eq(1),
self.pfv.srr1.w_data[63-42].eq(0),
self.pfv.srr1.w_mask[63-46:64-43].eq(Repl(1, 4)),
self.pfv.srr1.w_data[63-46:64-43].eq(0b0001), # Trap type
self.pfv.srr1.w_mask[63-47].eq(1),
self.pfv.srr1.w_data[63-47].eq(0),

msr_to_srr1(self.pfv.msr, self.pfv.srr1, 0, 32),
msr_to_srr1(self.pfv.msr, self.pfv.srr1, 37, 41),
msr_to_srr1(self.pfv.msr, self.pfv.srr1, 48, 63),
]
with m.Else():
m.d.comb += self.pfv.nia.eq(iea(self.pfv.cia + 4, self.pfv.msr.r_data.sf))

return m

@@ -1,7 +1,7 @@
from amaranth import *


__all__ = ["iea", "byte_reversed"]
__all__ = ["iea", "byte_reversed", "msr_to_srr1"]


def iea(addr, msr_sf):
@@ -17,3 +17,12 @@ def byte_reversed(src, en=0):
assert len(src) in {8, 16, 32, 64}
res = Cat(src.word_select(i, width=8) for i in reversed(range(len(src) // 8)))
return Mux(en, res, src)
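# Worked example (illustrative): byte_reversed(Const(0x1234, 16), en=1)
# evaluates to 0x3412; with en=0 the value passes through unchanged.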


def msr_to_srr1(msr, srr1, start, stop):
stmts = [
msr .r_mask[63-stop:64-start].eq(Repl(1, stop-start+1)),
srr1.w_mask[63-stop:64-start].eq(Repl(1, stop-start+1)),
srr1.w_data[63-stop:64-start].eq(msr.r_data[63-stop:64-start]),
]
return stmts
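# Minimal usage sketch (assuming `pfv` is a pfv.Interface instance): copy
# MSR(0:32), MSR(37:41) and MSR(48:63) into SRR1 on an interrupt, exactly as
# the interrupt paths above do:
#
#   m.d.comb += [
#       msr_to_srr1(pfv.msr, pfv.srr1, 0, 32),
#       msr_to_srr1(pfv.msr, pfv.srr1, 37, 41),
#       msr_to_srr1(pfv.msr, pfv.srr1, 48, 63),
#   ]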

@@ -0,0 +1,60 @@
__all__ = [
"Interrupt",
"INTR_ALIGNMENT",
"INTR_PROGRAM",
"INTR_SYSTEM_CALL",
]


class Interrupt:
def __init__(self, vector_addr, ir, dr, ee, ri, me, hv, s):
self.vector_addr = vector_addr
self.ir = ir
self.dr = dr
self.ee = ee
self.ri = ri
self.me = me
self.hv = hv
self.s = s

def write_msr(self, msr):
def _write_field(field, value):
stmts = []
if value is not None:
stmts.append(getattr(msr.w_mask, field).eq(-1))
stmts.append(getattr(msr.w_data, field).eq(value))
return stmts

# See PowerISA v3.1, Book III, Section 7.5, Figure 67
stmts = [
_write_field("ir" , self.ir),
_write_field("dr" , self.dr),
_write_field("fe0", 0),
_write_field("fe1", 0),
_write_field("ee" , self.ee),
_write_field("ri" , self.ri),
_write_field("me" , self.me),
_write_field("hv" , self.hv),
_write_field("s" , self.s),

_write_field("pr" , 0),
_write_field("pmm", 0),
_write_field("te" , 0),
_write_field("fp" , 0),
_write_field("vec", 0),
_write_field("vsx", 0),
_write_field("sf" , 1),

msr.w_mask[63- 5].eq(1),
msr.w_data[63- 5].eq(0),
msr.w_mask[63-31].eq(1),
msr.w_data[63-31].eq(0),
]
return stmts


# TODO: Support MSR.{IR,DR,HV,S,LE} bits, which depend on context (e.g. LPCR)

INTR_ALIGNMENT = Interrupt(0x600, ir=None, dr=None, ee=0, ri=0, me=None, hv=None, s=None)
INTR_PROGRAM = Interrupt(0x700, ir=None, dr=None, ee=0, ri=0, me=None, hv=None, s=None)
INTR_SYSTEM_CALL = Interrupt(0xC00, ir=None, dr=None, ee=0, ri=0, me=None, hv=None, s=None)
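# Usage sketch (mirrors the instruction specs above; `self.pfv` is assumed
# to be a pfv.Interface): on taking an interrupt, a spec redirects NIA to
# the vector and applies the architected MSR updates:
#
#   m.d.comb += [
#       self.pfv.intr.eq(1),
#       self.pfv.nia .eq(INTR_PROGRAM.vector_addr),
#       INTR_PROGRAM.write_msr(self.pfv.msr),
#   ]
#
# Fields constructed as None (e.g. ir/dr above) are context-dependent and
# are left unwritten rather than constrained.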

@@ -51,8 +51,10 @@ class Interface(Record):
Instruction strobe. Asserted when the processor retires an instruction. Other signals are
only valid when ``stb`` is asserted.
"""
def __init__(self, *, mem_aligned=False, name=None, src_loc_at=0):
self.mem_aligned = bool(mem_aligned)
def __init__(self, *, mem_aligned=False, illegal_insn_heai=False,
name=None, src_loc_at=0):
self.mem_aligned = bool(mem_aligned)
self.illegal_insn_heai = bool(illegal_insn_heai)
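# `illegal_insn_heai` selects how illegal instructions are modeled (a
# reading of its use in the specs above): when set, they would take the
# Hypervisor Emulation Assistance interrupt (not implemented yet, hence the
# NotImplementedError); when unset, they take a Program interrupt with the
# deprecated "illegal instruction" reason code.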

layout = [
("stb" , unsigned( 1)),
