--
-- Set associative dcache write-through
--
--
library ieee;
use ieee.std_logic_1164.all;
use ieee.numeric_std.all;

library work;
use work.utils.all;
use work.common.all;
use work.helpers.all;
use work.wishbone_types.all;

entity dcache is
    generic (
        -- Line size in bytes
        LINE_SIZE : positive := 64;
        -- Number of lines (sets) per way
        NUM_LINES : positive := 32;
        -- Number of ways
        NUM_WAYS : positive := 4;
        -- L1 DTLB number of sets (entries per way)
        TLB_SET_SIZE : positive := 64;
        -- L1 DTLB number of ways
        TLB_NUM_WAYS : positive := 2;
        -- L1 DTLB log_2(page_size)
        TLB_LG_PGSZ : positive := 12;
        -- Non-zero to enable log data collection
        LOG_LENGTH : natural := 0
        );
    port (
        clk : in std_ulogic;
        rst : in std_ulogic;

        d_in  : in Loadstore1ToDcacheType;
        d_out : out DcacheToLoadstore1Type;

        m_in  : in MmuToDcacheType;
        m_out : out DcacheToMmuType;

        snoop_in : in wishbone_master_out := wishbone_master_out_init;

        stall_out : out std_ulogic;

        wishbone_out : out wishbone_master_out;
        wishbone_in  : in wishbone_slave_out;

        events : out DcacheEventType;

        log_out : out std_ulogic_vector(19 downto 0)
        );
end entity dcache;

architecture rtl of dcache is
    -- BRAM organisation: We never access more than wishbone_data_bits at
    -- a time so to save resources we make the array only that wide, and
    -- use consecutive indices to make a cache "line"
    --
    -- ROW_SIZE is the width in bytes of the BRAM (based on WB, so 64-bits)
    constant ROW_SIZE : natural := wishbone_data_bits / 8;
    -- ROW_PER_LINE is the number of rows (wishbone transactions) in a line
    constant ROW_PER_LINE : natural := LINE_SIZE / ROW_SIZE;
    -- BRAM_ROWS is the number of rows in BRAM needed to represent the full
    -- dcache
    constant BRAM_ROWS : natural := NUM_LINES * ROW_PER_LINE;
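
    -- Worked example with the default generics (LINE_SIZE = 64,
    -- NUM_LINES = 32) and the 64-bit wishbone data bus:
    --   ROW_SIZE     = 64 / 8  = 8 bytes
    --   ROW_PER_LINE = 64 / 8  = 8 rows per line
    --   BRAM_ROWS    = 32 * 8  = 256 rows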

    -- Bit field counts in the address

    -- ROW_BITS is the number of bits to select a row
    constant ROW_BITS : natural := log2(BRAM_ROWS);
    -- ROW_LINEBITS is the number of bits to select a row within a line
    constant ROW_LINEBITS : natural := log2(ROW_PER_LINE);
    -- LINE_OFF_BITS is the number of bits for the offset in a cache line
    constant LINE_OFF_BITS : natural := log2(LINE_SIZE);
    -- ROW_OFF_BITS is the number of bits for the offset in a row
    constant ROW_OFF_BITS : natural := log2(ROW_SIZE);
    -- INDEX_BITS is the number of bits to select a cache line
    constant INDEX_BITS : natural := log2(NUM_LINES);
    -- SET_SIZE_BITS is the log base 2 of the set size
    constant SET_SIZE_BITS : natural := LINE_OFF_BITS + INDEX_BITS;
    -- TAG_BITS is the number of bits of the tag part of the address
    constant TAG_BITS : natural := REAL_ADDR_BITS - SET_SIZE_BITS;
    -- TAG_WIDTH is the width in bits of each way of the tag RAM
    constant TAG_WIDTH : natural := TAG_BITS + 7 - ((TAG_BITS + 7) mod 8);
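    -- e.g. for the 45-bit tags in the layout example below, this rounds
    -- up to the next multiple of 8: TAG_WIDTH = 45 + 7 - (52 mod 8) = 48.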
    -- WAY_BITS is the number of bits to select a way
    constant WAY_BITS : natural := log2(NUM_WAYS);

    -- Example of layout for 32 lines of 64 bytes:
    --
    -- ..  tag  |index|  line  |
    -- ..         |   row   |
    -- ..            |---|      ROW_LINEBITS  (3)
    -- ..            |--- - --| LINE_OFF_BITS (6)
    -- ..            |- --|     ROW_OFF_BITS  (3)
    -- ..    |----- ---|        ROW_BITS      (8)
    -- ..    |-----|            INDEX_BITS    (5)
    -- .. --------|             TAG_BITS      (45)

    subtype row_t is unsigned(ROW_BITS-1 downto 0);
    subtype index_t is unsigned(INDEX_BITS-1 downto 0);
    subtype way_t is unsigned(WAY_BITS-1 downto 0);
    subtype row_in_line_t is unsigned(ROW_LINEBITS-1 downto 0);

    -- The cache data BRAM organized as described above for each way
    subtype cache_row_t is std_ulogic_vector(wishbone_data_bits-1 downto 0);

    -- The cache tags LUTRAM has a row per set. Vivado is a pain and will
    -- not handle a clean (commented) definition of the cache tags as a 3d
    -- memory. For now, work around it by putting all the tags of a set
    -- in a single vector:
    subtype cache_tag_t is std_logic_vector(TAG_BITS-1 downto 0);
    -- type cache_tags_set_t is array(way_t) of cache_tag_t;
    -- type cache_tags_array_t is array(0 to NUM_LINES-1) of cache_tags_set_t;
    constant TAG_RAM_WIDTH : natural := TAG_WIDTH * NUM_WAYS;
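    -- e.g. 4 ways of 48-bit padded tags = 192 bits of tag RAM per set
    -- (values for the default generics; see TAG_WIDTH above).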
    subtype cache_tags_set_t is std_logic_vector(TAG_RAM_WIDTH-1 downto 0);
    type cache_tags_array_t is array(0 to NUM_LINES-1) of cache_tags_set_t;

    -- The cache valid bits
    subtype cache_way_valids_t is std_ulogic_vector(NUM_WAYS-1 downto 0);
    type cache_valids_t is array(0 to NUM_LINES-1) of cache_way_valids_t;
    type row_per_line_valid_t is array(0 to ROW_PER_LINE - 1) of std_ulogic;

    -- Storage. Hopefully implemented in LUTs
    signal cache_tags    : cache_tags_array_t;
    signal cache_tag_set : cache_tags_set_t;
    signal cache_valids  : cache_valids_t;

    attribute ram_style : string;
    attribute ram_style of cache_tags : signal is "distributed";

    -- L1 TLB.
    constant TLB_SET_BITS : natural := log2(TLB_SET_SIZE);
    constant TLB_WAY_BITS : natural := log2(TLB_NUM_WAYS);
    constant TLB_EA_TAG_BITS : natural := 64 - (TLB_LG_PGSZ + TLB_SET_BITS);
    constant TLB_TAG_WAY_BITS : natural := TLB_NUM_WAYS * TLB_EA_TAG_BITS;
    constant TLB_PTE_BITS : natural := 64;
    constant TLB_PTE_WAY_BITS : natural := TLB_NUM_WAYS * TLB_PTE_BITS;
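    -- With the default generics: TLB_SET_BITS = 6, TLB_WAY_BITS = 1,
    -- TLB_EA_TAG_BITS = 64 - (12 + 6) = 46, TLB_TAG_WAY_BITS = 2 * 46 = 92
    -- and TLB_PTE_WAY_BITS = 2 * 64 = 128.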

    subtype tlb_way_t is integer range 0 to TLB_NUM_WAYS - 1;
    subtype tlb_way_sig_t is unsigned(TLB_WAY_BITS-1 downto 0);
    subtype tlb_index_t is integer range 0 to TLB_SET_SIZE - 1;
    subtype tlb_index_sig_t is unsigned(TLB_SET_BITS-1 downto 0);
    subtype tlb_way_valids_t is std_ulogic_vector(TLB_NUM_WAYS-1 downto 0);
    type tlb_valids_t is array(tlb_index_t) of tlb_way_valids_t;
    subtype tlb_tag_t is std_ulogic_vector(TLB_EA_TAG_BITS - 1 downto 0);
    subtype tlb_way_tags_t is std_ulogic_vector(TLB_TAG_WAY_BITS-1 downto 0);
    type tlb_tags_t is array(tlb_index_t) of tlb_way_tags_t;
    subtype tlb_pte_t is std_ulogic_vector(TLB_PTE_BITS - 1 downto 0);
    subtype tlb_way_ptes_t is std_ulogic_vector(TLB_PTE_WAY_BITS-1 downto 0);
    type tlb_ptes_t is array(tlb_index_t) of tlb_way_ptes_t;
    type hit_way_set_t is array(tlb_way_t) of way_t;

    signal dtlb_valids : tlb_valids_t;
    signal dtlb_tags   : tlb_tags_t;
    signal dtlb_ptes   : tlb_ptes_t;
    attribute ram_style of dtlb_tags : signal is "distributed";
    attribute ram_style of dtlb_ptes : signal is "distributed";

    -- Record for storing permission, attribute, etc. bits from a PTE
    type perm_attr_t is record
        reference : std_ulogic;
        changed   : std_ulogic;
        nocache   : std_ulogic;
        priv      : std_ulogic;
        rd_perm   : std_ulogic;
        wr_perm   : std_ulogic;
    end record;

    function extract_perm_attr(pte : std_ulogic_vector(TLB_PTE_BITS - 1 downto 0)) return perm_attr_t is
        variable pa : perm_attr_t;
    begin
        pa.reference := pte(8);
        pa.changed   := pte(7);
        pa.nocache   := pte(5);
        pa.priv      := pte(3);
        pa.rd_perm   := pte(2);
        pa.wr_perm   := pte(1);
        return pa;
    end;
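
    -- Worked example (illustrative values only): a PTE with
    -- pte(8 downto 0) = "110001110" decodes to reference = '1',
    -- changed = '1', nocache = '0', priv = '1', rd_perm = '1'
    -- and wr_perm = '1'.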

    constant real_mode_perm_attr : perm_attr_t := (nocache => '0', others => '1');

    -- Type of operation on a "valid" input
    type op_t is (OP_NONE,
                  OP_BAD,          -- NC cache hit, TLB miss, prot/RC failure
                  OP_STCX_FAIL,    -- conditional store w/o reservation
                  OP_LOAD_HIT,     -- Cache hit on load
                  OP_LOAD_MISS,    -- Load missing cache
                  OP_LOAD_NC,      -- Non-cacheable load
                  OP_STORE_HIT,    -- Store hitting cache
                  OP_STORE_MISS);  -- Store missing cache

    -- Cache state machine
    type state_t is (IDLE,              -- Normal load hit processing
                     RELOAD_WAIT_ACK,   -- Cache reload wait ack
                     STORE_WAIT_ACK,    -- Store wait ack
                     NC_LOAD_WAIT_ACK); -- Non-cacheable load wait ack
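
    -- A sketch of the main transitions (see the state machine in the
    -- architecture body for the full detail):
    --   IDLE -> RELOAD_WAIT_ACK  on a cacheable load miss (line refill)
    --   IDLE -> STORE_WAIT_ACK   on a store (dcbz takes a similar write
    --                            path but also fills the cache line)
    --   IDLE -> NC_LOAD_WAIT_ACK on a non-cacheable load
    --   each *_WAIT_ACK state returns to IDLE once its wishbone cycle
    --   completes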

    --
    -- Dcache operations:
    --
    -- In order to make timing, we use the BRAMs with an output buffer,
    -- which means that the BRAM output is delayed by an extra cycle.
    --
    -- Thus, the dcache has a 2-stage internal pipeline for cache hits
    -- with no stalls.  Stores also complete in 2 cycles in most
    -- circumstances.
    --
    -- A request proceeds through the pipeline as follows.
    --
    -- Cycle 0: Request is received from loadstore or mmu if either
    -- d_in.valid or m_in.valid is 1 (not both).  In this cycle portions
    -- of the address are presented to the TLB tag RAM and data RAM
    -- and the cache tag RAM and data RAM.
    --
    -- Clock edge between cycle 0 and cycle 1:
    -- Request is stored in r0 (assuming r0_full was 0).  TLB tag and
    -- data RAMs are read, and the cache tag RAM is read.  (Cache data
    -- comes out a cycle later due to its output register, giving the
    -- whole of cycle 1 to read the cache data RAM.)
    --
    -- Cycle 1: TLB and cache tag matching is done, the real address
    -- (RA) for the access is calculated, and the type of operation is
    -- determined (the OP_* values above).  This gives the TLB way for
    -- a TLB hit, and the cache way for a hit or the way to replace
    -- for a load miss.
    --
    -- Clock edge between cycle 1 and cycle 2:
    -- Request is stored in r1 (assuming r1.full was 0).
    -- The state machine transitions out of IDLE state for a load miss,
    -- a store, a dcbz, or a non-cacheable load.  r1.full is set to 1
    -- for a load miss, dcbz or non-cacheable load but not a store.
    --
    -- Cycle 2: Completion signals are asserted for a load hit,
    -- a store (excluding dcbz), a TLB operation, a conditional
    -- store which failed due to no matching reservation, or an error
    -- (cache hit on non-cacheable operation, TLB miss, or protection
    -- fault).
    --
    -- For a load miss, store, or dcbz, the state machine initiates
    -- a wishbone cycle, which takes at least 2 cycles.  For a store,
    -- if another store comes in with the same cache tag (and therefore
    -- to the same 4k page), it can be added on to the existing cycle,
    -- subject to some constraints.
    --
    -- While r1.full = 1, no new requests can go from r0 to r1, but
    -- requests can come in to r0 and be satisfied if they are
    -- cacheable load hits or stores with the same cache tag.
    --
    -- Writing to the cache data RAM is done at the clock edge
    -- at the end of cycle 2 for a store hit (excluding dcbz).
    -- Stores that miss are not written to the cache data RAM
    -- but are just stored through to memory.
    --
    -- Dcbz is done like a cache miss, but the wishbone cycle
    -- is a write rather than a read, and zeroes are written to
    -- the cache data RAM.  Thus dcbz will allocate the line in
    -- the cache as well as zeroing memory.
    --
    -- Since stores are written to the cache data RAM at the end of
    -- cycle 2, and loads can come in and hit on the data just stored,
    -- there is a two-stage bypass from store data to load data to
    -- make sure that loads always see previously-stored data even
    -- if it has not yet made it to the cache data RAM.
    --
    -- Load misses read the requested dword of the cache line first in
    -- the memory read request and then cycle around through the other
    -- dwords.  The load is completed on the cycle after the requested
    -- dword comes back from memory (using a forwarding path, rather
    -- than going via the cache data RAM).  We maintain an array of
    -- valid bits per dword for the line being refilled so that
    -- subsequent load requests to the same line can be completed as
    -- soon as the necessary data comes in from memory, without
    -- waiting for the whole line to be read.
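
    -- Putting the above together, the best-case load hit, assuming no
    -- stalls and r0/r1 both free (a sketch of the sequence described
    -- above, not additional behaviour):
    --   cycle 0: d_in.valid = 1; address presented to TLB and cache RAMs
    --   cycle 1: TLB and cache tag match succeed -> OP_LOAD_HIT
    --   cycle 2: completion is signalled to loadstore1 with the load data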

    -- Stage 0 register, basically contains just the latched request
    type reg_stage_0_t is record
        req   : Loadstore1ToDcacheType;
        tlbie : std_ulogic;     -- indicates a tlbie request (from MMU)
        doall : std_ulogic;     -- with tlbie, indicates flush whole TLB
        tlbld : std_ulogic;     -- indicates a TLB load request (from MMU)
        mmu_req : std_ulogic;   -- indicates source of request
        d_valid : std_ulogic;   -- indicates req.data is valid now
    end record;

    signal r0 : reg_stage_0_t;
    signal r0_full : std_ulogic;

    type mem_access_request_t is record
        op    : op_t;
        valid : std_ulogic;
        dcbz      : std_ulogic;
        real_addr : real_addr_t;
        data     : std_ulogic_vector(63 downto 0);
        byte_sel : std_ulogic_vector(7 downto 0);
        hit_way  : way_t;
        same_tag : std_ulogic;
        mmu_req  : std_ulogic;
    end record;

    -- First stage register, contains state for stage 1 of load hits
    -- and for the state machine used by all other operations
    --
    type reg_stage_1_t is record
        -- Info about the request
        full    : std_ulogic;           -- have uncompleted request
        mmu_req : std_ulogic;           -- request is from MMU
        req     : mem_access_request_t;

        -- Cache hit state
        hit_way        : way_t;
        hit_load_valid : std_ulogic;
        hit_index      : index_t;
        cache_hit      : std_ulogic;

        -- TLB hit state
        tlb_hit       : std_ulogic;
        tlb_hit_way   : tlb_way_sig_t;
        tlb_hit_index : tlb_index_sig_t;

        -- data buffer for data forwarded from writes to reads
        forward_data : std_ulogic_vector(63 downto 0);
        forward_tag  : cache_tag_t;
        forward_sel   : std_ulogic_vector(7 downto 0);
        forward_valid : std_ulogic;
        forward_row   : row_t;
        data_out      : std_ulogic_vector(63 downto 0);

        -- Cache miss state (reload state machine)
        state : state_t;
        dcbz       : std_ulogic;
        write_bram : std_ulogic;
        write_tag  : std_ulogic;
        slow_valid : std_ulogic;
        wb         : wishbone_master_out;
|
|
|
        reload_tag  : cache_tag_t;
        store_way   : way_t;
        store_row   : row_t;
        store_index : index_t;
        end_row_ix   : row_in_line_t;
        rows_valid   : row_per_line_valid_t;
        acks_pending : unsigned(2 downto 0);
        inc_acks     : std_ulogic;
        dec_acks     : std_ulogic;

        -- Signals to complete (possibly with error)
        ls_valid      : std_ulogic;
        ls_error      : std_ulogic;
        mmu_done      : std_ulogic;
        mmu_error     : std_ulogic;
        cache_paradox : std_ulogic;

        -- Signal to complete a failed stcx.
        stcx_fail : std_ulogic;
    end record;

    signal r1 : reg_stage_1_t;

    signal ev : DcacheEventType;

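The acks_pending field of r1 is the counter behind the limit, noted in the
commit message above, of 7 wishbone writes outstanding at once. A minimal
sketch of how such a counter can be maintained; pending, inc_ack and dec_ack
are hypothetical stand-ins, not the real r1 update logic:

    -- Illustrative sketch only, not the actual dcache logic.
    ack_count : process(clk)
    begin
        if rising_edge(clk) then
            if inc_ack = '1' and dec_ack = '0' then
                pending <= pending + 1;    -- a write command was sent
            elsif dec_ack = '1' and inc_ack = '0' then
                pending <= pending - 1;    -- an acknowledgement came back
            end if;
        end if;
    end process;
    -- A further store may be sent only while pending /= "111"; an eighth
    -- outstanding write would wrap the 3-bit counter.
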
    -- Reservation information
    --
    type reservation_t is record
        valid : std_ulogic;
        addr  : std_ulogic_vector(63 downto LINE_OFF_BITS);
    end record;

    signal reservation : reservation_t;

    -- Async signals on incoming request
    signal req_index    : index_t;
    signal req_hit_way  : way_t;
    signal req_tag      : cache_tag_t;
    signal req_op       : op_t;
    signal req_data     : std_ulogic_vector(63 downto 0);
    signal req_same_tag : std_ulogic;
    signal req_go       : std_ulogic;

    signal early_req_row  : row_t;
    signal early_rd_valid : std_ulogic;

    signal cancel_store : std_ulogic;
    signal set_rsrv     : std_ulogic;
    signal clear_rsrv   : std_ulogic;

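How these strobes and the reservation record fit together, as a hedged
sketch: a larx sets a reservation on its cache line, a conflicting event
clears it, and cancel_store presumably squashes a stcx. whose reservation is
gone, completing it via stcx_fail instead of issuing a write. req_addr below
is a hypothetical stand-in for the request address:

    -- Illustrative sketch only, not the actual dcache logic.
    rsrv_track : process(clk)
    begin
        if rising_edge(clk) then
            if rst = '1' or clear_rsrv = '1' then
                reservation.valid <= '0';    -- reservation lost or consumed
            elsif set_rsrv = '1' then
                reservation.valid <= '1';    -- larx establishes a reservation
                reservation.addr  <= req_addr(63 downto LINE_OFF_BITS);
            end if;
        end if;
    end process;
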
    signal r0_valid : std_ulogic;
    signal r0_stall : std_ulogic;

    signal fwd_same_tag : std_ulogic;

dcache: Fix bug with forwarding of stores
We have two stages of forwarding to cover the two cycles of latency
between when something is written to BRAM and when that new data can
be read from BRAM. When the writes to BRAM result from store
instructions, the write may write only some bytes of a row (8 bytes)
and not others, so we have a mask to enable only the written bytes to
be forwarded. However, we only forward written data from either the
first stage of forwarding or the second, not both. So if we have
two stores in succession that write different bytes of the same row,
and then a load from the row, we will only forward the data from the
second store, and miss the data from the first store; thus the load
will get the wrong value.
To fix this, we make the decision on which forward stage to use for
each byte individually. This results in a 4-input multiplexer feeding
r1.data_out, with its inputs being the BRAM, the wishbone, the current
write data, and the 2nd-stage forwarding register. Each byte of the
multiplexer is separately controlled. The code for this multiplexer
is moved to the dcache_fast_hit process since it is used for cache
hits as well as cache misses.
This also simplifies the BRAM code by ensuring that we can use the
same source for the BRAM address and way selection for writes, whether
we are writing store data or cache line refill data from memory.
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
    signal use_forward_st : std_ulogic;
    signal use_forward_rl : std_ulogic;
    signal use_forward2   : std_ulogic;

    -- Cache RAM interface
    type cache_ram_out_t is array(0 to NUM_WAYS-1) of cache_row_t;
    signal cache_out : cache_ram_out_t;

    signal ram_wr_data   : cache_row_t;
    signal ram_wr_select : std_ulogic_vector(ROW_SIZE - 1 downto 0);

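The forwarding-fix commit message above describes a per-byte, 4-input
multiplexer feeding r1.data_out. A sketch of its shape, using the
use_forward_* and ram_wr_* signals declared above; data_out, bram_data,
wb_data, fwd2_data and fwd2_sel are hypothetical names, and the priority
shown is illustrative rather than the actual implementation:

    -- Illustrative sketch only: per-byte selection between the current
    -- store data, refill data from the wishbone, the second-stage
    -- forwarding register, and the BRAM output.
    byte_fwd : process(all)
    begin
        for i in 0 to ROW_SIZE - 1 loop
            if use_forward_st = '1' and ram_wr_select(i) = '1' then
                data_out(i*8 + 7 downto i*8) <= ram_wr_data(i*8 + 7 downto i*8);
            elsif use_forward_rl = '1' then
                data_out(i*8 + 7 downto i*8) <= wb_data(i*8 + 7 downto i*8);
            elsif use_forward2 = '1' and fwd2_sel(i) = '1' then
                data_out(i*8 + 7 downto i*8) <= fwd2_data(i*8 + 7 downto i*8);
            else
                data_out(i*8 + 7 downto i*8) <= bram_data(i*8 + 7 downto i*8);
            end if;
        end loop;
    end process;
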
    -- PLRU output interface
    type plru_out_t is array(0 to NUM_LINES-1) of std_ulogic_vector(WAY_BITS-1 downto 0);
    signal plru_victim : plru_out_t;
    signal replace_way : way_t;

    -- Wishbone read/write/cache write formatting signals
    signal bus_sel : std_ulogic_vector(7 downto 0);

    -- TLB signals
    signal tlb_tag_way    : tlb_way_tags_t;
    signal tlb_pte_way    : tlb_way_ptes_t;
    signal tlb_valid_way  : tlb_way_valids_t;
    signal tlb_req_index  : tlb_index_sig_t;
    signal tlb_read_valid : std_ulogic;
    signal tlb_hit        : std_ulogic;
    signal tlb_hit_way    : tlb_way_sig_t;
    signal pte            : tlb_pte_t;
    signal ra             : real_addr_t;
    signal valid_ra       : std_ulogic;
    signal perm_attr      : perm_attr_t;
    signal rc_ok          : std_ulogic;
    signal perm_ok        : std_ulogic;
    signal access_ok      : std_ulogic;
    signal tlb_miss       : std_ulogic;

    -- TLB PLRU output interface
    type tlb_plru_out_t is array(tlb_index_t) of std_ulogic_vector(TLB_WAY_BITS-1 downto 0);
    signal tlb_plru_victim : tlb_plru_out_t;

    signal snoop_tag_set : cache_tags_set_t;
    signal snoop_valid   : std_ulogic;
    signal snoop_wrtag   : cache_tag_t;
    signal snoop_index   : index_t;

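These signals support the snoop_in port: a write by another wishbone master
is looked up in the tag set its address maps to, and any matching line is
invalidated. A minimal sketch, assuming a hypothetical per-line valid-bit
array cache_valids and using the read_tag helper defined below:

    -- Illustrative sketch only, not the actual snoop machinery.
    snoop_inval : process(clk)
    begin
        if rising_edge(clk) then
            if snoop_valid = '1' then
                for w in 0 to NUM_WAYS - 1 loop
                    if read_tag(w, snoop_tag_set) = snoop_wrtag then
                        cache_valids(to_integer(snoop_index))(w) <= '0';
                    end if;
                end loop;
            end if;
        end if;
    end process;
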
    --
    -- Helper functions to decode incoming requests
    --

    -- Return the cache line index (tag index) for an address
    function get_index(addr: std_ulogic_vector) return index_t is
    begin
        return unsigned(addr(SET_SIZE_BITS - 1 downto LINE_OFF_BITS));
    end;

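A worked example of the address decoding done by these helpers, with the
default generics (LINE_SIZE = 64 bytes, NUM_LINES = 32, a 64-bit wishbone):

    -- ROW_SIZE = 8 bytes, so ROW_OFF_BITS = 3 and ROW_LINEBITS = 3;
    -- LINE_OFF_BITS = 6 and INDEX_BITS = 5, hence SET_SIZE_BITS = 11.
    -- A real address therefore splits as:
    --   addr(5 downto 0)                    byte offset within the line
    --   addr(10 downto 6)                   cache line index (get_index)
    --   addr(REAL_ADDR_BITS-1 downto 11)    tag (get_tag)
    -- while get_row returns addr(10 downto 3), the BRAM row number.
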
    -- Return the cache row index (data memory) for an address
    function get_row(addr: std_ulogic_vector) return row_t is
    begin
        return unsigned(addr(SET_SIZE_BITS - 1 downto ROW_OFF_BITS));
    end;

    -- Return the index of a row within a line
    function get_row_of_line(row: row_t) return row_in_line_t is
    begin
        return row(ROW_LINEBITS-1 downto 0);
    end;

    -- Returns whether this is the last row of a line
    function is_last_row_wb_addr(addr: wishbone_addr_type; last: row_in_line_t) return boolean is
    begin
        return unsigned(addr(LINE_OFF_BITS - ROW_OFF_BITS - 1 downto 0)) = last;
    end;

    -- Returns whether this is the last row of a line
    function is_last_row(row: row_t; last: row_in_line_t) return boolean is
    begin
        return get_row_of_line(row) = last;
    end;

    -- Return the address of the next row in the current cache line
    function next_row_wb_addr(addr: wishbone_addr_type) return std_ulogic_vector is
        variable row_idx : std_ulogic_vector(ROW_LINEBITS-1 downto 0);
        variable result  : wishbone_addr_type;
    begin
        -- Is there no simpler way in VHDL to generate that 3-bit adder?
        row_idx := addr(ROW_LINEBITS - 1 downto 0);
        row_idx := std_ulogic_vector(unsigned(row_idx) + 1);
        result := addr;
        result(ROW_LINEBITS - 1 downto 0) := row_idx;
        return result;
    end;

    -- Return the next row in the current cache line. We use a dedicated
    -- function in order to limit the size of the generated adder to be
    -- only the bits within a cache line (3 bits with default settings)
    --
    function next_row(row: row_t) return row_t is
        variable row_v   : std_ulogic_vector(ROW_BITS-1 downto 0);
        variable row_idx : std_ulogic_vector(ROW_LINEBITS-1 downto 0);
    begin
        row_v := std_ulogic_vector(row);
        row_idx := row_v(ROW_LINEBITS-1 downto 0);
        row_v(ROW_LINEBITS-1 downto 0) := std_ulogic_vector(unsigned(row_idx) + 1);
        return unsigned(row_v);
    end;

    -- Get the tag value from the address
    function get_tag(addr: std_ulogic_vector) return cache_tag_t is
    begin
        return addr(REAL_ADDR_BITS - 1 downto SET_SIZE_BITS);
    end;

    -- Read a tag from a tag memory row
    function read_tag(way: integer; tagset: cache_tags_set_t) return cache_tag_t is
    begin
        return tagset(way * TAG_WIDTH + TAG_BITS - 1 downto way * TAG_WIDTH);
    end;

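The TAG_WIDTH stride here reflects the rounding described in the commit
message: each way's tag is padded to a whole number of bytes so a single tag
can be written with byte enables. A worked example, assuming REAL_ADDR_BITS
is 56 (its value in microwatt's common package):

    -- TAG_BITS = 56 - 11 = 45 with the default generics, so TAG_WIDTH is
    -- rounded up to 48. Way w's tag then sits at bits
    -- w*48 + 44 downto w*48 of the flat tag set, and the 3 pad bits per
    -- way let one tag be updated via 6 byte enables.
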
    -- Read a TLB tag from a TLB tag memory row
    function read_tlb_tag(way: tlb_way_t; tags: tlb_way_tags_t) return tlb_tag_t is
        variable j : integer;
    begin
        j := way * TLB_EA_TAG_BITS;
        return tags(j + TLB_EA_TAG_BITS - 1 downto j);
    end;

    -- Write a TLB tag to a TLB tag memory row
    procedure write_tlb_tag(way: tlb_way_t; tags: inout tlb_way_tags_t;
                            tag: tlb_tag_t) is
        variable j : integer;
    begin
        j := way * TLB_EA_TAG_BITS;
        tags(j + TLB_EA_TAG_BITS - 1 downto j) := tag;
    end;

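A usage sketch for these inout-parameter helpers on a TLB refill; tagset,
repl_way, eatag and dtlb_tags are hypothetical names:

    -- Illustrative only:
    --   tagset := tlb_tag_way;                     -- the set's flat tag vector
    --   write_tlb_tag(repl_way, tagset, eatag);    -- splice in the new EA tag
    --   dtlb_tags(to_integer(tlb_req_index)) <= tagset;  -- write the set back
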
    -- Read a PTE from a TLB PTE memory row
    function read_tlb_pte(way: tlb_way_t; ptes: tlb_way_ptes_t) return tlb_pte_t is
        variable j : integer;
    begin
        j := way * TLB_PTE_BITS;
        return ptes(j + TLB_PTE_BITS - 1 downto j);
    end;

    procedure write_tlb_pte(way: tlb_way_t; ptes: inout tlb_way_ptes_t; newpte: tlb_pte_t) is
        variable j : integer;