1
  2
  3
  4
  5
  6
  7
  8
  9
 10
 11
 12
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
use std::mem;
use RegT;

const PAGE_SHIFT: usize = 10;                       // 1 kByte page size = (1<<10)
const PAGE_SIZE: usize = 1 << PAGE_SHIFT;           // page size in bytes (1024)
const PAGE_MASK: usize = PAGE_SIZE - 1;             // mask for the offset-within-page bits
const HEAP_SIZE: usize = 128 * PAGE_SIZE;           // embedded 'host' memory (128 kByte)
const NUM_PAGES: usize = (1 << 16) / PAGE_SIZE;     // pages in the 64 kByte CPU address space
const NUM_LAYERS: usize = 4;                        // number of mapping priority layers

/// a single page-table entry, mapping one 1 kByte CPU page to heap memory
#[derive(Clone,Copy)]
struct Page {
    pub offset: usize, // offset into heap
    pub writable: bool, // true if the page is writable
    pub mapped: bool, // true if currently mapped
}

impl Page {
    /// return a new, unmapped page
    pub fn new() -> Page {
        Page {
            offset: 0,
            writable: false,
            mapped: false,
        }
    }
    /// map page to chunk of heap memory
    pub fn map(&mut self, offset: usize, writable: bool) {
        self.offset = offset;
        self.writable = writable;
        self.mapped = true;
    }
    /// unmap page
    pub fn unmap(&mut self) {
        self.offset = 0;
        self.writable = false;
        self.mapped = false;
    }
}

/// memory access
///
/// The Memory object wraps access to the Z80's 64 KByte
/// address space. All memory access goes through a
/// page table with a page-size of 1 KByte. The page table
/// mapping allows a very simple implementation of
/// bank-switching, which was a popular way in 8-bit computers to
/// manage more than 64 KBytes of memory.
///
/// ## Memory Layers
///
/// Mapped memory is assigned to 1 out of (currently) 4 layers. If
/// 2 memory chunks are mapped to the same CPU address range on
/// different layers, only the memory assigned to the higher-priority
/// layer is visible to the CPU (layer number 0 has the highest
/// priority and layer number 3 the lowest).
///
/// The layer concept is easier to visualize than to describe:
///
/// ```text
///                 +---------------------------------------+
/// LAYER 3         |333333333333333333333333333333333333333|
///                 +-------+---------------+---------------+
/// LAYER 2                 |222222222222222|
///                         +---------------+       +-------+
/// LAYER 1                                         |1111111|
///                               +---------+       +-------+
/// LAYER 0                       |000000000|
///                               +---------+
///                 +-------+-----+---------+-------+-------+
/// CPU VISIBLE:    |3333333|22222|000000000|3333333|1111111|
///                 +-------+-----+---------+-------+-------+
/// ```
///
/// ## The Heap
///
/// The Memory object will never keep references to external memory; instead it
/// comes with its own few hundred KBytes of embedded memory which is used
/// as 'heap'. A single memory page maps 1 KByte of memory from the Z80
/// address range to 1 KByte of memory somewhere on the embedded heap.
///
/// ## Mapping Memory
///
/// This 'maps' a chunk of memory in Z80 address range to a chunk of memory
/// of the same size in the embedded heap on one of the four memory layers.
///
/// The simple form performs the memory mapping but does not copy
/// any data into the mapped memory region:
///
/// ```
/// use rz80::Memory;
/// let mut mem = Memory::new();
///
/// // map 32 KByte at heap address 0x08000 to CPU addr 0x0000
/// // on layer 0 as writable:
/// mem.map(0, 0x08000, 0x0000, true, 32*1024);
///
/// // map another 32 KByte at heap address 0x10000 to CPU addr 0x8000
/// // on layer 1 as read-only:
/// mem.map(1, 0x10000, 0x8000, false, 32*1024);
/// ```
///
/// The method **map_bytes()** performs a memory mapping as above,
/// but also copies a range of bytes into the mapped memory. This is
/// useful to initialize the memory with a ROM dump.
///
/// ```
/// use rz80::Memory;
/// let mut mem = Memory::new();
/// let rom = [0xFFu8; 4096];
///
/// // assume that 'rom' is a system ROM dump, and map it as read-only to CPU address 0xF000
/// mem.map_bytes(0, 0x00000, 0xF000, false, &rom);
/// ```
///
/// ## Reading and Writing Memory
///
/// The most common operations are reading and writing 8- and 16-bit unsigned values:
///
/// ```
/// use rz80::Memory;
/// // new_64k() is a shortcut method to get a 64k RAM mapping
/// let mut mem = Memory::new_64k();
///
/// // write and read unsigned bytes
/// mem.w8(0x0100, 0x23);
/// let b = mem.r8(0x0100);
/// assert!(b == 0x23);
///
/// // ...same with 16-bit unsigned words
/// mem.w16(0x0200, 0x1234);
/// let w = mem.r16(0x0200);
/// assert!(w == 0x1234);
///
/// // memory is little endian and wraps around at 64k
/// mem.w16(0xFFFF, 0x1122);
/// let l = mem.r8(0xFFFF);
/// let h = mem.r8(0x0000);
/// assert!(l == 0x22);
/// assert!(h == 0x11);
/// ```
/// There is a special method to read an 8-bit signed value. This exists for the
/// Z80's indexed and relative addressing instructions:
///
/// ```
/// use rz80::Memory;
/// let mut mem = Memory::new_64k();
///
/// mem.w8(0x0100, 0xF0);
/// let s = mem.rs8(0x0100);
/// assert!(s == -16);
/// ```
///
/// Trying to write to ROM areas will silently fail, unless the w8f() method is used:
///
/// ```
/// use rz80::Memory;
/// let mut mem = Memory::new();
/// let rom = [0x11u8; 1024];
/// mem.map_bytes(0, 0x00000, 0x0000, false, &rom);
/// let b0 = mem.r8(0x0100);
/// assert!(b0 == 0x11);
///
/// // try to write read-only memory
/// mem.w8(0x0100, 0x33);
/// let b1 = mem.r8(0x0100);
/// assert!(b1 == 0x11);
///
/// // force-write to read-only memory
/// mem.w8f(0x0100, 0x33);
/// let b2 = mem.r8(0x0100);
/// assert!(b2 == 0x33);
/// ```
///
/// You can write a whole chunk of memory, ignoring write protection, this is useful
/// to load program dumps into emulator memory:
///
/// ```
/// use rz80::Memory;
/// let mut mem = Memory::new_64k();
/// let dump : &[u8] = &[1, 2, 3];
/// mem.write(0x0100, dump);
/// assert!(mem.r8(0x0100) == 1 && mem.r8(0x0101) == 2 && mem.r8(0x0102) == 3);
///
/// ```
///
pub struct Memory {
    /// currently CPU-visible pages (the flattened result of all mapped layers)
    pages: [Page; NUM_PAGES],
    /// per-layer page tables; layer 0 has the highest priority
    layers: [[Page; NUM_PAGES]; NUM_LAYERS],
    /// 'host' memory that mapped pages point into
    pub heap: [u8; HEAP_SIZE],
}

impl Memory {
    /// return new, unmapped memory object
    pub fn new() -> Memory {
        Memory {
            pages: [Page::new(); NUM_PAGES],
            layers: [[Page::new(); NUM_PAGES]; NUM_LAYERS],
            heap: [0; HEAP_SIZE],
        }
    }

    /// return new memory object with 64 kByte mapped, writable memory (for testing)
    pub fn new_64k() -> Memory {
        let mut mem = Memory::new();
        mem.map(0, 0, 0, true, (1 << 16));
        mem
    }

    /// map a chunk of uninitialized heap memory to CPU-mapped memory
    pub fn map(&mut self,
               layer: usize,
               heap_offset: usize,
               addr: usize,
               writable: bool,
               size: usize) {
        assert!((size & PAGE_MASK) == 0);
        assert!((addr & PAGE_MASK) == 0);
        let num = size >> PAGE_SHIFT;
        for i in 0..num {
            let map_offset = i * PAGE_SIZE;
            let page_index = ((addr + map_offset) & 0xFFFF) >> PAGE_SHIFT;
            let page = &mut self.layers[layer][page_index];
            page.map(heap_offset + map_offset, writable);
        }
        self.update_mapping();
    }

    /// map a chunk of heap memory, and initialize it
    pub fn map_bytes(&mut self,
                     layer: usize,
                     heap_offset: usize,
                     addr: usize,
                     writable: bool,
                     content: &[u8]) {
        assert!((addr & PAGE_MASK) == 0);
        let size = mem::size_of_val(content);
        assert!((size & PAGE_MASK) == 0);
        self.map(layer, heap_offset, addr, writable, size);
        let dst = &mut self.heap[heap_offset..heap_offset + size];
        dst.clone_from_slice(content);
    }

    /// unmap a chunk heap memory
    pub fn unmap(&mut self, layer: usize, size: usize, addr: usize) {
        assert!((size & PAGE_MASK) == 0);
        assert!((addr & PAGE_MASK) == 0);
        let num = size >> PAGE_SHIFT;
        for i in 0..num {
            let map_offset = i * PAGE_SIZE;
            let page_index = ((addr + map_offset) & 0xFFFF) >> PAGE_SHIFT;
            let page = &mut self.layers[layer][page_index];
            page.unmap();
        }
        self.update_mapping();
    }

    /// unmap all pages in a layer
    pub fn unmap_layer(&mut self, layer: usize) {
        for page in self.layers[layer].iter_mut() {
            page.unmap();
        }
        self.update_mapping();
    }

    /// unmap all pages in all layers
    pub fn unmap_all(&mut self) {
        for layer in self.layers.iter_mut() {
            for page in layer.iter_mut() {
                page.unmap();
            }
        }
        self.update_mapping();
    }

    /// private method to update internal CPU-visible mapping from mapped layers
    fn update_mapping(&mut self) {
        // for each cpu-visible page, find the highest-priority layer
        // which maps this memory range and copy it into the
        // cpu-visible page
        for page_index in 0..NUM_PAGES {
            let mut layer_page: Option<&Page> = None;
            for layer_index in 0..NUM_LAYERS {
                if self.layers[layer_index][page_index].mapped {
                    layer_page = Some(&self.layers[layer_index][page_index]);
                    break;
                }
            }
            match layer_page {
                Some(page) => self.pages[page_index] = *page,
                None => self.pages[page_index].unmap(),
            }
        }
    }

    /// read unsigned byte from 16-bit address
    #[inline(always)]
    pub fn r8(&self, addr: RegT) -> RegT {
        let uaddr = (addr & 0xFFFF) as usize;
        let page = &self.pages[uaddr >> PAGE_SHIFT];
        if page.mapped {
            let heap_offset = page.offset + (uaddr & PAGE_MASK);
            self.heap[heap_offset] as RegT
        } else {
            0xFF
        }
    }

    /// read signed byte from 16-bit address
    #[inline(always)]
    pub fn rs8(&self, addr: RegT) -> RegT {
        let uaddr = (addr & 0xFFFF) as usize;
        let page = &self.pages[uaddr >> PAGE_SHIFT];
        if page.mapped {
            let heap_offset = page.offset + (uaddr & PAGE_MASK);
            self.heap[heap_offset] as i8 as RegT
        } else {
            0xFF
        }
    }

    /// write unsigned byte to 16-bit address
    #[inline(always)]
    pub fn w8(&mut self, addr: RegT, val: RegT) {
        let uaddr = (addr & 0xFFFF) as usize;
        let page = &self.pages[uaddr >> PAGE_SHIFT];
        if page.mapped && page.writable {
            let heap_offset = page.offset + (uaddr & PAGE_MASK);
            self.heap[heap_offset] = val as u8;
        }
    }

    /// write unsigned byte, ignore write-protection flag
    pub fn w8f(&mut self, addr: RegT, val: RegT) {
        let uaddr = (addr & 0xFFFF) as usize;
        let page = &self.pages[uaddr >> PAGE_SHIFT];
        if page.mapped {
            let heap_offset = page.offset + (uaddr & PAGE_MASK);
            self.heap[heap_offset] = val as u8;
        }
    }

    /// read unsigned word from 16-bit address
    #[inline(always)]
    pub fn r16(&self, addr: RegT) -> RegT {
        let l = self.r8(addr);
        let h = self.r8(addr + 1);
        h << 8 | l
    }

    /// write unsigned word to 16-bit address
    #[inline(always)]
    pub fn w16(&mut self, addr: RegT, val: RegT) {
        let l = val & 0xff;
        let h = (val >> 8) & 0xff;
        self.w8(addr, l);
        self.w8(addr + 1, h);
    }

    /// write a whole chunk of memory, ignore write-protection
    pub fn write(&mut self, addr: RegT, data: &[u8]) {
        let mut offset = 0;
        for b in data {
            self.w8f(addr + offset, *b as RegT);
            offset += 1;
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn mem_readwrite() {
        let mut mem = Memory::new_64k();
        mem.w8(0x1234, 0x12);
        assert_eq!(mem.r8(0x1234), 0x12);

        mem.w8(0x2345, 0x32);
        assert_eq!(mem.r8(0x2345), 0x32);

        // 16-bit accesses are little-endian
        mem.w16(0x1000, 0x1234);
        assert_eq!(mem.r16(0x1000), 0x1234);
        assert_eq!(mem.r8(0x1000), 0x34);
        assert_eq!(mem.r8(0x1001), 0x12);

        // 16-bit accesses wrap around at the top of the address space
        mem.w16(0xFFFF, 0x2233);
        assert_eq!(mem.r16(0xFFFF), 0x2233);
        assert_eq!(mem.r8(0xFFFF), 0x33);
        assert_eq!(mem.r8(0x0000), 0x22);
    }

    #[test]
    fn mem_map() {
        let mut mem = Memory::new();
        const SIZE: usize = 0x4000;  // 16k
        let x11 = [0x11u8; SIZE];
        let x22 = [0x22u8; SIZE];
        let x33 = [0x33u8; SIZE];
        let x44 = [0x44u8; SIZE];
        mem.map_bytes(0, 0x0000, 0x0000, true, &x11);
        mem.map_bytes(0, 0x4000, 0x4000, true, &x22);
        mem.map_bytes(0, 0x8000, 0x8000, true, &x33);
        mem.map_bytes(0, 0xC000, 0xC000, false, &x44);
        assert_eq!(mem.r8(0x0000), 0x11);
        assert_eq!(mem.r8(0x4000), 0x22);
        assert_eq!(mem.r8(0x8000), 0x33);
        assert_eq!(mem.r8(0xC000), 0x44);
        assert_eq!(mem.r8(0x3FFF), 0x11);
        assert_eq!(mem.r8(0x7FFF), 0x22);
        assert_eq!(mem.r8(0xBFFF), 0x33);
        assert_eq!(mem.r8(0xFFFF), 0x44);
        // reads across chunk boundaries (little-endian)
        assert_eq!(mem.r16(0x3FFF), 0x2211);
        assert_eq!(mem.r16(0x7FFF), 0x3322);
        assert_eq!(mem.r16(0xBFFF), 0x4433);
        assert_eq!(mem.r16(0xFFFF), 0x1144);
        // high byte lands on the read-only chunk and is dropped
        mem.w16(0xBFFF, 0x1234);
        assert_eq!(mem.r8(0xBFFF), 0x34);
        assert_eq!(mem.r8(0xC000), 0x44);
        // NOTE: unmap's argument order is (layer, size, addr)
        mem.unmap(0, 0x4000, SIZE);
        assert_eq!(mem.r8(0x4000), 0xFF);
        assert_eq!(mem.r8(0x7FFF), 0xFF);
        assert_eq!(mem.r8(0x3FFF), 0x11);
        assert_eq!(mem.r8(0x8000), 0x33);
        // writes to unmapped memory are dropped
        mem.w8(0x4000, 0x55);
        assert_eq!(mem.r8(0x4000), 0xFF);
        mem.w8(0x0000, 0x66);
        assert_eq!(mem.r8(0x0000), 0x66);
    }

    #[test]
    fn mem_layers() {
        let mut mem = Memory::new();
        const SIZE: usize = 0x8000;  // 32k
        let x11 = [0x11u8; SIZE];
        let x22 = [0x22u8; SIZE];
        let x33 = [0x33u8; SIZE];
        let x44 = [0x44u8; SIZE];
        mem.map_bytes(3, 0x00000, 0x0000, true, &x11);
        mem.map_bytes(2, 0x08000, 0x4000, true, &x22);
        mem.map_bytes(1, 0x10000, 0x8000, true, &x33);
        mem.map_bytes(0, 0x18000, 0xC000, true, &x44);
        assert_eq!(mem.r8(0x0000), 0x44);    // layer 0 is wrapping around at 0xFFFF
        assert_eq!(mem.r8(0x4000), 0x22);
        assert_eq!(mem.r8(0x8000), 0x33);
        assert_eq!(mem.r8(0xC000), 0x44);
        // NOTE: unmap's argument order is (layer, size, addr)
        mem.unmap(0, 0xC000, SIZE);
        assert_eq!(mem.r8(0x0000), 0x11);
        assert_eq!(mem.r8(0x4000), 0x22);
        assert_eq!(mem.r8(0x8000), 0x33);
        assert_eq!(mem.r8(0xC000), 0x33);
    }
}
}