support aligning allocations

playX18 2025-02-13 17:24:08 +07:00
parent 9e77c6a542
commit e20870c604
6 changed files with 139 additions and 25 deletions

View file

@@ -113,7 +113,7 @@ impl NodeRef {
         let node = MemoryManager::<BenchVM>::allocate(
             thread,
             size_of::<Node>(),
-            16,
+            32,
             &METADATA,
             AllocationSemantics::Default,
         );

View file

@@ -1,4 +1,7 @@
-use std::{marker::PhantomData, sync::atomic::{AtomicBool, AtomicUsize}};
+use std::{
+    marker::PhantomData,
+    sync::atomic::{AtomicBool, AtomicUsize},
+};
 
 use mm::{aslr::aslr_vm_layout, traits::SlotExtra, MemoryManager};
 use mmtk::{MMTKBuilder, MMTK};
@@ -8,12 +11,12 @@ pub mod machine_context;
 pub mod mm;
 pub mod object_model;
 pub mod options;
+pub mod platform;
 pub mod semaphore;
 pub mod sync;
 pub mod threading;
-pub mod platform;
 
-#[cfg(feature="uncooperative")]
+#[cfg(feature = "uncooperative")]
 pub mod bdwgc_shim;
 
 pub trait VirtualMachine: Sized + 'static + Send + Sync {
@@ -22,7 +25,8 @@ pub trait VirtualMachine: Sized + 'static + Send + Sync {
     type Metadata: object_model::metadata::Metadata<Self>;
     type Slot: SlotExtra;
 
-    const MAX_ALIGNMENT: usize = 16;
+    const ALIGNMENT_VALUE: u32 = 0xdead_beef;
+    const MAX_ALIGNMENT: usize = 32;
     const MIN_ALIGNMENT: usize = 8;
 
     /// Does this VM use conservative tracing? If `true` then VM can
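The alignment contract now lives on the trait: `MAX_ALIGNMENT` doubles from 16 to 32, and the new `ALIGNMENT_VALUE` is the 32-bit pattern that `fill_alignment_gap` (in the new `align.rs` below) stamps into alignment padding so that stray reads of gap memory stand out in a debugger. A minimal standalone sketch of that stamping on a plain byte buffer (buffer size and gap bounds are illustrative):

```rust
const ALIGNMENT_VALUE: u32 = 0xdead_beef;

// Stamp an alignment gap one u32 at a time, as fill_alignment_gap does.
fn fill_gap(buf: &mut [u8], start: usize, end: usize) {
    let mut at = start;
    while at + 4 <= end {
        buf[at..at + 4].copy_from_slice(&ALIGNMENT_VALUE.to_le_bytes());
        at += 4;
    }
}

fn main() {
    let mut heap = [0u8; 16];
    fill_gap(&mut heap, 4, 12); // an 8-byte gap left by alignment
    assert_eq!(&heap[4..8], &ALIGNMENT_VALUE.to_le_bytes());
    assert_eq!(&heap[8..12], &ALIGNMENT_VALUE.to_le_bytes());
}
```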

View file

@@ -10,7 +10,10 @@ use crate::{
 use easy_bitfield::{AtomicBitfieldContainer, ToBitfield};
 use mmtk::{
     util::{
-        alloc::{AllocatorSelector, BumpAllocator, ImmixAllocator}, conversions::raw_align_up, metadata::side_metadata::GLOBAL_SIDE_METADATA_VM_BASE_ADDRESS, VMMutatorThread
+        alloc::{AllocatorSelector, BumpAllocator, ImmixAllocator},
+        conversions::raw_align_up,
+        metadata::side_metadata::GLOBAL_SIDE_METADATA_VM_BASE_ADDRESS,
+        VMMutatorThread,
     },
     vm::{
         slot::{Slot, UnimplementedMemorySlice},
@@ -44,6 +47,7 @@ impl<VM: VirtualMachine> VMBinding for MemoryManager<VM> {
 }
 
 pub mod active_plan;
+pub mod align;
 pub mod aslr;
 pub mod collection;
 pub mod conservative_roots;
@@ -137,7 +141,9 @@ impl<VM: VirtualMachine> MemoryManager<VM> {
             AllocationSemantics::Default => match thread.alloc_fastpath() {
                 AllocFastPath::TLAB => unsafe {
                     let tlab = thread.tlab.get().as_mut().unwrap();
-                    let object_start = tlab.allocate(size, alignment);
+                    let object_start =
+                        tlab.allocate::<VM>(size, alignment, OBJECT_REF_OFFSET as usize);
                     if !object_start.is_zero() {
                         object_start.store(HeapObjectHeader::<VM> {
                             metadata: AtomicBitfieldContainer::new(metadata.to_bitfield()),
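This and the following hunks thread `OBJECT_REF_OFFSET` through every allocation path in place of a hard-coded `0`: the header sits at the raw allocation start, the object reference sits `OBJECT_REF_OFFSET` bytes past it, and it is the reference, not the start, that must honor the requested alignment. A standalone sketch of that invariant, on plain `usize`s (the offset value 8 stands in for `OBJECT_REF_OFFSET` and is assumed for illustration):

```rust
// Smallest bump of `region` such that (result + offset) is a multiple of
// `alignment` (a power of two) -- the same mask trick align.rs uses below.
fn aligned_start(region: usize, alignment: usize, offset: usize) -> usize {
    region + (offset.wrapping_neg().wrapping_sub(region) & (alignment - 1))
}

fn main() {
    let (alignment, offset) = (16, 8); // offset stands in for OBJECT_REF_OFFSET
    for region in (0x1000..0x1040).step_by(4) {
        let start = aligned_start(region, alignment, offset);
        // The header begins at `start`; the reference at `start + offset`
        // is the address that ends up 16-byte aligned.
        assert_eq!((start + offset) % alignment, 0);
        assert!(start >= region && start - region < alignment);
    }
}
```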
@@ -167,14 +173,13 @@ impl<VM: VirtualMachine> MemoryManager<VM> {
         alignment: usize,
         metadata: VM::Metadata,
     ) -> VMKitObject {
-
         unsafe {
             Self::flush_tlab(thread);
             let object_start = mmtk::memory_manager::alloc(
                 thread.mutator(),
                 size,
                 alignment,
-                0,
+                OBJECT_REF_OFFSET as usize,
                 AllocationSemantics::Los,
             );
 
@@ -186,7 +191,12 @@ impl<VM: VirtualMachine> MemoryManager<VM> {
             //Self::set_vo_bit(object);
             Self::refill_tlab(thread);
-            mmtk::memory_manager::post_alloc(thread.mutator(), object.as_object_unchecked(), size, AllocationSemantics::Los);
+            mmtk::memory_manager::post_alloc(
+                thread.mutator(),
+                object.as_object_unchecked(),
+                size,
+                AllocationSemantics::Los,
+            );
 
             object
         }
     }
@@ -206,7 +216,7 @@ impl<VM: VirtualMachine> MemoryManager<VM> {
                 thread.mutator(),
                 size,
                 alignment,
-                0,
+                OBJECT_REF_OFFSET as usize,
                 AllocationSemantics::NonMoving,
             );
 
@@ -236,7 +246,7 @@ impl<VM: VirtualMachine> MemoryManager<VM> {
                 thread.mutator(),
                 size,
                 alignment,
-                0,
+                OBJECT_REF_OFFSET as usize,
                 AllocationSemantics::Immortal,
             );
 
@@ -264,8 +274,13 @@ impl<VM: VirtualMachine> MemoryManager<VM> {
     ) -> VMKitObject {
         unsafe {
             Self::flush_tlab(thread);
-            let object_start =
-                mmtk::memory_manager::alloc_slow(thread.mutator(), size, alignment, 0, semantics);
+            let object_start = mmtk::memory_manager::alloc_slow(
+                thread.mutator(),
+                size,
+                alignment,
+                OBJECT_REF_OFFSET as usize,
+                semantics,
+            );
 
             object_start.store(HeapObjectHeader::<VM> {
                 metadata: AtomicBitfieldContainer::new(metadata.to_bitfield()),
@@ -393,7 +408,6 @@ impl<VM: VirtualMachine> MemoryManager<VM> {
         let shift = (addr >> 3) & 0b111;
         let byte_val = meta_addr.load::<u8>();
         if (byte_val >> shift) & 1 == 1 {
-
             thread.mutator().barrier().object_reference_write_slow(
                 src.as_object_unchecked(),
                 slot,
@@ -452,17 +466,26 @@ impl<VM: VirtualMachine> MemoryManager<VM> {
 
         Self::object_reference_write_post(thread, src, slot, target);
     }
 
     pub fn disable_gc() {
-        VM::get().vmkit().gc_disabled_depth.fetch_add(1, atomic::Ordering::SeqCst);
+        VM::get()
+            .vmkit()
+            .gc_disabled_depth
+            .fetch_add(1, atomic::Ordering::SeqCst);
     }
 
     pub fn enable_gc() {
-        VM::get().vmkit().gc_disabled_depth.fetch_sub(1, atomic::Ordering::SeqCst);
+        VM::get()
+            .vmkit()
+            .gc_disabled_depth
+            .fetch_sub(1, atomic::Ordering::SeqCst);
     }
 
     pub fn is_gc_enabled() -> bool {
-        VM::get().vmkit().gc_disabled_depth.load(atomic::Ordering::SeqCst) == 0
+        VM::get()
+            .vmkit()
+            .gc_disabled_depth
+            .load(atomic::Ordering::SeqCst)
+            == 0
     }
 
 }
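These last three hunks are formatting-only, but they make the nesting contract easier to read: `disable_gc`/`enable_gc` bump and drop a depth counter, and collection is permitted only once the depth is back to zero. A standalone sketch of that contract:

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

// gc_disabled_depth as a nesting counter: disable/enable calls pair up,
// and GC is only allowed when every disable has been matched.
static GC_DISABLED_DEPTH: AtomicUsize = AtomicUsize::new(0);

fn disable_gc() { GC_DISABLED_DEPTH.fetch_add(1, Ordering::SeqCst); }
fn enable_gc() { GC_DISABLED_DEPTH.fetch_sub(1, Ordering::SeqCst); }
fn is_gc_enabled() -> bool { GC_DISABLED_DEPTH.load(Ordering::SeqCst) == 0 }

fn main() {
    disable_gc();
    disable_gc();              // nested no-GC region
    enable_gc();
    assert!(!is_gc_enabled()); // one disable still outstanding
    enable_gc();
    assert!(is_gc_enabled());
}
```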

vmkit/src/mm/align.rs Normal file (79 lines added)
View file

@@ -0,0 +1,79 @@
use mmtk::util::Address;

use crate::VirtualMachine;

/// Round `x` down to a multiple of `align` (`align` must be a power of two).
pub const fn round_down(x: isize, align: isize) -> isize {
    x & -align
}

/// Round `x` up so that `result + offset` is a multiple of `align`.
pub const fn round_up(x: isize, align: isize, offset: isize) -> isize {
    round_down(x + align - 1 + offset, align) - offset
}

/// Is `x` exactly `offset` bytes past a multiple of `alignment`?
pub const fn is_aligned(x: isize, alignment: isize, offset: isize) -> bool {
    (x & (alignment - 1)) == offset
}

#[inline(always)]
pub fn align_allocation_inner<VM: VirtualMachine>(
    region: Address,
    alignment: usize,
    offset: usize,
    _known_alignment: usize,
    fill: bool,
) -> Address {
    // No alignment ever required.
    /*if alignment <= known_alignment || VM::MAX_ALIGNMENT <= VM::MIN_ALIGNMENT {
        return region;
    }*/

    // May require alignment: bump `region` by the smallest delta such that
    // `region + delta + offset` is a multiple of `alignment`.
    let mask = (alignment - 1) as isize;
    let neg_off: isize = -(offset as isize);
    let delta = neg_off.wrapping_sub(region.as_usize() as isize) & mask;

    if fill && VM::ALIGNMENT_VALUE != 0 {
        fill_alignment_gap::<VM>(region, region + delta as usize);
    }

    region + delta
}

/// Fill the specified region with the alignment value.
pub fn fill_alignment_gap<VM: VirtualMachine>(immut_start: Address, end: Address) {
    let mut start = immut_start;

    if VM::MAX_ALIGNMENT - VM::MIN_ALIGNMENT == size_of::<u32>() {
        // At most a single hole
        if end - start != 0 {
            unsafe {
                start.store(VM::ALIGNMENT_VALUE);
            }
        }
    } else {
        while start < end {
            unsafe {
                start.store(VM::ALIGNMENT_VALUE);
            }
            start += size_of::<u32>();
        }
    }
}

pub fn align_allocation_no_fill<VM: VirtualMachine>(
    region: Address,
    alignment: usize,
    offset: usize,
) -> Address {
    align_allocation_inner::<VM>(region, alignment, offset, VM::MIN_ALIGNMENT, false)
}

pub fn align_allocation<VM: VirtualMachine>(
    region: Address,
    alignment: usize,
    offset: usize,
) -> Address {
    align_allocation_inner::<VM>(region, alignment, offset, VM::MIN_ALIGNMENT, true)
}
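A few worked cases for the helpers above (alignments must be powers of two; `offset` is where the to-be-aligned point sits past the returned address; assumes `round_down`, `round_up`, and `is_aligned` from this file are in scope):

```rust
fn main() {
    assert_eq!(round_down(0x100C, 16), 0x1000);

    // round_up picks the smallest result >= x such that (result + offset)
    // is a multiple of align:
    assert_eq!(round_up(0x1001, 16, 0), 0x1010);
    assert_eq!(round_up(0x1001, 16, 8), 0x1008); // 0x1008 + 8 == 0x1010

    // is_aligned checks the same property via the low bits:
    assert!(is_aligned(0x1008, 16, 8));
    assert!(!is_aligned(0x1010, 16, 8));
}
```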

View file

@@ -1,5 +1,9 @@
 use mmtk::util::{conversions::raw_align_up, Address};
 
+use crate::VirtualMachine;
+
+use super::align::align_allocation_no_fill;
 
 /// Thread-local allocation buffer.
 pub struct TLAB {
     pub cursor: Address,
@@ -14,13 +18,18 @@ impl TLAB {
         }
     }
 
-    pub fn allocate(&mut self, size: usize, alignment: usize) -> Address {
-        let aligned_size = raw_align_up(size, alignment);
-        let result = self.cursor.align_up(alignment);
-        if result + aligned_size > self.limit {
+    pub fn allocate<VM: VirtualMachine>(
+        &mut self,
+        size: usize,
+        alignment: usize,
+        offset: usize,
+    ) -> Address {
+        let size = raw_align_up(size, alignment);
+        let result = align_allocation_no_fill::<VM>(self.cursor, alignment, offset);
+        if result + size > self.limit {
             return Address::ZERO;
         } else {
-            self.cursor = result.add(aligned_size);
+            self.cursor = result.add(size);
             return result;
         }
     }
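Putting the pieces together, the TLAB fast path is now: round the size up, bump the cursor just far enough that the object reference will be aligned, and bail out when the buffer is exhausted. A minimal sketch with plain `usize`s in place of `Address` (type and field names are illustrative):

```rust
// Toy bump-pointer buffer mirroring TLAB::allocate's control flow.
struct Tlab { cursor: usize, limit: usize }

impl Tlab {
    fn allocate(&mut self, size: usize, alignment: usize, offset: usize) -> Option<usize> {
        let size = (size + alignment - 1) & !(alignment - 1); // raw_align_up
        // align_allocation_no_fill: bump cursor so (result + offset) is aligned.
        let delta = offset.wrapping_neg().wrapping_sub(self.cursor) & (alignment - 1);
        let result = self.cursor + delta;
        if result + size > self.limit {
            None // TLAB exhausted; caller falls back to the slow path
        } else {
            self.cursor = result + size;
            Some(result)
        }
    }
}

fn main() {
    let mut tlab = Tlab { cursor: 0x1004, limit: 0x1100 };
    let start = tlab.allocate(24, 16, 8).unwrap();
    assert_eq!((start + 8) % 16, 0);     // the object reference is aligned
    assert_eq!(tlab.cursor, start + 32); // 24 rounded up to 32
}
```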

View file

@@ -24,7 +24,6 @@ use crate::{
         tlab::TLAB,
         AllocFastPath, MemoryManager,
     },
-    object_model::compression::CompressedOps,
     semaphore::Semaphore,
     sync::{Monitor, MonitorGuard},
     VirtualMachine,