allow VMs to override forwarding; address based hashing is optional

playX18 2025-02-15 11:58:00 +07:00
parent 8a7828299a
commit 5cfc38b6d6
9 changed files with 132 additions and 29 deletions

View file

@@ -23,15 +23,16 @@ sysinfo = "0.33.1"
[features]
default = ["uncooperative"]
default = ["cooperative", "address_based_hashing", "object_pinning"]
vmside_forwarding = []
object_pinning = ["mmtk/object_pinning"]
address_based_hashing = []
uncooperative = ["cooperative", "mmtk/immix_non_moving", "mmtk/immix_zero_on_release"]
# VMKit is built for use in a cooperative runtime. Such a runtime
# can use write barriers and safepoints. The environment must also
# provide a precise object layout (the stack may be uncooperative).
cooperative = ["mmtk/vo_bit", "mmtk/is_mmtk_object", "mmtk/vo_bit_access"]
cooperative = ["mmtk/vo_bit", "mmtk/is_mmtk_object", "mmtk/vo_bit_access", "mmtk/immix_non_moving"]
# VMKit is built for use in a fully precise runtime. Such a runtime
# can use precise write barriers and safepoints, and its object
# layout is fully precise.
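For context, a downstream runtime now picks these switches from its own manifest. A hypothetical consumer (crate name, version, and feature mix are assumptions, not part of this commit) might opt into VM-side forwarding and drop address-based hashing:

[dependencies.vmkit]
version = "*"
default-features = false
# The VM supplies its own forwarding metadata; hashcodes must then be
# computed by the VM itself rather than derived from object addresses.
features = ["cooperative", "vmside_forwarding", "object_pinning"]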

View file

@@ -68,6 +68,12 @@ impl VirtualMachine for BenchVM {
type Slot = SimpleSlot;
type ThreadContext = ThreadBenchContext;
type MemorySlice = UnimplementedMemorySlice;
#[cfg(feature="vmside_forwarding")]
const LOCAL_FORWARDING_POINTER_SPEC: mmtk::vm::VMLocalForwardingPointerSpec = mmtk::vm::VMLocalForwardingPointerSpec::in_header(0);
#[cfg(feature="vmside_forwarding")]
const LOCAL_FORWARDING_BITS_SPEC: mmtk::vm::VMLocalForwardingBitsSpec = mmtk::vm::VMLocalForwardingBitsSpec::side_first();
fn get() -> &'static Self {
VM.get().unwrap()
}

View file

@@ -156,6 +156,11 @@ impl VirtualMachine for BDWGC {
type Slot = SimpleSlot;
type MemorySlice = UnimplementedMemorySlice<Self::Slot>;
#[cfg(feature="vmside_forwarding")]
const LOCAL_FORWARDING_BITS_SPEC: mmtk::vm::VMLocalForwardingBitsSpec = mmtk::vm::VMLocalForwardingBitsSpec::side_first();
#[cfg(feature="vmside_forwarding")]
const LOCAL_FORWARDING_POINTER_SPEC: mmtk::vm::VMLocalForwardingPointerSpec = mmtk::vm::VMLocalForwardingPointerSpec::in_header(0);
fn get() -> &'static Self {
BDWGC_VM.get().expect("GC is not initialized")
}

View file

@@ -26,6 +26,14 @@ pub trait VirtualMachine: Sized + 'static + Send + Sync {
type Metadata: object_model::metadata::Metadata<Self>;
type Slot: SlotExtra;
type MemorySlice: MemorySlice<SlotType = Self::Slot>;
#[cfg(feature="vmside_forwarding")]
const LOCAL_FORWARDING_POINTER_SPEC: mmtk::vm::VMLocalForwardingPointerSpec;
#[cfg(feature="vmside_forwarding")]
const LOCAL_FORWARDING_BITS_SPEC: mmtk::vm::VMLocalForwardingBitsSpec;
const ALIGNMENT_VALUE: u32 = 0xdead_beef;
const MAX_ALIGNMENT: usize = 32;
const MIN_ALIGNMENT: usize = 8;
@@ -146,6 +154,18 @@ pub trait VirtualMachine: Sized + 'static + Send + Sync {
let _ = tls;
unimplemented!()
}
/// Computes the hashcode of an object. When the `address_based_hashing` feature is enabled,
/// this function is ignored; otherwise VMKit calls into it to compute an object's hashcode.
///
/// If the VM uses moving plans, it is strongly advised *not* to derive the hashcode from the
/// object's address, as that address may change during GC; instead, store the hashcode in a
/// field or in spare header bits. This function must be fast, as it is called on every
/// `VMKitObject::hashcode()` invocation.
fn compute_hashcode(object: VMKitObject) -> usize {
let _ = object;
unimplemented!("VM currently does not support hashcode computation; override this method to do so");
}
}
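As a sketch of the new hook: a VM that reserves one word per object for a hash assigned at allocation time (a hypothetical layout, not part of this commit) could override it as follows.

fn compute_hashcode(object: VMKitObject) -> usize {
// Hypothetical layout: the first user word holds a hash written at
// allocation, so the value is stable across moving collections.
unsafe { object.as_address().load::<usize>() }
}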
pub struct VMKit<VM: VirtualMachine> {

View file

@@ -46,9 +46,13 @@ impl ConservativeRoots {
///
/// `start` and `end` must be valid addresses.
pub unsafe fn add_span(&mut self, mut start: Address, mut end: Address) {
assert!(!start.is_zero() && !end.is_zero(), "provided NULL address to ConservativeRoots::add_span");
if start > end {
std::mem::swap(&mut start, &mut end);
}
let start = start.align_down(size_of::<Address>());
let end = end.align_up(size_of::<Address>());
let mut current = start;
while current < end {
let addr = current.load::<Address>();

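A hedged usage sketch for the hardened add_span (the constructor and the stack-bound variables are assumptions):

let mut roots = ConservativeRoots::default(); // assumed constructor
// Bounds may arrive in either order; add_span now asserts against NULL,
// swaps reversed bounds, and word-aligns both ends before scanning.
unsafe { roots.add_span(stack_start, stack_end) };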
View file

@@ -1,22 +1,22 @@
use std::marker::PhantomData;
use crate::{mm::MemoryManager, VirtualMachine};
use easy_bitfield::BitFieldTrait;
use header::{HashState, HashStateField, HASHCODE_OFFSET, OBJECT_HEADER_OFFSET, OBJECT_REF_OFFSET};
use header::{HashState, HASHCODE_OFFSET, OBJECT_HEADER_OFFSET, OBJECT_REF_OFFSET};
use mmtk::{
util::{alloc::fill_alignment_gap, constants::LOG_BYTES_IN_ADDRESS, ObjectReference},
vm::*,
};
use object::{MoveTarget, VMKitObject};
pub mod compression;
pub mod finalization;
pub mod header;
pub mod metadata;
pub mod object;
pub mod finalization;
pub mod compression;
pub struct VMKitObjectModel<VM: VirtualMachine>(PhantomData<VM>);
/*
pub const LOGGING_SIDE_METADATA_SPEC: VMGlobalLogBitSpec = VMGlobalLogBitSpec::side_first();
pub const FORWARDING_POINTER_METADATA_SPEC: VMLocalForwardingPointerSpec =
VMLocalForwardingPointerSpec::in_header(0);
@@ -24,15 +24,47 @@ pub const FORWARDING_BITS_METADATA_SPEC: VMLocalForwardingBitsSpec =
VMLocalForwardingBitsSpec::in_header(HashStateField::NEXT_BIT as _);
pub const MARKING_METADATA_SPEC: VMLocalMarkBitSpec = VMLocalMarkBitSpec::side_first();
pub const LOS_METADATA_SPEC: VMLocalLOSMarkNurserySpec =
VMLocalLOSMarkNurserySpec::in_header(HashStateField::NEXT_BIT as _);
VMLocalLOSMarkNurserySpec::in_header(HashStateField::NEXT_BIT as _);*/
pub const LOGGING_SIDE_METADATA_SPEC: VMGlobalLogBitSpec = VMGlobalLogBitSpec::side_first();
/// Overwrites the first field of the object header.
pub const LOCAL_FORWARDING_POINTER_SPEC: VMLocalForwardingPointerSpec =
VMLocalForwardingPointerSpec::in_header(OBJECT_REF_OFFSET);
impl<VM: VirtualMachine> ObjectModel<MemoryManager<VM>> for VMKitObjectModel<VM> {
const GLOBAL_LOG_BIT_SPEC: mmtk::vm::VMGlobalLogBitSpec = LOGGING_SIDE_METADATA_SPEC;
/*const GLOBAL_LOG_BIT_SPEC: mmtk::vm::VMGlobalLogBitSpec = LOGGING_SIDE_METADATA_SPEC;
const LOCAL_FORWARDING_POINTER_SPEC: mmtk::vm::VMLocalForwardingPointerSpec =
FORWARDING_POINTER_METADATA_SPEC;
const LOCAL_FORWARDING_BITS_SPEC: mmtk::vm::VMLocalForwardingBitsSpec =
FORWARDING_BITS_METADATA_SPEC;
const LOCAL_MARK_BIT_SPEC: mmtk::vm::VMLocalMarkBitSpec = MARKING_METADATA_SPEC;
const LOCAL_LOS_MARK_NURSERY_SPEC: mmtk::vm::VMLocalLOSMarkNurserySpec = LOS_METADATA_SPEC;
const LOCAL_LOS_MARK_NURSERY_SPEC: mmtk::vm::VMLocalLOSMarkNurserySpec = LOS_METADATA_SPEC;*/
const GLOBAL_LOG_BIT_SPEC: VMGlobalLogBitSpec = VMGlobalLogBitSpec::side_first();
#[cfg(not(feature = "vmside_forwarding"))]
const LOCAL_FORWARDING_BITS_SPEC: VMLocalForwardingBitsSpec =
VMLocalForwardingBitsSpec::side_first();
#[cfg(not(feature = "vmside_forwarding"))]
const LOCAL_FORWARDING_POINTER_SPEC: VMLocalForwardingPointerSpec =
VMLocalForwardingPointerSpec::in_header(0);
#[cfg(feature = "vmside_forwarding")]
const LOCAL_FORWARDING_BITS_SPEC: VMLocalForwardingBitsSpec = VM::LOCAL_FORWARDING_BITS_SPEC;
#[cfg(feature = "vmside_forwarding")]
const LOCAL_FORWARDING_POINTER_SPEC: VMLocalForwardingPointerSpec =
VM::LOCAL_FORWARDING_POINTER_SPEC;
const LOCAL_MARK_BIT_SPEC: VMLocalMarkBitSpec =
if Self::LOCAL_FORWARDING_BITS_SPEC.as_spec().is_on_side() {
VMLocalMarkBitSpec::side_after(&Self::LOCAL_FORWARDING_BITS_SPEC.as_spec())
} else {
VMLocalMarkBitSpec::side_first()
};
const LOCAL_LOS_MARK_NURSERY_SPEC: VMLocalLOSMarkNurserySpec =
VMLocalLOSMarkNurserySpec::side_after(&Self::LOCAL_MARK_BIT_SPEC.as_spec());
const LOCAL_PINNING_BIT_SPEC: VMLocalPinningBitSpec =
VMLocalPinningBitSpec::side_after(&Self::LOCAL_LOS_MARK_NURSERY_SPEC.as_spec());
const OBJECT_REF_OFFSET_LOWER_BOUND: isize = OBJECT_REF_OFFSET;
const UNIFIED_OBJECT_REFERENCE_ADDRESS: bool = false;
@@ -92,11 +124,8 @@ impl<VM: VirtualMachine> ObjectModel<MemoryManager<VM>> for VMKitObjectModel<VM>
let res_addr = to + OBJECT_REF_OFFSET + vmkit_from.hashcode_overhead::<VM, true>();
debug_assert!(!res_addr.is_zero());
// SAFETY: we just checked that the address is not zero
unsafe {
ObjectReference::from_raw_address_unchecked(res_addr)
}
unsafe { ObjectReference::from_raw_address_unchecked(res_addr) }
}
fn get_type_descriptor(_reference: mmtk::util::ObjectReference) -> &'static [i8] {
unreachable!()
@@ -118,7 +147,6 @@ impl<VM: VirtualMachine> ObjectModel<MemoryManager<VM>> for VMKitObjectModel<VM>
VMKitObject::from_objref_nullable(Some(object)).get_size_when_copied::<VM>()
}
fn ref_to_object_start(reference: mmtk::util::ObjectReference) -> mmtk::util::Address {
VMKitObject::from_objref_nullable(Some(reference)).object_start::<VM>()
}
@@ -133,7 +161,12 @@
}
impl<VM: VirtualMachine> VMKitObjectModel<VM> {
fn move_object(from_obj: VMKitObject, mut to: MoveTarget, num_bytes: usize) -> VMKitObject {
log::trace!("move_object: from_obj: {}, to: {}, bytes={}", from_obj.as_address(), to, num_bytes);
log::trace!(
"move_object: from_obj: {}, to: {}, bytes={}",
from_obj.as_address(),
to,
num_bytes
);
let mut copy_bytes = num_bytes;
let mut obj_ref_offset = OBJECT_REF_OFFSET;
let hash_state = from_obj.header::<VM>().hash_state();
@@ -141,7 +174,6 @@
// Adjust copy bytes and object reference offset based on hash state
match hash_state {
HashState::Hashed => {
copy_bytes -= size_of::<usize>(); // Exclude hash code from copy
if let MoveTarget::ToAddress(ref mut addr) = to {
*addr += size_of::<usize>(); // Adjust address for hash code

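The hash handling above shifts the copy target so the old hashcode can sit in front of the moved object. As an illustration inferred from OBJECT_REF_OFFSET and HASHCODE_OFFSET (not code from this commit):

// Unhashed:          [header][fields...]
//                            ^ object reference
// Hashed and moved:  [hashcode][header][fields...]
//                                      ^ object reference
// The saved hashcode is later read back from HASHCODE_OFFSET,
// i.e. -(OBJECT_REF_OFFSET + size_of::<usize>() as isize).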
View file

@@ -3,13 +3,22 @@ use std::marker::PhantomData;
use crate::VirtualMachine;
use easy_bitfield::*;
use super::object::ADDRESS_BASED_HASHING;
/// Offset from allocation pointer to the actual object start.
pub const OBJECT_REF_OFFSET: isize = 8;
/// The object header is stored behind the object reference.
pub const OBJECT_HEADER_OFFSET: isize = -OBJECT_REF_OFFSET;
pub const HASHCODE_OFFSET: isize = -(OBJECT_REF_OFFSET + size_of::<usize>() as isize);
pub type MetadataField = BitField<u64, usize, 0, 58, false>;
pub const METADATA_BIT_LIMIT: usize = if ADDRESS_BASED_HASHING {
61
} else {
63
};
pub type MetadataField = BitField<u64, usize, 0, METADATA_BIT_LIMIT, false>;
pub type HashStateField = BitField<u64, HashState, { MetadataField::NEXT_BIT }, 2, false>;
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
@@ -71,6 +80,9 @@ impl<VM: VirtualMachine> HeapObjectHeader<VM> {
}
pub fn hash_state(&self) -> HashState {
if !ADDRESS_BASED_HASHING {
return HashState::Unhashed;
}
self.metadata.read::<HashStateField>()
}
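For reference, the header-word layout implied by MetadataField and HashStateField:

// address_based_hashing enabled:  bits 0..=60 metadata, bits 61..=62 hash state
// address_based_hashing disabled: bits 0..=62 metadata, no hash-state bits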

View file

@@ -19,6 +19,13 @@ use super::{
metadata::Metadata,
};
/// Is address-based hashing enabled? If true, the object header uses
/// 2 bits to track the hash state and, if the GC moves the object,
/// the hash is stored in the object itself.
///
/// When disabled, `hashcode()` instead calls into the VM to obtain the hashcode.
pub const ADDRESS_BASED_HASHING: bool = cfg!(feature="address_based_hashing");
#[repr(transparent)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct VMKitObject(Address);
@@ -208,6 +215,9 @@ impl VMKitObject {
/// * `usize` - The hashcode overhead.
#[inline(always)]
pub fn hashcode_overhead<VM: VirtualMachine, const WHEN_COPIED: bool>(&self) -> usize {
if !ADDRESS_BASED_HASHING {
return 0;
}
let hash_state = self.header::<VM>().hash_state();
let has_hashcode = if WHEN_COPIED {
@@ -267,7 +277,10 @@ impl VMKitObject {
self.bytes_required_when_copied::<VM>()
}
pub fn hashcode<VM: VirtualMachine>(&self) -> usize {
pub fn hashcode<VM: VirtualMachine>(self) -> usize {
if !ADDRESS_BASED_HASHING {
return VM::compute_hashcode(self);
}
let header = self.header::<VM>();
match header.hash_state() {
HashState::HashedAndMoved => {

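The match realizes the usual three-state address-hashing protocol; in outline (a summary, not code from this commit):

// Unhashed        -> hash = current address; state advances to Hashed
// Hashed          -> object has not moved; hash is still its address
// HashedAndMoved  -> GC stored the hash in the object; read it back
//                    from HASHCODE_OFFSET instead of the new address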
View file

@@ -365,6 +365,9 @@ impl<VM: VirtualMachine> Thread<VM> {
self.tid.store(libc::gettid() as _, Ordering::Relaxed);
}
init_current_thread(self.clone());
self.stack_bounds
.set(StackBounds::current_thread_stack_bounds())
.unwrap();
let constraints = VM::get().vmkit().mmtk.get_plan().constraints();
self.max_non_los_default_alloc_bytes
.set(constraints.max_non_los_default_alloc_bytes);
@@ -386,9 +389,6 @@ impl<VM: VirtualMachine> Thread<VM> {
_ => self.alloc_fastpath.set(AllocFastPath::None),
}
self.stack_bounds
.set(StackBounds::current_thread_stack_bounds())
.unwrap();
let vmkit = VM::get().vmkit();
if !self.is_collector_thread() && !self.ignore_handshakes_and_gc() {
let mutator = mmtk::memory_manager::bind_mutator(
@@ -930,7 +930,7 @@ impl<VM: VirtualMachine> Thread<VM> {
pub fn enter_native() {
let t = Self::current();
t.stack_pointer.store(current_stack_pointer().as_usize(), Ordering::Relaxed);
let mut old_state;
loop {
old_state = t.get_exec_status();
@@ -1447,7 +1447,7 @@ impl<VM: VirtualMachine> ThreadManager<VM> {
// Deal with terminating threads to ensure that all threads are either dead to MMTk or stopped above.
self.process_about_to_terminate();
self.inner
let threads = self.inner
.lock_no_handshake()
.borrow()
.threads
@@ -1455,7 +1455,17 @@
.flatten()
.filter(|t| t.is_blocked_for::<GCBlockAdapter>())
.cloned()
.collect::<Vec<_>>()
.collect::<Vec<_>>();
#[cfg(debug_assertions)]
{
for thread in threads.iter() {
assert!(!thread.stack_bounds().is_empty());
assert!(!thread.stack_pointer().is_zero());
}
}
threads
} else {
self.process_about_to_terminate();
let mut handshake_threads = Vec::with_capacity(4);