diff --git a/vmkit/Cargo.toml b/vmkit/Cargo.toml
index e68cbc7..4fa17fc 100644
--- a/vmkit/Cargo.toml
+++ b/vmkit/Cargo.toml
@@ -23,15 +23,16 @@ sysinfo = "0.33.1"
 
 [features]
-
-default = ["uncooperative"]
-
+default = ["cooperative", "address_based_hashing", "object_pinning"]
+vmside_forwarding = []
+object_pinning = ["mmtk/object_pinning"]
+address_based_hashing = []
 uncooperative = ["cooperative", "mmtk/immix_non_moving", "mmtk/immix_zero_on_release"]
 
 # VMKit is built for use in cooperative runtime. Such runtime
 # would be able to use write barriers and safepoints. Such environment
 # must also provide precise object layout (stack can be uncooperative).
-cooperative = ["mmtk/vo_bit", "mmtk/is_mmtk_object", "mmtk/vo_bit_access"]
+cooperative = ["mmtk/vo_bit", "mmtk/is_mmtk_object", "mmtk/vo_bit_access", "mmtk/immix_non_moving"]
 # VMKit is built for use in full-precise runtime. Such runtime
 # would be able to use precise write barriers and safepoints, object
 # layout is fully precise.
diff --git a/vmkit/examples/binarytrees.rs b/vmkit/examples/binarytrees.rs
index da39de9..97a5693 100644
--- a/vmkit/examples/binarytrees.rs
+++ b/vmkit/examples/binarytrees.rs
@@ -68,6 +68,12 @@ impl VirtualMachine for BenchVM {
     type Slot = SimpleSlot;
     type ThreadContext = ThreadBenchContext;
     type MemorySlice = UnimplementedMemorySlice;
+
+    #[cfg(feature = "vmside_forwarding")]
+    const LOCAL_FORWARDING_POINTER_SPEC: mmtk::vm::VMLocalForwardingPointerSpec = mmtk::vm::VMLocalForwardingPointerSpec::in_header(0);
+    #[cfg(feature = "vmside_forwarding")]
+    const LOCAL_FORWARDING_BITS_SPEC: mmtk::vm::VMLocalForwardingBitsSpec = mmtk::vm::VMLocalForwardingBitsSpec::side_first();
+
     fn get() -> &'static Self {
         VM.get().unwrap()
     }
diff --git a/vmkit/src/bdwgc_shim.rs b/vmkit/src/bdwgc_shim.rs
index ee46759..d3a9fa1 100644
--- a/vmkit/src/bdwgc_shim.rs
+++ b/vmkit/src/bdwgc_shim.rs
@@ -156,6 +156,11 @@ impl VirtualMachine for BDWGC {
     type Slot = SimpleSlot;
     type MemorySlice = UnimplementedMemorySlice;
 
+    #[cfg(feature = "vmside_forwarding")]
+    const LOCAL_FORWARDING_BITS_SPEC: mmtk::vm::VMLocalForwardingBitsSpec = mmtk::vm::VMLocalForwardingBitsSpec::side_first();
+    #[cfg(feature = "vmside_forwarding")]
+    const LOCAL_FORWARDING_POINTER_SPEC: mmtk::vm::VMLocalForwardingPointerSpec = mmtk::vm::VMLocalForwardingPointerSpec::in_header(0);
+
     fn get() -> &'static Self {
         BDWGC_VM.get().expect("GC is not initialized")
     }
diff --git a/vmkit/src/lib.rs b/vmkit/src/lib.rs
index febe1f9..cac66e3 100644
--- a/vmkit/src/lib.rs
+++ b/vmkit/src/lib.rs
@@ -26,6 +26,14 @@ pub trait VirtualMachine: Sized + 'static + Send + Sync {
     type Metadata: object_model::metadata::Metadata;
     type Slot: SlotExtra;
     type MemorySlice: MemorySlice;
+
+
+    #[cfg(feature = "vmside_forwarding")]
+    const LOCAL_FORWARDING_POINTER_SPEC: mmtk::vm::VMLocalForwardingPointerSpec;
+    #[cfg(feature = "vmside_forwarding")]
+    const LOCAL_FORWARDING_BITS_SPEC: mmtk::vm::VMLocalForwardingBitsSpec;
+
+
     const ALIGNMENT_VALUE: u32 = 0xdead_beef;
     const MAX_ALIGNMENT: usize = 32;
     const MIN_ALIGNMENT: usize = 8;
@@ -146,6 +154,18 @@ pub trait VirtualMachine: Sized + 'static + Send + Sync {
         let _ = tls;
         unimplemented!()
     }
+
+    /// Compute the hashcode of an object. When the `address_based_hashing` feature is enabled,
+    /// this function is ignored; otherwise VMKit calls into it to compute an object's hashcode.
+    ///
+    /// If the VM uses moving plans, it is strongly advised *not* to derive the hashcode from the
+    /// object's address, as the address may change during GC; instead, store the hashcode in a
+    /// field or in spare header bits. This function must be fast, as it is called on every
+    /// `VMKitObject::hashcode()` invocation.
+    fn compute_hashcode(object: VMKitObject) -> usize {
+        let _ = object;
+        unimplemented!("VM currently does not support hashcode computation, override this method to do so");
+    }
 }
 
 pub struct VMKit {
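
Note: with `address_based_hashing` off (a non-default build), the new `compute_hashcode` hook becomes the only hash source. A minimal sketch of an override, assuming a hypothetical VM that writes a precomputed hash into the first payload word at allocation time (the layout invariant, function name, and module path are illustrative, not part of this patch):

    use vmkit::object_model::object::VMKitObject;

    // Sketch: the body a VM might give `VirtualMachine::compute_hashcode`.
    // Reading a stored field keeps the hash stable across moving GC,
    // which is exactly what the doc comment above asks for.
    fn compute_hashcode_impl(object: VMKitObject) -> usize {
        // SAFETY: this hypothetical VM guarantees the first payload word
        // always holds the precomputed hash.
        unsafe { object.as_address().load::<usize>() }
    }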
diff --git a/vmkit/src/mm/conservative_roots.rs b/vmkit/src/mm/conservative_roots.rs
index 1bc810e..3128888 100644
--- a/vmkit/src/mm/conservative_roots.rs
+++ b/vmkit/src/mm/conservative_roots.rs
@@ -46,9 +46,13 @@ impl ConservativeRoots {
     ///
     /// `start` and `end` must be valid addresses.
     pub unsafe fn add_span(&mut self, mut start: Address, mut end: Address) {
+        assert!(!start.is_zero() && !end.is_zero(), "provided NULL address to ConservativeRoots::add_span");
+
         if start > end {
             std::mem::swap(&mut start, &mut end);
         }
+        let start = start.align_down(size_of::<Address>());
+        let end = end.align_up(size_of::<Address>());
         let mut current = start;
         while current < end {
             let addr = current.load::<Address>();
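
A usage sketch for the hardened `add_span` (the module path and the caller's setup are assumed; `add_span` itself now rejects NULL, swaps reversed bounds, and word-aligns the span):

    use mmtk::util::Address;
    use vmkit::mm::conservative_roots::ConservativeRoots;

    // Sketch: conservatively scan one thread's stack, from the saved stack
    // pointer `sp` up to the stack origin `top`. Passing them in either
    // order is fine; passing NULL now panics with a clear message.
    unsafe fn scan_stack(roots: &mut ConservativeRoots, sp: Address, top: Address) {
        debug_assert!(!sp.is_zero() && !top.is_zero());
        roots.add_span(sp, top);
    }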
diff --git a/vmkit/src/object_model.rs b/vmkit/src/object_model.rs
index 493bbda..7b081ba 100644
--- a/vmkit/src/object_model.rs
+++ b/vmkit/src/object_model.rs
@@ -1,22 +1,22 @@
 use std::marker::PhantomData;
 
 use crate::{mm::MemoryManager, VirtualMachine};
-use easy_bitfield::BitFieldTrait;
-use header::{HashState, HashStateField, HASHCODE_OFFSET, OBJECT_HEADER_OFFSET, OBJECT_REF_OFFSET};
+use header::{HashState, HASHCODE_OFFSET, OBJECT_HEADER_OFFSET, OBJECT_REF_OFFSET};
 use mmtk::{
     util::{alloc::fill_alignment_gap, constants::LOG_BYTES_IN_ADDRESS, ObjectReference},
     vm::*,
 };
 use object::{MoveTarget, VMKitObject};
 
+pub mod compression;
+pub mod finalization;
 pub mod header;
 pub mod metadata;
 pub mod object;
-pub mod finalization;
-pub mod compression;
 
 pub struct VMKitObjectModel<VM: VirtualMachine>(PhantomData<VM>);
 
+/*
 pub const LOGGING_SIDE_METADATA_SPEC: VMGlobalLogBitSpec = VMGlobalLogBitSpec::side_first();
 pub const FORWARDING_POINTER_METADATA_SPEC: VMLocalForwardingPointerSpec =
     VMLocalForwardingPointerSpec::in_header(0);
@@ -24,15 +24,47 @@ pub const FORWARDING_BITS_METADATA_SPEC: VMLocalForwardingBitsSpec =
     VMLocalForwardingBitsSpec::in_header(HashStateField::NEXT_BIT as _);
 pub const MARKING_METADATA_SPEC: VMLocalMarkBitSpec = VMLocalMarkBitSpec::side_first();
 pub const LOS_METADATA_SPEC: VMLocalLOSMarkNurserySpec =
-    VMLocalLOSMarkNurserySpec::in_header(HashStateField::NEXT_BIT as _);
+    VMLocalLOSMarkNurserySpec::in_header(HashStateField::NEXT_BIT as _);*/
+
+pub const LOGGING_SIDE_METADATA_SPEC: VMGlobalLogBitSpec = VMGlobalLogBitSpec::side_first();
+/// Overwrites the first field of the object header.
+pub const LOCAL_FORWARDING_POINTER_SPEC: VMLocalForwardingPointerSpec =
+    VMLocalForwardingPointerSpec::in_header(OBJECT_REF_OFFSET);
+
 impl<VM: VirtualMachine> ObjectModel<MemoryManager<VM>> for VMKitObjectModel<VM> {
-    const GLOBAL_LOG_BIT_SPEC: mmtk::vm::VMGlobalLogBitSpec = LOGGING_SIDE_METADATA_SPEC;
+    /*const GLOBAL_LOG_BIT_SPEC: mmtk::vm::VMGlobalLogBitSpec = LOGGING_SIDE_METADATA_SPEC;
     const LOCAL_FORWARDING_POINTER_SPEC: mmtk::vm::VMLocalForwardingPointerSpec =
         FORWARDING_POINTER_METADATA_SPEC;
     const LOCAL_FORWARDING_BITS_SPEC: mmtk::vm::VMLocalForwardingBitsSpec =
         FORWARDING_BITS_METADATA_SPEC;
     const LOCAL_MARK_BIT_SPEC: mmtk::vm::VMLocalMarkBitSpec = MARKING_METADATA_SPEC;
-    const LOCAL_LOS_MARK_NURSERY_SPEC: mmtk::vm::VMLocalLOSMarkNurserySpec = LOS_METADATA_SPEC;
+    const LOCAL_LOS_MARK_NURSERY_SPEC: mmtk::vm::VMLocalLOSMarkNurserySpec = LOS_METADATA_SPEC;*/
+
+    const GLOBAL_LOG_BIT_SPEC: VMGlobalLogBitSpec = VMGlobalLogBitSpec::side_first();
+
+    #[cfg(not(feature = "vmside_forwarding"))]
+    const LOCAL_FORWARDING_BITS_SPEC: VMLocalForwardingBitsSpec =
+        VMLocalForwardingBitsSpec::side_first();
+    #[cfg(not(feature = "vmside_forwarding"))]
+    const LOCAL_FORWARDING_POINTER_SPEC: VMLocalForwardingPointerSpec =
+        VMLocalForwardingPointerSpec::in_header(0);
+
+    #[cfg(feature = "vmside_forwarding")]
+    const LOCAL_FORWARDING_BITS_SPEC: VMLocalForwardingBitsSpec = VM::LOCAL_FORWARDING_BITS_SPEC;
+    #[cfg(feature = "vmside_forwarding")]
+    const LOCAL_FORWARDING_POINTER_SPEC: VMLocalForwardingPointerSpec =
+        VM::LOCAL_FORWARDING_POINTER_SPEC;
+
+    const LOCAL_MARK_BIT_SPEC: VMLocalMarkBitSpec =
+        if Self::LOCAL_FORWARDING_BITS_SPEC.as_spec().is_on_side() {
+            VMLocalMarkBitSpec::side_after(&Self::LOCAL_FORWARDING_BITS_SPEC.as_spec())
+        } else {
+            VMLocalMarkBitSpec::side_first()
+        };
+
+    const LOCAL_LOS_MARK_NURSERY_SPEC: VMLocalLOSMarkNurserySpec =
+        VMLocalLOSMarkNurserySpec::side_after(&Self::LOCAL_MARK_BIT_SPEC.as_spec());
+    const LOCAL_PINNING_BIT_SPEC: VMLocalPinningBitSpec =
+        VMLocalPinningBitSpec::side_after(&Self::LOCAL_LOS_MARK_NURSERY_SPEC.as_spec());
 
     const OBJECT_REF_OFFSET_LOWER_BOUND: isize = OBJECT_REF_OFFSET;
     const UNIFIED_OBJECT_REFERENCE_ADDRESS: bool = false;
@@ -68,13 +100,13 @@ impl<VM: VirtualMachine> ObjectModel<MemoryManager<VM>> for VMKitObjectModel<VM> {
         region: mmtk::util::Address,
     ) -> mmtk::util::Address {
         let vmkit_from = VMKitObject::from(from);
-
+
         let copy = from != to;
         let bytes = if copy {
             let vmkit_to = VMKitObject::from(to);
             let bytes = vmkit_to.bytes_required_when_copied::<VM>();
             Self::move_object(vmkit_from, MoveTarget::ToObject(vmkit_to), bytes);
-            bytes
+            bytes
         } else {
             vmkit_from.bytes_used::<VM>()
         };
@@ -92,12 +124,9 @@ impl<VM: VirtualMachine> ObjectModel<MemoryManager<VM>> for VMKitObjectModel<VM> {
         let res_addr = to + OBJECT_REF_OFFSET + vmkit_from.hashcode_overhead::<VM, true>();
         debug_assert!(!res_addr.is_zero());
         // SAFETY: we just checked that the address is not zero
-        unsafe {
-            ObjectReference::from_raw_address_unchecked(res_addr)
-        }
+        unsafe { ObjectReference::from_raw_address_unchecked(res_addr) }
     }
 
-
     fn get_type_descriptor(_reference: mmtk::util::ObjectReference) -> &'static [i8] {
         unreachable!()
     }
@@ -118,7 +147,6 @@ impl<VM: VirtualMachine> ObjectModel<MemoryManager<VM>> for VMKitObjectModel<VM> {
         VMKitObject::from_objref_nullable(Some(object)).get_size_when_copied::<VM>()
     }
 
-
     fn ref_to_object_start(reference: mmtk::util::ObjectReference) -> mmtk::util::Address {
         VMKitObject::from_objref_nullable(Some(reference)).object_start::<VM>()
     }
@@ -133,7 +161,12 @@ impl<VM: VirtualMachine> ObjectModel<MemoryManager<VM>> for VMKitObjectModel<VM> {
 }
 impl<VM: VirtualMachine> VMKitObjectModel<VM> {
     fn move_object(from_obj: VMKitObject, mut to: MoveTarget, num_bytes: usize) -> VMKitObject {
-        log::trace!("move_object: from_obj: {}, to: {}, bytes={}", from_obj.as_address(), to, num_bytes);
+        log::trace!(
+            "move_object: from_obj: {}, to: {}, bytes={}",
+            from_obj.as_address(),
+            to,
+            num_bytes
+        );
         let mut copy_bytes = num_bytes;
         let mut obj_ref_offset = OBJECT_REF_OFFSET;
         let hash_state = from_obj.header::<VM>().hash_state();
@@ -141,7 +174,6 @@ impl<VM: VirtualMachine> VMKitObjectModel<VM> {
         // Adjust copy bytes and object reference offset based on hash state
         match hash_state {
             HashState::Hashed => {
-                copy_bytes -= size_of::<usize>(); // Exclude hash code from copy
                 if let MoveTarget::ToAddress(ref mut addr) = to {
                     *addr += size_of::<usize>(); // Adjust address for hash code
diff --git a/vmkit/src/object_model/header.rs b/vmkit/src/object_model/header.rs
index 4c7ec19..7f6b7da 100644
--- a/vmkit/src/object_model/header.rs
+++ b/vmkit/src/object_model/header.rs
@@ -3,13 +3,22 @@ use std::marker::PhantomData;
 
 use crate::VirtualMachine;
 use easy_bitfield::*;
 
+use super::object::ADDRESS_BASED_HASHING;
+
 /// Offset from allocation pointer to the actual object start.
 pub const OBJECT_REF_OFFSET: isize = 8;
 /// Object header behind object.
 pub const OBJECT_HEADER_OFFSET: isize = -OBJECT_REF_OFFSET;
 pub const HASHCODE_OFFSET: isize = -(OBJECT_REF_OFFSET + size_of::<usize>() as isize);
-pub type MetadataField = BitField;
+
+pub const METADATA_BIT_LIMIT: usize = if ADDRESS_BASED_HASHING {
+    61
+} else {
+    63
+};
+
+pub type MetadataField = BitField;
 pub type HashStateField = BitField;
 
 #[derive(Debug, Clone, Copy, PartialEq, Eq)]
@@ -71,6 +80,9 @@ impl HeapObjectHeader {
     }
 
     pub fn hash_state(&self) -> HashState {
+        if !ADDRESS_BASED_HASHING {
+            return HashState::Unhashed;
+        }
         self.metadata.read::<HashStateField>()
     }
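
The header change hinges on the address-based-hashing state machine: objects start `Unhashed`; the first `hashcode()` call marks them `Hashed` (the address is the hash); when a moving GC copies a `Hashed` object, the old address is stashed at `HASHCODE_OFFSET` and the state becomes `HashedAndMoved`. An illustrative sketch of those transitions (free functions for exposition only; the real logic lives in `hashcode()` and `move_object`):

    use vmkit::object_model::header::HashState;

    // How the 2-bit hash state advances when an object's hash is requested.
    fn after_hashcode(state: HashState) -> HashState {
        match state {
            HashState::Unhashed => HashState::Hashed, // hash == current address
            other => other,                           // hash already recorded
        }
    }

    // How it advances when the GC moves the object.
    fn after_move(state: HashState) -> HashState {
        match state {
            // the old address is preserved behind the new copy
            HashState::Hashed | HashState::HashedAndMoved => HashState::HashedAndMoved,
            HashState::Unhashed => HashState::Unhashed, // nothing to preserve
        }
    }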
diff --git a/vmkit/src/object_model/object.rs b/vmkit/src/object_model/object.rs
index ce54e3e..8efdd65 100644
--- a/vmkit/src/object_model/object.rs
+++ b/vmkit/src/object_model/object.rs
@@ -19,6 +19,13 @@ use super::{
     metadata::Metadata,
 };
 
+/// Is address-based hashing enabled? If true,
+/// the object header uses 2 bits to indicate hash state, and if the GC moves
+/// an object, its hash is stored in the object itself.
+///
+/// When disabled, `hashcode()` instead calls into the VM to get the hashcode.
+pub const ADDRESS_BASED_HASHING: bool = cfg!(feature = "address_based_hashing");
+
 #[repr(transparent)]
 #[derive(Debug, Clone, Copy, PartialEq, Eq)]
 pub struct VMKitObject(Address);
@@ -208,6 +215,9 @@ impl VMKitObject {
     /// * `usize` - The hashcode overhead.
     #[inline(always)]
     pub fn hashcode_overhead<VM: VirtualMachine, const WHEN_COPIED: bool>(&self) -> usize {
+        if !ADDRESS_BASED_HASHING {
+            return 0;
+        }
         let hash_state = self.header::<VM>().hash_state();
 
         let has_hashcode = if WHEN_COPIED {
@@ -267,7 +277,10 @@ impl VMKitObject {
         self.bytes_required_when_copied::<VM>()
     }
 
-    pub fn hashcode<VM: VirtualMachine>(&self) -> usize {
+    pub fn hashcode<VM: VirtualMachine>(self) -> usize {
+        if !ADDRESS_BASED_HASHING {
+            return VM::compute_hashcode(self);
+        }
         let header = self.header::<VM>();
         match header.hash_state() {
             HashState::HashedAndMoved => {
diff --git a/vmkit/src/threading.rs b/vmkit/src/threading.rs
index 1008bdd..e6e063b 100644
--- a/vmkit/src/threading.rs
+++ b/vmkit/src/threading.rs
@@ -365,6 +365,9 @@ impl<VM: VirtualMachine> Thread<VM> {
             self.tid.store(libc::gettid() as _, Ordering::Relaxed);
         }
         init_current_thread(self.clone());
+        self.stack_bounds
+            .set(StackBounds::current_thread_stack_bounds())
+            .unwrap();
         let constraints = VM::get().vmkit().mmtk.get_plan().constraints();
         self.max_non_los_default_alloc_bytes
             .set(constraints.max_non_los_default_alloc_bytes);
@@ -386,9 +389,6 @@ impl<VM: VirtualMachine> Thread<VM> {
             _ => self.alloc_fastpath.set(AllocFastPath::None),
         }
 
-        self.stack_bounds
-            .set(StackBounds::current_thread_stack_bounds())
-            .unwrap();
         let vmkit = VM::get().vmkit();
         if !self.is_collector_thread() && !self.ignore_handshakes_and_gc() {
             let mutator = mmtk::memory_manager::bind_mutator(
@@ -930,7 +930,7 @@ impl<VM: VirtualMachine> Thread<VM> {
 
     pub fn enter_native() {
         let t = Self::current();
-
+        t.stack_pointer.store(current_stack_pointer().as_usize(), Ordering::Relaxed);
         let mut old_state;
         loop {
             old_state = t.get_exec_status();
@@ -1197,7 +1197,7 @@ impl<VM: VirtualMachine> Thread<VM> {
         let thread = Thread::<VM>::current();
         let _was_at_yieldpoint = thread.at_yieldpoint.load(atomic::Ordering::Relaxed);
         thread.at_yieldpoint.store(true, atomic::Ordering::Relaxed);
-
+
         // If thread is in critical section we can't do anything right now, defer
         // until later
         // we do this without acquiring locks, since part of the point of disabling
@@ -1447,7 +1447,7 @@ impl<VM: VirtualMachine> ThreadManager<VM> {
 
             // Deal with terminating threads to ensure that all threads are either dead to MMTk or stopped above.
             self.process_about_to_terminate();
-            self.inner
+            let threads = self.inner
                 .lock_no_handshake()
                 .borrow()
                 .threads
@@ -1455,7 +1455,17 @@ impl<VM: VirtualMachine> ThreadManager<VM> {
                 .flatten()
                 .filter(|t| t.is_blocked_for::<GcBlockAdapter>())
                 .cloned()
-                .collect::<Vec<_>>()
+                .collect::<Vec<_>>();
+
+            #[cfg(debug_assertions)]
+            {
+                for thread in threads.iter() {
+                    assert!(!thread.stack_bounds().is_empty());
+                    assert!(!thread.stack_pointer().is_zero());
+                }
+            }
+
+            threads
         } else {
             self.process_about_to_terminate();
             let mut handshake_threads = Vec::with_capacity(4);
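
Taken together, the `Thread::run` reordering (stack bounds published before the mutator is bound), the `enter_native` stack-pointer store, and the new debug assertions guarantee that every thread returned by the block-all path can be scanned conservatively. A sketch of the collector-side consumer this enables; `stack_pointer()` and `stack_bounds()` appear in the assertions above, but the `top` argument here stands in for whatever accessor `StackBounds` actually exposes:

    use mmtk::util::Address;
    use vmkit::{mm::conservative_roots::ConservativeRoots, threading::Thread, VirtualMachine};

    // Sketch: scan one blocked mutator. `sp` was stored at the last
    // enter_native()/yieldpoint; `top` is the upper stack bound.
    unsafe fn scan_blocked<VM: VirtualMachine>(
        thread: &Thread<VM>,
        top: Address,
        roots: &mut ConservativeRoots,
    ) {
        let sp = thread.stack_pointer(); // non-zero per the assertion above
        debug_assert!(!sp.is_zero());
        roots.add_span(sp, top);
    }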