binarytrees

playX18 2025-02-10 14:31:00 +07:00
parent 07e0284c31
commit b17f1ccd33
7 changed files with 311 additions and 159 deletions

View file

@@ -1,32 +1,32 @@
+use mmtk::util::options::AffinityKind;
+use mmtk::util::Address;
 use mmtk::{util::options::PlanSelector, vm::slot::SimpleSlot, AllocationSemantics, MMTKBuilder};
-use std::mem::offset_of;
+use std::cell::RefCell;
 use std::sync::OnceLock;
+use std::sync::Arc;
+use vmkit::threading::parked_scope;
 use vmkit::{
     mm::{traits::Trace, MemoryManager},
     object_model::{
         metadata::{GCMetadata, TraceCallback},
         object::VMKitObject,
     },
+    sync::Monitor,
     threading::{GCBlockAdapter, Thread, ThreadContext},
     VMKit, VirtualMachine,
 };
 
+const CONSERVATIVE_TRACE_NODE: bool = false;
+
 #[repr(C)]
 struct Node {
-    left: VMKitObject,
-    right: VMKitObject,
-    i: usize,
-    j: usize,
+    left: NodeRef,
+    right: NodeRef,
 }
 
 static METADATA: GCMetadata<BenchVM> = GCMetadata {
     trace: TraceCallback::TraceObject(|object, tracer| unsafe {
         let node = object.as_address().as_mut_ref::<Node>();
-        node.left.trace_object(tracer);
-        node.right.trace_object(tracer);
+        node.left.0.trace_object(tracer);
+        node.right.0.trace_object(tracer);
     }),
     instance_size: size_of::<Node>(),
     compute_size: None,
@@ -104,136 +104,155 @@ impl VirtualMachine for BenchVM {
     }
 }
 
-fn make_node(
-    thread: &Thread<BenchVM>,
-    left: VMKitObject,
-    right: VMKitObject,
-    i: usize,
-    j: usize,
-) -> VMKitObject {
-    let node = MemoryManager::allocate(
-        thread,
-        size_of::<Node>(),
-        16,
-        &METADATA,
-        AllocationSemantics::Default,
-    );
-    unsafe {
-        node.set_field_object_no_write_barrier::<BenchVM, false>(offset_of!(Node, left), left);
-        node.set_field_object_no_write_barrier::<BenchVM, false>(offset_of!(Node, right), right);
-        node.set_field_usize::<BenchVM>(offset_of!(Node, i), i);
-        node.set_field_usize::<BenchVM>(offset_of!(Node, j), j);
-    }
-    node
-}
-
-fn tree_size(i: usize) -> usize {
-    (1 << (i + 1)) - 1
-}
-
-fn num_iters(stretch_tree_depth: usize, i: usize) -> usize {
-    4 + tree_size(stretch_tree_depth) / tree_size(i)
-}
-
-fn populate(thread: &Thread<BenchVM>, depth: usize, this_node: VMKitObject) {
-    let mut depth = depth;
-    if depth <= 0 {
-        return;
-    }
-    depth -= 1;
-    this_node.set_field_object::<BenchVM, false>(
-        offset_of!(Node, left),
-        make_node(thread, VMKitObject::NULL, VMKitObject::NULL, 0, 0),
-    );
-    let left = this_node.get_field_object::<BenchVM, false>(offset_of!(Node, left));
-    this_node.set_field_object::<BenchVM, false>(
-        offset_of!(Node, right),
-        make_node(thread, VMKitObject::NULL, VMKitObject::NULL, 0, 0),
-    );
-    populate(
-        thread,
-        depth,
-        this_node.get_field_object::<BenchVM, false>(offset_of!(Node, left)),
-    );
-    populate(
-        thread,
-        depth,
-        this_node.get_field_object::<BenchVM, false>(offset_of!(Node, right)),
-    );
-}
-
-fn make_tree(thread: &Thread<BenchVM>, depth: usize) -> VMKitObject {
-    if depth <= 0 {
-        return make_node(thread, VMKitObject::NULL, VMKitObject::NULL, 0, 0);
-    }
-    let left = make_tree(thread, depth - 1);
-    let right = make_tree(thread, depth - 1);
-    make_node(thread, left, right, 0, 0)
-}
-
-fn time_construction(thread: &Thread<BenchVM>, stretch_tree_depth: usize, depth: usize) {
-    let i_num_iters = num_iters(stretch_tree_depth, depth);
-    println!("creating {} trees of depth {}", i_num_iters, depth);
-    let start = std::time::Instant::now();
-    let mut i = 0;
-    while i < i_num_iters {
-        let temp_tree = make_node(thread, VMKitObject::NULL, VMKitObject::NULL, 0, 0);
-        populate(thread, depth, temp_tree);
-        i += 1;
-    }
-    let finish = std::time::Instant::now();
-    println!("\tTop down construction took: {:04}ms", finish.duration_since(start).as_micros() as f64 / 1000.0);
-
-    let duration = start.elapsed();
-    println!("time_construction: {:?}", duration);
-}
+#[repr(transparent)]
+#[derive(Clone, Copy, PartialEq, Eq)]
+struct NodeRef(VMKitObject);
+
+impl NodeRef {
+    pub fn new(thread: &Thread<BenchVM>, left: NodeRef, right: NodeRef) -> Self {
+        let node = MemoryManager::<BenchVM>::allocate(
+            thread,
+            size_of::<Node>(),
+            16,
+            &METADATA,
+            AllocationSemantics::Default,
+        );
+        unsafe {
+            let node = node.as_address().as_mut_ref::<Node>();
+            node.left = left;
+            node.right = right;
+        }
+        Self(node)
+    }
+
+    pub fn left(&self) -> NodeRef {
+        unsafe {
+            let node = self.0.as_address().as_ref::<Node>();
+            node.left
+        }
+    }
+
+    pub fn right(&self) -> NodeRef {
+        unsafe {
+            let node = self.0.as_address().as_ref::<Node>();
+            node.right
+        }
+    }
+
+    pub fn null() -> Self {
+        Self(VMKitObject::NULL)
+    }
+
+    pub fn item_check(&self) -> usize {
+        if self.left() == NodeRef::null() {
+            1
+        } else {
+            1 + self.left().item_check() + self.right().item_check()
+        }
+    }
+
+    pub fn leaf(thread: &Thread<BenchVM>) -> Self {
+        Self::new(thread, NodeRef::null(), NodeRef::null())
+    }
+}
+
+fn bottom_up_tree(thread: &Thread<BenchVM>, depth: usize) -> NodeRef {
+    if thread.take_yieldpoint() != 0 {
+        Thread::<BenchVM>::yieldpoint(0, Address::ZERO);
+    }
+    if depth > 0 {
+        NodeRef::new(
+            thread,
+            bottom_up_tree(thread, depth - 1),
+            bottom_up_tree(thread, depth - 1),
+        )
+    } else {
+        NodeRef::leaf(thread)
+    }
+}
+
+const MIN_DEPTH: usize = 4;
 
 fn main() {
     env_logger::init();
-    let mut options = MMTKBuilder::new();
-    options.options.plan.set(PlanSelector::StickyImmix);
-    options.options.gc_trigger.set(mmtk::util::options::GCTriggerSelector::DynamicHeapSize(64*1024*1024, 8*1024*1024*1024));
-    let vm = BenchVM {
-        vmkit: VMKit::new(options)
-    };
-
-    VM.set(vm).unwrap_or_else(|_| panic!("Failed to set VM"));
+    let nthreads = std::env::var("THREADS")
+        .unwrap_or("4".to_string())
+        .parse::<usize>()
+        .unwrap();
+    let mut builder = MMTKBuilder::new();
+    builder.options.plan.set(PlanSelector::Immix);
+    builder.options.threads.set(nthreads);
+    builder.options.thread_affinity.set(AffinityKind::RoundRobin(vec![0, 1, 2, 3, 4, 5, 6, 7, 8]));
+    builder.options.gc_trigger.set(mmtk::util::options::GCTriggerSelector::DynamicHeapSize(1*1024*1024*1024, 3*1024*1024*1024));
+    VM.set(BenchVM {
+        vmkit: VMKit::new(builder),
+    })
+    .unwrap_or_else(|_| panic!());
 
     Thread::<BenchVM>::main(ThreadBenchContext, || {
-        let tls = Thread::<BenchVM>::current();
-        let depth = std::env::var("DEPTH").unwrap_or("18".to_string()).parse::<usize>().unwrap();
-        let long_lived_tree_depth = depth;
-        let stretch_tree_depth = depth + 1;
-        println!("stretching memory with tree of depth: {}", stretch_tree_depth);
+        let thread = Thread::<BenchVM>::current();
         let start = std::time::Instant::now();
-        make_tree(tls, stretch_tree_depth as _);
-
-        println!("creating long-lived tree of depth: {}", long_lived_tree_depth);
-        let long_lived_tree = make_node(tls, VMKitObject::NULL, VMKitObject::NULL, 0, 0);
-        populate(tls, long_lived_tree_depth as _, long_lived_tree);
-
-        let mut d = 4;
-        while d <= depth {
-            time_construction(tls, stretch_tree_depth, d);
-            d += 2;
-        }
-
-        let finish = std::time::Instant::now();
-        println!("total execution time: {:04}ms", finish.duration_since(start).as_micros() as f64 / 1000.0);
+        let n = std::env::var("DEPTH")
+            .unwrap_or("18".to_string())
+            .parse::<usize>()
+            .unwrap();
+        let max_depth = if n < MIN_DEPTH + 2 { MIN_DEPTH + 2 } else { n };
+        let stretch_depth = max_depth + 1;
+
+        println!("stretch tree of depth {stretch_depth}");
+        let _ = bottom_up_tree(&thread, stretch_depth);
+        let duration = start.elapsed();
+        println!("time: {duration:?}");
+
+        let results = Arc::new(Monitor::new(vec![
+            RefCell::new(String::new());
+            (max_depth - MIN_DEPTH) / 2 + 1
+        ]));
+        let mut handles = Vec::new();
+        for d in (MIN_DEPTH..=max_depth).step_by(2) {
+            let depth = d;
+            let thread = Thread::<BenchVM>::for_mutator(ThreadBenchContext);
+            let results = results.clone();
+            let handle = thread.start(move || {
+                let thread = Thread::<BenchVM>::current();
+                let mut check = 0;
+                let iterations = 1 << (max_depth - depth + MIN_DEPTH);
+                for _ in 1..=iterations {
+                    if thread.take_yieldpoint() != 0 {
+                        Thread::<BenchVM>::yieldpoint(0, Address::ZERO);
+                    }
+                    let tree_node = bottom_up_tree(&thread, depth);
+                    check += tree_node.item_check();
+                }
+                *results.lock_with_handshake::<BenchVM>()[(depth - MIN_DEPTH) / 2].borrow_mut() =
+                    format!("{iterations}\t trees of depth {depth}\t check: {check}");
+            });
+            handles.push(handle);
+        }
+
+        parked_scope::<(), BenchVM>(|| {
+            while let Some(handle) = handles.pop() {
+                handle.join().unwrap();
+            }
+        });
+
+        for result in results.lock_with_handshake::<BenchVM>().iter() {
+            println!("{}", result.borrow());
+        }
+
+        println!(
+            "long lived tree of depth {max_depth}\t check: {}",
+            bottom_up_tree(&thread, max_depth).item_check()
+        );
+        let duration = start.elapsed();
+        println!("time: {duration:?}");
     });
 }
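Note: this rewrite replaces the old GCBench-style stretch/populate workload with the classic binary-trees benchmark: worker threads build trees bottom-up and verify them with item_check, and a worker at depth `depth` runs `1 << (max_depth - depth + MIN_DEPTH)` iterations, so shallower trees are built exponentially more often. A minimal single-threaded sketch of the same workload in plain Rust, with Box standing in for the GC-managed NodeRef (illustrative only, not the vmkit API):

    struct Node {
        left: Option<Box<Node>>,
        right: Option<Box<Node>>,
    }

    fn bottom_up_tree(depth: usize) -> Box<Node> {
        if depth > 0 {
            Box::new(Node {
                left: Some(bottom_up_tree(depth - 1)),
                right: Some(bottom_up_tree(depth - 1)),
            })
        } else {
            Box::new(Node { left: None, right: None })
        }
    }

    fn item_check(node: &Node) -> usize {
        match (&node.left, &node.right) {
            (Some(l), Some(r)) => 1 + item_check(l) + item_check(r),
            _ => 1, // a leaf counts as one node
        }
    }

    fn main() {
        const MIN_DEPTH: usize = 4;
        let max_depth = 18;
        for depth in (MIN_DEPTH..=max_depth).step_by(2) {
            let iterations = 1usize << (max_depth - depth + MIN_DEPTH);
            let mut check = 0;
            for _ in 0..iterations {
                check += item_check(&bottom_up_tree(depth));
            }
            println!("{iterations}\t trees of depth {depth}\t check: {check}");
        }
    }

The vmkit version differs mainly in that nodes come from MemoryManager::allocate, hot loops poll take_yieldpoint() so the GC can stop the thread at a safepoint, and the main thread joins the workers inside parked_scope so it does not hold up a GC handshake while blocked in join().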

View file

@@ -10,7 +10,7 @@ use crate::{
 use easy_bitfield::{AtomicBitfieldContainer, ToBitfield};
 use mmtk::{
     util::{
-        alloc::{AllocatorSelector, BumpAllocator, ImmixAllocator},
+        alloc::{AllocatorSelector, BumpAllocator, FreeListAllocator, ImmixAllocator},
         metadata::side_metadata::GLOBAL_SIDE_METADATA_BASE_ADDRESS,
         VMMutatorThread,
     },
@@ -65,6 +65,49 @@ impl<VM: VirtualMachine> MemoryManager<VM> {
         )
     }
 
+    /// General purpose allocation function. Always goes to `mmtk::memory_manager::alloc`
+    /// and does not attempt to perform fast-path allocation. This is useful for debugging
+    /// or when your JIT/AOT compiler is not yet able to produce fast-path allocation.
+    #[inline(never)]
+    pub extern "C-unwind" fn allocate_out_of_line(
+        thread: &Thread<VM>,
+        mut size: usize,
+        alignment: usize,
+        metadata: VM::Metadata,
+        mut semantics: AllocationSemantics,
+    ) -> VMKitObject {
+        size += size_of::<HeapObjectHeader<VM>>();
+        if semantics == AllocationSemantics::Default
+            && size >= thread.max_non_los_default_alloc_bytes()
+        {
+            semantics = AllocationSemantics::Los;
+        }
+
+        match semantics {
+            AllocationSemantics::Los => Self::allocate_los(thread, size, alignment, metadata),
+            AllocationSemantics::NonMoving => {
+                Self::allocate_nonmoving(thread, size, alignment, metadata)
+            }
+            AllocationSemantics::Immortal => {
+                Self::allocate_immortal(thread, size, alignment, metadata)
+            }
+            _ => unsafe {
+                Self::flush_tlab(thread);
+                let object_start =
+                    mmtk::memory_manager::alloc(thread.mutator(), size, alignment, 0, semantics);
+                object_start.store(HeapObjectHeader::<VM> {
+                    metadata: AtomicBitfieldContainer::new(metadata.to_bitfield()),
+                    marker: PhantomData,
+                });
+                let object = VMKitObject::from_address(object_start + OBJECT_REF_OFFSET);
+                Self::set_vo_bit(object);
+                Self::refill_tlab(thread);
+                object
+            },
+        }
+    }
+
     /// Allocate object with `size`, `alignment`, and `metadata` with specified `semantics`.
     ///
     /// This function is a fast-path for allocation. If you allocate with `Default` semantics,
@@ -78,41 +121,46 @@ impl<VM: VirtualMachine> MemoryManager<VM> {
         metadata: VM::Metadata,
         mut semantics: AllocationSemantics,
     ) -> VMKitObject {
+        let orig_size = size;
+        let orig_semantics = semantics;
         size += size_of::<HeapObjectHeader<VM>>();
         if semantics == AllocationSemantics::Default
             && size >= thread.max_non_los_default_alloc_bytes()
         {
             semantics = AllocationSemantics::Los;
         }
 
         // all allocator functions other than this actually invoke `flush_tlab` due to the fact
         // that GC can happen inside them.
         match semantics {
-            AllocationSemantics::Los => Self::allocate_los(thread, size, alignment, metadata),
-            AllocationSemantics::NonMoving => {
-                Self::allocate_nonmoving(thread, size, alignment, metadata)
-            }
-            AllocationSemantics::Immortal => {
-                Self::allocate_immortal(thread, size, alignment, metadata)
-            }
-            _ => unsafe {
-                let tlab = thread.tlab.get().as_mut().unwrap();
-                let object_start = tlab.allocate(size, alignment);
-                if !object_start.is_zero() {
-                    object_start.store(HeapObjectHeader::<VM> {
-                        metadata: AtomicBitfieldContainer::new(metadata.to_bitfield()),
-                        marker: PhantomData,
-                    });
-                    let object = VMKitObject::from_address(object_start + OBJECT_REF_OFFSET);
-                    Self::set_vo_bit(object);
-                    return object;
-                }
-                Self::allocate_slow(thread, size, alignment, metadata, semantics)
+            AllocationSemantics::Default => match thread.alloc_fastpath() {
+                AllocFastPath::TLAB => unsafe {
+                    let tlab = thread.tlab.get().as_mut().unwrap();
+                    let object_start = tlab.allocate(size, alignment);
+                    if !object_start.is_zero() {
+                        object_start.store(HeapObjectHeader::<VM> {
+                            metadata: AtomicBitfieldContainer::new(metadata.to_bitfield()),
+                            marker: PhantomData,
+                        });
+                        let object = VMKitObject::from_address(object_start + OBJECT_REF_OFFSET);
+                        Self::set_vo_bit(object);
+                        return object;
+                    }
+
+                    return Self::allocate_slow(thread, size, alignment, metadata, semantics);
+                },
+                _ => ()
             },
+            _ => ()
         }
+
+        Self::allocate_out_of_line(thread, orig_size, alignment, metadata, orig_semantics)
     }
 
-    pub extern "C-unwind" fn allocate_los(
+    #[inline(never)]
+    extern "C-unwind" fn allocate_los(
         thread: &Thread<VM>,
         size: usize,
         alignment: usize,
@@ -142,7 +190,8 @@ impl<VM: VirtualMachine> MemoryManager<VM> {
         }
     }
 
-    pub extern "C-unwind" fn allocate_nonmoving(
+    #[inline(never)]
+    extern "C-unwind" fn allocate_nonmoving(
         thread: &Thread<VM>,
         size: usize,
         alignment: usize,
@@ -171,7 +220,8 @@ impl<VM: VirtualMachine> MemoryManager<VM> {
         }
     }
 
-    pub extern "C-unwind" fn allocate_immortal(
+    #[inline(never)]
+    extern "C-unwind" fn allocate_immortal(
         thread: &Thread<VM>,
         size: usize,
         alignment: usize,
@@ -227,7 +277,7 @@ impl<VM: VirtualMachine> MemoryManager<VM> {
         }
     }
 
-    #[inline(never)]
+    #[inline(always)]
     pub extern "C-unwind" fn set_vo_bit(object: VMKitObject) {
         #[cfg(feature = "cooperative")]
         unsafe {
@@ -400,3 +450,10 @@ impl<VM: VirtualMachine> MemoryManager<VM> {
         Self::object_reference_write_post(thread, src, slot, target);
     }
 }
+
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
+pub enum AllocFastPath {
+    TLAB,
+    FreeList,
+    None,
+}

View file

@@ -10,7 +10,7 @@ pub struct VMKitActivePlan<VM: VirtualMachine>(PhantomData<VM>);
 
 impl<VM: VirtualMachine> ActivePlan<MemoryManager<VM>> for VMKitActivePlan<VM> {
     fn is_mutator(tls: mmtk::util::VMThread) -> bool {
-        let x = Thread::<VM>::from_vm_thread(tls);
         Thread::<VM>::from_vm_thread(tls).active_mutator_context()
     }

View file

@@ -1,3 +1,4 @@
+#![allow(dead_code, unused_imports, unused_variables)]
 //! Address space layout randomization for MMTk.
 
 use mmtk::util::{

View file

@@ -117,6 +117,7 @@ impl<VM: VirtualMachine> Scanning<MemoryManager<VM>> for VMKitScanning<VM> {
         VM::scan_vm_specific_roots(tls, factory);
     }
 
+    #[inline(always)]
     fn support_slot_enqueuing(
         _tls: mmtk::util::VMWorkerThread,
         object: mmtk::util::ObjectReference,

View file

@@ -139,13 +139,19 @@ impl VMKitObject {
         let overhead = self.hashcode_overhead::<VM, false>();
 
         if metadata.instance_size != 0 {
-            raw_align_up(metadata.instance_size + size_of::<HeapObjectHeader<VM>>(), align_of::<usize>()) + overhead
+            raw_align_up(
+                metadata.instance_size + size_of::<HeapObjectHeader<VM>>(),
+                align_of::<usize>(),
+            ) + overhead
         } else {
             let Some(compute_size) = metadata.compute_size else {
                 panic!("compute_size is not set for object at {}", self.0);
             };
-            raw_align_up(compute_size(self) + size_of::<HeapObjectHeader<VM>>(), align_of::<usize>()) + overhead
+            raw_align_up(
+                compute_size(self) + size_of::<HeapObjectHeader<VM>>(),
+                align_of::<usize>(),
+            ) + overhead
         }
     }
@@ -258,7 +264,7 @@ impl VMKitObject {
             "attempt to access field out of bounds"
         );
         let ordering = if !VOLATILE {
-            atomic::Ordering::Relaxed
+            return self.as_address().add(offset).load::<T>();
         } else {
             atomic::Ordering::SeqCst
         };
@@ -282,7 +288,8 @@ impl VMKitObject {
         );
         unsafe {
             let ordering = if !VOLATILE {
-                atomic::Ordering::Relaxed
+                self.as_address().add(offset).store(value);
+                return;
             } else {
                 atomic::Ordering::SeqCst
             };

View file

@@ -11,14 +11,15 @@ use std::{
 
 use atomic::Atomic;
 use mmtk::{
-    util::{Address, VMMutatorThread, VMThread},
+    util::{alloc::AllocatorSelector, Address, VMMutatorThread, VMThread},
     vm::RootsWorkFactory,
-    BarrierSelector, Mutator,
+    AllocationSemantics, BarrierSelector, Mutator,
 };
 
 use crate::{
     mm::{
-        conservative_roots::ConservativeRoots, stack_bounds::StackBounds, tlab::TLAB, MemoryManager,
+        conservative_roots::ConservativeRoots, stack_bounds::StackBounds, tlab::TLAB,
+        AllocFastPath, MemoryManager,
     },
     object_model::compression::CompressedOps,
     sync::{Monitor, MonitorGuard},
@@ -156,6 +157,7 @@ pub struct Thread<VM: VirtualMachine> {
     pub tlab: UnsafeCell<TLAB>,
     max_non_los_default_alloc_bytes: Cell<usize>,
     barrier: Cell<BarrierSelector>,
+    alloc_fastpath: Cell<AllocFastPath>,
     mmtk_mutator: UnsafeCell<MaybeUninit<Mutator<MemoryManager<VM>>>>,
     has_collector_context: AtomicBool,
     exec_status: Atomic<ThreadState>,
@@ -223,6 +225,7 @@ impl<VM: VirtualMachine> Thread<VM> {
             tlab: UnsafeCell::new(TLAB::new()),
             stack_bounds: OnceCell::new(),
             barrier: Cell::new(BarrierSelector::NoBarrier),
+            alloc_fastpath: Cell::new(AllocFastPath::None),
             max_non_los_default_alloc_bytes: Cell::new(0),
             take_yieldpoint: AtomicI32::new(0),
             context: ctx.unwrap_or_else(|| VM::ThreadContext::new(collector_context)),
@@ -305,6 +308,10 @@ impl<VM: VirtualMachine> Thread<VM> {
         self.barrier.get()
     }
 
+    pub fn alloc_fastpath(&self) -> AllocFastPath {
+        self.alloc_fastpath.get()
+    }
+
     pub fn max_non_los_default_alloc_bytes(&self) -> usize {
         self.max_non_los_default_alloc_bytes.get()
     }
@@ -321,6 +328,23 @@ impl<VM: VirtualMachine> Thread<VM> {
         self.max_non_los_default_alloc_bytes
             .set(constraints.max_non_los_default_alloc_bytes);
         self.barrier.set(constraints.barrier);
+
+        let selector = mmtk::memory_manager::get_allocator_mapping(
+            &VM::get().vmkit().mmtk,
+            AllocationSemantics::Default,
+        );
+
+        match selector {
+            AllocatorSelector::BumpPointer(_) | AllocatorSelector::Immix(_) => {
+                self.alloc_fastpath.set(AllocFastPath::TLAB);
+            }
+            AllocatorSelector::FreeList(_) => {
+                self.alloc_fastpath.set(AllocFastPath::FreeList);
+            }
+            _ => self.alloc_fastpath.set(AllocFastPath::None),
+        }
+
         self.stack_bounds
             .set(StackBounds::current_thread_stack_bounds())
             .unwrap();
@@ -542,6 +566,10 @@ impl<VM: VirtualMachine> Thread<VM> {
         let mut lock = self.monitor().lock_no_handshake();
         self.is_blocking.store(true, Ordering::Relaxed);
 
+        log::trace!("Thread #{} in check_block_no_save_context", self.thread_id);
+
+        let mut had_really_blocked = false;
+
         loop {
             // deal with block requests
             self.acknowledge_block_requests();
@@ -549,11 +577,26 @@ impl<VM: VirtualMachine> Thread<VM> {
             if !self.is_blocked() {
                 break;
             }
+
+            had_really_blocked = true;
+            log::trace!(
+                "Thread #{} is really blocked with status {:?}",
+                self.thread_id,
+                self.get_exec_status()
+            );
+
             // what if a GC request comes while we're here for a suspend()
             // request?
             // answer: we get awoken, reloop, and acknowledge the GC block
             // request.
             lock.wait_no_handshake();
+            log::trace!(
+                "Thread #{} has awoken; checking if we're still blocked",
+                self.thread_id
+            );
+        }
+
+        if had_really_blocked {
+            log::trace!("Thread #{} is unblocking", self.thread_id);
         }
 
         // SAFETY: We are holding the monitor lock.
@@ -689,22 +732,43 @@ impl<VM: VirtualMachine> Thread<VM> {
         let mut lock = self.monitor.lock_no_handshake();
         let token = A::request_block(self);
+        log::trace!(
+            "Thread #{} is requesting that thread #{} blocks",
+            current_thread::<VM>().thread_id,
+            self.thread_id
+        );
         if current_thread::<VM>().thread_id == self.thread_id {
+            log::trace!("Thread #{} is blocking itself", self.thread_id);
             self.check_block();
             result = self.get_exec_status();
         } else {
             if self.is_about_to_terminate() {
+                log::trace!(
+                    "Thread #{} is about to terminate, returning as if blocked in TERMINATED state",
+                    self.thread_id
+                );
                 result = ThreadState::Terminated;
             } else {
                 self.take_yieldpoint.store(1, Ordering::Relaxed);
                 let new_state = self.set_blocked_exec_status();
                 result = new_state;
+                log::trace!(
+                    "Thread #{} is blocking thread #{} which is in state {:?}",
+                    current_thread::<VM>().thread_id,
+                    self.thread_id,
+                    new_state
+                );
                 self.monitor.notify_all();
 
                 if new_state == ThreadState::InManagedToBlock {
                     if !asynchronous {
+                        log::trace!(
+                            "Thread #{} is waiting for thread #{} to block",
+                            current_thread::<VM>().thread_id,
+                            self.thread_id
+                        );
                         while A::has_block_request_with_token(self, token)
                             && !A::is_blocked(self)
                             && !self.is_about_to_terminate()
@@ -874,7 +938,7 @@ impl<VM: VirtualMachine> Thread<VM> {
     pub fn mutator(&self) -> &'static mut Mutator<MemoryManager<VM>> {
         unsafe {
-            assert!(Thread::<VM>::current().thread_id == self.thread_id);
+            debug_assert!(Thread::<VM>::current().thread_id == self.thread_id);
             self.mutator_unchecked()
         }
     }
@@ -1042,6 +1106,8 @@ impl<VM: VirtualMachine> Thread<VM> {
     /// - `fp`: The frame pointer of the service method that called this method
     ///
     /// Exposed as `extern "C-unwind"` to allow directly invoking it from JIT/AOT code.
+    #[inline(never)]
+    #[cold]
     pub extern "C-unwind" fn yieldpoint(where_from: i32, fp: Address) {
         let thread = Thread::<VM>::current();
         let _was_at_yieldpoint = thread.at_yieldpoint.load(atomic::Ordering::Relaxed);
@@ -1094,7 +1160,7 @@ thread_local! {
 pub fn current_thread<VM: VirtualMachine>() -> &'static Thread<VM> {
     let addr = CURRENT_THREAD.with(|t| *t.borrow());
-    assert!(!addr.is_zero());
+    debug_assert!(!addr.is_zero());
     unsafe { addr.as_ref() }
 }
@@ -1240,6 +1306,7 @@ impl<VM: VirtualMachine> ThreadManager<VM> {
     /// Fixpoint until there are no threads that we haven't blocked. Fixpoint is needed to
     /// catch the (unlikely) case that a thread spawns another thread while we are waiting.
     pub fn block_all_mutators_for_gc(&self) -> Vec<Arc<Thread<VM>>> {
+
         let mut handshake_threads = Vec::with_capacity(4);
         loop {
             let lock = self.inner.lock_no_handshake();
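Note: the added trace logging narrates the block/unblock handshake that `block_all_mutators_for_gc` relies on: the requester sets the target's yieldpoint flag, the target hits a yieldpoint (now `#[inline(never)]`/`#[cold]`, keeping it off the hot path), acknowledges the block request, and the requester waits until the target is really blocked. A self-contained sketch of that handshake using std primitives instead of vmkit's Monitor (illustrative only; the real protocol also involves block adapters, tokens, and termination states):

    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::{Arc, Condvar, Mutex};

    struct Handshake {
        blocked: Mutex<bool>,
        cv: Condvar,
        take_yieldpoint: AtomicBool,
    }

    impl Handshake {
        // Requester side: ask the mutator to block, then wait until it has.
        fn block(&self) {
            self.take_yieldpoint.store(true, Ordering::Relaxed);
            let mut blocked = self.blocked.lock().unwrap();
            while !*blocked {
                blocked = self.cv.wait(blocked).unwrap();
            }
        }

        // Mutator side: called from a yieldpoint poll.
        fn check_block(&self) {
            let mut blocked = self.blocked.lock().unwrap();
            *blocked = true;
            self.cv.notify_all();
            // The full protocol would now wait here until unblocked.
        }
    }

    fn main() {
        let state = Arc::new(Handshake {
            blocked: Mutex::new(false),
            cv: Condvar::new(),
            take_yieldpoint: AtomicBool::new(false),
        });
        let s = state.clone();
        let mutator = std::thread::spawn(move || {
            // The mutator polls its yieldpoint flag between units of work.
            while !s.take_yieldpoint.load(Ordering::Relaxed) {
                std::hint::spin_loop();
            }
            s.check_block();
        });
        state.block(); // request the block and wait for acknowledgement
        mutator.join().unwrap();
    }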