Compare commits

...

10 commits

Author  SHA1  Message  Date
playX18  f94c6c5545  refactors  2025-03-05 08:44:24 +07:00
playX18  6d565ad836  proc-macro  2025-02-20 20:46:10 +07:00
playX18  32662e9fc8  add VM::ALWAYS_TRACE  2025-02-15 19:55:06 +07:00
playX18  5518088601  Object header is now 32-bit on 32-bit targets  2025-02-15 19:51:12 +07:00
playX18  228513a638  cleanup  2025-02-15 19:19:09 +07:00
playX18  cd0e1344e4  fix hashstate  2025-02-15 12:56:49 +07:00
playX18  6e294ecaf9  make specs work  2025-02-15 12:12:25 +07:00
playX18  5cfc38b6d6  allow VMs to override forwarding; address based hashing is optional  2025-02-15 11:58:00 +07:00
playX18  8a7828299a  feat: finalizers implemented by VMKit  2025-02-14 14:22:29 +07:00
playX18  cdab71fda1  Fix alignment  2025-02-14 14:03:47 +07:00
31 changed files with 2118 additions and 405 deletions

.editorconfig Normal file

Cargo.lock generated

@@ -150,6 +150,12 @@ version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "cfgenius"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4ae118db0db0e2137671e9b916de8af5d3be65a5fb1ad3a9c7925c38b6644cfb"
[[package]]
name = "clap"
version = "4.5.28"
@@ -829,6 +835,12 @@ dependencies = [
"windows-targets",
]
[[package]]
name = "paste"
version = "1.0.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a"
[[package]]
name = "percent-encoding"
version = "2.3.1"
@@ -1223,6 +1235,7 @@ dependencies = [
"atomic",
"bytemuck",
"cfg-if",
"cfgenius",
"clap",
"easy-bitfield",
"env_logger",
@@ -1231,8 +1244,10 @@ dependencies = [
"log",
"mmtk",
"parking_lot",
"paste",
"rand",
"sysinfo 0.33.1",
"vmkit-proc",
"winapi",
]
@@ -1243,6 +1258,7 @@ dependencies = [
"proc-macro2",
"quote",
"syn 2.0.98",
"synstructure",
]
[[package]]


@@ -7,7 +7,7 @@ edition = "2021"
proc-macro2 = "1.0.93"
quote = "1.0.38"
syn = { version = "2.0.98", features = ["full"] }
synstructure = { version = "0.13.1" }
[lib]
proc-macro = true


@@ -0,0 +1,6 @@
//! Bytecode generation DSL
//!
//! This module provides a DSL for generating bytecode instructions.
pub mod decl;
pub mod fits;


@@ -0,0 +1,62 @@
use proc_macro2::TokenStream;
use quote::quote;
use super::fits;
/// A single opcode argument
pub struct Argument {
pub name: syn::Ident,
pub index: usize,
pub optional: bool,
pub ty: syn::Type
}
impl Argument {
pub fn new(name: syn::Ident, index: usize, optional: bool, ty: syn::Type) -> Self {
Self {
name,
index,
optional,
ty
}
}
pub fn field(&self) -> TokenStream {
let name = &self.name;
let ty = &self.ty;
quote! {
#name: #ty
}
}
pub fn create_param(&self) -> TokenStream {
let name = &self.name;
let ty = &self.ty;
if self.optional {
quote! {
#name: Option<#ty>
}
} else {
quote! {
#name: #ty
}
}
}
pub fn field_name(&self) -> TokenStream {
let name = &self.name;
quote! {
#name
}
}
pub fn fits_check(&self, size: usize) -> TokenStream {
fits::check(size, self.name.clone(), self.ty.clone())
}
pub fn fits_write(&self, size: usize) -> TokenStream {
fits::write(size, self.name.clone(), self.ty.clone())
}
}


@@ -0,0 +1,25 @@
use proc_macro2::{Span, TokenStream};
use quote::quote;
use syn::Ident;
pub fn convert(size: usize, name: syn::Ident, typ: syn::Type) -> TokenStream {
let sz = Ident::new(&format!("S{size}"), Span::call_site());
quote! {
Fits::<#typ, #sz>::convert(#name)
}
}
pub fn check(size: usize, name: syn::Ident, typ: syn::Type) -> TokenStream {
let sz = Ident::new(&format!("S{size}"), Span::call_site());
quote! {
Fits::<#typ, #sz>::check(#name)
}
}
pub fn write(size: usize, name: syn::Ident, typ: syn::Type) -> TokenStream {
let sz = Ident::new(&format!("S{size}"), Span::call_site());
let cvt = convert(size, name, typ);
quote! {
generator.write(#cvt)
}
}
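The generated calls assume a Fits<T, S> helper and size markers such as S8/S16 in scope at the expansion site. A rough sketch of that shape, inferred only from the generated tokens (all names here are assumptions, not part of this change):

pub struct S8;
pub struct S16;

// Checks whether a value fits into the operand width `S` and narrows it.
pub struct Fits<T, S>(core::marker::PhantomData<(T, S)>);

impl Fits<u32, S8> {
    pub fn check(value: u32) -> bool {
        value <= u8::MAX as u32
    }
    pub fn convert(value: u32) -> u8 {
        debug_assert!(Self::check(value));
        value as u8
    }
}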


@@ -1,6 +1,598 @@
-#[proc_macro]
-pub fn define_options(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
-let _input = proc_macro2::TokenStream::from(input);
-proc_macro::TokenStream::new()
-}
//! Proc macro to derive GCMetadata implementation for types.
extern crate proc_macro;
use proc_macro2::Span;
use quote::quote;
use std::collections::HashSet;
use synstructure::{decl_derive, AddBounds};
struct ArrayLike {
pub length_field: Option<syn::Ident>,
/// Getter for length field.
pub length_getter: Option<syn::Expr>,
/// Setter for length field.
#[allow(dead_code)]
pub length_setter: Option<syn::Expr>,
pub element_type: syn::Type,
pub data_field: Option<syn::Ident>,
pub data_getter: Option<syn::Expr>,
pub trace: bool,
}
impl Default for ArrayLike {
fn default() -> Self {
Self {
length_field: None,
length_getter: None,
length_setter: None,
element_type: syn::parse_str("u8").unwrap(),
data_field: None,
data_getter: None,
trace: true,
}
}
}
fn find_arraylike(attrs: &[syn::Attribute]) -> syn::Result<Option<ArrayLike>> {
let mut length_field = None;
let mut length_getter = None;
let mut length_setter = None;
let mut element_type = None;
let mut data_field = None;
let mut data_getter = None;
let mut trace = None;
let mut has = false;
for attr in attrs.iter() {
if attr.meta.path().is_ident("arraylike") {
has = true;
attr.parse_nested_meta(|meta| {
if meta.path.is_ident("trace") {
let value = meta.value()?.parse::<syn::LitBool>()?;
if value.value() {
trace = Some(true);
} else {
trace = Some(false);
}
}
if meta.path.is_ident("len") {
if length_field.is_some() || length_getter.is_some() || length_setter.is_some()
{
return Err(meta.error("multiple length fields found"));
}
// parse `len = field` or `len(getter = field)` or `len(setter = field)`
if meta.input.peek(syn::Token![=]) {
meta.input.parse::<syn::Token![=]>()?;
let field = meta.input.parse::<syn::Ident>()?;
if length_field.is_some() {
return Err(meta.error("multiple length fields found"));
}
length_field = Some(field);
} else if meta.input.peek(syn::token::Paren) {
meta.parse_nested_meta(|meta| {
if meta.path.is_ident("get") {
if length_getter.is_some() {
return Err(meta.error("multiple length getters found"));
}
let field = meta.input.parse::<syn::Expr>()?;
length_getter = Some(field);
} else if meta.path.is_ident("set") {
if length_setter.is_some() {
return Err(meta.error("multiple length setters found"));
}
let field = meta.input.parse::<syn::Expr>()?;
length_setter = Some(field);
} else {
return Err(meta.error("unknown length field"));
}
Ok(())
})?;
} else {
return Err(meta.error("unknown length attribute for #[arraylike]"));
}
}
if meta.path.is_ident("data") {
if data_field.is_some() || data_getter.is_some() {
return Err(meta.error("multiple data fields found"));
}
if meta.input.peek(syn::token::Paren) {
meta.parse_nested_meta(|meta| {
if meta.path.is_ident("get") {
if data_getter.is_some() {
return Err(meta.error("multiple data getters found"));
}
let field = meta.input.parse::<syn::Expr>()?;
data_getter = Some(field);
} else {
return Err(meta.error("unknown data field"));
}
Ok(())
})?;
} else {
let field = meta.input.parse::<syn::Ident>()?;
data_field = Some(field);
}
}
if meta.path.is_ident("data_type") {
if element_type.is_some() {
return Err(meta.error("multiple data types found"));
}
meta.input.parse::<syn::Token![=]>()?;
let field = meta.input.parse::<syn::Type>()?;
element_type = Some(field);
}
Ok(())
})?;
}
}
if has {
Ok(Some(ArrayLike {
length_field,
length_getter,
length_setter,
element_type: element_type
.ok_or_else(|| syn::Error::new(Span::call_site(), "missing data_type"))?,
data_field,
data_getter,
trace: trace.unwrap_or(true),
}))
} else {
Ok(None)
}
}
enum ObjectAlignment {
Auto,
Const(syn::Expr),
#[allow(dead_code)]
Compute(syn::Expr),
}
enum ObjectSize {
Auto,
Size(syn::Expr),
Compute(syn::Expr),
}
enum ObjectTrace {
NoTrace,
Auto(bool),
Trace(bool, syn::Expr),
}
struct GCMetadata {
vm: syn::Type,
alignment: ObjectAlignment,
size: ObjectSize,
trace: ObjectTrace,
}
fn find_gcmetadata(attrs: &[syn::Attribute]) -> syn::Result<GCMetadata> {
let mut vm = None;
let mut alignment = None;
let mut size = None;
let mut trace = None;
let mut has = false;
for attr in attrs.iter() {
if attr.meta.path().is_ident("gcmetadata") {
has = true;
attr.parse_nested_meta(|meta| {
if meta.path.is_ident("vm") {
if vm.is_some() {
return Err(meta.error("multiple vm fields found"));
}
meta.input.parse::<syn::Token![=]>()?;
let field = meta.input.parse::<syn::Type>()?;
vm = Some(field);
}
if meta.path.is_ident("align") {
if alignment.is_some() {
return Err(meta.error("multiple alignment fields found"));
}
if meta.input.peek(syn::token::Paren) {
let constant = meta.input.parse::<syn::Expr>()?;
alignment = Some(ObjectAlignment::Const(constant));
} else {
let _ = meta.input.parse::<syn::Token![=]>()?;
let field = meta.input.parse::<syn::Expr>()?;
alignment = Some(ObjectAlignment::Compute(field));
}
}
if meta.path.is_ident("size") {
if size.is_some() {
return Err(meta.error("multiple size fields found"));
}
if meta.input.peek(syn::token::Paren) {
let constant = meta.input.parse::<syn::Expr>()?;
size = Some(ObjectSize::Size(constant));
} else {
let _ = meta.input.parse::<syn::Token![=]>()?;
let field = meta.input.parse::<syn::Expr>()?;
size = Some(ObjectSize::Compute(field));
}
}
if meta.path.is_ident("trace") {
if trace.is_some() {
return Err(meta.error("multiple trace fields found"));
}
if meta.input.is_empty() {
trace = Some(ObjectTrace::Auto(false));
} else {
let _ = meta.input.parse::<syn::Token![=]>()?;
let field = meta.input.parse::<syn::Expr>()?;
trace = Some(ObjectTrace::Trace(false, field));
}
} else if meta.path.is_ident("notrace") {
if trace.is_some() {
return Err(meta.error("multiple trace fields found"));
}
trace = Some(ObjectTrace::NoTrace);
} else if meta.path.is_ident("scan") {
if trace.is_some() {
return Err(meta.error("multiple trace fields found"));
}
if meta.input.is_empty() {
trace = Some(ObjectTrace::Auto(true));
} else {
let _ = meta.input.parse::<syn::Token![=]>()?;
let field = meta.input.parse::<syn::Expr>()?;
trace = Some(ObjectTrace::Trace(true, field));
}
}
Ok(())
})?;
}
}
if !has {
return Err(syn::Error::new(Span::call_site(), "missing gcmetadata"));
}
let vm = vm.ok_or_else(|| syn::Error::new(Span::call_site(), "VM for object not specified"))?;
Ok(GCMetadata {
vm,
alignment: alignment.unwrap_or(ObjectAlignment::Auto),
size: size.unwrap_or(ObjectSize::Auto),
trace: trace.unwrap_or(ObjectTrace::Auto(false)),
})
}
/*
#[proc_macro_derive(GCMetadata, attributes(arraylike, gcmetadata))]
pub fn derive_gcmetadata(input: TokenStream) -> TokenStream {
let input = parse_macro_input!(input as DeriveInput);
let name = input.ident;
let arraylike = match find_arraylike(&input.attrs) {
Ok(arraylike) => arraylike,
Err(err) => return err.to_compile_error().into(),
};
let gc_meta = match find_gcmetadata(&input.attrs) {
Ok(gc_meta) => gc_meta,
Err(err) => return err.to_compile_error().into(),
};
let mut tracer = match gc_meta.trace {
ObjectTrace::NoTrace => quote! { TraceCallback::None },
ObjectTrace::Auto(scan_slots) => {
let call = |typ: syn::Type, field: syn::Expr| {
if scan_slots {
quote! {
<#typ as vmkit::mm::traits::Scan<_>>::scan_object(&#field, visitor);
}
} else {
quote! {
<#typ as vmkit::mm::traits::Trace>::trace_object(&mut #field, visitor)
}
}
};
let mut body = TokenStream::new();
match input.data {
Data::Enum(enumeration) => {
let mut match_: syn::ExprMatch = syn::parse_quote!(
match object.as_address().as_mut_ref::<#name>() {}
);
for variant in enumeration.variants {
let ident = variant.ident.clone();
let path = quote! { #name::#ident }
}
}
Data::Struct(structure) => {
}
}
}
}
TokenStream::new()
}
*/
decl_derive!([GCMetadata,attributes(gcmetadata, arraylike, ignore_trace)] => derive_gcmetadata);
fn derive_gcmetadata(s: synstructure::Structure<'_>) -> proc_macro2::TokenStream {
let gc_metadata = match find_gcmetadata(&s.ast().attrs) {
Ok(gc) => gc,
Err(err) => return err.to_compile_error(),
};
let arraylike = match find_arraylike(&s.ast().attrs) {
Ok(arraylike) => arraylike,
Err(err) => return err.to_compile_error(),
};
let vm = &gc_metadata.vm;
let name = s.ast().ident.clone();
let mut fields_to_skip = HashSet::new();
if let Some(arraylike) = arraylike.as_ref() {
if let Some(field) = &arraylike.data_field {
fields_to_skip.insert(field.clone());
}
if let Some(field) = &arraylike.length_field {
fields_to_skip.insert(field.clone());
}
}
let (trace_impl, trace_callback) = match gc_metadata.trace {
ObjectTrace::NoTrace => (None, quote! { ::vmkit::prelude::TraceCallback::None }),
ObjectTrace::Auto(scan_slots) => {
let mut filtered = s.clone();
filtered.filter(|bi| {
!bi.ast()
.attrs
.iter()
.any(|attr| attr.path().is_ident("ignore_trace"))
&& (bi.ast().ident.is_none()
|| !fields_to_skip.contains(bi.ast().ident.as_ref().unwrap()))
});
filtered.add_bounds(AddBounds::Fields);
let trace_impl = if scan_slots {
let trace_body = filtered.each(|bi| {
quote! {
mark(#bi, visitor);
}
});
let full_path = quote! { <#vm as vmkit::VirtualMachine>::Slot };
let mut extra = quote! {
fn mark<T: ::vmkit::prelude::Scan<#full_path>>(t: &T, visitor: &mut dyn ::vmkit::prelude::SlotVisitor<#full_path>) {
::vmkit::prelude::Scan::<#full_path>::scan_object(t, visitor);
}
};
if let Some(arraylike) = arraylike.as_ref().filter(|x| x.trace) {
let element = &arraylike.element_type;
let length = if let Some(getter) = &arraylike.length_getter {
quote! { #getter }
} else if let Some(field) = &arraylike.length_field {
quote! { self.#field as usize }
} else {
panic!("length field not found");
};
let data_ptr = if let Some(getter) = &arraylike.data_getter {
quote! { #getter }
} else if let Some(field) = &arraylike.data_field {
quote! { self.#field.as_mut_ptr().cast::<#element>() }
} else {
panic!("data field not found");
};
extra.extend(quote! {
let length = #length;
let data = #data_ptr;
for i in 0..length {
unsafe {
let ptr = data.add(i).as_ref().unwrap();
::vmkit::prelude::Scan::<#full_path>::scan_object(ptr, visitor);
}
}
});
}
filtered.bound_impl(
quote! {::vmkit::prelude::Scan<#full_path> },
quote! {
#[inline]
fn scan_object(&self, visitor: &mut dyn ::vmkit::prelude::SlotVisitor<#full_path>) {
let this = self;
#extra
match *self { #trace_body }
}
}
)
} else {
filtered.bind_with(|_| synstructure::BindStyle::RefMut);
let trace_body = filtered.each(|bi| {
quote! {
mark(#bi, visitor);
}
});
let mut extra = quote! {
#[inline]
fn mark<T: ::vmkit::prelude::Trace>(t: &mut T, visitor: &mut dyn ::vmkit::prelude::ObjectTracer) {
::vmkit::prelude::Trace::trace_object(t, visitor);
}
};
if let Some(arraylike) = arraylike.as_ref().filter(|x| x.trace) {
let element = &arraylike.element_type;
let length = if let Some(getter) = &arraylike.length_getter {
quote! { #getter }
} else if let Some(field) = &arraylike.length_field {
quote! { self.#field as usize }
} else {
panic!("length field not found");
};
let data_ptr = if let Some(getter) = &arraylike.data_getter {
quote! { #getter }
} else if let Some(field) = &arraylike.data_field {
quote! { self.#field.as_mut_ptr().cast::<#element>() }
} else {
panic!("data field not found");
};
extra.extend(quote! {
let length = #length;
let data = #data_ptr;
for i in 0..length {
unsafe {
let ptr = data.add(i).as_mut().unwrap();
::vmkit::prelude::Trace::trace_object(ptr, visitor);
}
}
});
}
filtered.bound_impl(
quote! {::vmkit::prelude::Trace },
quote! {
#[inline]
fn trace_object(&mut self, visitor: &mut dyn ::vmkit::prelude::ObjectTracer) {
match *self { #trace_body };
let this = self;
#extra
}
}
)
};
(
Some(trace_impl),
if scan_slots {
let full_path = quote! { <#vm as vmkit::VirtualMachine>::Slot };
quote! { ::vmkit::prelude::TraceCallback::ScanSlots(|object, visitor| unsafe {
let object = object.as_address().as_ref::<#name>();
::vmkit::prelude::Scan::<#full_path>::scan_object(object, visitor);
}) }
} else {
quote! { ::vmkit::prelude::TraceCallback::TraceObject(|object, visitor| unsafe {
let object = object.as_address().as_mut_ref::<#name>();
::vmkit::prelude::Trace::trace_object(object, visitor);
}) }
},
)
}
ObjectTrace::Trace(scan_slots, expr) => {
if scan_slots {
(
None,
quote! {
::vmkit::prelude::TraceCallback::ScanSlots(|object, visitor| unsafe {
let this = object.as_address().as_ref::<#name>();
#expr
})
},
)
} else {
(
None,
quote! {
::vmkit::prelude::TraceCallback::TraceObject(|object, visitor| unsafe {
let this = object.as_address().as_mut_ref::<#name>();
#expr
})
},
)
}
}
};
let instance_size = match &gc_metadata.size {
ObjectSize::Auto if arraylike.is_some() => quote! { 0 },
ObjectSize::Size(_) if arraylike.is_some() => quote! { 0 },
ObjectSize::Compute(_) => quote! { 0 },
ObjectSize::Auto => quote! { ::std::mem::size_of::<#name>() },
ObjectSize::Size(size) => quote! { #size },
};
let alignment = match &gc_metadata.alignment {
ObjectAlignment::Auto => quote! { ::std::mem::align_of::<#name>() },
ObjectAlignment::Const(expr) => quote! { #expr },
ObjectAlignment::Compute(_) => quote! { 0 },
};
let compute_size = if let Some(arraylike) = arraylike.as_ref() {
match gc_metadata.size {
ObjectSize::Compute(expr) => quote! { Some(|object| unsafe {
let this = object.as_address().as_ref::<#name>();
#expr
}) },
_ => {
let length = if let Some(getter) = &arraylike.length_getter {
quote! { #getter }
} else if let Some(field) = &arraylike.length_field {
quote! { this.#field as usize }
} else {
panic!("length field not found");
};
let element = &arraylike.element_type;
quote! { Some(|object| unsafe {
let this = object.as_address().as_ref::<#name>();
let length = #length;
size_of::<#name>() + (length * ::std::mem::size_of::<#element>())
}) }
}
}
} else {
match gc_metadata.size {
ObjectSize::Compute(expr) => quote! { Some(|object| unsafe {
let this = object.as_address().as_ref::<#name>();
#expr
}) },
_ => quote! { None },
}
};
let mut output = quote! {
impl #name {
pub fn gc_metadata() -> &'static ::vmkit::prelude::GCMetadata<#vm> {
static METADATA: ::vmkit::prelude::GCMetadata<#vm> = ::vmkit::prelude::GCMetadata {
trace: #trace_callback,
instance_size: #instance_size,
compute_size: #compute_size,
alignment: #alignment,
compute_alignment: None,
};
&METADATA
}
}
};
if let Some(trace_impl) = trace_impl {
output.extend(trace_impl);
}
output.into()
}
mod bytecode;
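For reference, a hedged sketch of how the derive wired up above might be used; the VM type, referent type, and field layout are assumptions chosen for illustration, not part of this commit:

use vmkit::prelude::*;

// Hypothetical types, for illustration only.
#[derive(GCMetadata)]
#[gcmetadata(vm = MyVM, align(16), scan)]
struct Pair {
    left: SomeRef,
    right: SomeRef,
    #[ignore_trace]
    cached_hash: u64,
}

// The derive is expected to provide Pair::gc_metadata() -> &'static GCMetadata<MyVM>
// plus a Scan impl covering the fields not skipped via #[ignore_trace].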


@@ -1,3 +0,0 @@
-fn main() {
-println!("Hello, world!");
-}


@@ -7,6 +7,7 @@ edition = "2021"
atomic = "0.6.0"
bytemuck = "1.21.0"
cfg-if = "1.0.0"
cfgenius = "0.1.1"
clap = { version = "4.5.28", features = ["derive"] }
easy-bitfield = "0.1.0"
env_logger = "0.11.6"
@@ -15,23 +16,26 @@ libc = "0.2.169"
log = { version = "0.4.25" }
mmtk = { git = "https://github.com/mmtk/mmtk-core" }
parking_lot = "0.12.3"
paste = "1.0.15"
rand = "0.9.0"
sysinfo = "0.33.1"
vmkit-proc = { path = "../vmkit-proc", optional = true }
[features]
-default = ["uncooperative"]
default = ["cooperative", "address_based_hashing", "object_pinning", "derive"]
vmside_forwarding = []
derive = ["vmkit-proc"]
object_pinning = ["mmtk/object_pinning"]
address_based_hashing = []
uncooperative = ["cooperative", "mmtk/immix_non_moving", "mmtk/immix_zero_on_release"]
# VMKit is built for use in cooperative runtime. Such runtime
# would be able to use write barriers and safepoints. Such environment
# must also provide precise object layout (stack can be uncooperative).
-cooperative = ["mmtk/vo_bit", "mmtk/is_mmtk_object", "mmtk/vo_bit_access"]
cooperative = ["object_pinning", "mmtk/vo_bit", "mmtk/is_mmtk_object", "mmtk/vo_bit_access"]
# VMKit is built for use in full-precise runtime. Such runtime
# would be able to use precise write barriers and safepoints, object
# layout is fully precise.


@@ -0,0 +1,104 @@
#define GC_THREADS 1
#define GC_NO_THREAD_REDIRECTS 1
#include <gc.h>
#include <stdio.h>
#include <time.h>
typedef struct Node {
struct Node *left;
struct Node *right;
} Node;
Node* leaf() {
return GC_malloc(sizeof(Node));
}
Node* new_node(Node* left, Node* right) {
Node* node = GC_malloc(sizeof(Node));
node->left = left;
node->right = right;
return node;
}
int itemCheck(Node* node) {
if (node->left == NULL) {
return 1;
}
return 1 + itemCheck(node->left) + itemCheck(node->right);
}
Node* bottomUpTree(int depth) {
if (depth > 0) {
return new_node(bottomUpTree(depth - 1), bottomUpTree(depth - 1));
}
return leaf();
}
extern void* __data_start;
extern void* _end;
Node* longLivedTree;
char results[16][256];
void* threadWork(void* data) {
struct GC_stack_base base;
GC_get_stack_base(&base);
GC_register_my_thread(&base);
int d = (int)data;
int iterations = 1 << (21 - d + 4);
int check = 0;
for (int i = 0; i < iterations; i++) {
Node* treeNode = bottomUpTree(d);
check += itemCheck(treeNode);
}
//results[(d-4)/2] =
sprintf(&results[(d-4)/2][0],"%d\t trees of depth %d\t check: %d\n", iterations, d, check);
GC_unregister_my_thread();
return NULL;
}
int main() {
printf("DATA START: %p\n", &__data_start);
printf("DATA END: %p\n", &_end);
GC_use_entire_heap = 1;
GC_allow_register_threads();
GC_init();
int maxDepth = 21;
int stretchDepth = maxDepth + 1;
int start = clock();
Node* stretchTree = bottomUpTree(stretchDepth);
printf("stretch tree of depth %d\n", stretchDepth);
printf("time: %f\n", ((double)clock() - start) / CLOCKS_PER_SEC);
longLivedTree = bottomUpTree(maxDepth);
GC_gcollect();
pthread_t threads[16];
printf("long lived tree of depth %d\t check: %d\n", maxDepth, itemCheck(longLivedTree));
for (int d = 4; d <= maxDepth; d += 2) {
/*int iterations = 1 << (maxDepth - d + 4);
int check = 0;
for (int i = 0; i < iterations; i++) {
Node* treeNode = bottomUpTree(d);
check += itemCheck(treeNode);
}
printf("%d\t trees of depth %d\t check: %d\n", iterations, d, check);*/
void* data = (void*)d;
pthread_create(&threads[(d-4)/2], NULL, threadWork, data);
}
for (int d = 4; d <= maxDepth; d += 2) {
pthread_join(threads[(d-4)/2], NULL);
printf(results[(d-4)/2]);
}
printf("long lived tree of depth %d\t check: %d\n", maxDepth, itemCheck(longLivedTree));
printf("time: %f\n", ((double)clock() - start) / CLOCKS_PER_SEC);
return 0;
}


@@ -46,7 +46,7 @@ int main() {
GC_init();
-int maxDepth = 18;
int maxDepth = 21;
int stretchDepth = maxDepth + 1;
int start = clock();
Node* stretchTree = bottomUpTree(stretchDepth);


@@ -32,6 +32,7 @@ static METADATA: GCMetadata<BenchVM> = GCMetadata {
instance_size: size_of::<Node>(),
compute_size: None,
alignment: 16,
compute_alignment: None,
};
struct BenchVM {
@@ -67,6 +68,12 @@ impl VirtualMachine for BenchVM {
type Slot = SimpleSlot;
type ThreadContext = ThreadBenchContext;
type MemorySlice = UnimplementedMemorySlice;
#[cfg(feature="vmside_forwarding")]
const LOCAL_FORWARDING_POINTER_SPEC: mmtk::vm::VMLocalForwardingPointerSpec = mmtk::vm::VMLocalForwardingPointerSpec::in_header(0);
#[cfg(feature="vmside_forwarding")]
const LOCAL_FORWARDING_BITS_SPEC: mmtk::vm::VMLocalForwardingBitsSpec = mmtk::vm::VMLocalForwardingBitsSpec::in_header(62);
fn get() -> &'static Self {
VM.get().unwrap()
}
@@ -119,7 +126,7 @@ impl NodeRef {
&METADATA,
AllocationSemantics::Default,
);
//node.hashcode::<BenchVM>();
node.set_field_object::<BenchVM, false>(offset_of!(Node, left), left.0);
node.set_field_object::<BenchVM, false>(offset_of!(Node, right), right.0);
@@ -181,15 +188,8 @@ fn main() {
.parse::<usize>()
.unwrap();
let mut builder = MMTKBuilder::new();
-builder.options.plan.set(PlanSelector::Immix);
builder.options.plan.set(PlanSelector::StickyImmix);
builder.options.threads.set(nthreads);
-builder
-.options
-.gc_trigger
-.set(mmtk::util::options::GCTriggerSelector::DynamicHeapSize(
-4 * 1024 * 1024 * 1024,
-16 * 1024 * 1024 * 1024,
-));
VM.set(BenchVM {
vmkit: VMKit::new(&mut builder),
})


@@ -24,6 +24,7 @@ use crate::{
MemoryManager,
},
object_model::{
finalization::FinalizerProcessing,
metadata::{GCMetadata, Metadata, TraceCallback},
object::VMKitObject,
},
@@ -33,7 +34,7 @@ use crate::{
};
use easy_bitfield::*;
use mmtk::{
-util::Address,
util::{options::PlanSelector, Address},
vm::{
slot::{SimpleSlot, UnimplementedMemorySlice},
ObjectTracer,
@@ -155,6 +156,11 @@ impl VirtualMachine for BDWGC {
type Slot = SimpleSlot;
type MemorySlice = UnimplementedMemorySlice<Self::Slot>;
#[cfg(feature="vmside_forwarding")]
const LOCAL_FORWARDING_BITS_SPEC: mmtk::vm::VMLocalForwardingBitsSpec = mmtk::vm::VMLocalForwardingBitsSpec::side_first();
#[cfg(feature="vmside_forwarding")]
const LOCAL_FORWARDING_POINTER_SPEC: mmtk::vm::VMLocalForwardingPointerSpec = mmtk::vm::VMLocalForwardingPointerSpec::in_header(0);
fn get() -> &'static Self {
BDWGC_VM.get().expect("GC is not initialized")
}
@@ -186,7 +192,7 @@ impl VirtualMachine for BDWGC {
}
});
-false
FinalizerProcessing::process::<BDWGC>(_worker, _tracer_context)
}
fn forward_weak_refs(
@@ -247,6 +253,10 @@ impl VirtualMachine for BDWGC {
}
}
}
fn compute_hashcode(object: VMKitObject) -> usize {
object.as_address().as_usize()
}
}
type VTableAddress = BitField<usize, usize, 0, 37, false>;
@@ -326,6 +336,7 @@ struct MarkStackMeta<'a> {
static CONSERVATIVE_METADATA: GCMetadata<BDWGC> = GCMetadata {
alignment: 8,
instance_size: 0,
compute_alignment: None,
compute_size: Some(|object| {
let header = object.header::<BDWGC>().metadata();
ObjectSize::decode(header.meta) * BDWGC::MIN_ALIGNMENT
@@ -455,21 +466,21 @@ static CONSERVATIVE_METADATA: GCMetadata<BDWGC> = GCMetadata {
}),
};
-impl FromBitfield<u64> for BDWGCMetadata {
-fn from_bitfield(value: u64) -> Self {
impl FromBitfield<usize> for BDWGCMetadata {
fn from_bitfield(value: usize) -> Self {
Self {
meta: value as usize,
}
}
fn from_i64(value: i64) -> Self {
-Self::from_bitfield(value as u64)
Self::from_bitfield(value as usize)
}
}
-impl ToBitfield<u64> for BDWGCMetadata {
-fn to_bitfield(self) -> u64 {
-self.meta as u64
impl ToBitfield<usize> for BDWGCMetadata {
fn to_bitfield(self) -> usize {
self.meta as usize
}
fn one() -> Self {
@@ -508,10 +519,13 @@ pub static mut GC_VERBOSE: i32 = 0;
static BUILDER: LazyLock<Mutex<MMTKBuilder>> = LazyLock::new(|| {
Mutex::new({
let mut builder = MMTKBuilder::new();
-builder
-.options
-.plan
-.set(mmtk::util::options::PlanSelector::Immix);
builder.options.read_env_var_settings();
if !matches!(
*builder.options.plan,
PlanSelector::Immix | PlanSelector::MarkSweep
) {
builder.options.plan.set(PlanSelector::Immix);
}
builder
})
});
@@ -595,6 +609,11 @@ pub extern "C-unwind" fn GC_unregister_my_thread() {
unsafe { Thread::<BDWGC>::unregister_mutator_manual() };
}
#[no_mangle]
pub extern "C-unwind" fn GC_allow_register_threads() {
/* noop: always allowed */
}
#[no_mangle]
pub extern "C-unwind" fn GC_pthread_create(
thread_ptr: &mut libc::pthread_t,


@@ -13,19 +13,42 @@ pub mod mm;
pub mod object_model;
pub mod options;
pub mod platform;
pub mod semaphore;
pub mod sync;
pub mod threading;
pub mod macros;
#[cfg(feature = "uncooperative")]
pub mod bdwgc_shim;
pub use mmtk;
pub trait VirtualMachine: Sized + 'static + Send + Sync {
type ThreadContext: threading::ThreadContext<Self>;
type BlockAdapterList: threading::BlockAdapterList<Self>;
type Metadata: object_model::metadata::Metadata<Self>;
type Slot: SlotExtra;
type MemorySlice: MemorySlice<SlotType = Self::Slot>;
/// Should we always trace objects? If `true`, `support_slot_enqueuing` will always return
/// `false` to MMTk and objects will always be traced through `ObjectTracer`.
const ALWAYS_TRACE: bool = false;
/*#[cfg(feature = "address_based_hashing")]
const HASH_STATE_SPEC: VMLocalHashStateSpec = VMLocalHashStateSpec::in_header(61);*/
/// 1-word local metadata for spaces that may copy objects.
/// This metadata has to be stored in the header.
/// It can be defined at a position within the object payload:
/// since a forwarding pointer is only stored in dead objects, which are not
/// accessible by the language, it is fine for the forwarding pointer to overwrite the object payload.
///
#[cfg(feature = "vmside_forwarding")]
const LOCAL_FORWARDING_POINTER_SPEC: mmtk::vm::VMLocalForwardingPointerSpec;
/// 2-bit local metadata for spaces that store a forwarding state for objects.
/// If this spec is defined in the header, it can be defined with a position of the lowest 2 bits in the forwarding pointer.
/// If this spec is defined on the side it must be defined after the [`MARK_BIT_SPEC`](crate::object_model::MARK_BIT_SPEC).
#[cfg(feature = "vmside_forwarding")]
const LOCAL_FORWARDING_BITS_SPEC: mmtk::vm::VMLocalForwardingBitsSpec;
const ALIGNMENT_VALUE: u32 = 0xdead_beef;
const MAX_ALIGNMENT: usize = 32;
const MIN_ALIGNMENT: usize = 8;
@@ -146,6 +169,20 @@ pub trait VirtualMachine: Sized + 'static + Send + Sync {
let _ = tls;
unimplemented!()
}
/// Compute the hashcode of an object. When the `address_based_hashing` feature is enabled,
/// this function is ignored. Otherwise VMKit calls into this function to compute the hashcode of an object.
///
/// If the VM uses moving plans, it is strongly advised *not* to compute the hashcode from the address,
/// as the object's address may change during GC; instead store the hashcode in a field or in some header bits.
/// This function must be fast, as it is called for every `VMKitObject::hashcode()` invocation.
fn compute_hashcode(object: VMKitObject) -> usize {
let _ = object;
unimplemented!(
"VM currently does not support hashcode computation, override this method to do so"
);
}
}
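A minimal sketch of how a VM might override this when `address_based_hashing` is disabled; the object layout and type names below are assumptions for illustration, not part of this change:

// Sketch only: ObjWithHash and its hash field are assumptions.
#[repr(C)]
struct ObjWithHash {
    hash: usize,
    // ...rest of the payload
}

// Inside `impl VirtualMachine for MyVM { ... }`:
fn compute_hashcode(object: VMKitObject) -> usize {
    // Return a hashcode the VM stored in the payload at allocation time, so the
    // value stays stable even if a moving plan relocates the object.
    unsafe { object.as_address().as_ref::<ObjWithHash>().hash }
}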
pub struct VMKit<VM: VirtualMachine> {
@@ -178,3 +215,15 @@ impl<VM: VirtualMachine> VMKit<VM> {
&self.thread_manager
}
}
#[cfg(feature="derive")]
pub use vmkit_proc::GCMetadata;
pub mod prelude {
#[cfg(feature="derive")]
pub use super::GCMetadata;
pub use super::mm::traits::*;
pub use super::object_model::object::*;
pub use super::object_model::metadata::*;
pub use mmtk::vm::ObjectTracer;
pub use mmtk::vm::SlotVisitor;
}


@@ -1,2 +1,62 @@
cfgenius::cond! {
if macro(crate::macros::darwin) {
cfgenius::cond! {
if cfg(target_arch="x86_64") {
pub type PlatformRegisters = libc::__darwin_x86_thread_state64;
} else if cfg(target_arch="aarch64") {
pub type PlatformRegisters = libc::__darwin_arm_thread_state64;
} else {
compile_error!("Unsupported Apple target");
}
}
pub unsafe fn registers_from_ucontext(ucontext: *const libc::ucontext_t) -> *const PlatformRegisters {
return &(*ucontext).uc_mcontext.__ss;
}
} else if macro(crate::macros::have_machine_context) {
#[cfg(not(target_os="openbsd"))]
use libc::mcontext_t;
#[cfg(target_os="openbsd")]
use libc::ucontext_t as mcontext_t;
#[repr(C)]
#[derive(Clone)]
pub struct PlatformRegisters {
pub machine_context: mcontext_t
}
pub unsafe fn registers_from_ucontext(ucontext: *const libc::ucontext_t) -> *const PlatformRegisters {
cfgenius::cond! {
if cfg(target_os="openbsd")
{
return ucontext.cast();
}
else if cfg(target_arch="powerpc")
{
return unsafe { std::mem::transmute(
&(*ucontext).uc_mcontext.uc_regs
) }
} else {
return unsafe { std::mem::transmute(
&(*ucontext).uc_mcontext
) }
}
}
}
} else if cfg(windows) {
use winapi::um::winnt::CONTEXT;
pub type PlatformRegisters = CONTEXT;
} else {
pub struct PlatformRegisters {
pub stack_pointer: *mut u8
}
}
}

vmkit/src/macros.rs Normal file

@@ -0,0 +1,27 @@
cfgenius::define! {
pub darwin = cfg(target_vendor="apple");
pub ios_family = all(macro(darwin), cfg(target_os="ios"));
pub have_machine_context = any(
macro(darwin),
cfg(target_os="fuchsia"),
all(
cfg(any(
target_os="freebsd",
target_os="haiku",
target_os="netbsd",
target_os="openbsd",
target_os="linux",
target_os="hurd"
)),
cfg(
any(
target_arch="x86_64",
target_arch="arm",
target_arch="aarch64",
target_arch="riscv64",
)
)
)
)
);
}


@@ -1 +1,277 @@
-fn main() {}
use mmtk::util::Address;
use mmtk::vm::slot::UnimplementedMemorySlice;
use mmtk::{util::options::PlanSelector, vm::slot::SimpleSlot, AllocationSemantics, MMTKBuilder};
use std::cell::RefCell;
use std::mem::offset_of;
use std::sync::Arc;
use std::sync::OnceLock;
use vmkit::threading::parked_scope;
use vmkit::{
mm::{traits::Trace, MemoryManager},
object_model::{
metadata::{GCMetadata, TraceCallback},
object::VMKitObject,
},
sync::Monitor,
threading::{GCBlockAdapter, Thread, ThreadContext},
VMKit, VirtualMachine,
};
#[repr(C)]
struct Node {
left: NodeRef,
right: NodeRef,
}
static METADATA: GCMetadata<BenchVM> = GCMetadata {
trace: TraceCallback::TraceObject(|object, tracer| unsafe {
let node = object.as_address().as_mut_ref::<Node>();
node.left.0.trace_object(tracer);
node.right.0.trace_object(tracer);
}),
instance_size: size_of::<Node>(),
compute_size: None,
alignment: 16,
compute_alignment: None,
};
struct BenchVM {
vmkit: VMKit<Self>,
}
static VM: OnceLock<BenchVM> = OnceLock::new();
struct ThreadBenchContext;
impl ThreadContext<BenchVM> for ThreadBenchContext {
fn new(_: bool) -> Self {
Self
}
fn save_thread_state(&self) {}
fn scan_roots(
&self,
_factory: impl mmtk::vm::RootsWorkFactory<<BenchVM as VirtualMachine>::Slot>,
) {
}
fn scan_conservative_roots(
&self,
_croots: &mut vmkit::mm::conservative_roots::ConservativeRoots,
) {
}
}
impl VirtualMachine for BenchVM {
type BlockAdapterList = (GCBlockAdapter, ());
type Metadata = &'static GCMetadata<Self>;
type Slot = SimpleSlot;
type ThreadContext = ThreadBenchContext;
type MemorySlice = UnimplementedMemorySlice;
#[cfg(feature="vmside_forwarding")]
const LOCAL_FORWARDING_POINTER_SPEC: mmtk::vm::VMLocalForwardingPointerSpec = mmtk::vm::VMLocalForwardingPointerSpec::in_header(0);
#[cfg(feature="vmside_forwarding")]
const LOCAL_FORWARDING_BITS_SPEC: mmtk::vm::VMLocalForwardingBitsSpec = mmtk::vm::VMLocalForwardingBitsSpec::in_header(62);
fn get() -> &'static Self {
VM.get().unwrap()
}
fn vmkit(&self) -> &VMKit<Self> {
&self.vmkit
}
fn prepare_for_roots_re_scanning() {}
fn notify_initial_thread_scan_complete(partial_scan: bool, tls: mmtk::util::VMWorkerThread) {
let _ = partial_scan;
let _ = tls;
}
fn forward_weak_refs(
_worker: &mut mmtk::scheduler::GCWorker<vmkit::mm::MemoryManager<Self>>,
_tracer_context: impl mmtk::vm::ObjectTracerContext<vmkit::mm::MemoryManager<Self>>,
) {
}
fn scan_roots_in_mutator_thread(
_tls: mmtk::util::VMWorkerThread,
_mutator: &'static mut mmtk::Mutator<vmkit::mm::MemoryManager<Self>>,
_factory: impl mmtk::vm::RootsWorkFactory<
<vmkit::mm::MemoryManager<Self> as mmtk::vm::VMBinding>::VMSlot,
>,
) {
}
fn scan_vm_specific_roots(
_tls: mmtk::util::VMWorkerThread,
_factory: impl mmtk::vm::RootsWorkFactory<
<vmkit::mm::MemoryManager<Self> as mmtk::vm::VMBinding>::VMSlot,
>,
) {
}
}
#[repr(transparent)]
#[derive(Clone, Copy, PartialEq, Eq)]
struct NodeRef(VMKitObject);
impl NodeRef {
pub fn new(thread: &Thread<BenchVM>, left: NodeRef, right: NodeRef) -> Self {
let node = MemoryManager::<BenchVM>::allocate(
thread,
size_of::<Node>(),
32,
&METADATA,
AllocationSemantics::Default,
);
//node.hashcode::<BenchVM>();
node.set_field_object::<BenchVM, false>(offset_of!(Node, left), left.0);
node.set_field_object::<BenchVM, false>(offset_of!(Node, right), right.0);
Self(node)
}
pub fn left(self) -> NodeRef {
unsafe {
let node = self.0.as_address().as_ref::<Node>();
node.left
}
}
pub fn right(self) -> NodeRef {
unsafe {
let node = self.0.as_address().as_ref::<Node>();
node.right
}
}
pub fn null() -> Self {
Self(VMKitObject::NULL)
}
pub fn item_check(&self) -> usize {
if self.left() == NodeRef::null() {
1
} else {
1 + self.left().item_check() + self.right().item_check()
}
}
pub fn leaf(thread: &Thread<BenchVM>) -> Self {
Self::new(thread, NodeRef::null(), NodeRef::null())
}
}
fn bottom_up_tree(thread: &Thread<BenchVM>, depth: usize) -> NodeRef {
if thread.take_yieldpoint() != 0 {
Thread::<BenchVM>::yieldpoint(0, Address::ZERO);
}
if depth > 0 {
NodeRef::new(
thread,
bottom_up_tree(thread, depth - 1),
bottom_up_tree(thread, depth - 1),
)
} else {
NodeRef::leaf(thread)
}
}
const MIN_DEPTH: usize = 4;
fn main() {
env_logger::init();
let nthreads = std::env::var("THREADS")
.unwrap_or("4".to_string())
.parse::<usize>()
.unwrap();
let mut builder = MMTKBuilder::new();
builder.options.plan.set(PlanSelector::StickyImmix);
builder.options.threads.set(nthreads);
VM.set(BenchVM {
vmkit: VMKit::new(&mut builder),
})
.unwrap_or_else(|_| panic!());
Thread::<BenchVM>::main(ThreadBenchContext, || {
/*let thread = Thread::<BenchVM>::current();
let start = std::time::Instant::now();
let n = std::env::var("DEPTH")
.unwrap_or("18".to_string())
.parse::<usize>()
.unwrap();
let max_depth = if n < MIN_DEPTH + 2 { MIN_DEPTH + 2 } else { n };
let stretch_depth = max_depth + 1;
println!("stretch tree of depth {stretch_depth}");
let _ = bottom_up_tree(&thread, stretch_depth);
let duration = start.elapsed();
println!("time: {duration:?}");
let results = Arc::new(Monitor::new(vec![
RefCell::new(String::new());
(max_depth - MIN_DEPTH) / 2 + 1
]));
let mut handles = Vec::new();
for d in (MIN_DEPTH..=max_depth).step_by(2) {
let depth = d;
let thread = Thread::<BenchVM>::for_mutator(ThreadBenchContext);
let results = results.clone();
let handle = thread.start(move || {
let thread = Thread::<BenchVM>::current();
let mut check = 0;
let iterations = 1 << (max_depth - depth + MIN_DEPTH);
for _ in 1..=iterations {
let tree_node = bottom_up_tree(&thread, depth);
check += tree_node.item_check();
}
*results.lock_with_handshake::<BenchVM>()[(depth - MIN_DEPTH) / 2].borrow_mut() =
format!("{iterations}\t trees of depth {depth}\t check: {check}");
});
handles.push(handle);
}
println!("created {} threads", handles.len());
parked_scope::<(), BenchVM>(|| {
while let Some(handle) = handles.pop() {
handle.join().unwrap();
}
});
for result in results.lock_with_handshake::<BenchVM>().iter() {
println!("{}", result.borrow());
}
println!(
"long lived tree of depth {max_depth}\t check: {}",
bottom_up_tree(&thread, max_depth).item_check()
);
let duration = start.elapsed();
println!("time: {duration:?}");*/
let thread = Thread::<BenchVM>::current();
thread.save_registers();
let registers = thread.get_registers();
for (i, greg) in registers.machine_context.gregs.iter().enumerate() {
println!("{:02}: {:x}",i, greg);
}
});
}


@@ -7,7 +7,6 @@ use crate::{
threading::Thread,
VirtualMachine,
};
-use easy_bitfield::{AtomicBitfieldContainer, ToBitfield};
use mmtk::{
util::{
alloc::{AllocatorSelector, BumpAllocator, ImmixAllocator},
@@ -21,8 +20,8 @@ use mmtk::{
},
AllocationSemantics, BarrierSelector, MutatorContext,
};
-use ref_glue::Finalizer;
-use std::{marker::PhantomData, panic::AssertUnwindSafe};
use std::marker::PhantomData;
#[derive(Clone, Copy)]
pub struct MemoryManager<VM: VirtualMachine>(PhantomData<VM>);
@@ -57,6 +56,7 @@ pub mod scanning;
pub mod stack_bounds;
pub mod tlab;
pub mod traits;
pub mod spec;
impl<VM: VirtualMachine> MemoryManager<VM> {
pub extern "C-unwind" fn request_gc() -> bool {
@@ -100,10 +100,7 @@ impl<VM: VirtualMachine> MemoryManager<VM> {
let object_start =
mmtk::memory_manager::alloc(thread.mutator(), size, alignment, 0, semantics);
-object_start.store(HeapObjectHeader::<VM> {
-metadata: AtomicBitfieldContainer::new(metadata.to_bitfield()),
-marker: PhantomData,
-});
object_start.store(HeapObjectHeader::<VM>::new(metadata));
let object = VMKitObject::from_address(object_start + OBJECT_REF_OFFSET);
Self::set_vo_bit(object);
Self::refill_tlab(thread);
@@ -146,12 +143,10 @@ impl<VM: VirtualMachine> MemoryManager<VM> {
tlab.allocate::<VM>(size, alignment, OBJECT_REF_OFFSET as usize);
if !object_start.is_zero() {
-object_start.store(HeapObjectHeader::<VM> {
-metadata: AtomicBitfieldContainer::new(metadata.to_bitfield()),
-marker: PhantomData,
-});
object_start.store(HeapObjectHeader::<VM>::new(metadata));
let object = VMKitObject::from_address(object_start + OBJECT_REF_OFFSET);
Self::set_vo_bit(object);
debug_assert!(mmtk::memory_manager::is_mapped_address(object.as_address()));
return object;
}
@@ -185,10 +180,7 @@ impl<VM: VirtualMachine> MemoryManager<VM> {
);
let object = VMKitObject::from_address(object_start + OBJECT_REF_OFFSET);
-object_start.store(HeapObjectHeader::<VM> {
-metadata: AtomicBitfieldContainer::new(metadata.to_bitfield()),
-marker: PhantomData,
-});
object_start.store(HeapObjectHeader::<VM>::new(metadata));
//Self::set_vo_bit(object);
Self::refill_tlab(thread);
@@ -222,10 +214,7 @@ impl<VM: VirtualMachine> MemoryManager<VM> {
);
let object = VMKitObject::from_address(object_start + OBJECT_REF_OFFSET);
-object_start.store(HeapObjectHeader::<VM> {
-metadata: AtomicBitfieldContainer::new(metadata.to_bitfield()),
-marker: PhantomData,
-});
object_start.store(HeapObjectHeader::<VM>::new(metadata));
Self::set_vo_bit(object);
Self::refill_tlab(thread);
object
@@ -252,10 +241,7 @@ impl<VM: VirtualMachine> MemoryManager<VM> {
);
let object = VMKitObject::from_address(object_start + OBJECT_REF_OFFSET);
-object_start.store(HeapObjectHeader::<VM> {
-metadata: AtomicBitfieldContainer::new(metadata.to_bitfield()),
-marker: PhantomData,
-});
object_start.store(HeapObjectHeader::<VM>::new(metadata));
Self::set_vo_bit(object);
Self::refill_tlab(thread);
object
@@ -283,13 +269,11 @@ impl<VM: VirtualMachine> MemoryManager<VM> {
semantics,
);
-object_start.store(HeapObjectHeader::<VM> {
-metadata: AtomicBitfieldContainer::new(metadata.to_bitfield()),
-marker: PhantomData,
-});
object_start.store(HeapObjectHeader::<VM>::new(metadata));
let object = VMKitObject::from_address(object_start + OBJECT_REF_OFFSET);
Self::set_vo_bit(object);
Self::refill_tlab(thread);
debug_assert!(mmtk::memory_manager::is_mapped_address(object.as_address()));
object
}
}
@@ -488,40 +472,6 @@ impl<VM: VirtualMachine> MemoryManager<VM> {
.load(atomic::Ordering::SeqCst)
== 0
}
-pub fn register_finalizer(object: VMKitObject, callback: Box<dyn FnOnce(VMKitObject) + Send>) {
-let finalizer = Finalizer {
-object,
-callback: Some(callback),
-};
-let vm = VM::get();
-mmtk::memory_manager::add_finalizer(&vm.vmkit().mmtk, finalizer);
-}
-pub fn run_finalizers() -> usize {
-let vm = VM::get();
-let mut count = 0;
-while let Some(mut finalizer) = mmtk::memory_manager::get_finalized_object(&vm.vmkit().mmtk) {
-let _ = std::panic::catch_unwind(AssertUnwindSafe(|| finalizer.run()));
-count += 1;
-}
-count
-}
-pub fn get_finalizers_for(object: VMKitObject) -> Vec<Finalizer> {
-if object.is_null() {
-return vec![];
-}
-let vm = VM::get();
-mmtk::memory_manager::get_finalizers_for(&vm.vmkit().mmtk, unsafe {
-object.as_object_unchecked()
-})
-}
-pub fn get_finalized_object() -> Option<Finalizer> {
-let vm = VM::get();
-mmtk::memory_manager::get_finalized_object(&vm.vmkit().mmtk)
-}
}
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]


@@ -46,9 +46,13 @@ impl ConservativeRoots {
///
/// `start` and `end` must be valid addresses.
pub unsafe fn add_span(&mut self, mut start: Address, mut end: Address) {
assert!(!start.is_zero() && !end.is_zero(), "provided NULL address to ConservativeRoots::add_span");
if start > end {
std::mem::swap(&mut start, &mut end);
}
let start = start.align_down(size_of::<Address>());
let end = end.align_up(size_of::<Address>());
let mut current = start;
while current < end {
let addr = current.load::<Address>();


@@ -1,20 +1,16 @@
use std::marker::PhantomData;
use crate::{
-mm::MemoryManager,
-object_model::{
machine_context::PlatformRegisters, mm::MemoryManager, object_model::{
metadata::{Metadata, TraceCallback},
object::VMKitObject,
-},
-threading::{Thread, ThreadContext},
-VirtualMachine,
}, threading::{Thread, ThreadContext}, VirtualMachine
};
use mmtk::{
-vm::{slot::Slot, ObjectTracer, Scanning, SlotVisitor},
-MutatorContext,
util::Address, vm::{slot::Slot, ObjectTracer, Scanning, SlotVisitor}, MutatorContext
};
-use super::{conservative_roots::ConservativeRoots, traits::ToSlot};
use super::traits::ToSlot;
pub struct VMKitScanning<VM: VirtualMachine>(PhantomData<VM>);
@@ -95,18 +91,28 @@ impl<VM: VirtualMachine> Scanning<MemoryManager<VM>> for VMKitScanning<VM> {
fn scan_roots_in_mutator_thread(
_tls: mmtk::util::VMWorkerThread,
mutator: &'static mut mmtk::Mutator<MemoryManager<VM>>,
-mut factory: impl mmtk::vm::RootsWorkFactory<VM::Slot>,
factory: impl mmtk::vm::RootsWorkFactory<VM::Slot>,
) {
let tls = Thread::<VM>::from_vm_mutator_thread(mutator.get_tls());
tls.context.scan_roots(factory.clone());
#[cfg(not(feature = "full-precise"))]
{
let mut factory = factory;
use super::conservative_roots::ConservativeRoots;
let mut croots = ConservativeRoots::new(128);
let bounds = *tls.stack_bounds();
let registers = tls.get_registers();
unsafe {
let start = Address::from_ref(&registers);
let end = start.add(size_of::<PlatformRegisters>());
croots.add_span(start, end);
}
unsafe { croots.add_span(bounds.origin(), tls.stack_pointer()) };
tls.context.scan_conservative_roots(&mut croots);
croots.add_to_factory(&mut factory);
drop(registers);
}
}
@@ -122,8 +128,12 @@ impl<VM: VirtualMachine> Scanning<MemoryManager<VM>> for VMKitScanning<VM> {
_tls: mmtk::util::VMWorkerThread,
object: mmtk::util::ObjectReference,
) -> bool {
if VM::ALWAYS_TRACE {
return true;
}
let object = VMKitObject::from(object);
let metadata = object.header::<VM>().metadata();
matches!(metadata.gc_metadata().trace, TraceCallback::ScanSlots(_))
&& (!metadata.is_object() || metadata.to_slot().is_some())
}

vmkit/src/mm/spec.rs Normal file

@@ -0,0 +1,93 @@
/// Define a VM metadata spec. These are essentially bitfields for storing object metadata.
/// Using this macro, a VM can easily declare side metadata or local (in-header) metadata.
///
/// This macro is copied from the mmtk-core source code.
#[macro_export]
macro_rules! define_vm_metadata_spec {
($(#[$outer:meta])*$spec_name: ident, $is_global: expr, $log_num_bits: expr, $side_min_obj_size: expr) => {
$(#[$outer])*
pub struct $spec_name($crate::mmtk::util::metadata::MetadataSpec);
impl $spec_name {
/// The number of bits (in log2) that are needed for the spec.
pub const LOG_NUM_BITS: usize = $log_num_bits;
/// Whether this spec is global or local. For side metadata, the binding needs to make sure
/// global specs are laid out after another global spec, and local specs are laid
/// out after another local spec. Otherwise, there will be an assertion failure.
pub const IS_GLOBAL: bool = $is_global;
/// Declare that the VM uses in-header metadata for this metadata type.
/// For the specification of the `bit_offset` argument, please refer to
/// the document of `[crate::util::metadata::header_metadata::HeaderMetadataSpec.bit_offset]`.
/// The binding needs to make sure that the bits used for a spec in the header do not conflict with
/// the bits of another spec (unless it is specified that some bits may be reused).
pub const fn in_header(bit_offset: isize) -> Self {
Self($crate::mmtk::util::metadata::MetadataSpec::InHeader($crate::mmtk::util::metadata::header_metadata::HeaderMetadataSpec {
bit_offset,
num_of_bits: 1 << Self::LOG_NUM_BITS,
}))
}
/// Declare that the VM uses side metadata for this metadata type,
/// and the side metadata is the first of its kind (global or local).
/// The first global or local side metadata should be declared with `side_first()`,
/// and the rest side metadata should be declared with `side_after()` after a defined
/// side metadata of the same kind (global or local). Logically, all the declarations
/// create two list of side metadata, one for global, and one for local.
pub const fn side_first() -> Self {
if Self::IS_GLOBAL {
Self($crate::mmtk::util::metadata::MetadataSpec::OnSide($crate::mmtk::util::metadata::side_metadata::SideMetadataSpec {
name: stringify!($spec_name),
is_global: Self::IS_GLOBAL,
offset: $crate::mmtk::util::metadata::side_metadata::GLOBAL_SIDE_METADATA_VM_BASE_OFFSET,
log_num_of_bits: Self::LOG_NUM_BITS,
log_bytes_in_region: $side_min_obj_size as usize,
}))
} else {
Self($crate::mmtk::util::metadata::MetadataSpec::OnSide($crate::mmtk::util::metadata::side_metadata::SideMetadataSpec {
name: stringify!($spec_name),
is_global: Self::IS_GLOBAL,
offset: $crate::mmtk::util::metadata::side_metadata::LOCAL_SIDE_METADATA_VM_BASE_OFFSET,
log_num_of_bits: Self::LOG_NUM_BITS,
log_bytes_in_region: $side_min_obj_size as usize,
}))
}
}
/// Declare that the VM uses side metadata for this metadata type,
/// and the side metadata should be laid out after the given side metadata spec.
/// The first global or local side metadata should be declared with `side_first()`,
/// and the rest side metadata should be declared with `side_after()` after a defined
/// side metadata of the same kind (global or local). Logically, all the declarations
/// create two list of side metadata, one for global, and one for local.
pub const fn side_after(spec: &$crate::mmtk::util::metadata::MetadataSpec) -> Self {
assert!(spec.is_on_side());
let side_spec = spec.extract_side_spec();
assert!(side_spec.is_global == Self::IS_GLOBAL);
Self($crate::mmtk::util::metadata::MetadataSpec::OnSide($crate::mmtk::util::metadata::side_metadata::SideMetadataSpec {
name: stringify!($spec_name),
is_global: Self::IS_GLOBAL,
offset: $crate::mmtk::util::metadata::side_metadata::SideMetadataOffset::layout_after(side_spec),
log_num_of_bits: Self::LOG_NUM_BITS,
log_bytes_in_region: $side_min_obj_size as usize,
}))
}
/// Return the inner `[crate::util::metadata::MetadataSpec]` for the metadata type.
pub const fn as_spec(&self) -> &$crate::mmtk::util::metadata::MetadataSpec {
&self.0
}
/// Return the number of bits for the metadata type.
pub const fn num_bits(&self) -> usize {
1 << $log_num_bits
}
}
impl std::ops::Deref for $spec_name {
type Target = $crate::mmtk::util::metadata::MetadataSpec;
fn deref(&self) -> &Self::Target {
self.as_spec()
}
}
};
}
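A hedged usage sketch of the macro above; the spec name and bit parameters are assumptions chosen for illustration, not part of this commit:

define_vm_metadata_spec!(
    /// Example: a 2-bit, local (per-object) metadata spec.
    ExampleLocalSpec,
    false, // not global, i.e. local metadata
    1,     // log2 of the number of bits -> 2 bits
    3      // log2 of the minimum object size for side metadata -> 8-byte granularity
);

// The VM can then place the spec either in the object header or on the side:
const IN_HEADER: ExampleLocalSpec = ExampleLocalSpec::in_header(62);
const ON_SIDE: ExampleLocalSpec = ExampleLocalSpec::side_first();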


@@ -1,37 +1,68 @@
use std::marker::PhantomData;
use crate::{mm::MemoryManager, VirtualMachine};
-use easy_bitfield::BitFieldTrait;
-use header::{HashState, HashStateField, HASHCODE_OFFSET, OBJECT_HEADER_OFFSET, OBJECT_REF_OFFSET};
use header::{HashState, HASHCODE_OFFSET, OBJECT_HEADER_OFFSET, OBJECT_REF_OFFSET};
use mmtk::{
-util::{alloc::fill_alignment_gap, constants::LOG_BYTES_IN_ADDRESS, ObjectReference},
util::{alloc::fill_alignment_gap, constants::LOG_BYTES_IN_ADDRESS, metadata::MetadataSpec, ObjectReference},
vm::*,
};
use object::{MoveTarget, VMKitObject};
pub mod compression;
pub mod finalization;
pub mod header;
pub mod metadata;
pub mod object;
-pub mod compression;
pub struct VMKitObjectModel<VM: VirtualMachine>(PhantomData<VM>);
-pub const LOGGING_SIDE_METADATA_SPEC: VMGlobalLogBitSpec = VMGlobalLogBitSpec::side_first();
-pub const FORWARDING_POINTER_METADATA_SPEC: VMLocalForwardingPointerSpec =
-VMLocalForwardingPointerSpec::in_header(0);
-pub const FORWARDING_BITS_METADATA_SPEC: VMLocalForwardingBitsSpec =
-VMLocalForwardingBitsSpec::in_header(HashStateField::NEXT_BIT as _);
-pub const MARKING_METADATA_SPEC: VMLocalMarkBitSpec = VMLocalMarkBitSpec::side_first();
-pub const LOS_METADATA_SPEC: VMLocalLOSMarkNurserySpec =
-VMLocalLOSMarkNurserySpec::in_header(HashStateField::NEXT_BIT as _);
/// 1-bit local metadata for spaces that need to mark an object.
///
/// Always defined on the side in VMKit.
pub const MARK_BIT_SPEC: VMLocalMarkBitSpec = VMLocalMarkBitSpec::side_first();
impl<VM: VirtualMachine> ObjectModel<MemoryManager<VM>> for VMKitObjectModel<VM> {
-const GLOBAL_LOG_BIT_SPEC: mmtk::vm::VMGlobalLogBitSpec = LOGGING_SIDE_METADATA_SPEC;
-const LOCAL_FORWARDING_POINTER_SPEC: mmtk::vm::VMLocalForwardingPointerSpec =
-FORWARDING_POINTER_METADATA_SPEC;
-const LOCAL_FORWARDING_BITS_SPEC: mmtk::vm::VMLocalForwardingBitsSpec =
-FORWARDING_BITS_METADATA_SPEC;
-const LOCAL_MARK_BIT_SPEC: mmtk::vm::VMLocalMarkBitSpec = MARKING_METADATA_SPEC;
-const LOCAL_LOS_MARK_NURSERY_SPEC: mmtk::vm::VMLocalLOSMarkNurserySpec = LOS_METADATA_SPEC;
const GLOBAL_LOG_BIT_SPEC: VMGlobalLogBitSpec = VMGlobalLogBitSpec::side_first();
const LOCAL_MARK_BIT_SPEC: VMLocalMarkBitSpec = MARK_BIT_SPEC;
const LOCAL_FORWARDING_BITS_SPEC: VMLocalForwardingBitsSpec = {
#[cfg(feature = "vmside_forwarding")]
{
VM::LOCAL_FORWARDING_BITS_SPEC
}
#[cfg(not(feature = "vmside_forwarding"))]
{
VMLocalForwardingBitsSpec::side_after(&MARK_BIT_SPEC.as_spec())
}
};
const LOCAL_FORWARDING_POINTER_SPEC: VMLocalForwardingPointerSpec = {
#[cfg(feature = "vmside_forwarding")]
{
VM::LOCAL_FORWARDING_POINTER_SPEC
}
#[cfg(not(feature = "vmside_forwarding"))]
{
VMLocalForwardingPointerSpec::in_header(0)
}
};
const LOCAL_LOS_MARK_NURSERY_SPEC: VMLocalLOSMarkNurserySpec = {
let spec_after = if Self::LOCAL_FORWARDING_BITS_SPEC.as_spec().is_in_header() {
Self::LOCAL_MARK_BIT_SPEC.as_spec()
} else {
Self::LOCAL_FORWARDING_BITS_SPEC.as_spec()
};
VMLocalLOSMarkNurserySpec::side_after(&spec_after)
};
#[cfg(feature="object_pinning")]
const LOCAL_PINNING_BIT_SPEC: VMLocalPinningBitSpec = {
VMLocalPinningBitSpec::side_after(&Self::LOCAL_LOS_MARK_NURSERY_SPEC.as_spec())
};
const OBJECT_REF_OFFSET_LOWER_BOUND: isize = OBJECT_REF_OFFSET;
const UNIFIED_OBJECT_REFERENCE_ADDRESS: bool = false;
@@ -91,12 +122,9 @@ impl<VM: VirtualMachine> ObjectModel<MemoryManager<VM>> for VMKitObjectModel<VM>
let res_addr = to + OBJECT_REF_OFFSET + vmkit_from.hashcode_overhead::<VM, true>();
debug_assert!(!res_addr.is_zero());
// SAFETY: we just checked that the address is not zero
-unsafe {
-ObjectReference::from_raw_address_unchecked(res_addr)
-}
unsafe { ObjectReference::from_raw_address_unchecked(res_addr) }
}
fn get_type_descriptor(_reference: mmtk::util::ObjectReference) -> &'static [i8] {
unreachable!()
}
@@ -117,7 +145,6 @@ impl<VM: VirtualMachine> ObjectModel<MemoryManager<VM>> for VMKitObjectModel<VM>
VMKitObject::from_objref_nullable(Some(object)).get_size_when_copied::<VM>()
}
fn ref_to_object_start(reference: mmtk::util::ObjectReference) -> mmtk::util::Address {
VMKitObject::from_objref_nullable(Some(reference)).object_start::<VM>() VMKitObject::from_objref_nullable(Some(reference)).object_start::<VM>()
} }
@ -132,7 +159,12 @@ impl<VM: VirtualMachine> ObjectModel<MemoryManager<VM>> for VMKitObjectModel<VM>
} }
impl<VM: VirtualMachine> VMKitObjectModel<VM> { impl<VM: VirtualMachine> VMKitObjectModel<VM> {
fn move_object(from_obj: VMKitObject, mut to: MoveTarget, num_bytes: usize) -> VMKitObject { fn move_object(from_obj: VMKitObject, mut to: MoveTarget, num_bytes: usize) -> VMKitObject {
log::trace!("move_object: from_obj: {}, to: {}, bytes={}", from_obj.as_address(), to, num_bytes); log::debug!(
"move_object: from_obj: {}, to: {}, bytes={}",
from_obj.as_address(),
to,
num_bytes
);
let mut copy_bytes = num_bytes; let mut copy_bytes = num_bytes;
let mut obj_ref_offset = OBJECT_REF_OFFSET; let mut obj_ref_offset = OBJECT_REF_OFFSET;
let hash_state = from_obj.header::<VM>().hash_state(); let hash_state = from_obj.header::<VM>().hash_state();
@ -140,7 +172,6 @@ impl<VM: VirtualMachine> VMKitObjectModel<VM> {
// Adjust copy bytes and object reference offset based on hash state // Adjust copy bytes and object reference offset based on hash state
match hash_state { match hash_state {
HashState::Hashed => { HashState::Hashed => {
copy_bytes -= size_of::<usize>(); // Exclude hash code from copy copy_bytes -= size_of::<usize>(); // Exclude hash code from copy
if let MoveTarget::ToAddress(ref mut addr) = to { if let MoveTarget::ToAddress(ref mut addr) = to {
*addr += size_of::<usize>(); // Adjust address for hash code *addr += size_of::<usize>(); // Adjust address for hash code
@ -178,7 +209,9 @@ impl<VM: VirtualMachine> VMKitObjectModel<VM> {
// Update hash state if necessary // Update hash state if necessary
if hash_state == HashState::Hashed { if hash_state == HashState::Hashed {
unsafe { unsafe {
let hash_code = from_obj.as_address().as_usize() >> LOG_BYTES_IN_ADDRESS; let hash_code = from_obj.as_address().as_usize() >> LOG_BYTES_IN_ADDRESS;
to_obj to_obj
.as_address() .as_address()
.offset(HASHCODE_OFFSET) .offset(HASHCODE_OFFSET)
@ -193,4 +226,16 @@ impl<VM: VirtualMachine> VMKitObjectModel<VM> {
to_obj to_obj
} }
pub const fn last_side_metadata_spec() -> &'static MetadataSpec {
#[cfg(feature="object_pinning")]
{
Self::LOCAL_PINNING_BIT_SPEC.as_spec()
}
#[cfg(not(feature="object_pinning"))]
{
Self::LOCAL_LOS_MARK_NURSERY_SPEC.as_spec()
}
}
} }
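
For illustration, a hedged sketch of how an embedding VM could lay out one extra local side-metadata bit right after everything VMKit defines, using `last_side_metadata_spec()` above (the VM type `MyVM`, the spec name, and the field values are illustrative assumptions):

use mmtk::util::metadata::side_metadata::{SideMetadataOffset, SideMetadataSpec};

// Illustrative: one local bit per 8-byte granule, placed after VMKit's last local spec.
const MY_VM_BIT: SideMetadataSpec = SideMetadataSpec {
    name: "my_vm_bit",
    is_global: false,
    offset: SideMetadataOffset::layout_after(
        VMKitObjectModel::<MyVM>::last_side_metadata_spec().extract_side_spec(),
    ),
    log_num_of_bits: 0,
    log_bytes_in_region: 3,
};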


@ -47,6 +47,10 @@ static COMPRESSED_OPS: CompressedOpsStorage =
range: (Address::ZERO, Address::ZERO), range: (Address::ZERO, Address::ZERO),
})); }));
pub const MY_RANDOM_ADDR: usize = 0x500_000_000 + 6 * 1024 * 1024 * 1024;
pub const COMPRESSED: usize = (MY_RANDOM_ADDR - 0x500_000_000) >> 3;
impl CompressedOps { impl CompressedOps {
/// Initialize compressed object pointers. /// Initialize compressed object pointers.
/// ///


@ -0,0 +1,127 @@
//! Simple finalization mechanism implementation.
//!
//! Implements finalizers which are unordered and can revive objects; it also
//! allows registering objects for destruction without finalization.
//!
use std::{cell::RefCell, panic::AssertUnwindSafe};
use mmtk::{
scheduler::GCWorker,
util::ObjectReference,
vm::{ObjectTracer, ObjectTracerContext},
};
use crate::{mm::MemoryManager, sync::Monitor, VirtualMachine};
use super::object::VMKitObject;
pub struct Finalizer {
pub object: VMKitObject,
pub finalizer: FinalizerKind,
}
pub enum FinalizerKind {
Finalized,
    /// Unordered finalizer: revives the object and then executes the provided
    /// closure as a finalizer. The closure can keep the object alive if required
    /// (but that's not recommended).
Unordered(Box<dyn FnOnce(VMKitObject) + Send>),
    /// Drop finalizer: does not revive the object nor provide access to it.
    /// This can be used to close open FDs or free off-heap data.
Drop(Box<dyn FnOnce() + Send>),
}
impl FinalizerKind {
pub fn take(&mut self) -> Self {
std::mem::replace(self, FinalizerKind::Finalized)
}
pub fn is_finalized(&self) -> bool {
matches!(self, FinalizerKind::Finalized)
}
}
pub struct FinalizerProcessing;
pub static REGISTERED_FINALIZERS: Monitor<RefCell<Vec<Finalizer>>> =
Monitor::new(RefCell::new(Vec::new()));
pub static PENDING_FINALIZERS: Monitor<RefCell<Vec<Finalizer>>> =
Monitor::new(RefCell::new(Vec::new()));
impl FinalizerProcessing {
pub fn process<VM: VirtualMachine>(
tls: &mut GCWorker<MemoryManager<VM>>,
tracer_context: impl ObjectTracerContext<MemoryManager<VM>>,
) -> bool {
let vm = VM::get();
assert!(
vm.vmkit().mmtk.gc_in_progress(),
"Attempt to process finalizers outside of GC"
);
let registered = REGISTERED_FINALIZERS.lock_no_handshake();
let pending = PENDING_FINALIZERS.lock_no_handshake();
let mut registered = registered.borrow_mut();
let mut pending = pending.borrow_mut();
let mut closure_required = false;
tracer_context.with_tracer(tls, |tracer| {
registered.retain_mut(|finalizer| {
let Ok(object) = finalizer.object.try_into() else {
return false;
};
let object: ObjectReference = object;
if object.is_reachable() {
let new_object = object.get_forwarded_object().unwrap_or(object);
finalizer.object = VMKitObject::from(new_object);
true
} else {
let new_object = tracer.trace_object(object);
pending.push(Finalizer {
finalizer: std::mem::replace(
&mut finalizer.finalizer,
FinalizerKind::Finalized,
),
object: VMKitObject::from(new_object),
});
closure_required = true;
false
}
});
});
closure_required
}
}
impl<VM: VirtualMachine> MemoryManager<VM> {
pub fn register_finalizer(object: VMKitObject, callback: Box<dyn FnOnce(VMKitObject) + Send>) {
let finalizer = Finalizer {
object,
finalizer: FinalizerKind::Unordered(callback),
};
REGISTERED_FINALIZERS
.lock_no_handshake()
.borrow_mut()
.push(finalizer);
}
pub fn run_finalizers() -> usize {
let vm = VM::get();
let mut count = 0;
while let Some(mut finalizer) = mmtk::memory_manager::get_finalized_object(&vm.vmkit().mmtk)
{
let _ = std::panic::catch_unwind(AssertUnwindSafe(|| finalizer.run()));
count += 1;
}
count
}
pub fn get_finalized_object() -> Option<Finalizer> {
PENDING_FINALIZERS.lock_no_handshake().borrow_mut().pop()
}
}
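
A hedged usage sketch of the finalization API above (the VM type `MyVM` and the object being registered are assumptions; the callback shape follows `register_finalizer` as declared in this file):

// Register an unordered finalizer: the callback runs once GC finds the object unreachable.
fn register_example<MyVM: VirtualMachine>(obj: VMKitObject) {
    MemoryManager::<MyVM>::register_finalizer(
        obj,
        Box::new(|reviving: VMKitObject| {
            // The object is temporarily revived here; avoid stashing it anywhere long-lived.
            let _ = reviving;
        }),
    );
}

// Later, on a mutator thread outside of GC, drain and run any pending finalizers:
// let ran = MemoryManager::<MyVM>::run_finalizers();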


@ -3,14 +3,18 @@ use std::marker::PhantomData;
use crate::VirtualMachine; use crate::VirtualMachine;
use easy_bitfield::*; use easy_bitfield::*;
use super::object::ADDRESS_BASED_HASHING;
/// Offset from allocation pointer to the actual object start. /// Offset from allocation pointer to the actual object start.
pub const OBJECT_REF_OFFSET: isize = 8; pub const OBJECT_REF_OFFSET: isize = 8;
/// Object header behind object. /// Object header behind object.
pub const OBJECT_HEADER_OFFSET: isize = -OBJECT_REF_OFFSET; pub const OBJECT_HEADER_OFFSET: isize = -OBJECT_REF_OFFSET;
pub const HASHCODE_OFFSET: isize = -(OBJECT_REF_OFFSET + size_of::<usize>() as isize); pub const HASHCODE_OFFSET: isize = -(OBJECT_REF_OFFSET + size_of::<usize>() as isize);
pub type MetadataField = BitField<u64, usize, 0, 58, false>; pub const METADATA_BIT_LIMIT: usize = if ADDRESS_BASED_HASHING { usize::BITS as usize - 2 } else { usize::BITS as usize - 1 };
pub type HashStateField = BitField<u64, HashState, { MetadataField::NEXT_BIT }, 2, false>;
pub type MetadataField = BitField<usize, usize, 0, METADATA_BIT_LIMIT, false>;
pub type HashStateField = BitField<usize, HashState, { MetadataField::NEXT_BIT }, 2, false>;
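
For orientation, a hedged sanity-check sketch of the bit budget these definitions imply (illustrative only, not part of the crate):

// With `address_based_hashing` enabled on a 64-bit target, the VM metadata field gets
// bits [0, 62) and the hash state the top 2 bits; with it disabled, metadata gets 63 bits
// and the hash-state bits are never consulted.
#[cfg(target_pointer_width = "64")]
const _: () = assert!(!ADDRESS_BASED_HASHING || METADATA_BIT_LIMIT == 62);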
#[derive(Debug, Clone, Copy, PartialEq, Eq)] #[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum HashState { pub enum HashState {
@ -19,8 +23,8 @@ pub enum HashState {
HashedAndMoved, HashedAndMoved,
} }
impl FromBitfield<u64> for HashState { impl FromBitfield<usize> for HashState {
fn from_bitfield(value: u64) -> Self { fn from_bitfield(value: usize) -> Self {
match value { match value {
0 => Self::Unhashed, 0 => Self::Unhashed,
1 => Self::Hashed, 1 => Self::Hashed,
@ -39,8 +43,8 @@ impl FromBitfield<u64> for HashState {
} }
} }
impl ToBitfield<u64> for HashState { impl ToBitfield<usize> for HashState {
fn to_bitfield(self) -> u64 { fn to_bitfield(self) -> usize {
match self { match self {
Self::Unhashed => 0, Self::Unhashed => 0,
Self::Hashed => 1, Self::Hashed => 1,
@ -58,7 +62,7 @@ impl ToBitfield<u64> for HashState {
} }
pub struct HeapObjectHeader<VM: VirtualMachine> { pub struct HeapObjectHeader<VM: VirtualMachine> {
pub metadata: AtomicBitfieldContainer<u64>, pub metadata: AtomicBitfieldContainer<usize>,
pub marker: PhantomData<VM>, pub marker: PhantomData<VM>,
} }
@ -71,6 +75,9 @@ impl<VM: VirtualMachine> HeapObjectHeader<VM> {
} }
pub fn hash_state(&self) -> HashState { pub fn hash_state(&self) -> HashState {
if !ADDRESS_BASED_HASHING {
return HashState::Unhashed;
}
self.metadata.read::<HashStateField>() self.metadata.read::<HashStateField>()
} }
@ -83,9 +90,7 @@ impl<VM: VirtualMachine> HeapObjectHeader<VM> {
} }
pub fn set_metadata(&self, metadata: VM::Metadata) { pub fn set_metadata(&self, metadata: VM::Metadata) {
self.metadata.update_synchronized::<MetadataField>(metadata.to_bitfield() as _); self.metadata
.update_synchronized::<MetadataField>(metadata.to_bitfield() as _);
} }
} }


@ -14,6 +14,7 @@ pub struct GCMetadata<VM: VirtualMachine> {
pub instance_size: usize, pub instance_size: usize,
pub compute_size: Option<fn(VMKitObject) -> usize>, pub compute_size: Option<fn(VMKitObject) -> usize>,
pub alignment: usize, pub alignment: usize,
pub compute_alignment: Option<fn(VMKitObject) -> usize>,
} }
#[derive(Debug)] #[derive(Debug)]
@ -31,7 +32,7 @@ pub enum TraceCallback<VM: VirtualMachine> {
/// ///
/// Types which implement `Metadata` must also be convertible to and from bitfields. /// Types which implement `Metadata` must also be convertible to and from bitfields.
pub trait Metadata<VM: VirtualMachine>: pub trait Metadata<VM: VirtualMachine>:
ToBitfield<u64> + FromBitfield<u64> + ToSlot<VM::Slot> ToBitfield<usize> + FromBitfield<usize> + ToSlot<VM::Slot>
{ {
/// Size of the metadata in bits. Must be `<= 62`. /// Size of the metadata in bits. Must be `<= 62`.
const METADATA_BIT_SIZE: usize; const METADATA_BIT_SIZE: usize;
@ -81,16 +82,16 @@ macro_rules! make_uncooperative_metadata {
unreachable!() unreachable!()
} }
} }
impl<VM: $crate::VirtualMachine> $crate::object_model::metadata::ToBitfield<u64> for $name { impl<VM: $crate::VirtualMachine> $crate::object_model::metadata::ToBitfield<usize> for $name {
fn to_bitfield(self) -> u64 { fn to_bitfield(self) -> usize {
self.wsize as u64 self.wsize as usize
} }
} }
impl<VM: $crate::VirtualMachine> $crate::object_model::metadata::FromBitfield<u64> impl<VM: $crate::VirtualMachine> $crate::object_model::metadata::FromBitfield<usize>
for $name for $name
{ {
fn from_bitfield(value: u64) -> Self { fn from_bitfield(value: usize) -> Self {
Self { Self {
wsize: value as usize, wsize: value as usize,
} }
@ -99,9 +100,9 @@ macro_rules! make_uncooperative_metadata {
}; };
} }
impl<VM: VirtualMachine> ToBitfield<u64> for &'static GCMetadata<VM> { impl<VM: VirtualMachine> ToBitfield<usize> for &'static GCMetadata<VM> {
fn to_bitfield(self) -> u64 { fn to_bitfield(self) -> usize {
let res = self as *const GCMetadata<VM> as usize as u64; let res = self as *const GCMetadata<VM> as usize as usize;
res res
} }
@ -114,8 +115,8 @@ impl<VM: VirtualMachine> ToBitfield<u64> for &'static GCMetadata<VM> {
} }
} }
impl<VM: VirtualMachine> FromBitfield<u64> for &'static GCMetadata<VM> { impl<VM: VirtualMachine> FromBitfield<usize> for &'static GCMetadata<VM> {
fn from_bitfield(value: u64) -> Self { fn from_bitfield(value: usize) -> Self {
unsafe { &*(value as usize as *const GCMetadata<VM>) } unsafe { &*(value as usize as *const GCMetadata<VM>) }
} }


@ -3,12 +3,14 @@ use crate::threading::Thread;
use crate::{mm::MemoryManager, VirtualMachine}; use crate::{mm::MemoryManager, VirtualMachine};
use atomic::Atomic; use atomic::Atomic;
use core::ops::Range; use core::ops::Range;
use std::hash::Hash; use std::mem::MaybeUninit;
use std::ptr::NonNull;
use mmtk::util::{ use mmtk::util::{
constants::LOG_BYTES_IN_ADDRESS, conversions::raw_align_up, Address, ObjectReference, constants::LOG_BYTES_IN_ADDRESS, conversions::raw_align_up, Address, ObjectReference,
}; };
use mmtk::vm::slot::{MemorySlice, SimpleSlot, Slot}; use mmtk::vm::slot::{MemorySlice, SimpleSlot, Slot};
use std::fmt; use std::fmt;
use std::hash::Hash;
use std::marker::PhantomData; use std::marker::PhantomData;
use super::{ use super::{
@ -19,6 +21,13 @@ use super::{
metadata::Metadata, metadata::Metadata,
}; };
/// Is address-based hashing enabled? If true,
/// the object header uses 2 bits to indicate the hash state, and if the GC moves
/// the object, its hash is stored in the object itself.
///
/// When disabled, `hashcode()` instead calls into the VM to get the hashcode.
pub const ADDRESS_BASED_HASHING: bool = cfg!(feature="address_based_hashing");
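
As a hedged illustration of the scheme described above (the state names mirror `HashState` elsewhere in this diff; the overhead function is a sketch of the copy-time cost, not the crate's exact logic):

use std::mem::size_of;

// Lifecycle sketch: Unhashed -> Hashed on the first hashcode() call (hash derived from the
// address), Hashed -> HashedAndMoved when the GC copies the object; after the move the hash
// is preserved in a word stored just before the object.
#[derive(Clone, Copy, PartialEq, Eq)]
enum HashStateSketch { Unhashed, Hashed, HashedAndMoved }

// Extra bytes a copy must reserve so the original hashcode survives the move.
fn copy_overhead(state: HashStateSketch) -> usize {
    match state {
        HashStateSketch::Unhashed => 0,
        HashStateSketch::Hashed | HashStateSketch::HashedAndMoved => size_of::<usize>(),
    }
}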
#[repr(transparent)] #[repr(transparent)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)] #[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct VMKitObject(Address); pub struct VMKitObject(Address);
@ -127,8 +136,18 @@ impl VMKitObject {
/// # Returns /// # Returns
/// ///
/// * `usize` - The alignment of the object. /// * `usize` - The alignment of the object.
pub fn alignment<VM: VirtualMachine>(&self) -> usize { pub fn alignment<VM: VirtualMachine>(self) -> usize {
self.header::<VM>().metadata().gc_metadata().alignment let alignment = self.header::<VM>().metadata().gc_metadata().alignment;
if alignment == 0 {
return self
.header::<VM>()
.metadata()
.gc_metadata()
.compute_alignment
.map(|f| f(self))
.unwrap_or(VM::MAX_ALIGNMENT);
}
alignment
} }
/// Returns the number of bytes used by the `VMKitObject`. /// Returns the number of bytes used by the `VMKitObject`.
@ -198,6 +217,9 @@ impl VMKitObject {
/// * `usize` - The hashcode overhead. /// * `usize` - The hashcode overhead.
#[inline(always)] #[inline(always)]
pub fn hashcode_overhead<VM: VirtualMachine, const WHEN_COPIED: bool>(&self) -> usize { pub fn hashcode_overhead<VM: VirtualMachine, const WHEN_COPIED: bool>(&self) -> usize {
if !ADDRESS_BASED_HASHING {
return 0;
}
let hash_state = self.header::<VM>().hash_state(); let hash_state = self.header::<VM>().hash_state();
let has_hashcode = if WHEN_COPIED { let has_hashcode = if WHEN_COPIED {
@ -257,7 +279,10 @@ impl VMKitObject {
self.bytes_required_when_copied::<VM>() self.bytes_required_when_copied::<VM>()
} }
pub fn hashcode<VM: VirtualMachine>(&self) -> usize { pub fn hashcode<VM: VirtualMachine>(self) -> usize {
if !ADDRESS_BASED_HASHING {
return VM::compute_hashcode(self);
}
let header = self.header::<VM>(); let header = self.header::<VM>();
match header.hash_state() { match header.hash_state() {
HashState::HashedAndMoved => { HashState::HashedAndMoved => {
@ -644,12 +669,12 @@ impl<SL: SlotExtra> Eq for SimpleMemorySlice<SL> {}
impl<SL: SlotExtra> Clone for SimpleMemorySlice<SL> { impl<SL: SlotExtra> Clone for SimpleMemorySlice<SL> {
fn clone(&self) -> Self { fn clone(&self) -> Self {
Self { range: self.range.clone() } Self {
range: self.range.clone(),
}
} }
} }
pub struct SimpleMemorySliceRangeIterator<SL: SlotExtra = SimpleSlot> { pub struct SimpleMemorySliceRangeIterator<SL: SlotExtra = SimpleSlot> {
cursor: Address, cursor: Address,
end: Address, end: Address,
@ -715,3 +740,103 @@ impl<SL: SlotExtra> MemorySlice for SimpleMemorySlice<SL> {
} }
} }
} }
pub struct GcPtr<T> {
ptr: NonNull<T>,
}
impl<T> GcPtr<T> {
pub fn new(ptr: NonNull<T>) -> Self {
Self { ptr }
}
pub fn as_ptr(&self) -> *mut T {
self.ptr.as_ptr()
}
pub fn as_ref(&self) -> &T {
unsafe { self.ptr.as_ref() }
}
pub fn as_mut(&mut self) -> &mut T {
unsafe { self.ptr.as_mut() }
}
pub fn as_address(&self) -> Address {
Address::from_mut_ptr(self.ptr.as_ptr())
}
pub fn from_address(address: Address) -> Self {
assert!(!address.is_zero());
Self {
ptr: NonNull::new(address.to_mut_ptr()).unwrap(),
}
}
pub fn from_ptr(ptr: *mut T) -> Self {
assert!(!ptr.is_null());
Self {
ptr: NonNull::new(ptr).unwrap(),
}
}
}
impl<T> GcPtr<MaybeUninit<T>> {
pub unsafe fn assume_init(self) -> GcPtr<T> {
GcPtr::new(self.ptr.cast())
}
}
impl<T> std::ops::Deref for GcPtr<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
self.as_ref()
}
}
impl<T> std::ops::DerefMut for GcPtr<T> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.as_mut()
}
}
impl<T: fmt::Debug> fmt::Debug for GcPtr<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{:?}", **self)
}
}
impl<T: Hash> Hash for GcPtr<T> {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
(**self).hash(state);
}
}
impl<T: PartialEq> PartialEq for GcPtr<T> {
fn eq(&self, other: &Self) -> bool {
**self == **other
}
}
impl<T: Eq> Eq for GcPtr<T> {}
impl<T: PartialOrd> PartialOrd for GcPtr<T> {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
(**self).partial_cmp(&**other)
}
}
impl<T: Ord> Ord for GcPtr<T> {
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
(**self).cmp(&**other)
}
}
impl<T> Clone for GcPtr<T> {
fn clone(&self) -> Self {
Self::new(self.ptr)
}
}
impl<T> Copy for GcPtr<T> {}
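
A brief hedged sketch of how `GcPtr<T>` might be used to view a raw heap address as a typed reference (the `Header` payload type is hypothetical, and the address is assumed to point at a live, properly initialized value allocated elsewhere):

// Hypothetical payload type living in the GC heap.
#[repr(C)]
struct Header { kind: u32, length: u32 }

fn read_length(addr: Address) -> u32 {
    // `from_address` asserts the address is non-zero; the caller guarantees validity.
    let ptr: GcPtr<Header> = GcPtr::from_address(addr);
    ptr.length // Deref yields `&Header`
}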


@ -1,233 +1,17 @@
use std::{ //! Synchronization primitives for VMKit.
mem::ManuallyDrop, //!
num::NonZeroU64, //!
ops::Deref, //! Provides synchronization primitives which are friendly to our thread system. Most of the types
sync::atomic::{AtomicU64, AtomicUsize, Ordering}, //! provide `*_with_handshake` and `*_no_handshake` methods. The main difference between these two
    time::Duration, //! methods is that the former will notify the scheduler that the thread is blocked, while the latter
}; //! will not. Most of the time your code should use the `*_with_handshake` methods, so that GC
//! or other tasks can be scheduled while the thread is blocked. `*_no_handshake` methods should be
//! used when the thread is accessing GC data structures, or when the thread is not allowed to be
//! blocked.
pub mod semaphore;
pub mod monitor;
use parking_lot::{Condvar, Mutex, MutexGuard, WaitTimeoutResult};
use crate::{ pub use monitor::*;
threading::{parked_scope, Thread, ThreadContext},
VirtualMachine,
};
fn get_thread_id() -> NonZeroU64 {
thread_local! {
static KEY: u64 = 0;
}
KEY.with(|x| {
NonZeroU64::new(x as *const _ as u64).expect("thread-local variable address is null")
})
}
/// Implementation of a heavy lock and condition variable implemented using
/// the primitives available from the `parking_lot`. Currently we use
/// a `Mutex` and `Condvar`.
/// <p>
/// It is perfectly safe to use this throughout the VM for locking. It is
/// meant to provide roughly the same functionality as ReentrantMutex combined with Condvar,
/// except:
/// <ul>
/// <li>This struct provides a faster slow path than ReentrantMutex.</li>
/// <li>This struct provides a slower fast path than ReentrantMutex.</li>
/// <li>This struct will work in the inner guts of the VM runtime because
/// it gives you the ability to lock and unlock, as well as wait and
/// notify, without using any other VM runtime functionality.</li>
/// <li>This struct allows you to optionally block without letting the thread
/// system know that you are blocked. The benefit is that you can
/// perform synchronization without depending on VM thread subsystem functionality.
/// However, most of the time, you should use the methods that inform
/// the thread system that you are blocking. Methods that have the
/// `with_handshake` suffix will inform the thread system if you are blocked,
/// while methods that do not have the suffix will either not block
/// (as is the case with `unlock()` and `broadcast()`)
/// or will block without letting anyone know (like `lock_no_handshake()`
/// and `wait_no_handshake()`). Not letting the threading
/// system know that you are blocked may cause things like GC to stall
/// until you unblock.</li>
/// <li>This struct does not provide mutable access to the protected data as it is unsound,
/// instead use `RefCell` to mutate the protected data.</li>
/// </ul>
pub struct Monitor<T> {
mutex: Mutex<T>,
cvar: Condvar,
rec_count: AtomicUsize,
holder: AtomicU64,
}
impl<T> Monitor<T> {
pub const fn new(value: T) -> Self {
Self {
mutex: Mutex::new(value),
cvar: Condvar::new(),
rec_count: AtomicUsize::new(0),
holder: AtomicU64::new(0),
}
}
pub fn lock_no_handshake(&self) -> MonitorGuard<T> {
let my_slot = get_thread_id().get();
let guard = if self.holder.load(Ordering::Relaxed) != my_slot {
let guard = self.mutex.lock();
self.holder.store(my_slot, Ordering::Release);
MonitorGuard {
monitor: self,
guard: ManuallyDrop::new(guard),
}
} else {
MonitorGuard {
monitor: self,
guard: unsafe { ManuallyDrop::new(self.mutex.make_guard_unchecked()) },
}
};
self.rec_count.fetch_add(1, Ordering::Relaxed);
guard
}
pub fn lock_with_handshake<VM: VirtualMachine>(&self) -> MonitorGuard<T> {
let my_slot = get_thread_id().get();
let guard = if my_slot != self.holder.load(Ordering::Relaxed) {
let guard = self.lock_with_handshake_no_rec::<VM>();
self.holder.store(my_slot, Ordering::Release);
guard
} else {
MonitorGuard {
monitor: self,
guard: unsafe { ManuallyDrop::new(self.mutex.make_guard_unchecked()) },
}
};
self.rec_count.fetch_add(1, Ordering::Relaxed);
guard
}
fn lock_with_handshake_no_rec<VM: VirtualMachine>(&self) -> MonitorGuard<'_, T> {
let tls = Thread::<VM>::current();
tls.context.save_thread_state();
let mutex_guard = loop {
Thread::<VM>::enter_native();
let guard = self.mutex.lock();
if Thread::<VM>::attempt_leave_native_no_block() {
break guard;
} else {
drop(guard);
Thread::<VM>::leave_native();
}
};
MonitorGuard {
monitor: self,
guard: ManuallyDrop::new(mutex_guard),
}
}
pub fn notify(&self) {
self.cvar.notify_one();
}
pub fn notify_all(&self) {
self.cvar.notify_all();
}
pub unsafe fn relock_with_handshake<VM: VirtualMachine>(
&self,
rec_count: usize,
) -> MonitorGuard<'_, T> {
let thread = Thread::<VM>::current();
thread.context.save_thread_state();
let guard = loop {
Thread::<VM>::enter_native();
let lock = self.mutex.lock();
if Thread::<VM>::attempt_leave_native_no_block() {
break lock;
} else {
drop(lock);
Thread::<VM>::leave_native();
}
};
self.holder.store(get_thread_id().get(), Ordering::Relaxed);
self.rec_count.store(rec_count, Ordering::Relaxed);
MonitorGuard {
monitor: self,
guard: ManuallyDrop::new(guard),
}
}
}
pub struct MonitorGuard<'a, T> {
monitor: &'a Monitor<T>,
guard: ManuallyDrop<MutexGuard<'a, T>>,
}
impl<T> Deref for MonitorGuard<'_, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.guard
}
}
impl<'a, T> MonitorGuard<'a, T> {
pub fn wait_no_handshake(&mut self) {
let rec_count = self.monitor.rec_count.swap(0, Ordering::Relaxed);
let holder = self.monitor.holder.swap(0, Ordering::Relaxed);
self.monitor.cvar.wait(&mut self.guard);
self.monitor.rec_count.store(rec_count, Ordering::Relaxed);
self.monitor.holder.store(holder, Ordering::Relaxed);
}
pub fn wait_for_no_handshake(&mut self, timeout: Duration) -> WaitTimeoutResult {
let rec_count = self.monitor.rec_count.swap(0, Ordering::Relaxed);
let holder = self.monitor.holder.swap(0, Ordering::Relaxed);
let result = self.monitor.cvar.wait_for(&mut self.guard, timeout);
self.monitor.rec_count.store(rec_count, Ordering::Relaxed);
self.monitor.holder.store(holder, Ordering::Relaxed);
result
}
pub fn notify(&self) {
self.monitor.cvar.notify_one();
}
pub fn notify_all(&self) {
self.monitor.cvar.notify_all();
}
pub fn monitor(&self) -> &Monitor<T> {
self.monitor
}
pub unsafe fn unlock_completely(&mut self) -> usize {
let result = self.monitor.rec_count.load(Ordering::Relaxed);
self.monitor.rec_count.store(0, Ordering::Relaxed);
self.monitor.holder.store(0, Ordering::Relaxed);
unsafe {
ManuallyDrop::drop(&mut self.guard);
}
result
}
pub fn wait_with_handshake<VM: VirtualMachine>(mut self) -> Self {
let t = Thread::<VM>::current();
t.context.save_thread_state();
let rec_count = parked_scope::<usize, VM>(|| {
self.wait_no_handshake();
let rec_count = unsafe { self.unlock_completely() };
rec_count
});
unsafe { self.monitor.relock_with_handshake::<VM>(rec_count) }
}
}
impl<'a, T> Drop for MonitorGuard<'a, T> {
fn drop(&mut self) {
if self.monitor.rec_count.fetch_sub(1, Ordering::Relaxed) == 1 {
self.monitor.holder.store(0, Ordering::Relaxed);
unsafe { ManuallyDrop::drop(&mut self.guard) };
}
}
}
pub use super::threading::parked_scope;

vmkit/src/sync/monitor.rs Normal file

@ -0,0 +1,234 @@
use std::{
mem::ManuallyDrop,
num::NonZeroU64,
ops::Deref,
sync::atomic::{AtomicU64, AtomicUsize, Ordering},
time::Duration,
};
use parking_lot::{Condvar, Mutex, MutexGuard, WaitTimeoutResult};
use crate::{
threading::{parked_scope, Thread, ThreadContext},
VirtualMachine,
};
fn get_thread_id() -> NonZeroU64 {
thread_local! {
static KEY: u64 = 0;
}
KEY.with(|x| {
NonZeroU64::new(x as *const _ as u64).expect("thread-local variable address is null")
})
}
/// Implementation of a heavy lock and condition variable implemented using
/// the primitives available from the `parking_lot`. Currently we use
/// a `Mutex` and `Condvar`.
/// <p>
/// It is perfectly safe to use this throughout the VM for locking. It is
/// meant to provide roughly the same functionality as ReentrantMutex combined with Condvar,
/// except:
/// <ul>
/// <li>This struct provides a faster slow path than ReentrantMutex.</li>
/// <li>This struct provides a slower fast path than ReentrantMutex.</li>
/// <li>This struct will work in the inner guts of the VM runtime because
/// it gives you the ability to lock and unlock, as well as wait and
/// notify, without using any other VM runtime functionality.</li>
/// <li>This struct allows you to optionally block without letting the thread
/// system know that you are blocked. The benefit is that you can
/// perform synchronization without depending on VM thread subsystem functionality.
/// However, most of the time, you should use the methods that inform
/// the thread system that you are blocking. Methods that have the
/// `with_handshake` suffix will inform the thread system if you are blocked,
/// while methods that do not have the suffix will either not block
/// (as is the case with `unlock()` and `broadcast()`)
/// or will block without letting anyone know (like `lock_no_handshake()`
/// and `wait_no_handshake()`). Not letting the threading
/// system know that you are blocked may cause things like GC to stall
/// until you unblock.</li>
/// <li>This struct does not provide mutable access to the protected data as it is unsound,
/// instead use `RefCell` to mutate the protected data.</li>
/// </ul>
pub struct Monitor<T> {
mutex: Mutex<T>,
cvar: Condvar,
rec_count: AtomicUsize,
holder: AtomicU64,
}
impl<T> Monitor<T> {
pub const fn new(value: T) -> Self {
Self {
mutex: Mutex::new(value),
cvar: Condvar::new(),
rec_count: AtomicUsize::new(0),
holder: AtomicU64::new(0),
}
}
pub fn lock_no_handshake(&self) -> MonitorGuard<T> {
let my_slot = get_thread_id().get();
let guard = if self.holder.load(Ordering::Relaxed) != my_slot {
let guard = self.mutex.lock();
self.holder.store(my_slot, Ordering::Release);
MonitorGuard {
monitor: self,
guard: ManuallyDrop::new(guard),
}
} else {
MonitorGuard {
monitor: self,
guard: unsafe { ManuallyDrop::new(self.mutex.make_guard_unchecked()) },
}
};
self.rec_count.fetch_add(1, Ordering::Relaxed);
guard
}
pub fn lock_with_handshake<VM: VirtualMachine>(&self) -> MonitorGuard<T> {
let my_slot = get_thread_id().get();
let guard = if my_slot != self.holder.load(Ordering::Relaxed) {
let guard = self.lock_with_handshake_no_rec::<VM>();
self.holder.store(my_slot, Ordering::Release);
guard
} else {
MonitorGuard {
monitor: self,
guard: unsafe { ManuallyDrop::new(self.mutex.make_guard_unchecked()) },
}
};
self.rec_count.fetch_add(1, Ordering::Relaxed);
guard
}
fn lock_with_handshake_no_rec<VM: VirtualMachine>(&self) -> MonitorGuard<'_, T> {
let tls = Thread::<VM>::current();
tls.context.save_thread_state();
let mutex_guard = loop {
Thread::<VM>::enter_native();
let guard = self.mutex.lock();
if Thread::<VM>::attempt_leave_native_no_block() {
break guard;
} else {
drop(guard);
Thread::<VM>::leave_native();
}
};
MonitorGuard {
monitor: self,
guard: ManuallyDrop::new(mutex_guard),
}
}
pub fn notify(&self) {
self.cvar.notify_one();
}
pub fn notify_all(&self) {
self.cvar.notify_all();
}
pub unsafe fn relock_with_handshake<VM: VirtualMachine>(
&self,
rec_count: usize,
) -> MonitorGuard<'_, T> {
let thread = Thread::<VM>::current();
thread.context.save_thread_state();
let guard = loop {
Thread::<VM>::enter_native();
let lock = self.mutex.lock();
if Thread::<VM>::attempt_leave_native_no_block() {
break lock;
} else {
drop(lock);
Thread::<VM>::leave_native();
}
};
self.holder.store(get_thread_id().get(), Ordering::Relaxed);
self.rec_count.store(rec_count, Ordering::Relaxed);
MonitorGuard {
monitor: self,
guard: ManuallyDrop::new(guard),
}
}
}
pub struct MonitorGuard<'a, T> {
monitor: &'a Monitor<T>,
guard: ManuallyDrop<MutexGuard<'a, T>>,
}
impl<T> Deref for MonitorGuard<'_, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.guard
}
}
impl<'a, T> MonitorGuard<'a, T> {
pub fn wait_no_handshake(&mut self) {
let rec_count = self.monitor.rec_count.swap(0, Ordering::Relaxed);
let holder = self.monitor.holder.swap(0, Ordering::Relaxed);
self.monitor.cvar.wait(&mut self.guard);
self.monitor.rec_count.store(rec_count, Ordering::Relaxed);
self.monitor.holder.store(holder, Ordering::Relaxed);
}
pub fn wait_for_no_handshake(&mut self, timeout: Duration) -> WaitTimeoutResult {
let rec_count = self.monitor.rec_count.swap(0, Ordering::Relaxed);
let holder = self.monitor.holder.swap(0, Ordering::Relaxed);
let result = self.monitor.cvar.wait_for(&mut self.guard, timeout);
self.monitor.rec_count.store(rec_count, Ordering::Relaxed);
self.monitor.holder.store(holder, Ordering::Relaxed);
result
}
pub fn notify(&self) {
self.monitor.cvar.notify_one();
}
pub fn notify_all(&self) {
self.monitor.cvar.notify_all();
}
pub fn monitor(&self) -> &Monitor<T> {
self.monitor
}
pub unsafe fn unlock_completely(&mut self) -> usize {
let result = self.monitor.rec_count.load(Ordering::Relaxed);
self.monitor.rec_count.store(0, Ordering::Relaxed);
self.monitor.holder.store(0, Ordering::Relaxed);
unsafe {
ManuallyDrop::drop(&mut self.guard);
}
result
}
pub fn wait_with_handshake<VM: VirtualMachine>(mut self) -> Self {
let t = Thread::<VM>::current();
t.context.save_thread_state();
let rec_count = parked_scope::<usize, VM>(|| {
self.wait_no_handshake();
let rec_count = unsafe { self.unlock_completely() };
rec_count
});
unsafe { self.monitor.relock_with_handshake::<VM>(rec_count) }
}
}
impl<'a, T> Drop for MonitorGuard<'a, T> {
fn drop(&mut self) {
if self.monitor.rec_count.fetch_sub(1, Ordering::Relaxed) == 1 {
self.monitor.holder.store(0, Ordering::Relaxed);
unsafe { ManuallyDrop::drop(&mut self.guard) };
}
}
}
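
A hedged usage sketch of the monitor above: a producer/consumer handoff that uses the handshake-aware lock so GC can proceed while the consumer waits (the VM type `MyVM` and the queue contents are assumptions; the method names follow this file):

use std::{cell::RefCell, collections::VecDeque};

static QUEUE: Monitor<RefCell<VecDeque<usize>>> = Monitor::new(RefCell::new(VecDeque::new()));

fn push<MyVM: VirtualMachine>(value: usize) {
    let guard = QUEUE.lock_with_handshake::<MyVM>();
    guard.borrow_mut().push_back(value);
    guard.notify_all(); // wake any waiting consumer
}

fn pop<MyVM: VirtualMachine>() -> usize {
    let mut guard = QUEUE.lock_with_handshake::<MyVM>();
    loop {
        if let Some(value) = guard.borrow_mut().pop_front() {
            return value;
        }
        // Blocks cooperatively: the thread system is told we are parked while waiting.
        guard = guard.wait_with_handshake::<MyVM>();
    }
}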


@ -18,14 +18,14 @@ use mmtk::{
use parking_lot::Once; use parking_lot::Once;
use crate::{ use crate::{
machine_context::PlatformRegisters,
mm::{ mm::{
conservative_roots::ConservativeRoots, conservative_roots::ConservativeRoots,
stack_bounds::{current_stack_pointer, StackBounds}, stack_bounds::{current_stack_pointer, StackBounds},
tlab::TLAB, tlab::TLAB,
AllocFastPath, MemoryManager, AllocFastPath, MemoryManager,
}, },
semaphore::Semaphore, sync::{semaphore::Semaphore, Monitor, MonitorGuard},
sync::{Monitor, MonitorGuard},
VirtualMachine, VirtualMachine,
}; };
@ -195,6 +195,7 @@ pub struct Thread<VM: VirtualMachine> {
should_block_for_gc: AtomicBool, should_block_for_gc: AtomicBool,
stack_pointer: Atomic<usize>, stack_pointer: Atomic<usize>,
platform_registers: UnsafeCell<MaybeUninit<PlatformRegisters>>,
suspend_count: AtomicUsize, suspend_count: AtomicUsize,
/// The monitor of the thread. Protects access to the thread's state. /// The monitor of the thread. Protects access to the thread's state.
monitor: Monitor<()>, monitor: Monitor<()>,
@ -264,6 +265,7 @@ impl<VM: VirtualMachine> Thread<VM> {
should_block_for_gc: AtomicBool::new(false), should_block_for_gc: AtomicBool::new(false),
monitor: Monitor::new(()), monitor: Monitor::new(()),
communication_lock: Monitor::new(()), communication_lock: Monitor::new(()),
platform_registers: UnsafeCell::new(MaybeUninit::zeroed()),
}) })
} }
@ -365,6 +367,9 @@ impl<VM: VirtualMachine> Thread<VM> {
self.tid.store(libc::gettid() as _, Ordering::Relaxed); self.tid.store(libc::gettid() as _, Ordering::Relaxed);
} }
init_current_thread(self.clone()); init_current_thread(self.clone());
self.stack_bounds
.set(StackBounds::current_thread_stack_bounds())
.unwrap();
let constraints = VM::get().vmkit().mmtk.get_plan().constraints(); let constraints = VM::get().vmkit().mmtk.get_plan().constraints();
self.max_non_los_default_alloc_bytes self.max_non_los_default_alloc_bytes
.set(constraints.max_non_los_default_alloc_bytes); .set(constraints.max_non_los_default_alloc_bytes);
@ -386,9 +391,6 @@ impl<VM: VirtualMachine> Thread<VM> {
_ => self.alloc_fastpath.set(AllocFastPath::None), _ => self.alloc_fastpath.set(AllocFastPath::None),
} }
self.stack_bounds
.set(StackBounds::current_thread_stack_bounds())
.unwrap();
let vmkit = VM::get().vmkit(); let vmkit = VM::get().vmkit();
if !self.is_collector_thread() && !self.ignore_handshakes_and_gc() { if !self.is_collector_thread() && !self.ignore_handshakes_and_gc() {
let mutator = mmtk::memory_manager::bind_mutator( let mutator = mmtk::memory_manager::bind_mutator(
@ -445,6 +447,7 @@ impl<VM: VirtualMachine> Thread<VM> {
self.add_about_to_terminate(); self.add_about_to_terminate();
} }
/// Start a main thread.
pub fn main<F, R>(context: VM::ThreadContext, f: F) -> Option<R> pub fn main<F, R>(context: VM::ThreadContext, f: F) -> Option<R>
where where
F: FnOnce() -> R + Send + 'static, F: FnOnce() -> R + Send + 'static,
@ -739,6 +742,40 @@ impl<VM: VirtualMachine> Thread<VM> {
self.check_block_no_save_context(); self.check_block_no_save_context();
} }
/// Save the thread's registers.
pub fn save_registers(&self) {
assert_eq!(
self.platform_handle(),
Self::current().platform_handle(),
"attempt to save registers of another thread"
);
let sp = current_stack_pointer();
self.stack_pointer.store(sp.as_usize(), Ordering::Relaxed);
unsafe {
cfgenius::cond! {
if macro(crate::macros::have_machine_context) {
let mut ucontext = MaybeUninit::<libc::ucontext_t>::uninit();
libc::getcontext(ucontext.as_mut_ptr());
let registers = crate::machine_context::registers_from_ucontext(ucontext.as_ptr()).read();
self.platform_registers.get().write(MaybeUninit::new(registers));
} else {
self.platform_registers.get().write(MaybeUninit::new(crate::machine_context::PlatformRegisters {
stack_pointer: sp.as_usize() as _
}));
}
}
}
}
/// Get the thread's registers.
///
    /// NOTE: Does not guarantee that the registers are valid,
    /// nor that the returned value reflects the currently active register state.
pub fn get_registers(&self) -> PlatformRegisters {
unsafe { self.platform_registers.get().read().assume_init() }
}
/// Return this thread's stack pointer. /// Return this thread's stack pointer.
/// ///
/// Note: Does not guarantee that the returned value is currently active stack pointer. /// Note: Does not guarantee that the returned value is currently active stack pointer.
@ -928,9 +965,15 @@ impl<VM: VirtualMachine> Thread<VM> {
A::is_blocked(self) A::is_blocked(self)
} }
    /// Change the thread state to `InNative` and store the current stack pointer.
    ///
    /// This method marks this thread as blocked for the thread system so that GC can run
    /// concurrently with it. Note that native code *must not* access the GC heap,
    /// as doing so can lead to undefined behavior.
pub fn enter_native() { pub fn enter_native() {
let t = Self::current(); let t = Self::current();
t.stack_pointer
.store(current_stack_pointer().as_usize(), Ordering::Relaxed);
let mut old_state; let mut old_state;
loop { loop {
old_state = t.get_exec_status(); old_state = t.get_exec_status();
@ -947,6 +990,10 @@ impl<VM: VirtualMachine> Thread<VM> {
} }
} }
/// Attempt to leave native state and return to managed state without blocking.
///
    /// If this method returns `false`, you should call [`leave_native`](Self::leave_native) to block the thread or
    /// spin-wait until this method returns `true`.
#[must_use = "If thread can't leave native state without blocking, call [leave_native](Thread::leave_native) instead"] #[must_use = "If thread can't leave native state without blocking, call [leave_native](Thread::leave_native) instead"]
pub fn attempt_leave_native_no_block() -> bool { pub fn attempt_leave_native_no_block() -> bool {
let t = Self::current(); let t = Self::current();
@ -966,12 +1013,16 @@ impl<VM: VirtualMachine> Thread<VM> {
} }
} }
/// Leave native state and return to managed state.
///
    /// This method might block the thread if any block requests are pending.
pub fn leave_native() { pub fn leave_native() {
if !Self::attempt_leave_native_no_block() { if !Self::attempt_leave_native_no_block() {
Self::current().leave_native_blocked(); Self::current().leave_native_blocked();
} }
} }
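
A hedged sketch of the intended usage pattern: bracket a blocking native call with `enter_native`/`leave_native` so the thread counts as blocked for handshakes and GC (the VM type `MyVM` is an assumption; the read call is just an example of code that must not touch the GC heap):

fn blocking_read<MyVM: VirtualMachine>(fd: i32, buf: &mut [u8]) -> isize {
    Thread::<MyVM>::enter_native(); // from here on, GC may run concurrently with this thread
    let n = unsafe { libc::read(fd, buf.as_mut_ptr().cast(), buf.len()) };
    Thread::<MyVM>::leave_native(); // may block here if a handshake or GC is pending
    n
}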
/// Unblock the thread and notify all waiting threads.
pub fn unblock<A: BlockAdapter<VM>>(&self) { pub fn unblock<A: BlockAdapter<VM>>(&self) {
let lock = self.monitor.lock_no_handshake(); let lock = self.monitor.lock_no_handshake();
A::clear_block_request(self); A::clear_block_request(self);
@ -980,10 +1031,12 @@ impl<VM: VirtualMachine> Thread<VM> {
drop(lock); drop(lock);
} }
/// Are yieldpoints enabled for this thread?
pub fn yieldpoints_enabled(&self) -> bool { pub fn yieldpoints_enabled(&self) -> bool {
self.yieldpoints_enabled_count.load(Ordering::Relaxed) == 1 self.yieldpoints_enabled_count.load(Ordering::Relaxed) == 1
} }
/// Enable yieldpoints for this thread.
pub fn enable_yieldpoints(&self) { pub fn enable_yieldpoints(&self) {
let val = self let val = self
.yieldpoints_enabled_count .yieldpoints_enabled_count
@ -997,6 +1050,7 @@ impl<VM: VirtualMachine> Thread<VM> {
} }
} }
/// Disable yieldpoints for this thread.
pub fn disable_yieldpoints(&self) { pub fn disable_yieldpoints(&self) {
self.yieldpoints_enabled_count self.yieldpoints_enabled_count
.fetch_sub(1, Ordering::Relaxed); .fetch_sub(1, Ordering::Relaxed);
@ -1275,6 +1329,18 @@ pub(crate) fn deinit_current_thread<VM: VirtualMachine>() {
}) })
} }
/// Thread management system. This type is responsible
/// for registering and managing all threads in the system. When GC
/// requests a thread to block, it goes through this type.
///
/// Threads are suspended in two ways:
/// - 1) Yieldpoints: this assumes the runtime is cooperative and periodically checks that
///   [`take_yieldpoint`](Thread::take_yieldpoint) is non-zero, then calls into [`Thread::yieldpoint`].
/// - 2) Signals: used when the runtime is uncooperative and runs without yieldpoints. We deliver
///   UNIX signals or Windows exceptions to suspend the thread. This method is less precise and does not
///   account for all locks held by mutators, so it is less safe. Using the [`sync`](crate::sync)
///   module in uncooperative mode is highly discouraged.
pub struct ThreadManager<VM: VirtualMachine> { pub struct ThreadManager<VM: VirtualMachine> {
inner: Monitor<RefCell<ThreadManagerInner<VM>>>, inner: Monitor<RefCell<ThreadManagerInner<VM>>>,
soft_handshake_left: AtomicUsize, soft_handshake_left: AtomicUsize,
@ -1447,7 +1513,8 @@ impl<VM: VirtualMachine> ThreadManager<VM> {
// Deal with terminating threads to ensure that all threads are either dead to MMTk or stopped above. // Deal with terminating threads to ensure that all threads are either dead to MMTk or stopped above.
self.process_about_to_terminate(); self.process_about_to_terminate();
self.inner let threads = self
.inner
.lock_no_handshake() .lock_no_handshake()
.borrow() .borrow()
.threads .threads
@ -1455,7 +1522,17 @@ impl<VM: VirtualMachine> ThreadManager<VM> {
.flatten() .flatten()
.filter(|t| t.is_blocked_for::<GCBlockAdapter>()) .filter(|t| t.is_blocked_for::<GCBlockAdapter>())
.cloned() .cloned()
.collect::<Vec<_>>() .collect::<Vec<_>>();
#[cfg(debug_assertions)]
{
for thread in threads.iter() {
assert!(!thread.stack_bounds().is_empty());
assert!(!thread.stack_pointer().is_zero());
}
}
threads
} else { } else {
self.process_about_to_terminate(); self.process_about_to_terminate();
let mut handshake_threads = Vec::with_capacity(4); let mut handshake_threads = Vec::with_capacity(4);
@ -1834,6 +1911,7 @@ static TARGET_THREAD: AtomicPtr<()> = AtomicPtr::new(std::ptr::null_mut());
extern "C-unwind" fn signal_handler_suspend_resume<VM: VirtualMachine>( extern "C-unwind" fn signal_handler_suspend_resume<VM: VirtualMachine>(
_signal: i32, _signal: i32,
_info: *const libc::siginfo_t, _info: *const libc::siginfo_t,
_ucontext: *mut libc::c_void,
) { ) {
let target = TARGET_THREAD.load(Ordering::Relaxed).cast::<Thread<VM>>(); let target = TARGET_THREAD.load(Ordering::Relaxed).cast::<Thread<VM>>();
let thread = unsafe { target.as_ref().unwrap() }; let thread = unsafe { target.as_ref().unwrap() };
@ -1868,6 +1946,22 @@ extern "C-unwind" fn signal_handler_suspend_resume<VM: VirtualMachine>(
thread thread
.stack_pointer .stack_pointer
.store(approximate_stack_pointer.as_usize(), Ordering::Release); .store(approximate_stack_pointer.as_usize(), Ordering::Release);
cfgenius::cond! {
if macro(crate::macros::have_machine_context) {
let user_context = _ucontext.cast::<libc::ucontext_t>();
unsafe {
thread.platform_registers.get().write(MaybeUninit::new(crate::machine_context::registers_from_ucontext(user_context).read()));
}
} else {
unsafe {
thread.platform_registers.get().write(MaybeUninit::new(crate::machine_context::PlatformRegisters {
stack_pointer: approximate_stack_pointer.as_usize() as *mut u8
}))
}
}
}
// Allow suspend caller to see that this thread is suspended. // Allow suspend caller to see that this thread is suspended.
// sem_post is async-signal-safe function. It means that we can call this from a signal handler. // sem_post is async-signal-safe function. It means that we can call this from a signal handler.
// http://pubs.opengroup.org/onlinepubs/009695399/functions/xsh_chap02_04.html#tag_02_04_03 // http://pubs.opengroup.org/onlinepubs/009695399/functions/xsh_chap02_04.html#tag_02_04_03