From a65a04387071e721f0e8dc2a8fbe144268da608f Mon Sep 17 00:00:00 2001 From: playX18 Date: Sun, 9 Feb 2025 09:21:48 +0700 Subject: [PATCH] first --- .cursorignore | 3 + .gitignore | 1 + Cargo.lock | 1481 +++++++++++++++++++++++ Cargo.toml | 4 + vmkit-proc/Cargo.lock | 7 + vmkit-proc/Cargo.toml | 13 + vmkit-proc/README.md | 3 + vmkit-proc/src/lib.rs | 6 + vmkit-proc/src/main.rs | 3 + vmkit/Cargo.lock | 1295 +++++++++++++++++++++ vmkit/Cargo.toml | 33 + vmkit/src/lib.rs | 134 +++ vmkit/src/main.rs | 167 +++ vmkit/src/mm.rs | 402 +++++++ vmkit/src/mm/active_plan.rs | 51 + vmkit/src/mm/aslr.rs | 95 ++ vmkit/src/mm/collection.rs | 87 ++ vmkit/src/mm/conservative_roots.rs | 203 ++++ vmkit/src/mm/ref_glue.rs | 49 + vmkit/src/mm/scanning.rs | 154 +++ vmkit/src/mm/stack_bounds.rs | 272 +++++ vmkit/src/mm/tlab.rs | 45 + vmkit/src/mm/traits.rs | 301 +++++ vmkit/src/object_model.rs | 194 ++++ vmkit/src/object_model/compression.rs | 131 +++ vmkit/src/object_model/header.rs | 91 ++ vmkit/src/object_model/metadata.rs | 149 +++ vmkit/src/object_model/object.rs | 575 +++++++++ vmkit/src/options.rs | 33 + vmkit/src/sync.rs | 195 ++++ vmkit/src/threading.rs | 1547 +++++++++++++++++++++++++ 31 files changed, 7724 insertions(+) create mode 100644 .cursorignore create mode 100644 .gitignore create mode 100644 Cargo.lock create mode 100644 Cargo.toml create mode 100644 vmkit-proc/Cargo.lock create mode 100644 vmkit-proc/Cargo.toml create mode 100644 vmkit-proc/README.md create mode 100644 vmkit-proc/src/lib.rs create mode 100644 vmkit-proc/src/main.rs create mode 100644 vmkit/Cargo.lock create mode 100644 vmkit/Cargo.toml create mode 100644 vmkit/src/lib.rs create mode 100644 vmkit/src/main.rs create mode 100644 vmkit/src/mm.rs create mode 100644 vmkit/src/mm/active_plan.rs create mode 100644 vmkit/src/mm/aslr.rs create mode 100644 vmkit/src/mm/collection.rs create mode 100644 vmkit/src/mm/conservative_roots.rs create mode 100644 vmkit/src/mm/ref_glue.rs create mode 100644 
vmkit/src/mm/scanning.rs create mode 100644 vmkit/src/mm/stack_bounds.rs create mode 100644 vmkit/src/mm/tlab.rs create mode 100644 vmkit/src/mm/traits.rs create mode 100644 vmkit/src/object_model.rs create mode 100644 vmkit/src/object_model/compression.rs create mode 100644 vmkit/src/object_model/header.rs create mode 100644 vmkit/src/object_model/metadata.rs create mode 100644 vmkit/src/object_model/object.rs create mode 100644 vmkit/src/options.rs create mode 100644 vmkit/src/sync.rs create mode 100644 vmkit/src/threading.rs diff --git a/.cursorignore b/.cursorignore new file mode 100644 index 0000000..724c8ea --- /dev/null +++ b/.cursorignore @@ -0,0 +1,3 @@ +# Add directories or file patterns to ignore during indexing (e.g. foo/ or *.csv) +target/* +*/target/* \ No newline at end of file diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..ea8c4bf --- /dev/null +++ b/.gitignore @@ -0,0 +1 @@ +/target diff --git a/Cargo.lock b/Cargo.lock new file mode 100644 index 0000000..dd06c5b --- /dev/null +++ b/Cargo.lock @@ -0,0 +1,1481 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 4 + +[[package]] +name = "aho-corasick" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" +dependencies = [ + "memchr", +] + +[[package]] +name = "anstream" +version = "0.6.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b" +dependencies = [ + "anstyle", + "anstyle-parse", + "anstyle-query", + "anstyle-wincon", + "colorchoice", + "is_terminal_polyfill", + "utf8parse", +] + +[[package]] +name = "anstyle" +version = "1.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" + +[[package]] +name = "anstyle-parse" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9" +dependencies = [ + "utf8parse", +] + +[[package]] +name = "anstyle-query" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c" +dependencies = [ + "windows-sys", +] + +[[package]] +name = "anstyle-wincon" +version = "3.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca3534e77181a9cc07539ad51f2141fe32f6c3ffd4df76db8ad92346b003ae4e" +dependencies = [ + "anstyle", + "once_cell", + "windows-sys", +] + +[[package]] +name = "atomic" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d818003e740b63afc82337e3160717f4f63078720a810b7b903e70a5d1d2994" +dependencies = [ + "bytemuck", +] + +[[package]] +name = "atomic-traits" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "707f750b93bd1b739cf9ddf85f8fe7c97a4a62c60ccf8b6f232514bd9103bedc" +dependencies = [ + 
"cfg-if", + "rustc_version", +] + +[[package]] +name = "atomic_refcell" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41e67cd8309bbd06cd603a9e693a784ac2e5d1e955f11286e355089fcab3047c" + +[[package]] +name = "autocfg" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" + +[[package]] +name = "bitflags" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f68f53c83ab957f72c32642f3868eec03eb974d1fb82e453128456482613d36" + +[[package]] +name = "built" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c360505aed52b7ec96a3636c3f039d99103c37d1d9b4f7a8c743d3ea9ffcd03b" +dependencies = [ + "git2", +] + +[[package]] +name = "bytemuck" +version = "1.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef657dfab802224e671f5818e9a4935f9b1957ed18e58292690cc39e7a4092a3" +dependencies = [ + "bytemuck_derive", +] + +[[package]] +name = "bytemuck_derive" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fa76293b4f7bb636ab88fd78228235b5248b4d05cc589aed610f954af5d7c7a" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", +] + +[[package]] +name = "byteorder" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" + +[[package]] +name = "cc" +version = "1.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "755717a7de9ec452bf7f3f1a3099085deabd7f2962b861dae91ecd7a365903d2" +dependencies = [ + "jobserver", + "libc", + "shlex", +] + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "clap" +version = "4.5.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e77c3243bd94243c03672cb5154667347c457ca271254724f9f393aee1c05ff" +dependencies = [ + "clap_builder", + "clap_derive", +] + +[[package]] +name = "clap_builder" +version = "4.5.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b26884eb4b57140e4d2d93652abfa49498b938b3c9179f9fc487b0acc3edad7" +dependencies = [ + "anstream", + "anstyle", + "clap_lex", + "strsim", +] + +[[package]] +name = "clap_derive" +version = "4.5.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf4ced95c6f4a675af3da73304b9ac4ed991640c36374e4b46795c49e17cf1ed" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn 2.0.98", +] + +[[package]] +name = "clap_lex" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" + +[[package]] +name = "colorchoice" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" + +[[package]] +name = "core-foundation-sys" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" + +[[package]] +name = "crossbeam" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1137cd7e7fc0fb5d3c5a8678be38ec56e819125d8d7907411fe24ccb943faca8" +dependencies = [ + "crossbeam-channel", + "crossbeam-deque", + "crossbeam-epoch", + "crossbeam-queue", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-channel" +version = "0.5.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"06ba6d68e24814cb8de6bb986db8222d3a027d15872cabc0d18817bc3c0e4471" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-deque" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" +dependencies = [ + "crossbeam-epoch", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-queue" +version = "0.3.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f58bbc28f91df819d0aa2a2c00cd19754769c2fad90579b3592b1c9ba7a3115" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" + +[[package]] +name = "delegate" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e018fccbeeb50ff26562ece792ed06659b9c2dae79ece77c4456bb10d9bf79b" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", +] + +[[package]] +name = "displaydoc" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", +] + +[[package]] +name = "downcast-rs" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75b325c5dbd37f80359721ad39aca5a29fb04c89279657cffdda8736d0c0b9d2" + +[[package]] +name = "easy-bitfield" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"26ea791db72a3c7fc461f4a25a2c193151490c4b99a06bfd8e5edc1ad31a003c" +dependencies = [ + "atomic", + "bytemuck", + "num", +] + +[[package]] +name = "either" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" + +[[package]] +name = "enum-map" +version = "2.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6866f3bfdf8207509a033af1a75a7b08abda06bbaaeae6669323fd5a097df2e9" +dependencies = [ + "enum-map-derive", +] + +[[package]] +name = "enum-map-derive" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f282cfdfe92516eb26c2af8589c274c7c17681f5ecc03c18255fe741c6aa64eb" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", +] + +[[package]] +name = "env_filter" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "186e05a59d4c50738528153b83b0b0194d3a29507dfec16eccd4b342903397d0" +dependencies = [ + "log", + "regex", +] + +[[package]] +name = "env_logger" +version = "0.11.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dcaee3d8e3cfc3fd92428d477bc97fc29ec8716d180c0d74c643bb26166660e0" +dependencies = [ + "anstream", + "anstyle", + "env_filter", + "humantime", + "log", +] + +[[package]] +name = "errno" +version = "0.3.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d" +dependencies = [ + "libc", + "windows-sys", +] + +[[package]] +name = "form_urlencoded" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" +dependencies = [ + "percent-encoding", +] + +[[package]] +name = "getrandom" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"43a49c392881ce6d5c3b8cb70f98717b7c07aabbdff06687b9030dbfbe2725f8" +dependencies = [ + "cfg-if", + "libc", + "wasi", + "windows-targets", +] + +[[package]] +name = "git2" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b903b73e45dc0c6c596f2d37eccece7c1c8bb6e4407b001096387c63d0d93724" +dependencies = [ + "bitflags", + "libc", + "libgit2-sys", + "log", + "url", +] + +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + +[[package]] +name = "hermit-abi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" + +[[package]] +name = "hermit-abi" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc" + +[[package]] +name = "humantime" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" + +[[package]] +name = "icu_collections" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_locid" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" +dependencies = [ + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = "icu_locid_transform" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" 
+dependencies = [ + "displaydoc", + "icu_locid", + "icu_locid_transform_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_locid_transform_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e" + +[[package]] +name = "icu_normalizer" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "utf16_iter", + "utf8_iter", + "write16", + "zerovec", +] + +[[package]] +name = "icu_normalizer_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516" + +[[package]] +name = "icu_properties" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_locid_transform", + "icu_properties_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569" + +[[package]] +name = "icu_provider" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" +dependencies = [ + "displaydoc", + "icu_locid", + "icu_provider_macros", + "stable_deref_trait", + "tinystr", + "writeable", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_provider_macros" +version = "1.5.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", +] + +[[package]] +name = "idna" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" +dependencies = [ + "idna_adapter", + "smallvec", + "utf8_iter", +] + +[[package]] +name = "idna_adapter" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71" +dependencies = [ + "icu_normalizer", + "icu_properties", +] + +[[package]] +name = "is-terminal" +version = "0.4.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e19b23d53f35ce9f56aebc7d1bb4e6ac1e9c0db7ac85c8d1760c04379edced37" +dependencies = [ + "hermit-abi 0.4.0", + "libc", + "windows-sys", +] + +[[package]] +name = "is_terminal_polyfill" +version = "1.70.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" + +[[package]] +name = "itertools" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" +dependencies = [ + "either", +] + +[[package]] +name = "jobserver" +version = "0.1.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48d1dbcbbeb6a7fec7e059840aa538bd62aaccf972c7346c4d9d2059312853d0" +dependencies = [ + "libc", +] + +[[package]] +name = "lazy_static" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" + +[[package]] +name = "libc" +version = "0.2.169" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum 
= "b5aba8db14291edd000dfcc4d620c7ebfb122c613afb886ca8803fa4e128a20a" + +[[package]] +name = "libgit2-sys" +version = "0.17.0+1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10472326a8a6477c3c20a64547b0059e4b0d086869eee31e6d7da728a8eb7224" +dependencies = [ + "cc", + "libc", + "libz-sys", + "pkg-config", +] + +[[package]] +name = "libz-sys" +version = "1.1.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df9b68e50e6e0b26f672573834882eb57759f6db9b3be2ea3c35c91188bb4eaa" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "litemap" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ee93343901ab17bd981295f2cf0026d4ad018c7c31ba84549a4ddbb47a45104" + +[[package]] +name = "lock_api" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" +dependencies = [ + "autocfg", + "scopeguard", +] + +[[package]] +name = "log" +version = "0.4.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04cbf5b083de1c7e0222a7a51dbfdba1cbe1c6ab0b15e29fff3f6c077fd9cd9f" + +[[package]] +name = "memchr" +version = "2.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" + +[[package]] +name = "memoffset" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "488016bfae457b036d996092f6cb448677611ce4449e970ceaf42695203f218a" +dependencies = [ + "autocfg", +] + +[[package]] +name = "mmtk" +version = "0.30.0" +source = "git+https://github.com/mmtk/mmtk-core#4ca8812607bfb0b398278e01cbd3755959c7d010" +dependencies = [ + "atomic", + "atomic-traits", + "atomic_refcell", + "built", + "bytemuck", + "cfg-if", + "crossbeam", + "delegate", + "downcast-rs", + "enum-map", + 
"env_logger", + "is-terminal", + "itertools", + "lazy_static", + "libc", + "log", + "memoffset", + "mmtk-macros", + "num-traits", + "num_cpus", + "portable-atomic", + "probe", + "regex", + "rustversion", + "spin", + "static_assertions", + "strum", + "strum_macros", + "sysinfo", +] + +[[package]] +name = "mmtk-macros" +version = "0.30.0" +source = "git+https://github.com/mmtk/mmtk-core#4ca8812607bfb0b398278e01cbd3755959c7d010" +dependencies = [ + "proc-macro-error", + "proc-macro2", + "quote", + "syn 2.0.98", +] + +[[package]] +name = "ntapi" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8a3895c6391c39d7fe7ebc444a87eb2991b2a0bc718fdabd071eec617fc68e4" +dependencies = [ + "winapi", +] + +[[package]] +name = "num" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35bd024e8b2ff75562e5f34e7f4905839deb4b22955ef5e73d2fea1b9813cb23" +dependencies = [ + "num-bigint", + "num-complex", + "num-integer", + "num-iter", + "num-rational", + "num-traits", +] + +[[package]] +name = "num-bigint" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" +dependencies = [ + "num-integer", + "num-traits", +] + +[[package]] +name = "num-complex" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73f88a1307638156682bada9d7604135552957b7818057dcef22705b4d509495" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-integer" +version = "0.1.46" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-iter" +version = "0.1.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" 
+dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-rational" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f83d14da390562dca69fc84082e73e548e1ad308d24accdedd2720017cb37824" +dependencies = [ + "num-bigint", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +dependencies = [ + "autocfg", +] + +[[package]] +name = "num_cpus" +version = "1.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" +dependencies = [ + "hermit-abi 0.3.9", + "libc", +] + +[[package]] +name = "once_cell" +version = "1.20.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "945462a4b81e43c4e3ba96bd7b49d834c6f61198356aa858733bc4acf3cbe62e" + +[[package]] +name = "parking_lot" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" +dependencies = [ + "lock_api", + "parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = "0.9.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" +dependencies = [ + "cfg-if", + "libc", + "redox_syscall", + "smallvec", + "windows-targets", +] + +[[package]] +name = "percent-encoding" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" + +[[package]] +name = "pkg-config" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2" + 
+[[package]] +name = "portable-atomic" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "280dc24453071f1b63954171985a0b0d30058d287960968b9b2aca264c8d4ee6" + +[[package]] +name = "ppv-lite86" +version = "0.2.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" +dependencies = [ + "zerocopy 0.7.35", +] + +[[package]] +name = "probe" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8e2d2444b730c8f027344c60f9e1f1554d7a3342df9bdd425142ed119a6e5a3" + +[[package]] +name = "proc-macro-error" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" +dependencies = [ + "proc-macro-error-attr", + "proc-macro2", + "quote", + "syn 1.0.109", + "version_check", +] + +[[package]] +name = "proc-macro-error-attr" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" +dependencies = [ + "proc-macro2", + "quote", + "version_check", +] + +[[package]] +name = "proc-macro2" +version = "1.0.93" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60946a68e5f9d28b0dc1c21bb8a97ee7d018a8b322fa57838ba31cc878e22d99" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.38" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e4dccaaaf89514f546c693ddc140f729f958c247918a13380cccc6078391acc" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rand" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3779b94aeb87e8bd4e834cee3650289ee9e0d5677f976ecdb6d219e5f4f6cd94" +dependencies = [ + "rand_chacha", + "rand_core", + "zerocopy 0.8.17", +] + +[[package]] 
+name = "rand_chacha" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b08f3c9802962f7e1b25113931d94f43ed9725bebc59db9d0c3e9a23b67e15ff" +dependencies = [ + "getrandom", + "zerocopy 0.8.17", +] + +[[package]] +name = "rayon" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" +dependencies = [ + "either", + "rayon-core", +] + +[[package]] +name = "rayon-core" +version = "1.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" +dependencies = [ + "crossbeam-deque", + "crossbeam-utils", +] + +[[package]] +name = "redox_syscall" +version = "0.5.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03a862b389f93e68874fbf580b9de08dd02facb9a788ebadaf4a3fd33cf58834" +dependencies = [ + "bitflags", +] + +[[package]] +name = "regex" +version = "1.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata", + "regex-syntax", +] + +[[package]] +name = "regex-automata" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" + +[[package]] +name = "rustc_version" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" +dependencies = [ + "semver", +] + +[[package]] +name = "rustversion" +version = "1.0.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7c45b9784283f1b2e7fb61b42047c2fd678ef0960d4f6f1eba131594cc369d4" + +[[package]] +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + +[[package]] +name = "semver" +version = "1.0.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f79dfe2d285b0488816f30e700a7438c5a73d816b5b7d3ac72fbc48b0d185e03" + +[[package]] +name = "serde" +version = "1.0.217" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02fc4265df13d6fa1d00ecff087228cc0a2b5f3c0e87e258d8b94a156e984c70" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.217" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", +] + +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + +[[package]] +name = "smallvec" +version = "1.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" + +[[package]] +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" 
+dependencies = [ + "lock_api", +] + +[[package]] +name = "stable_deref_trait" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" + +[[package]] +name = "static_assertions" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" + +[[package]] +name = "strsim" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" + +[[package]] +name = "strum" +version = "0.26.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fec0f0aef304996cf250b31b5a10dee7980c85da9d759361292b8bca5a18f06" + +[[package]] +name = "strum_macros" +version = "0.26.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "rustversion", + "syn 2.0.98", +] + +[[package]] +name = "syn" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.98" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36147f1a48ae0ec2b5b3bc5b537d267457555a10dc06f3dbc8cb11ba3006d3b1" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "synstructure" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", +] + +[[package]] +name = "sysinfo" +version = "0.30.13" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a5b4ddaee55fb2bea2bf0e5000747e5f5c0de765e5a5ff87f4cd106439f4bb3" +dependencies = [ + "cfg-if", + "core-foundation-sys", + "libc", + "ntapi", + "once_cell", + "rayon", + "windows", +] + +[[package]] +name = "tinystr" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" +dependencies = [ + "displaydoc", + "zerovec", +] + +[[package]] +name = "unicode-ident" +version = "1.0.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a210d160f08b701c8721ba1c726c11662f877ea6b7094007e1ca9a1041945034" + +[[package]] +name = "url" +version = "2.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" +dependencies = [ + "form_urlencoded", + "idna", + "percent-encoding", +] + +[[package]] +name = "utf16_iter" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" + +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + +[[package]] +name = "utf8parse" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" + +[[package]] +name = "vcpkg" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" + +[[package]] +name = "version_check" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" + +[[package]] +name = "vmkit" 
+version = "0.1.0" +dependencies = [ + "atomic", + "bytemuck", + "cfg-if", + "clap", + "easy-bitfield", + "errno", + "libc", + "mmtk", + "parking_lot", + "rand", + "winapi", +] + +[[package]] +name = "vmkit-proc" +version = "0.1.0" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", +] + +[[package]] +name = "wasi" +version = "0.13.3+wasi-0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26816d2e1a4a36a2940b96c5296ce403917633dff8f3440e9b236ed6f6bacad2" +dependencies = [ + "wit-bindgen-rt", +] + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e48a53791691ab099e5e2ad123536d0fff50652600abaf43bbf952894110d0be" +dependencies = [ + "windows-core", + "windows-targets", +] + +[[package]] +name = "windows-core" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + 
"windows-targets", +] + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_gnullvm", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" + +[[package]] +name = "wit-bindgen-rt" +version = "0.33.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3268f3d866458b787f390cf61f4bbb563b922d091359f9608842999eaee3943c" +dependencies = [ + "bitflags", +] + +[[package]] +name = "write16" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" + +[[package]] +name = "writeable" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" + +[[package]] +name = "yoke" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "120e6aef9aa629e3d4f52dc8cc43a015c7724194c97dfaf45180d2daf2b77f40" +dependencies = [ + "serde", + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", + "synstructure", +] + +[[package]] +name = "zerocopy" +version = "0.7.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" +dependencies = [ + "byteorder", + "zerocopy-derive 0.7.35", +] + +[[package]] +name = "zerocopy" +version = "0.8.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa91407dacce3a68c56de03abe2760159582b846c6a4acd2f456618087f12713" +dependencies = [ + "zerocopy-derive 0.8.17", +] + +[[package]] +name = "zerocopy-derive" +version = "0.7.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", +] + +[[package]] +name = "zerocopy-derive" +version = "0.8.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06718a168365cad3d5ff0bb133aad346959a2074bd4a85c121255a11304a8626" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", +] + +[[package]] +name = "zerofrom" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cff3ee08c995dee1859d998dea82f7374f2826091dd9cd47def953cae446cd2e" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", + "synstructure", +] + +[[package]] +name = "zerovec" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", +] diff --git a/Cargo.toml b/Cargo.toml new file mode 100644 index 0000000..103205b --- /dev/null +++ b/Cargo.toml @@ -0,0 +1,4 @@ +[workspace] +members = ["vmkit", "vmkit-proc"] +default-members = ["vmkit"] +resolver = "2" \ No newline at end of file diff --git a/vmkit-proc/Cargo.lock b/vmkit-proc/Cargo.lock new file mode 100644 index 0000000..60491fe --- /dev/null +++ b/vmkit-proc/Cargo.lock @@ -0,0 +1,7 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 4 + +[[package]] +name = "vmkit-proc" +version = "0.1.0" diff --git a/vmkit-proc/Cargo.toml b/vmkit-proc/Cargo.toml new file mode 100644 index 0000000..882cdea --- /dev/null +++ b/vmkit-proc/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "vmkit-proc" +version = "0.1.0" +edition = "2021" + +[dependencies] +proc-macro2 = "1.0.93" +quote = "1.0.38" +syn = { version = "2.0.98", features = ["full"] } + + +[lib] +proc-macro = true \ No newline at end of file diff --git a/vmkit-proc/README.md b/vmkit-proc/README.md new file mode 100644 index 0000000..4f430ee --- /dev/null +++ b/vmkit-proc/README.md @@ -0,0 +1,3 @@ +# vmkit-proc + +Various procedural macros which simplify the usage of VMKit. \ No newline at end of file diff --git a/vmkit-proc/src/lib.rs b/vmkit-proc/src/lib.rs new file mode 100644 index 0000000..8333ee2 --- /dev/null +++ b/vmkit-proc/src/lib.rs @@ -0,0 +1,6 @@ +#[proc_macro] +pub fn define_options(input: proc_macro::TokenStream) -> proc_macro::TokenStream { + let _input = proc_macro2::TokenStream::from(input); + + proc_macro::TokenStream::new() +} diff --git a/vmkit-proc/src/main.rs b/vmkit-proc/src/main.rs new file mode 100644 index 0000000..e7a11a9 --- /dev/null +++ b/vmkit-proc/src/main.rs @@ -0,0 +1,3 @@ +fn main() { + println!("Hello, world!"); +} diff --git a/vmkit/Cargo.lock b/vmkit/Cargo.lock new file mode 100644 index 0000000..057d6fd --- /dev/null +++ b/vmkit/Cargo.lock @@ -0,0 +1,1295 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 4 + +[[package]] +name = "aho-corasick" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" +dependencies = [ + "memchr", +] + +[[package]] +name = "anstream" +version = "0.6.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b" +dependencies = [ + "anstyle", + "anstyle-parse", + "anstyle-query", + "anstyle-wincon", + "colorchoice", + "is_terminal_polyfill", + "utf8parse", +] + +[[package]] +name = "anstyle" +version = "1.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" + +[[package]] +name = "anstyle-parse" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9" +dependencies = [ + "utf8parse", +] + +[[package]] +name = "anstyle-query" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c" +dependencies = [ + "windows-sys", +] + +[[package]] +name = "anstyle-wincon" +version = "3.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca3534e77181a9cc07539ad51f2141fe32f6c3ffd4df76db8ad92346b003ae4e" +dependencies = [ + "anstyle", + "once_cell", + "windows-sys", +] + +[[package]] +name = "atomic" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d818003e740b63afc82337e3160717f4f63078720a810b7b903e70a5d1d2994" +dependencies = [ + "bytemuck", +] + +[[package]] +name = "atomic-traits" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "707f750b93bd1b739cf9ddf85f8fe7c97a4a62c60ccf8b6f232514bd9103bedc" +dependencies = [ + 
"cfg-if", + "rustc_version", +] + +[[package]] +name = "atomic_refcell" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41e67cd8309bbd06cd603a9e693a784ac2e5d1e955f11286e355089fcab3047c" + +[[package]] +name = "autocfg" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" + +[[package]] +name = "bitflags" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f68f53c83ab957f72c32642f3868eec03eb974d1fb82e453128456482613d36" + +[[package]] +name = "built" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c360505aed52b7ec96a3636c3f039d99103c37d1d9b4f7a8c743d3ea9ffcd03b" +dependencies = [ + "git2", +] + +[[package]] +name = "bytemuck" +version = "1.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef657dfab802224e671f5818e9a4935f9b1957ed18e58292690cc39e7a4092a3" +dependencies = [ + "bytemuck_derive", +] + +[[package]] +name = "bytemuck_derive" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fa76293b4f7bb636ab88fd78228235b5248b4d05cc589aed610f954af5d7c7a" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", +] + +[[package]] +name = "cc" +version = "1.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4730490333d58093109dc02c23174c3f4d490998c3fed3cc8e82d57afedb9cf" +dependencies = [ + "jobserver", + "libc", + "shlex", +] + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "colorchoice" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" + +[[package]] +name = "core-foundation-sys" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" + +[[package]] +name = "crossbeam" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1137cd7e7fc0fb5d3c5a8678be38ec56e819125d8d7907411fe24ccb943faca8" +dependencies = [ + "crossbeam-channel", + "crossbeam-deque", + "crossbeam-epoch", + "crossbeam-queue", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-channel" +version = "0.5.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06ba6d68e24814cb8de6bb986db8222d3a027d15872cabc0d18817bc3c0e4471" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-deque" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" +dependencies = [ + "crossbeam-epoch", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-queue" +version = "0.3.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f58bbc28f91df819d0aa2a2c00cd19754769c2fad90579b3592b1c9ba7a3115" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" + +[[package]] +name = "delegate" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"4e018fccbeeb50ff26562ece792ed06659b9c2dae79ece77c4456bb10d9bf79b" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", +] + +[[package]] +name = "displaydoc" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", +] + +[[package]] +name = "downcast-rs" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75b325c5dbd37f80359721ad39aca5a29fb04c89279657cffdda8736d0c0b9d2" + +[[package]] +name = "easy-bitfield" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26ea791db72a3c7fc461f4a25a2c193151490c4b99a06bfd8e5edc1ad31a003c" +dependencies = [ + "atomic", + "bytemuck", + "num", +] + +[[package]] +name = "either" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" + +[[package]] +name = "enum-map" +version = "2.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6866f3bfdf8207509a033af1a75a7b08abda06bbaaeae6669323fd5a097df2e9" +dependencies = [ + "enum-map-derive", +] + +[[package]] +name = "enum-map-derive" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f282cfdfe92516eb26c2af8589c274c7c17681f5ecc03c18255fe741c6aa64eb" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", +] + +[[package]] +name = "env_filter" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "186e05a59d4c50738528153b83b0b0194d3a29507dfec16eccd4b342903397d0" +dependencies = [ + "log", + "regex", +] + +[[package]] +name = "env_logger" +version = "0.11.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"dcaee3d8e3cfc3fd92428d477bc97fc29ec8716d180c0d74c643bb26166660e0" +dependencies = [ + "anstream", + "anstyle", + "env_filter", + "humantime", + "log", +] + +[[package]] +name = "form_urlencoded" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" +dependencies = [ + "percent-encoding", +] + +[[package]] +name = "git2" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b903b73e45dc0c6c596f2d37eccece7c1c8bb6e4407b001096387c63d0d93724" +dependencies = [ + "bitflags", + "libc", + "libgit2-sys", + "log", + "url", +] + +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + +[[package]] +name = "hermit-abi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" + +[[package]] +name = "hermit-abi" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc" + +[[package]] +name = "humantime" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" + +[[package]] +name = "icu_collections" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_locid" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" +dependencies = [ + "displaydoc", + "litemap", + 
"tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = "icu_locid_transform" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" +dependencies = [ + "displaydoc", + "icu_locid", + "icu_locid_transform_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_locid_transform_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e" + +[[package]] +name = "icu_normalizer" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "utf16_iter", + "utf8_iter", + "write16", + "zerovec", +] + +[[package]] +name = "icu_normalizer_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516" + +[[package]] +name = "icu_properties" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_locid_transform", + "icu_properties_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569" + +[[package]] +name = "icu_provider" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" +dependencies = [ + "displaydoc", + 
"icu_locid", + "icu_provider_macros", + "stable_deref_trait", + "tinystr", + "writeable", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_provider_macros" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", +] + +[[package]] +name = "idna" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" +dependencies = [ + "idna_adapter", + "smallvec", + "utf8_iter", +] + +[[package]] +name = "idna_adapter" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71" +dependencies = [ + "icu_normalizer", + "icu_properties", +] + +[[package]] +name = "is-terminal" +version = "0.4.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e19b23d53f35ce9f56aebc7d1bb4e6ac1e9c0db7ac85c8d1760c04379edced37" +dependencies = [ + "hermit-abi 0.4.0", + "libc", + "windows-sys", +] + +[[package]] +name = "is_terminal_polyfill" +version = "1.70.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" + +[[package]] +name = "itertools" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" +dependencies = [ + "either", +] + +[[package]] +name = "jobserver" +version = "0.1.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48d1dbcbbeb6a7fec7e059840aa538bd62aaccf972c7346c4d9d2059312853d0" +dependencies = [ + "libc", +] + +[[package]] +name = "lazy_static" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" + +[[package]] +name = "libc" +version = "0.2.169" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5aba8db14291edd000dfcc4d620c7ebfb122c613afb886ca8803fa4e128a20a" + +[[package]] +name = "libgit2-sys" +version = "0.17.0+1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10472326a8a6477c3c20a64547b0059e4b0d086869eee31e6d7da728a8eb7224" +dependencies = [ + "cc", + "libc", + "libz-sys", + "pkg-config", +] + +[[package]] +name = "libz-sys" +version = "1.1.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df9b68e50e6e0b26f672573834882eb57759f6db9b3be2ea3c35c91188bb4eaa" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "litemap" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ee93343901ab17bd981295f2cf0026d4ad018c7c31ba84549a4ddbb47a45104" + +[[package]] +name = "lock_api" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" +dependencies = [ + "autocfg", + "scopeguard", +] + +[[package]] +name = "log" +version = "0.4.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04cbf5b083de1c7e0222a7a51dbfdba1cbe1c6ab0b15e29fff3f6c077fd9cd9f" + +[[package]] +name = "memchr" +version = "2.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" + +[[package]] +name = "memoffset" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "488016bfae457b036d996092f6cb448677611ce4449e970ceaf42695203f218a" +dependencies = [ + "autocfg", +] + +[[package]] +name = "mmtk" +version = "0.30.0" +source = 
"git+https://github.com/mmtk/mmtk-core#054feef9576ec221dec46ac38a57bab58ba13093" +dependencies = [ + "atomic", + "atomic-traits", + "atomic_refcell", + "built", + "bytemuck", + "cfg-if", + "crossbeam", + "delegate", + "downcast-rs", + "enum-map", + "env_logger", + "is-terminal", + "itertools", + "lazy_static", + "libc", + "log", + "memoffset", + "mmtk-macros", + "num-traits", + "num_cpus", + "portable-atomic", + "probe", + "regex", + "rustversion", + "spin", + "static_assertions", + "strum", + "strum_macros", + "sysinfo", +] + +[[package]] +name = "mmtk-macros" +version = "0.30.0" +source = "git+https://github.com/mmtk/mmtk-core#054feef9576ec221dec46ac38a57bab58ba13093" +dependencies = [ + "proc-macro-error", + "proc-macro2", + "quote", + "syn 2.0.98", +] + +[[package]] +name = "ntapi" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8a3895c6391c39d7fe7ebc444a87eb2991b2a0bc718fdabd071eec617fc68e4" +dependencies = [ + "winapi", +] + +[[package]] +name = "num" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35bd024e8b2ff75562e5f34e7f4905839deb4b22955ef5e73d2fea1b9813cb23" +dependencies = [ + "num-bigint", + "num-complex", + "num-integer", + "num-iter", + "num-rational", + "num-traits", +] + +[[package]] +name = "num-bigint" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" +dependencies = [ + "num-integer", + "num-traits", +] + +[[package]] +name = "num-complex" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73f88a1307638156682bada9d7604135552957b7818057dcef22705b4d509495" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-integer" +version = "0.1.46" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" 
+dependencies = [ + "num-traits", +] + +[[package]] +name = "num-iter" +version = "0.1.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-rational" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f83d14da390562dca69fc84082e73e548e1ad308d24accdedd2720017cb37824" +dependencies = [ + "num-bigint", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +dependencies = [ + "autocfg", +] + +[[package]] +name = "num_cpus" +version = "1.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" +dependencies = [ + "hermit-abi 0.3.9", + "libc", +] + +[[package]] +name = "once_cell" +version = "1.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" + +[[package]] +name = "parking_lot" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" +dependencies = [ + "lock_api", + "parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = "0.9.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" +dependencies = [ + "cfg-if", + "libc", + "redox_syscall", + "smallvec", + "windows-targets", +] + +[[package]] +name = "percent-encoding" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" + +[[package]] +name = "pkg-config" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2" + +[[package]] +name = "portable-atomic" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "280dc24453071f1b63954171985a0b0d30058d287960968b9b2aca264c8d4ee6" + +[[package]] +name = "probe" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8e2d2444b730c8f027344c60f9e1f1554d7a3342df9bdd425142ed119a6e5a3" + +[[package]] +name = "proc-macro-error" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" +dependencies = [ + "proc-macro-error-attr", + "proc-macro2", + "quote", + "syn 1.0.109", + "version_check", +] + +[[package]] +name = "proc-macro-error-attr" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" +dependencies = [ + "proc-macro2", + "quote", + "version_check", +] + +[[package]] +name = "proc-macro2" +version = "1.0.93" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60946a68e5f9d28b0dc1c21bb8a97ee7d018a8b322fa57838ba31cc878e22d99" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.38" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e4dccaaaf89514f546c693ddc140f729f958c247918a13380cccc6078391acc" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rayon" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" +dependencies = [ + "either", + "rayon-core", +] + 
+[[package]] +name = "rayon-core" +version = "1.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" +dependencies = [ + "crossbeam-deque", + "crossbeam-utils", +] + +[[package]] +name = "redox_syscall" +version = "0.5.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03a862b389f93e68874fbf580b9de08dd02facb9a788ebadaf4a3fd33cf58834" +dependencies = [ + "bitflags", +] + +[[package]] +name = "regex" +version = "1.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata", + "regex-syntax", +] + +[[package]] +name = "regex-automata" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" + +[[package]] +name = "rustc_version" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" +dependencies = [ + "semver", +] + +[[package]] +name = "rustversion" +version = "1.0.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7c45b9784283f1b2e7fb61b42047c2fd678ef0960d4f6f1eba131594cc369d4" + +[[package]] +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + +[[package]] +name = "semver" +version = "1.0.25" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "f79dfe2d285b0488816f30e700a7438c5a73d816b5b7d3ac72fbc48b0d185e03" + +[[package]] +name = "serde" +version = "1.0.217" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02fc4265df13d6fa1d00ecff087228cc0a2b5f3c0e87e258d8b94a156e984c70" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.217" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", +] + +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + +[[package]] +name = "smallvec" +version = "1.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" + +[[package]] +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" +dependencies = [ + "lock_api", +] + +[[package]] +name = "stable_deref_trait" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" + +[[package]] +name = "static_assertions" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" + +[[package]] +name = "strum" +version = "0.26.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fec0f0aef304996cf250b31b5a10dee7980c85da9d759361292b8bca5a18f06" + +[[package]] +name = "strum_macros" +version = "0.26.4" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "rustversion", + "syn 2.0.98", +] + +[[package]] +name = "syn" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.98" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36147f1a48ae0ec2b5b3bc5b537d267457555a10dc06f3dbc8cb11ba3006d3b1" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "synstructure" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", +] + +[[package]] +name = "sysinfo" +version = "0.30.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a5b4ddaee55fb2bea2bf0e5000747e5f5c0de765e5a5ff87f4cd106439f4bb3" +dependencies = [ + "cfg-if", + "core-foundation-sys", + "libc", + "ntapi", + "once_cell", + "rayon", + "windows", +] + +[[package]] +name = "tinystr" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" +dependencies = [ + "displaydoc", + "zerovec", +] + +[[package]] +name = "unicode-ident" +version = "1.0.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a210d160f08b701c8721ba1c726c11662f877ea6b7094007e1ca9a1041945034" + +[[package]] +name = "url" +version = "2.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" +dependencies = [ + "form_urlencoded", + "idna", + "percent-encoding", +] + 
+[[package]] +name = "utf16_iter" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" + +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + +[[package]] +name = "utf8parse" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" + +[[package]] +name = "vcpkg" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" + +[[package]] +name = "version_check" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" + +[[package]] +name = "vmkit" +version = "0.1.0" +dependencies = [ + "atomic", + "bytemuck", + "cfg-if", + "easy-bitfield", + "libc", + "mmtk", + "parking_lot", +] + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "e48a53791691ab099e5e2ad123536d0fff50652600abaf43bbf952894110d0be" +dependencies = [ + "windows-core", + "windows-targets", +] + +[[package]] +name = "windows-core" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_gnullvm", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" + +[[package]] +name = "write16" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" + +[[package]] +name = "writeable" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" + +[[package]] +name = "yoke" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "120e6aef9aa629e3d4f52dc8cc43a015c7724194c97dfaf45180d2daf2b77f40" +dependencies = [ + "serde", + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", + "synstructure", +] + +[[package]] +name = "zerofrom" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cff3ee08c995dee1859d998dea82f7374f2826091dd9cd47def953cae446cd2e" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = 
"zerofrom-derive" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", + "synstructure", +] + +[[package]] +name = "zerovec" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", +] diff --git a/vmkit/Cargo.toml b/vmkit/Cargo.toml new file mode 100644 index 0000000..eb558f0 --- /dev/null +++ b/vmkit/Cargo.toml @@ -0,0 +1,33 @@ +[package] +name = "vmkit" +version = "0.1.0" +edition = "2021" + +[dependencies] +atomic = "0.6.0" +bytemuck = "1.21.0" +cfg-if = "1.0.0" +clap = { version = "4.5.28", features = ["derive"] } +easy-bitfield = "0.1.0" +errno = "0.3.10" +libc = "0.2.169" +mmtk = { git = "https://github.com/mmtk/mmtk-core" } +parking_lot = "0.12.3" +rand = "0.9.0" + + + + +[features] +default = ["cooperative"] +# VMKit is built for use in cooperative runtime. Such runtime +# would be able to use write barriers and safepoints. Such environment +# must also provide precise object layout (stack can be uncooperative). +cooperative = ["mmtk/vo_bit", "mmtk/is_mmtk_object", "mmtk/vo_bit_access"] +# VMKit is built for use in full-precise runtime. Such runtime +# would be able to use precise write barriers and safepoints, object +# layout is fully precise. 
+full-precise = [] + +[target.'cfg(windows)'.dependencies] +winapi = { version = "0.3.9", features = ["everything"] } diff --git a/vmkit/src/lib.rs b/vmkit/src/lib.rs new file mode 100644 index 0000000..d93f247 --- /dev/null +++ b/vmkit/src/lib.rs @@ -0,0 +1,134 @@ +use std::{marker::PhantomData, sync::atomic::AtomicBool}; + +use mm::{aslr::aslr_vm_layout, traits::SlotExtra, MemoryManager}; +use mmtk::{ MMTKBuilder, MMTK}; +use threading::ThreadManager; + +pub mod mm; +pub mod object_model; +pub mod options; +pub mod sync; +pub mod threading; + +pub trait VirtualMachine: Sized + 'static + Send + Sync { + type ThreadContext: threading::ThreadContext; + type BlockAdapterList: threading::BlockAdapterList; + type Metadata: object_model::metadata::Metadata; + type Slot: SlotExtra; + + const MAX_ALIGNMENT: usize = 16; + const MIN_ALIGNMENT: usize = 16; + + /// Does this VM use conservative tracing? If `true` then VM can + /// query VO-bit (valid-object bit) to check if an object is live + /// during tracing work. + /// + /// Note that this is distinct from conservative stack scanning. When + /// collecting roots VO-bits are always available. + /// + /// Read more: [ObjectModel::NEED_VO_BITS_DURING_TRACING](mmtk::vm::ObjectModel::NEED_VO_BITS_DURING_TRACING). + /// + /// # Note + /// + /// - [`InternalPointer`](mm::conservative_roots::InternalPointer) can only be used when this is `true`. + #[cfg(feature = "cooperative")] + const CONSERVATIVE_TRACING: bool = false; + + /// Get currently active VM instance. + /// + /// # Notes + /// + /// At the moment we assume only one active VM per process. This can be changed in the future once MMTk supports + /// instances. In that case this function can return active VM for current thread instead of one global instance. + fn get() -> &'static Self; + + fn vmkit(&self) -> &VMKit; + + /// Prepare for another round of root scanning in the same GC. 
+ /// + /// For details: [Scanning::prepare_for_roots_re_scanning](mmtk::vm::Scanning::prepare_for_roots_re_scanning) + fn prepare_for_roots_re_scanning(); + + /// MMTk calls this method at the first time during a collection that thread's stacks have been scanned. This can be used (for example) to clean up obsolete compiled methods that are no longer being executed. + fn notify_initial_thread_scan_complete(partial_scan: bool, tls: mmtk::util::VMWorkerThread); + /// Process weak references. + /// + /// This function is called after a transitive closure is completed. + /// + /// For details: [Scanning::process_weak_refs](mmtk::vm::Scanning::process_weak_refs) + fn process_weak_refs( + _worker: &mut mmtk::scheduler::GCWorker>, + _tracer_context: impl mmtk::vm::ObjectTracerContext>, + ) -> bool { + false + } + + fn forward_weak_refs( + _worker: &mut mmtk::scheduler::GCWorker>, + _tracer_context: impl mmtk::vm::ObjectTracerContext>, + ); + /// Scan one mutator for stack roots. + /// + /// For details: [Scanning::scan_roots_in_mutator_thread](mmtk::vm::Scanning::scan_roots_in_mutator_thread) + fn scan_roots_in_mutator_thread( + tls: mmtk::util::VMWorkerThread, + mutator: &'static mut mmtk::Mutator>, + factory: impl mmtk::vm::RootsWorkFactory< as mmtk::vm::VMBinding>::VMSlot>, + ); + + /// Scan VM-specific roots. + /// + /// For details: [Scanning::scan_vm_specific_roots](mmtk::vm::Scanning::scan_vm_specific_roots) + fn scan_vm_specific_roots( + tls: mmtk::util::VMWorkerThread, + factory: impl mmtk::vm::RootsWorkFactory< as mmtk::vm::VMBinding>::VMSlot>, + ); + + /// A hook for the VM to do work after forwarding objects. 
+ fn post_forwarding(tls: mmtk::util::VMWorkerThread) { + let _ = tls; + } + + fn schedule_finalization(tls: mmtk::util::VMWorkerThread) { + let _ = tls; + } + + fn vm_live_bytes() -> usize { + 0 + } + + fn out_of_memory(tls: mmtk::util::VMThread, err_kind: mmtk::util::alloc::AllocationError) { + let _ = tls; + let _ = err_kind; + eprintln!("Out of memory: {:?}", err_kind); + std::process::exit(1); + } +} + +pub struct VMKit { + thread_manager: ThreadManager, + pub mmtk: MMTK>, + pub(crate) collector_started: AtomicBool, + marker: PhantomData, +} + +impl VMKit { + pub fn new(mut builder: MMTKBuilder) -> Self { + let vm_layout = aslr_vm_layout(&mut builder.options); + builder.set_vm_layout(vm_layout); + VMKit { + mmtk: builder.build(), + marker: PhantomData, + collector_started: AtomicBool::new(false), + thread_manager: ThreadManager::new(), + } + } + + pub(crate) fn are_collector_threads_spawned(&self) -> bool { + self.collector_started.load(atomic::Ordering::Relaxed) + } + + pub fn thread_manager(&self) -> &ThreadManager { + &self.thread_manager + } +} diff --git a/vmkit/src/main.rs b/vmkit/src/main.rs new file mode 100644 index 0000000..c035957 --- /dev/null +++ b/vmkit/src/main.rs @@ -0,0 +1,167 @@ +use easy_bitfield::{FromBitfield, ToBitfield}; +use mmtk::{util::options::PlanSelector, vm::slot::SimpleSlot, AllocationSemantics, MMTKBuilder}; +use std::sync::OnceLock; +use vmkit::{ + mm::{traits::ToSlot, MemoryManager}, + object_model::metadata::{GCMetadata, Metadata, Trace}, + threading::{GCBlockAdapter, Thread, ThreadContext}, + VMKit, VirtualMachine, +}; + +struct TestContext; + +impl ThreadContext for TestContext { + fn save_thread_state(&self) {} + + fn scan_roots(&self, _factory: impl mmtk::vm::RootsWorkFactory<::Slot>) {} + + fn scan_conservative_roots( + &self, + croots: &mut vmkit::mm::conservative_roots::ConservativeRoots, + ) { + let _ = croots; + } + + fn new(_collector_context: bool) -> Self { + Self + } +} +static VM_STORAGE: OnceLock = 
OnceLock::new(); +impl VirtualMachine for VM { + const MAX_ALIGNMENT: usize = 16; + const MIN_ALIGNMENT: usize = 16; + type ThreadContext = TestContext; + type BlockAdapterList = (GCBlockAdapter, ()); + type Metadata = &'static GCMetadata; + type Slot = SimpleSlot; + + fn get() -> &'static Self { + VM_STORAGE.get().unwrap() + } + + fn vmkit(&self) -> &VMKit { + &self.vmkit + } + + fn prepare_for_roots_re_scanning() {} + + fn notify_initial_thread_scan_complete(_partial_scan: bool, _tls: mmtk::util::VMWorkerThread) {} + + fn scan_roots_in_mutator_thread( + _tls: mmtk::util::VMWorkerThread, + _mutator: &'static mut mmtk::Mutator>, + _factory: impl mmtk::vm::RootsWorkFactory< + as mmtk::vm::VMBinding>::VMSlot, + >, + ) { + } + + fn scan_vm_specific_roots( + _tls: mmtk::util::VMWorkerThread, + _factory: impl mmtk::vm::RootsWorkFactory< + as mmtk::vm::VMBinding>::VMSlot, + >, + ) { + } + + fn forward_weak_refs( + _worker: &mut mmtk::scheduler::GCWorker>, + _tracer_context: impl mmtk::vm::ObjectTracerContext>, + ) { + todo!() + } +} + +struct VM { + vmkit: VMKit, +} + +static METADATA: GCMetadata = GCMetadata { + instance_size: 48, + compute_size: None, + trace: Trace::TraceObject(|object, _tracer| { + println!("tracing {}", object.as_address()); + }), + alignment: 16, +}; + +struct FooMeta; + +impl Metadata for FooMeta { + const METADATA_BIT_SIZE: usize = 56; + fn gc_metadata(&self) -> &'static GCMetadata { + &METADATA + } + + fn is_object(&self) -> bool { + false + } + + fn from_object_reference(_reference: mmtk::util::ObjectReference) -> Self { + unreachable!() + } + + fn to_object_reference(&self) -> Option { + unreachable!() + } +} + +impl ToSlot for FooMeta { + fn to_slot(&self) -> Option { + None + } +} + +impl FromBitfield for FooMeta { + fn from_bitfield(_bitfield: u64) -> Self { + FooMeta + } + + fn from_i64(_value: i64) -> Self { + FooMeta + } +} + +impl ToBitfield for FooMeta { + fn to_bitfield(self) -> u64 { + 0 + } + + fn one() -> Self { + FooMeta + } + + 
fn zero() -> Self { + FooMeta + } +} + +extern "C-unwind" fn handler(signum: libc::c_int) { + println!("signal {signum}"); + println!("backtrace:\n{}", std::backtrace::Backtrace::force_capture()); + std::process::exit(1); +} + +fn main() { + unsafe { + libc::signal(libc::SIGSEGV, handler as usize); + } + + let mut mmtk = MMTKBuilder::new(); + mmtk.options.plan.set(PlanSelector::Immix); + mmtk.options.threads.set(1); + VM_STORAGE.get_or_init(|| VM { + vmkit: VMKit::new(mmtk), + }); + + Thread::::main(TestContext, || { + let tls = Thread::::current(); + let my_obj = MemoryManager::allocate(tls, 48, 16, &METADATA, AllocationSemantics::Default); + + println!("Allocated object at {}", my_obj.as_address()); + + MemoryManager::::request_gc(); + + println!("object {} at {:p}", my_obj.as_address(), &my_obj); + }); +} diff --git a/vmkit/src/mm.rs b/vmkit/src/mm.rs new file mode 100644 index 0000000..e1f9f96 --- /dev/null +++ b/vmkit/src/mm.rs @@ -0,0 +1,402 @@ +use crate::{ + object_model::{ + header::{HeapObjectHeader, OBJECT_REF_OFFSET}, + object::VMKitObject, + VMKitObjectModel, + }, + threading::Thread, + VirtualMachine, +}; +use easy_bitfield::{AtomicBitfieldContainer, ToBitfield}; +use mmtk::{ + util::{ + alloc::{AllocatorSelector, BumpAllocator, ImmixAllocator}, + metadata::side_metadata::GLOBAL_SIDE_METADATA_BASE_ADDRESS, + VMMutatorThread, + }, + vm::{ + slot::{Slot, UnimplementedMemorySlice}, + VMBinding, + }, + AllocationSemantics, BarrierSelector, MutatorContext, +}; +use std::marker::PhantomData; + +#[derive(Clone, Copy)] +pub struct MemoryManager(PhantomData); + +impl Default for MemoryManager { + fn default() -> Self { + Self(PhantomData) + } +} + +impl VMBinding for MemoryManager { + type VMMemorySlice = UnimplementedMemorySlice; + type VMSlot = VM::Slot; + + type VMObjectModel = VMKitObjectModel; + type VMActivePlan = active_plan::VMKitActivePlan; + type VMCollection = collection::VMKitCollection; + type VMScanning = scanning::VMKitScanning; + type 
VMReferenceGlue = ref_glue::VMKitReferenceGlue; + + const MAX_ALIGNMENT: usize = VM::MAX_ALIGNMENT; + const MIN_ALIGNMENT: usize = VM::MIN_ALIGNMENT; +} + +pub mod active_plan; +pub mod aslr; +pub mod collection; +pub mod conservative_roots; +pub mod ref_glue; +pub mod scanning; +pub mod stack_bounds; +pub mod tlab; +pub mod traits; + +impl MemoryManager { + pub extern "C-unwind" fn request_gc() -> bool { + let tls = Thread::::current(); + + mmtk::memory_manager::handle_user_collection_request( + &VM::get().vmkit().mmtk, + VMMutatorThread(tls.to_vm_thread()), + ) + } + + /// Allocate object with `size`, `alignment`, and `metadata` with specified `semantics`. + /// + /// This function is a fast-path for allocation. If you allocate with `Default` semantics, + /// this function will first try to allocate through bump-pointer in TLAB. If other + /// semantic is specified or TLAB is empty (or unavailable) we invoke MMTK API directly. + #[inline] + pub extern "C-unwind" fn allocate( + thread: &Thread, + size: usize, + alignment: usize, + metadata: VM::Metadata, + mut semantics: AllocationSemantics, + ) -> VMKitObject { + if semantics == AllocationSemantics::Default + && size >= thread.max_non_los_default_alloc_bytes() + { + semantics = AllocationSemantics::Los; + } + // all allocator functions other than this actually invoke `flush_tlab` due to the fact + // that GC can happen inside them. 
+ match semantics { + AllocationSemantics::Los => Self::allocate_los(thread, size, alignment, metadata), + AllocationSemantics::NonMoving => { + Self::allocate_nonmoving(thread, size, alignment, metadata) + } + AllocationSemantics::Immortal => { + Self::allocate_immortal(thread, size, alignment, metadata) + } + _ => unsafe { + let tlab = thread.tlab.get().as_mut().unwrap(); + let object_start = tlab.allocate(size, alignment); + if !object_start.is_zero() { + object_start.store(HeapObjectHeader:: { + metadata: AtomicBitfieldContainer::new(metadata.to_bitfield()), + marker: PhantomData, + }); + let object = VMKitObject::from_address(object_start + OBJECT_REF_OFFSET); + Self::set_vo_bit(object); + Self::refill_tlab(thread); + return object; + } + + Self::allocate_slow(thread, size, alignment, metadata, semantics) + }, + } + } + + pub extern "C-unwind" fn allocate_los( + thread: &Thread, + size: usize, + alignment: usize, + metadata: VM::Metadata, + ) -> VMKitObject { + debug_assert!(thread.id() == Thread::::current().id()); + + unsafe { + Self::flush_tlab(thread); + let object_start = mmtk::memory_manager::alloc( + thread.mutator(), + size, + alignment, + 0, + AllocationSemantics::Los, + ); + + let object = VMKitObject::from_address(object_start + OBJECT_REF_OFFSET); + object_start.store(HeapObjectHeader:: { + metadata: AtomicBitfieldContainer::new(metadata.to_bitfield()), + marker: PhantomData, + }); + + Self::set_vo_bit(object); + Self::refill_tlab(thread); + object + } + } + + pub extern "C-unwind" fn allocate_nonmoving( + thread: &Thread, + size: usize, + alignment: usize, + metadata: VM::Metadata, + ) -> VMKitObject { + debug_assert!(thread.id() == Thread::::current().id()); + + unsafe { + Self::flush_tlab(thread); + let object_start = mmtk::memory_manager::alloc( + thread.mutator(), + size, + alignment, + 0, + AllocationSemantics::NonMoving, + ); + + let object = VMKitObject::from_address(object_start + OBJECT_REF_OFFSET); + object_start.store(HeapObjectHeader:: 
{ + metadata: AtomicBitfieldContainer::new(metadata.to_bitfield()), + marker: PhantomData, + }); + Self::set_vo_bit(object); + Self::refill_tlab(thread); + object + } + } + + pub extern "C-unwind" fn allocate_immortal( + thread: &Thread, + size: usize, + alignment: usize, + metadata: VM::Metadata, + ) -> VMKitObject { + debug_assert!(thread.id() == Thread::::current().id()); + + unsafe { + Self::flush_tlab(thread); + let object_start = mmtk::memory_manager::alloc( + thread.mutator(), + size, + alignment, + 0, + AllocationSemantics::Immortal, + ); + + let object = VMKitObject::from_address(object_start + OBJECT_REF_OFFSET); + object_start.store(HeapObjectHeader:: { + metadata: AtomicBitfieldContainer::new(metadata.to_bitfield()), + marker: PhantomData, + }); + Self::set_vo_bit(object); + Self::refill_tlab(thread); + object + } + } + + /// Allocate object in a slowpath. This will reset TLAB and invoke MMTK directly to allocate. This + /// function potentially triggers GC or allocates more heap memory if necessary. 
+ #[inline(never)] + #[cold] + pub extern "C-unwind" fn allocate_slow( + thread: &Thread, + size: usize, + alignment: usize, + metadata: VM::Metadata, + semantics: AllocationSemantics, + ) -> VMKitObject { + unsafe { + Self::flush_tlab(thread); + let object_start = + mmtk::memory_manager::alloc_slow(thread.mutator(), size, alignment, 0, semantics); + + object_start.store(HeapObjectHeader:: { + metadata: AtomicBitfieldContainer::new(metadata.to_bitfield()), + marker: PhantomData, + }); + let object = VMKitObject::from_address(object_start + OBJECT_REF_OFFSET); + Self::set_vo_bit(object); + Self::refill_tlab(thread); + object + } + } + + #[inline(never)] + pub extern "C-unwind" fn set_vo_bit(object: VMKitObject) { + #[cfg(feature = "cooperative")] + unsafe { + let meta = mmtk::util::metadata::vo_bit::VO_BIT_SIDE_METADATA_ADDR + + (object.as_address() >> 6); + let byte = meta.load::(); + let mask = 1 << ((object.as_address() >> 3) & 7); + let new_byte = byte | mask; + meta.store::(new_byte); + } + } + + /// Flushes the TLAB by storing the TLAB to underlying allocator. + /// + /// Leaves TLAB cursor set to zero. Invoke `refill_tlab` to rebind the TLAB. 
+ /// + /// + /// # Safety + /// + /// Must be invoked in the context of current thread and before `alloc()` calls into MMTK + pub unsafe fn flush_tlab(thread: &Thread) { + let tlab = thread.tlab.get().as_mut().unwrap(); + let (cursor, limit) = tlab.take(); + if cursor.is_zero() { + assert!(limit.is_zero()); + return; + } + let selector = mmtk::memory_manager::get_allocator_mapping( + &VM::get().vmkit().mmtk, + AllocationSemantics::Default, + ); + match selector { + AllocatorSelector::Immix(_) => { + let allocator = thread + .mutator_unchecked() + .allocator_impl_mut::>(selector); + allocator.bump_pointer.reset(cursor, limit); + } + + AllocatorSelector::BumpPointer(_) => { + let allocator = thread + .mutator_unchecked() + .allocator_impl_mut::>(selector); + allocator.bump_pointer.reset(cursor, limit); + } + + _ => { + assert!( + cursor.is_zero() && limit.is_zero(), + "currently selected plan has no TLAB" + ); + } + } + } + + pub unsafe fn refill_tlab(thread: &Thread) { + let tlab = thread.tlab.get().as_mut().unwrap(); + + let selector = mmtk::memory_manager::get_allocator_mapping( + &VM::get().vmkit().mmtk, + AllocationSemantics::Default, + ); + + match selector { + AllocatorSelector::Immix(_) => { + let allocator = thread + .mutator() + .allocator_impl::>(selector); + let (cursor, limit) = (allocator.bump_pointer.cursor, allocator.bump_pointer.limit); + tlab.rebind(cursor, limit); + } + + AllocatorSelector::BumpPointer(_) => { + let allocator = thread + .mutator() + .allocator_impl::>(selector); + let (cursor, limit) = (allocator.bump_pointer.cursor, allocator.bump_pointer.limit); + tlab.rebind(cursor, limit); + } + + _ => {} + } + } + + /// The write barrier by MMTk. This is a *post* write barrier, which we expect a binding to call + /// *after* it modifies an object. For performance reasons, a VM should implement the write barrier + /// fast-path on their side rather than just calling this function. 
+ /// + /// For a correct barrier implementation, a VM binding needs to choose one of the following options: + /// * Use subsuming barrier `object_reference_write` + /// * Use both `object_reference_write_pre` and `object_reference_write_post`, or both, if the binding has difficulty delegating the store to mmtk-core with the subsuming barrier. + /// * Implement fast-path on the VM side, and call the generic api `object_reference_write_slow` as barrier slow-path call. + /// * Implement fast-path on the VM side, and do a specialized slow-path call. + /// + /// Arguments: + /// * `thread`: a current thread. + /// * `src`: The modified source object. + /// * `slot`: The location of the field to be modified. + /// * `target`: The target for the write operation. `NULL` if the slot no longer hold an object + /// reference after the write operation. This may happen when writing a `null` reference, a small + /// integers, or a special value such as`true`, `false`, `undefined`, etc., into the slot. + pub fn object_reference_write_post( + thread: &Thread, + src: VMKitObject, + slot: VM::Slot, + target: VMKitObject, + ) { + match thread.barrier() { + BarrierSelector::ObjectBarrier => unsafe { + let addr = src.as_address(); + let meta_addr = GLOBAL_SIDE_METADATA_BASE_ADDRESS + (addr >> 6); + let shift = (addr >> 3) & 0b111; + let byte_val = meta_addr.load::(); + if (byte_val >> shift) & 1 == 1 { + thread.mutator().barrier().object_reference_write_slow( + src.as_object_unchecked(), + slot, + if target.is_null() { + None + } else { + Some(target.as_object_unchecked()) + }, + ); + } + }, + + BarrierSelector::NoBarrier => {} + } + } + /// The write barrier by MMTk. This is a *pre* write barrier, which we expect a binding to call + /// *before* it modifies an object. For performance reasons, a VM should implement the write barrier + /// fast-path on their side rather than just calling this function. 
+ /// + /// For a correct barrier implementation, a VM binding needs to choose one of the following options: + /// * Use subsuming barrier `object_reference_write` + /// * Use both `object_reference_write_pre` and `object_reference_write_post`, or both, if the binding has difficulty delegating the store to mmtk-core with the subsuming barrier. + /// * Implement fast-path on the VM side, and call the generic api `object_reference_write_slow` as barrier slow-path call. + /// * Implement fast-path on the VM side, and do a specialized slow-path call. + /// + /// Arguments: + /// * `thread`: a current thread. + /// * `src`: The modified source object. + /// * `slot`: The location of the field to be modified. + /// * `target`: The target for the write operation. `NULL` if the slot did not hold an object + /// reference before the write operation. For example, the slot may be holding a `null` + /// reference, a small integer, or special values such as `true`, `false`, `undefined`, etc. + pub fn object_reference_write_pre( + thread: &Thread, + src: VMKitObject, + slot: VM::Slot, + target: VMKitObject, + ) { + let _ = thread; + let _ = src; + let _ = slot; + let _ = target; + } + + pub fn object_reference_write( + thread: &Thread, + src: VMKitObject, + slot: VM::Slot, + target: VMKitObject, + ) { + assert!(target.is_not_null()); + Self::object_reference_write_pre(thread, src, slot, target); + unsafe { + slot.store(target.as_object_unchecked()); + } + Self::object_reference_write_post(thread, src, slot, target); + } +} diff --git a/vmkit/src/mm/active_plan.rs b/vmkit/src/mm/active_plan.rs new file mode 100644 index 0000000..907ae61 --- /dev/null +++ b/vmkit/src/mm/active_plan.rs @@ -0,0 +1,51 @@ +use std::marker::PhantomData; + +use mmtk::vm::*; + +use crate::{mm::MemoryManager, threading::Thread, VirtualMachine}; + + +pub struct VMKitActivePlan(PhantomData); + +impl ActivePlan> for VMKitActivePlan { + + fn is_mutator(tls: mmtk::util::VMThread) -> bool { + 
Thread::::from_vm_thread(tls).active_mutator_context() + } + + fn mutator(tls: mmtk::util::VMMutatorThread) -> &'static mut mmtk::Mutator> { + // SAFETY: `mutator()` is invoked by MMTk only when all threads are suspended for GC or + // MMTk is in allocation slowpath in *current* thread. + // At this point no other thread can access this thread's context. + unsafe { Thread::::from_vm_mutator_thread(tls).mutator_unchecked() } + } + + fn mutators<'a>() -> Box>> + 'a> { + let vm = VM::get(); + Box::new(vm.vmkit().thread_manager() + .threads() + .filter(|t| t.active_mutator_context()) + .map(|t| + + // SAFETY: `mutators()` is invoked by MMTk only when all threads are suspended for GC. + // At this point no other thread can access this thread's context. + unsafe { t.mutator_unchecked() })) + } + + + fn number_of_mutators() -> usize { + let vm = VM::get(); + vm.vmkit().thread_manager() + .threads() + .filter(|t| t.active_mutator_context()) + .count() + } + + fn vm_trace_object( + _queue: &mut Q, + _object: mmtk::util::ObjectReference, + _worker: &mut mmtk::scheduler::GCWorker>, + ) -> mmtk::util::ObjectReference { + todo!() + } +} diff --git a/vmkit/src/mm/aslr.rs b/vmkit/src/mm/aslr.rs new file mode 100644 index 0000000..3ce2e37 --- /dev/null +++ b/vmkit/src/mm/aslr.rs @@ -0,0 +1,95 @@ +//! Address space layout randomization for MMTk. 
+ +use mmtk::util::{ + conversions::raw_align_down, heap::vm_layout::{VMLayout, BYTES_IN_CHUNK}, options::{GCTriggerSelector, Options}, Address +}; +use rand::Rng; + +use crate::options::OPTIONS; + + +fn get_random_mmap_addr() -> Address { + let mut rng = rand::rng(); + let uniform = rand::distr::Uniform::new(0, usize::MAX).unwrap(); + let mut raw_addr = rng.sample(uniform); + + raw_addr = raw_align_down(raw_addr, BYTES_IN_CHUNK); + + raw_addr &= 0x3FFFFFFFF000; + + unsafe { Address::from_usize(raw_addr) } +} + +fn is_address_maybe_unmapped(addr: Address, size: usize) -> bool { + #[cfg(unix)] + { + unsafe { + let res = libc::msync(addr.to_mut_ptr(), size, libc::MS_ASYNC); + if res == -1 && errno::errno().0 == libc::ENOMEM { + return true; + } + } + } + false +} + +pub fn aslr_vm_layout(mmtk_options: &mut Options) -> VMLayout { + let mut vm_layout = VMLayout::default(); + let options = &*OPTIONS; + + if options.compressed_pointers { + vm_layout.heap_start = unsafe { Address::from_usize(0x4000_0000) }; + vm_layout.heap_end = vm_layout.heap_start + 32usize * 1024 * 1024 + } + + + let size = if options.compressed_pointers { + if options.tag_compressed_pointers { + 32 * 1024 * 1024 + } else { + 16 * 1024 * 1024 + } + } else { + options.max_heap_size + }; + if options.aslr { + vm_layout.force_use_contiguous_spaces = false; + loop { + let start = get_random_mmap_addr().align_down(BYTES_IN_CHUNK); + if !is_address_maybe_unmapped(start, size) { + continue; + } + + let end = (start + size).align_up(BYTES_IN_CHUNK); + vm_layout.heap_start = start; + vm_layout.heap_end = end; + + if !options.compressed_pointers { + vm_layout.log_address_space = if cfg!(target_pointer_width = "32") { + 31 + } else { + 47 + }; + vm_layout.log_space_extent = size.trailing_zeros() as usize - 1; + } + + break; + } + + + } + + if options.compressed_pointers { + vm_layout.log_address_space = 36; + vm_layout.log_space_extent = 31; + } + + mmtk_options.gc_trigger.set(GCTriggerSelector::DynamicHeapSize( 
+ options.min_heap_size, + options.max_heap_size, + )); + + vm_layout + + +} diff --git a/vmkit/src/mm/collection.rs b/vmkit/src/mm/collection.rs new file mode 100644 index 0000000..9056af1 --- /dev/null +++ b/vmkit/src/mm/collection.rs @@ -0,0 +1,87 @@ +use std::marker::PhantomData; + +use crate::{ + mm::MemoryManager, + threading::{GCBlockAdapter, Thread}, + VirtualMachine, +}; +use mmtk::{ + util::VMWorkerThread, + vm::{Collection, GCThreadContext}, +}; + +pub struct VMKitCollection(PhantomData); + +impl Collection> for VMKitCollection { + fn stop_all_mutators(_tls: mmtk::util::VMWorkerThread, mut mutator_visitor: F) + where + F: FnMut(&'static mut mmtk::Mutator>), + { + let vmkit = VM::get().vmkit(); + + let mutators = vmkit.thread_manager().block_all_mutators_for_gc(); + + for mutator in mutators { + unsafe { + // reset TLAb if there's any. + MemoryManager::flush_tlab(&mutator); + mutator_visitor(mutator.mutator_unchecked()); + } + } + } + + fn block_for_gc(tls: mmtk::util::VMMutatorThread) { + let tls = Thread::::from_vm_mutator_thread(tls); + tls.block_unchecked::(true); + } + + fn resume_mutators(_tls: mmtk::util::VMWorkerThread) { + let vmkit = VM::get().vmkit(); + vmkit.thread_manager().unblock_all_mutators_for_gc(); + } + + fn create_gc_trigger() -> Box>> { + todo!() + } + + fn is_collection_enabled() -> bool { + true + } + + fn out_of_memory(_tls: mmtk::util::VMThread, _err_kind: mmtk::util::alloc::AllocationError) { + todo!() + } + + fn post_forwarding(_tls: mmtk::util::VMWorkerThread) { + VM::post_forwarding(_tls); + } + + fn schedule_finalization(_tls: mmtk::util::VMWorkerThread) { + + } + + fn spawn_gc_thread( + _tls: mmtk::util::VMThread, + ctx: mmtk::vm::GCThreadContext>, + ) { + let thread = Thread::::for_collector(); + + match ctx { + GCThreadContext::Worker(worker_ctx) => { + thread.start(move || { + let vm = VM::get(); + let tls = Thread::::current().to_vm_thread(); + mmtk::memory_manager::start_worker( + &vm.vmkit().mmtk, + 
VMWorkerThread(tls), + worker_ctx, + ); + }); + } + } + } + + fn vm_live_bytes() -> usize { + VM::vm_live_bytes() + } +} diff --git a/vmkit/src/mm/conservative_roots.rs b/vmkit/src/mm/conservative_roots.rs new file mode 100644 index 0000000..01c6a63 --- /dev/null +++ b/vmkit/src/mm/conservative_roots.rs @@ -0,0 +1,203 @@ +use std::{ + hash::Hash, + marker::PhantomData, + ops::{Deref, DerefMut}, + ptr::NonNull, +}; + +use mmtk::{ + util::{Address, ObjectReference}, + vm::{slot::Slot, RootsWorkFactory}, +}; + +use crate::{object_model::object::VMKitObject, options::OPTIONS, VirtualMachine}; + +pub struct ConservativeRoots { + pub roots: Vec, + pub internal_pointer_limit: usize, +} + +impl ConservativeRoots { + /// Add a pointer to conservative root set. + /// + /// If pointer is not in the heap, this function does nothing. + pub fn add_pointer(&mut self, pointer: Address) { + let starting_address = mmtk::memory_manager::starting_heap_address(); + let ending_address = mmtk::memory_manager::last_heap_address(); + + if pointer < starting_address || pointer > ending_address { + return; + } + + let Some(start) = mmtk::memory_manager::find_object_from_internal_pointer( + pointer, + self.internal_pointer_limit, + ) else { + return; + }; + + self.roots.push(start); + } + + /// Add all pointers in the span to the roots. + /// + /// # SAFETY + /// + /// `start` and `end` must be valid addresses. + pub unsafe fn add_span(&mut self, mut start: Address, mut end: Address) { + if start > end { + std::mem::swap(&mut start, &mut end); + } + let mut current = start; + while current < end { + let addr = current.load::
(); + self.add_pointer(addr); + current = current.add(size_of::
()); + } + } + + pub fn new() -> Self { + Self { + roots: Vec::new(), + internal_pointer_limit: OPTIONS.interior_pointer_max_bytes, + } + } + + pub fn add_to_factory(&mut self, factory: &mut impl RootsWorkFactory) { + factory.create_process_pinning_roots_work(std::mem::take(&mut self.roots)); + } +} + +/// Pointer to some part of an heap object. +/// This type can only be used when `cooperative` feature is enabled and [`VM::CONSERVATIVE_TRACING`](VirtualMachine::CONSERVATIVE_TRACING) is `true`. +pub struct InternalPointer { + address: Address, + _marker: std::marker::PhantomData<(NonNull, &'static VM)>, +} + +unsafe impl Send for InternalPointer {} +unsafe impl Sync for InternalPointer {} + +impl InternalPointer { + pub fn new(address: Address) -> Self { + if !cfg!(feature = "cooperative") { + unreachable!("Internal pointers are not supported in precise mode"); + } + debug_assert!( + mmtk::memory_manager::find_object_from_internal_pointer( + address, + OPTIONS.interior_pointer_max_bytes + ) + .is_some(), + "Internal pointer is not in the heap" + ); + assert!(VM::CONSERVATIVE_TRACING, "Internal pointers are not supported without VM::CONSERVATIVE_TRACING set to true"); + Self { + address, + _marker: std::marker::PhantomData, + } + } + + pub fn as_address(&self) -> Address { + self.address + } + + /// Get original object from the pointer. + /// + /// # Panics + /// + /// Panics if the pointer is not in the heap. + pub fn object(&self) -> VMKitObject { + mmtk::memory_manager::find_object_from_internal_pointer(self.address, OPTIONS.interior_pointer_max_bytes).unwrap().into() + } + + /// Return offset from the object start. 
+ pub fn offset(&self) -> usize { + self.address.as_usize() - self.object().as_address().as_usize() + } +} + +impl Deref for InternalPointer { + type Target = T; + + fn deref(&self) -> &Self::Target { + unsafe { &*self.address.as_ref() } + } +} + +impl DerefMut for InternalPointer { + fn deref_mut(&mut self) -> &mut Self::Target { + unsafe { &mut *self.address.as_mut_ref() } + } +} + +/// Identical to [`InternalPointer`] but does not rely on VO-bits being enabled. +/// +/// Stores original object and offset instead. This makes +/// this struct "fat" as its a 16-byte pointer on 64-bit systems compared to 8-bytes +/// of [`InternalPointer`]. This type can be used in precise mode. +pub struct FatInternalPointer { + offset: usize, + object: VMKitObject, + marker: PhantomData<(NonNull, &'static VM)>, +} + +unsafe impl Send for FatInternalPointer {} +unsafe impl Sync for FatInternalPointer {} + +impl Deref for FatInternalPointer { + type Target = T; + + fn deref(&self) -> &Self::Target { + unsafe { self.object.as_address().add(self.offset).as_ref() } + } +} + +impl DerefMut for FatInternalPointer { + fn deref_mut(&mut self) -> &mut Self::Target { + unsafe { self.object.as_address().add(self.offset).as_mut_ref() } + } +} + +impl Clone for FatInternalPointer { + fn clone(&self) -> Self { + Self { + offset: self.offset, + object: self.object, + marker: PhantomData, + } + } +} + +impl PartialEq for FatInternalPointer { + fn eq(&self, other: &Self) -> bool { + self.offset == other.offset && self.object == other.object + } +} + +impl Eq for FatInternalPointer {} + +impl Hash for FatInternalPointer { + fn hash(&self, state: &mut H) { + self.offset.hash(state); + self.object.hashcode::().hash(state); + } +} + +impl FatInternalPointer { + pub fn new(object: VMKitObject, offset: usize) -> Self { + Self { + offset, + object, + marker: PhantomData, + } + } + + pub fn offset(&self) -> usize { + self.offset + } + + pub fn object(&self) -> VMKitObject { + self.object + } +} diff --git 
a/vmkit/src/mm/ref_glue.rs b/vmkit/src/mm/ref_glue.rs new file mode 100644 index 0000000..b1f7bb6 --- /dev/null +++ b/vmkit/src/mm/ref_glue.rs @@ -0,0 +1,49 @@ +#![allow(dead_code, unused_variables)] + +use std::marker::PhantomData; + +use mmtk::vm::{Finalizable, ReferenceGlue}; + +use crate::{object_model::object::VMKitObject, VirtualMachine}; + +use super::MemoryManager; + + +pub struct VMKitReferenceGlue(PhantomData); + + + +impl ReferenceGlue> for VMKitReferenceGlue { + type FinalizableType = VMKitObject; + + fn clear_referent(new_reference: mmtk::util::ObjectReference) { + + } + + fn enqueue_references(references: &[mmtk::util::ObjectReference], tls: mmtk::util::VMWorkerThread) { + + } + + fn get_referent(object: mmtk::util::ObjectReference) -> Option { + todo!() + } + + fn set_referent(reff: mmtk::util::ObjectReference, referent: mmtk::util::ObjectReference) { + todo!() + } + +} + +impl Finalizable for VMKitObject { + fn get_reference(&self) -> mmtk::util::ObjectReference { + todo!() + } + + fn keep_alive(&mut self, trace: &mut E) { + + } + + fn set_reference(&mut self, object: mmtk::util::ObjectReference) { + + } +} \ No newline at end of file diff --git a/vmkit/src/mm/scanning.rs b/vmkit/src/mm/scanning.rs new file mode 100644 index 0000000..a233643 --- /dev/null +++ b/vmkit/src/mm/scanning.rs @@ -0,0 +1,154 @@ +use std::marker::PhantomData; + +use crate::{ + mm::MemoryManager, + object_model::{ + metadata::{Metadata, Trace}, + object::VMKitObject, + }, + options::OPTIONS, + threading::{Thread, ThreadContext}, + VirtualMachine, +}; +use mmtk::{ + vm::{slot::Slot, ObjectTracer, Scanning, SlotVisitor}, + MutatorContext, +}; + +use super::{conservative_roots::ConservativeRoots, traits::ToSlot}; + +pub struct VMKitScanning(PhantomData); + +impl Scanning> for VMKitScanning { + fn forward_weak_refs( + _worker: &mut mmtk::scheduler::GCWorker>, + _tracer_context: impl mmtk::vm::ObjectTracerContext>, + ) { + VM::forward_weak_refs(_worker, _tracer_context); + } + + 
fn notify_initial_thread_scan_complete(partial_scan: bool, tls: mmtk::util::VMWorkerThread) { + VM::notify_initial_thread_scan_complete(partial_scan, tls); + } + + fn prepare_for_roots_re_scanning() { + VM::prepare_for_roots_re_scanning(); + } + + fn process_weak_refs( + _worker: &mut mmtk::scheduler::GCWorker>, + _tracer_context: impl mmtk::vm::ObjectTracerContext>, + ) -> bool { + VM::process_weak_refs(_worker, _tracer_context) + } + + fn scan_object< + SV: mmtk::vm::SlotVisitor< as mmtk::vm::VMBinding>::VMSlot>, + >( + _tls: mmtk::util::VMWorkerThread, + object: mmtk::util::ObjectReference, + slot_visitor: &mut SV, + ) { + let object = VMKitObject::from(object); + let metadata = object.header::().metadata(); + if metadata.is_object() { + slot_visitor.visit_slot(metadata.to_slot().expect("Object is not a slot")); + } + let gc_metadata = metadata.gc_metadata(); + let trace = &gc_metadata.trace; + match trace { + Trace::ScanSlots(fun) => { + fun(object, slot_visitor); + } + Trace::None => (), + Trace::TraceObject(_) => { + unreachable!("TraceObject is not supported for scanning"); + } + } + } + + fn scan_object_and_trace_edges( + _tls: mmtk::util::VMWorkerThread, + object: mmtk::util::ObjectReference, + object_tracer: &mut OT, + ) { + let object = VMKitObject::from(object); + let metadata = object.header::().metadata(); + let gc_metadata = metadata.gc_metadata(); + let trace = &gc_metadata.trace; + match trace { + Trace::ScanSlots(fun) => { + // wrap object tracer in a trait that implements SlotVisitor + // but actually traces the object directly. 
+ let mut visitor = TraceSlotVisitor:: { + ot: object_tracer, + marker: PhantomData, + }; + fun(object, &mut visitor); + } + Trace::None => (), + Trace::TraceObject(fun) => { + fun(object, object_tracer); + } + } + } + + fn scan_roots_in_mutator_thread( + _tls: mmtk::util::VMWorkerThread, + mutator: &'static mut mmtk::Mutator>, + mut factory: impl mmtk::vm::RootsWorkFactory, + ) { + let tls = Thread::::from_vm_mutator_thread(mutator.get_tls()); + tls.context.scan_roots(factory.clone()); + + if OPTIONS.conservative_stacks { + let mut croots = ConservativeRoots::new(); + let bounds = *tls.stack_bounds(); + unsafe { croots.add_span(bounds.origin(), bounds.end()) }; + tls.context.scan_conservative_roots(&mut croots); + croots.add_to_factory(&mut factory); + } + } + + fn scan_vm_specific_roots( + tls: mmtk::util::VMWorkerThread, + factory: impl mmtk::vm::RootsWorkFactory, + ) { + VM::scan_vm_specific_roots(tls, factory); + } + + fn support_slot_enqueuing( + _tls: mmtk::util::VMWorkerThread, + object: mmtk::util::ObjectReference, + ) -> bool { + let object = VMKitObject::from(object); + let metadata = object.header::().metadata(); + matches!(metadata.gc_metadata().trace, Trace::ScanSlots(_)) + && (!metadata.is_object() || metadata.to_slot().is_some()) + } + + fn supports_return_barrier() -> bool { + false + } +} + +struct TraceSlotVisitor<'a, VM: VirtualMachine, OT: ObjectTracer> { + ot: &'a mut OT, + marker: PhantomData, +} + +impl<'a, VM: VirtualMachine, OT: ObjectTracer> SlotVisitor + for TraceSlotVisitor<'a, VM, OT> +{ + fn visit_slot(&mut self, slot: VM::Slot) { + let value = slot.load(); + match value { + Some(object) => { + let object = self.ot.trace_object(object); + slot.store(object); + } + + None => (), + } + } +} diff --git a/vmkit/src/mm/stack_bounds.rs b/vmkit/src/mm/stack_bounds.rs new file mode 100644 index 0000000..6f68a7a --- /dev/null +++ b/vmkit/src/mm/stack_bounds.rs @@ -0,0 +1,272 @@ +use std::ptr::null_mut; + +use libc::pthread_attr_destroy; +use 
mmtk::util::Address; + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +pub struct StackBounds { + origin: Address, + bound: Address, +} + +impl StackBounds { + // Default reserved zone size + pub const DEFAULT_RESERVED_ZONE: usize = 64 * 1024; + + pub const fn empty_bounds() -> Self { + Self { + origin: Address::ZERO, + bound: Address::ZERO, + } + } + + pub fn current_thread_stack_bounds() -> Self { + let result = Self::current_thread_stack_bounds_internal(); + result.check_consistency(); + result + } + + pub fn origin(&self) -> Address { + debug_assert!(!self.origin.is_zero()); + self.origin + } + + pub fn end(&self) -> Address { + debug_assert!(!self.bound.is_zero()); + self.bound + } + + pub fn size(&self) -> usize { + self.origin - self.bound + } + + pub fn is_empty(&self) -> bool { + self.origin.is_zero() + } + + pub fn contains(&self, p: Address) -> bool { + if self.is_empty() { + return false; + } + self.origin >= p && p > self.bound + } + + pub fn recursion_limit(&self, min_reserved_zone: usize) -> Address { + self.check_consistency(); + self.bound + min_reserved_zone + } + + pub fn recursion_limit_with_params( + &self, + start_of_user_stack: Address, + max_user_stack: usize, + reserved_zone_size: usize, + ) -> Address { + self.check_consistency(); + + let reserved_zone_size = reserved_zone_size.min(max_user_stack); + let mut max_user_stack_with_reserved_zone = max_user_stack - reserved_zone_size; + + let end_of_stack_with_reserved_zone = self.bound.add(reserved_zone_size); + if start_of_user_stack < end_of_stack_with_reserved_zone { + return end_of_stack_with_reserved_zone; + } + + let available_user_stack = + start_of_user_stack.get_offset(end_of_stack_with_reserved_zone) as usize; + if max_user_stack_with_reserved_zone > available_user_stack { + max_user_stack_with_reserved_zone = available_user_stack; + } + + start_of_user_stack - max_user_stack_with_reserved_zone + } + + pub fn with_soft_origin(&self, origin: Address) -> Self { + 
assert!(self.contains(origin)); + Self { + origin, + bound: self.bound, + } + } + fn check_consistency(&self) { + #[cfg(debug_assertions)] + { + let current_position = current_stack_pointer(); + assert_ne!(self.origin, self.bound); + assert!(current_position < self.origin && current_position > self.bound); + } + } + + pub fn is_growing_downwards(&self) -> bool { + assert!(!self.origin.is_zero() && !self.bound.is_zero()); + self.bound <= self.origin + } +} + +#[cfg(unix)] +impl StackBounds { + pub unsafe fn new_thread_stack_bounds(handle: libc::pthread_t) -> Self { + #[cfg(target_vendor = "apple")] + unsafe { + let origin = libc::pthread_get_stackaddr_np(handle); + let size = libc::pthread_get_stacksize_np(handle); + let bound = origin.byte_sub(size); + Self { + origin: Address::from_ptr(origin), + bound: Address::from_ptr(bound), + } + } + + #[cfg(target_os = "openbsd")] + { + let mut stack: std::mem::MaybeUninit = std::mem::MaybeUninit::zeroed(); + unsafe { + libc::pthread_stackseg_np(handle, stack.as_mut_ptr() as _); + } + let stack = unsafe { stack.assume_init() }; + let origin = Address::from_ptr(stack.ss_sp); + let bound = origin - stack.ss_size as usize; + Self { origin, bound } + } + + #[cfg(all(unix, not(target_vendor = "apple"), not(target_os = "openbsd")))] + { + let mut bound = null_mut(); + let mut stack_size = 0; + + let mut sattr: std::mem::MaybeUninit = + std::mem::MaybeUninit::zeroed(); + + unsafe { + libc::pthread_attr_init(sattr.as_mut_ptr() as _); + #[cfg(target_os = "netbsd")] + { + libc::pthread_attr_get_np(handle, sattr.as_mut_ptr() as _); + } + #[cfg(not(target_os = "netbsd"))] + { + libc::pthread_getattr_np(handle, sattr.as_mut_ptr() as _); + } + libc::pthread_attr_getstack(sattr.assume_init_mut(), &mut bound, &mut stack_size); + pthread_attr_destroy(sattr.assume_init_mut()); + let bound = Address::from_ptr(bound); + let origin = bound + stack_size; + Self { origin, bound } + } + } + } + + fn current_thread_stack_bounds_internal() -> Self { 
+ let ret = unsafe { Self::new_thread_stack_bounds(libc::pthread_self()) }; + + /*#[cfg(target_os = "linux")] + unsafe { + // on glibc, pthread_attr_getstack will generally return the limit size (minus a guard page) + // for the main thread; this is however not necessarily always true on every libc - for example + // on musl, it will return the currently reserved size - since the stack bounds are expected to + // be constant (and they are for every thread except main, which is allowed to grow), check + // resource limits and use that as the boundary instead (and prevent stack overflows). + if libc::getpid() == libc::syscall(libc::SYS_gettid) as libc::pid_t { + let origin = ret.origin(); + let mut limit: std::mem::MaybeUninit = + std::mem::MaybeUninit::zeroed(); + libc::getrlimit(libc::RLIMIT_STACK, limit.as_mut_ptr() as _); + + let limit = limit.assume_init(); + let mut size = limit.rlim_cur; + if size == libc::RLIM_INFINITY { + size = 8 * 1024 * 1024; + } + size -= libc::sysconf(libc::_SC_PAGE_SIZE) as u64; + let bound = origin - size as usize; + + return Self { origin, bound }; + } + }*/ + + ret + } +} + +#[inline(never)] +pub fn current_stack_pointer() -> Address { + #[cfg(target_arch = "x86_64")] + { + let current_sp: usize; + unsafe { + std::arch::asm!( + "mov rax, rsp", + out("rax") current_sp, + ); + Address::from_usize(current_sp) + } + } + + #[cfg(not(target_arch = "x86_64"))] + { + let mut current_sp = Address::ZERO; + current_sp = Address::from_ptr(¤t_sp); + current_sp + } +} + +#[cfg(windows)] +impl StackBounds { + fn current_thread_stack_bounds_internal() -> Self { + use std::mem::*; + use winapi::um::winnt::*; + use winapi::um::memoryapi::*; + unsafe { + let mut stack_origin: MaybeUninit = MaybeUninit::uninit(); + + VirtualQuery( + stack_origin.as_mut_ptr(), + stack_origin.as_mut_ptr(), + size_of::(), + ); + + let stack_origin = stack_origin.assume_init(); + let origin = + Address::from_ptr(stack_origin.BaseAddress) + stack_origin.RegionSize as usize; 
+ // The stack on Windows consists out of three parts (uncommitted memory, a guard page and present + // committed memory). The 3 regions have different BaseAddresses but all have the same AllocationBase + // since they are all from the same VirtualAlloc. The 3 regions are laid out in memory (from high to + // low) as follows: + // + // High |-------------------| ----- + // | committedMemory | ^ + // |-------------------| | + // | guardPage | reserved memory for the stack + // |-------------------| | + // | uncommittedMemory | v + // Low |-------------------| ----- <--- stackOrigin.AllocationBase + // + // See http://msdn.microsoft.com/en-us/library/ms686774%28VS.85%29.aspx for more information. + + let mut uncommited_memory: MaybeUninit = MaybeUninit::uninit(); + VirtualQuery( + stack_origin.AllocationBase as _, + uncommited_memory.as_mut_ptr(), + size_of::(), + ); + let uncommited_memory = uncommited_memory.assume_init(); + assert_eq!(uncommited_memory.State, MEM_RESERVE); + + let mut guard_page: MaybeUninit = MaybeUninit::uninit(); + VirtualQuery( + (uncommited_memory.BaseAddress as usize + uncommited_memory.RegionSize as usize) as _, + guard_page.as_mut_ptr(), + size_of::(), + ); + + let guard_page = guard_page.assume_init(); + assert!(guard_page.Protect & PAGE_GUARD != 0); + + let end_of_stack = Address::from_ptr(stack_origin.AllocationBase); + + let bound = end_of_stack + guard_page.RegionSize as usize; + + Self { origin, bound } + } + } +} diff --git a/vmkit/src/mm/tlab.rs b/vmkit/src/mm/tlab.rs new file mode 100644 index 0000000..6bc1103 --- /dev/null +++ b/vmkit/src/mm/tlab.rs @@ -0,0 +1,45 @@ +use mmtk::util::{conversions::raw_align_up, Address}; + +/// Thread-local allocation buffer. 
+pub struct TLAB { + pub cursor: Address, + pub limit: Address, +} + +impl TLAB { + pub fn new() -> Self { + Self { + cursor: Address::ZERO, + limit: Address::ZERO, + } + } + + pub fn allocate(&mut self, size: usize, alignment: usize) -> Address { + let aligned_size = raw_align_up(size, alignment); + let result = self.cursor.align_up(alignment); + if result + aligned_size > self.limit { + + return Address::ZERO; + } else { + self.cursor = result.add(aligned_size); + return result; + } + } + + pub fn rebind(&mut self, cursor: Address, limit: Address) { + self.cursor = cursor; + self.limit = limit; + } + + pub fn reset(&mut self) { + self.cursor = Address::ZERO; + self.limit = Address::ZERO; + } + + pub fn take(&mut self) -> (Address, Address) { + let cursor = self.cursor; + let limit = self.limit; + self.reset(); + (cursor, limit) + } +} diff --git a/vmkit/src/mm/traits.rs b/vmkit/src/mm/traits.rs new file mode 100644 index 0000000..895fe16 --- /dev/null +++ b/vmkit/src/mm/traits.rs @@ -0,0 +1,301 @@ +//! Collection of traits for the memory manager. +//! +//! We provide all the traits to simplify implementation of a VM. You can simply +//! implement `Trace` or `Scan` to get a trace-able object for example. 
+ +use mmtk::{ + util::Address, + vm::{ + slot::{SimpleSlot, Slot}, + ObjectModel, ObjectTracer, SlotVisitor, + }, +}; + +use crate::{ + object_model::{object::{VMKitNarrow, VMKitObject}, VMKitObjectModel}, + options::OPTIONS, + VirtualMachine, +}; + +use super::conservative_roots::{FatInternalPointer, InternalPointer}; + +pub trait ToSlot { + fn to_slot(&self) -> Option; +} + +pub trait Trace { + fn trace_object(&mut self, tracer: &mut impl ObjectTracer); +} + +pub trait Scan { + fn scan_object(&self, visitor: &mut impl SlotVisitor); +} + +impl Trace for VMKitObject { + fn trace_object(&mut self, tracer: &mut impl ObjectTracer) { + if self.is_null() { + return; + } + let new_object = VMKitObject::from(tracer.trace_object((*self).try_into().unwrap())); + + if new_object != *self { + *self = new_object; + } + } +} + +impl Trace for InternalPointer { + fn trace_object(&mut self, tracer: &mut impl ObjectTracer) { + #[cfg(feature = "cooperative")] + { + assert!( + VMKitObjectModel::::NEED_VO_BITS_DURING_TRACING, + "VO-bits are not enabled during tracing, can't use internal pointers" + ); + + let start = mmtk::memory_manager::find_object_from_internal_pointer( + self.as_address(), + OPTIONS.interior_pointer_max_bytes, + ); + + if let Some(start) = start { + let offset = self.as_address() - start.to_raw_address(); + let new_object = VMKitObject::from(tracer.trace_object(start)); + *self = InternalPointer::new(new_object.as_address() + offset); + } + } + #[cfg(not(feature = "cooperative"))] + { + unreachable!("Internal pointers are not supported in precise mode"); + } + } +} + +impl Scan for VMKitObject { + fn scan_object(&self, visitor: &mut impl SlotVisitor) { + if let Some(slot) = self.to_slot() { + visitor.visit_slot(slot); + } + } +} + +impl ToSlot for VMKitObject { + fn to_slot(&self) -> Option { + Some(SL::from_vmkit_object(self)) + } +} + +/// Extra methods for types implementing `Slot` trait from MMTK. 
+pub trait SlotExtra: Slot { + /// Construct a slot from a `VMKitObject`. Must be always implemented + /// as internally we use `VMKitObject` to represent all objects. + fn from_vmkit_object(object: &VMKitObject) -> Self; + fn from_address(address: Address) -> Self; + + /// Construct a slot from an `InternalPointer`. VMs are not required to implement + /// this as InternalPointer can also be traced. + fn from_internal_pointer(pointer: &InternalPointer) -> Self { + let _ = pointer; + unimplemented!() + } + /// Construct a slot from a `FatInternalPointer`. VMs are not required to implement + /// this as `FatInternalPointer` can also be traced. + fn from_fat_internal_pointer( + pointer: &FatInternalPointer, + ) -> Self { + let _ = pointer; + unimplemented!() + } + + fn from_narrow(narrow: &VMKitNarrow) -> Self { + let _ = narrow; + unimplemented!() + } +} + +impl SlotExtra for SimpleSlot { + fn from_vmkit_object(object: &VMKitObject) -> Self { + Self::from_address(Address::from_ptr(object)) + } + + fn from_address(address: Address) -> Self { + SimpleSlot::from_address(address) + } + + fn from_internal_pointer(pointer: &InternalPointer) -> Self { + let _ = pointer; + unimplemented!("SimpleSlot does not support internal pointers") + } +} + +impl SlotExtra for Address { + fn from_vmkit_object(object: &VMKitObject) -> Self { + Address::from_ptr(object) + } + + fn from_address(address: Address) -> Self { + address + } + + fn from_internal_pointer(pointer: &InternalPointer) -> Self { + let _ = pointer; + unimplemented!("Address does not support internal pointers") + } +} + +/// Trait to check if type can be enqueued as a slot of an object. +/// +/// Slot is an address of a field of an object. When field +/// can't be enqueued, we simply trace it using `ObjectTracer`. 
+pub trait SupportsEnqueuing { + const VALUE: bool; +} + +impl SupportsEnqueuing for InternalPointer { + const VALUE: bool = false; +} + +impl SupportsEnqueuing for VMKitObject { + const VALUE: bool = true; +} + +macro_rules! impl_prim { + ($($t:ty)*) => { + $( + impl SupportsEnqueuing for $t { + const VALUE: bool = true; + } + + impl Scan for $t { + fn scan_object(&self, visitor: &mut impl SlotVisitor) { + let _ = visitor; + } + } + + impl ToSlot for $t { + fn to_slot(&self) -> Option { + None + } + } + + impl Trace for $t { + fn trace_object(&mut self, tracer: &mut impl ObjectTracer) { + let _ = tracer; + } + } + + )* + }; +} + +impl_prim! { + u8 u16 u32 u64 u128 usize + i8 i16 i32 i64 i128 isize + f32 f64 + bool char + String + std::fs::File +} + +impl SupportsEnqueuing for Vec { + const VALUE: bool = T::VALUE; // we don't enque vec itself but its elements. +} + +impl SupportsEnqueuing for Option { + const VALUE: bool = T::VALUE; +} + +impl SupportsEnqueuing for Result { + const VALUE: bool = T::VALUE && U::VALUE; +} + +impl Trace for Option { + fn trace_object(&mut self, tracer: &mut impl ObjectTracer) { + if let Some(value) = self { + value.trace_object(tracer); + } + } +} + +impl Trace for Vec { + fn trace_object(&mut self, tracer: &mut impl ObjectTracer) { + for value in self { + value.trace_object(tracer); + } + } +} + +impl Trace for [T; N] { + fn trace_object(&mut self, tracer: &mut impl ObjectTracer) { + for value in self { + value.trace_object(tracer); + } + } +} + +impl Trace for Box { + fn trace_object(&mut self, tracer: &mut impl ObjectTracer) { + (**self).trace_object(tracer); + } +} + +impl> Scan for Vec { + fn scan_object(&self, visitor: &mut impl SlotVisitor) { + for value in self { + value.scan_object(visitor); + } + } +} + +impl, const N: usize> Scan for [T; N] { + fn scan_object(&self, visitor: &mut impl SlotVisitor) { + for value in self { + value.scan_object(visitor); + } + } +} + +impl SupportsEnqueuing for FatInternalPointer { + const 
VALUE: bool = true; +} + +impl Scan for FatInternalPointer { + fn scan_object(&self, visitor: &mut impl SlotVisitor) { + visitor.visit_slot(self.object().to_slot().expect("never fails")); + } +} + +impl ToSlot for FatInternalPointer { + fn to_slot(&self) -> Option { + Some(self.object().to_slot().expect("never fails")) + } +} + +impl Trace for FatInternalPointer { + fn trace_object(&mut self, tracer: &mut impl ObjectTracer) { + self.object().trace_object(tracer); + } +} + + +impl Trace for VMKitNarrow { + fn trace_object(&mut self, tracer: &mut impl ObjectTracer) { + let mut object = self.to_object(); + object.trace_object(tracer); + *self = VMKitNarrow::encode(object); + } +} + +impl Scan for VMKitNarrow { + fn scan_object(&self, visitor: &mut impl SlotVisitor) { + let slot = SL::from_narrow(self); + visitor.visit_slot(slot); + } +} + +impl ToSlot for VMKitNarrow { + fn to_slot(&self) -> Option { + Some(SL::from_narrow(self)) + } +} + diff --git a/vmkit/src/object_model.rs b/vmkit/src/object_model.rs new file mode 100644 index 0000000..c7a6c5a --- /dev/null +++ b/vmkit/src/object_model.rs @@ -0,0 +1,194 @@ +use std::marker::PhantomData; + +use crate::{mm::MemoryManager, VirtualMachine}; +use easy_bitfield::BitFieldTrait; +use header::{HashState, HashStateField, HASHCODE_OFFSET, OBJECT_HEADER_OFFSET, OBJECT_REF_OFFSET}; +use mmtk::{ + util::{alloc::fill_alignment_gap, constants::LOG_BYTES_IN_ADDRESS, ObjectReference}, + vm::*, +}; +use object::{MoveTarget, VMKitObject}; + +pub mod header; +pub mod metadata; +pub mod object; +pub mod compression; + +pub struct VMKitObjectModel(PhantomData); + +pub const LOGGING_SIDE_METADATA_SPEC: VMGlobalLogBitSpec = VMGlobalLogBitSpec::side_first(); +pub const FORWARDING_POINTER_METADATA_SPEC: VMLocalForwardingPointerSpec = + VMLocalForwardingPointerSpec::in_header(0); +pub const FORWARDING_BITS_METADATA_SPEC: VMLocalForwardingBitsSpec = + VMLocalForwardingBitsSpec::in_header(HashStateField::NEXT_BIT as _); +pub const 
MARKING_METADATA_SPEC: VMLocalMarkBitSpec = VMLocalMarkBitSpec::side_first(); +pub const LOS_METADATA_SPEC: VMLocalLOSMarkNurserySpec = + VMLocalLOSMarkNurserySpec::in_header(HashStateField::NEXT_BIT as _); +impl ObjectModel> for VMKitObjectModel { + const GLOBAL_LOG_BIT_SPEC: mmtk::vm::VMGlobalLogBitSpec = LOGGING_SIDE_METADATA_SPEC; + const LOCAL_FORWARDING_POINTER_SPEC: mmtk::vm::VMLocalForwardingPointerSpec = + FORWARDING_POINTER_METADATA_SPEC; + const LOCAL_FORWARDING_BITS_SPEC: mmtk::vm::VMLocalForwardingBitsSpec = + FORWARDING_BITS_METADATA_SPEC; + const LOCAL_MARK_BIT_SPEC: mmtk::vm::VMLocalMarkBitSpec = MARKING_METADATA_SPEC; + const LOCAL_LOS_MARK_NURSERY_SPEC: mmtk::vm::VMLocalLOSMarkNurserySpec = LOS_METADATA_SPEC; + + const OBJECT_REF_OFFSET_LOWER_BOUND: isize = OBJECT_REF_OFFSET; + const UNIFIED_OBJECT_REFERENCE_ADDRESS: bool = false; + const VM_WORST_CASE_COPY_EXPANSION: f64 = 1.3; + + #[cfg(feature = "cooperative")] + const NEED_VO_BITS_DURING_TRACING: bool = VM::CONSERVATIVE_TRACING; + + fn copy( + from: mmtk::util::ObjectReference, + semantics: mmtk::util::copy::CopySemantics, + copy_context: &mut mmtk::util::copy::GCWorkerCopyContext>, + ) -> mmtk::util::ObjectReference { + let vmkit_from = VMKitObject::from(from); + + let bytes = vmkit_from.bytes_required_when_copied::(); + let align = vmkit_from.alignment::(); + let offset = vmkit_from.get_offset_for_alignment::(); + + let addr = copy_context.alloc_copy(from, bytes, align, offset, semantics); + + let vmkit_to_obj = Self::move_object(vmkit_from, MoveTarget::ToAddress(addr), bytes); + let to_obj = ObjectReference::from_raw_address(vmkit_to_obj.as_address()).unwrap(); + + copy_context.post_copy(to_obj, bytes, semantics); + + to_obj + } + + fn copy_to( + from: mmtk::util::ObjectReference, + to: mmtk::util::ObjectReference, + region: mmtk::util::Address, + ) -> mmtk::util::Address { + let vmkit_from = VMKitObject::from(from); + + let copy = from != to; + let bytes = if copy { + let vmkit_to = 
VMKitObject::from(to); + let bytes = vmkit_to.bytes_required_when_copied::(); + Self::move_object(vmkit_from, MoveTarget::ToObject(vmkit_to), bytes); + bytes + } else { + vmkit_from.bytes_used::() + }; + + let start = Self::ref_to_object_start(to); + fill_alignment_gap::>(region, start); + start + bytes + } + + fn get_reference_when_copied_to( + from: mmtk::util::ObjectReference, + to: mmtk::util::Address, + ) -> mmtk::util::ObjectReference { + let vmkit_from = VMKitObject::from(from); + let res_addr = to + OBJECT_REF_OFFSET + vmkit_from.hashcode_overhead::(); + debug_assert!(!res_addr.is_zero()); + // SAFETY: we just checked that the address is not zero + unsafe { + ObjectReference::from_raw_address_unchecked(res_addr) + } + } + + + fn get_type_descriptor(_reference: mmtk::util::ObjectReference) -> &'static [i8] { + unreachable!() + } + + fn get_align_offset_when_copied(object: mmtk::util::ObjectReference) -> usize { + VMKitObject::from_objref_nullable(Some(object)).get_offset_for_alignment::() + } + + fn get_align_when_copied(object: mmtk::util::ObjectReference) -> usize { + VMKitObject::from(object).alignment::() + } + + fn get_current_size(object: mmtk::util::ObjectReference) -> usize { + VMKitObject::from_objref_nullable(Some(object)).get_current_size::() + } + + fn get_size_when_copied(object: mmtk::util::ObjectReference) -> usize { + VMKitObject::from_objref_nullable(Some(object)).get_size_when_copied::() + } + + + fn ref_to_object_start(reference: mmtk::util::ObjectReference) -> mmtk::util::Address { + VMKitObject::from_objref_nullable(Some(reference)).object_start::() + } + + fn ref_to_header(reference: mmtk::util::ObjectReference) -> mmtk::util::Address { + reference.to_raw_address().offset(OBJECT_HEADER_OFFSET) + } + + fn dump_object(object: mmtk::util::ObjectReference) { + let _ = object; + } +} +impl VMKitObjectModel { + fn move_object(from_obj: VMKitObject, mut to: MoveTarget, num_bytes: usize) -> VMKitObject { + let mut copy_bytes = num_bytes; + let 
mut obj_ref_offset = OBJECT_REF_OFFSET; + let hash_state = from_obj.header::().hash_state(); + + // Adjust copy bytes and object reference offset based on hash state + match hash_state { + HashState::Hashed => { + copy_bytes -= size_of::(); // Exclude hash code from copy + if let MoveTarget::ToAddress(ref mut addr) = to { + *addr += size_of::(); // Adjust address for hash code + } + } + HashState::HashedAndMoved => { + obj_ref_offset += size_of::() as isize; // Adjust for larger header + } + _ => {} + } + + // Determine target address and object based on MoveTarget + let (to_address, to_obj) = match to { + MoveTarget::ToAddress(addr) => { + let obj = VMKitObject::from_address(addr + obj_ref_offset); + (addr, obj) + } + MoveTarget::ToObject(object) => { + let addr = object.as_address() + (-obj_ref_offset); + (addr, object) + } + }; + + let from_address = from_obj.as_address() + (-obj_ref_offset); + + // Perform the memory copy + unsafe { + std::ptr::copy( + from_address.to_ptr::(), + to_address.to_mut_ptr::(), + copy_bytes, + ); + } + + // Update hash state if necessary + if hash_state == HashState::Hashed { + unsafe { + let hash_code = from_obj.as_address().as_usize() >> LOG_BYTES_IN_ADDRESS; + to_obj + .as_address() + .offset(HASHCODE_OFFSET) + .store(hash_code as u64); + to_obj + .header::() + .set_hash_state(HashState::HashedAndMoved); + } + } else { + to_obj.header::().set_hash_state(HashState::Hashed); + } + + to_obj + } +} diff --git a/vmkit/src/object_model/compression.rs b/vmkit/src/object_model/compression.rs new file mode 100644 index 0000000..49c60c3 --- /dev/null +++ b/vmkit/src/object_model/compression.rs @@ -0,0 +1,131 @@ +//! Pointer compression support. 
+ +use std::cell::UnsafeCell; + +use mmtk::util::Address; + +use crate::{options::OPTIONS, VirtualMachine}; + +use super::object::{VMKitNarrow, VMKitObject}; + +pub const UNSCALED_OP_HEAP_MAX: u64 = u32::MAX as u64 + 1; + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum CompressionMode { + /// Use 32-bits oops without encoding when + /// NarrowOopHeapBaseMin + heap_size < 4Gb + UnscaledNarrowOp = 0, + /// Use zero based compressed oops with encoding when + /// NarrowOopHeapBaseMin + heap_size < 32Gb + ZeroBasedNarrowOp = 1, + /// Use compressed oops with disjoint heap base if + /// base is 32G-aligned and base > 0. This allows certain + /// optimizations in encoding/decoding. + /// Disjoint: Bits used in base are disjoint from bits used + /// for oops ==> oop = (cOop << 3) | base. One can disjoint + /// the bits of an oop into base and compressed oop.b + DisjointBaseNarrowOp = 2, + /// Use compressed oops with heap base + encoding. + HeapBasedNarrowOp = 3, +} + +pub struct CompressedOps { + pub base: Address, + pub shift: u32, + pub range: (Address, Address), +} + +struct CompressedOpsStorage(UnsafeCell); + +unsafe impl Send for CompressedOpsStorage {} +unsafe impl Sync for CompressedOpsStorage {} + +static COMPRESSED_OPS: CompressedOpsStorage = + CompressedOpsStorage(UnsafeCell::new(CompressedOps { + base: Address::ZERO, + shift: 0, + range: (Address::ZERO, Address::ZERO), + })); + +impl CompressedOps { + pub(crate) fn init() { + let start = mmtk::memory_manager::starting_heap_address(); + let end = mmtk::memory_manager::last_heap_address(); + if OPTIONS.tag_compressed_pointers { + let shift = 2; + let base = start.sub(4096); + + unsafe { + COMPRESSED_OPS.0.get().write(CompressedOps { + base, + shift, + range: (start, end), + }); + } + return; + } + let shift; + // Subtract a page because something can get allocated at heap base. + // This also makes implicit null checking work, because the + // memory+1 page below heap_base needs to cause a signal. 
+ // See needs_explicit_null_check. + // Only set the heap base for compressed oops because it indicates + // compressed oops for pstack code. + if end > unsafe { Address::from_usize(UNSCALED_OP_HEAP_MAX as usize) } { + // Didn't reserve heap below 4Gb. Must shift. + shift = VM::MIN_ALIGNMENT.trailing_zeros(); + } else { + shift = 0; + } + let max = (u32::MAX as u64 + 1) << VM::MIN_ALIGNMENT.trailing_zeros(); + let base = if end <= unsafe { Address::from_usize(max as usize) } { + // heap below 32Gb, can use base == 0 + Address::ZERO + } else { + start.sub(4096) + }; + + unsafe { + COMPRESSED_OPS.0.get().write(CompressedOps { + base, + shift, + range: (start, end), + }); + } + } + + pub fn base() -> Address { + unsafe { (*COMPRESSED_OPS.0.get()).base } + } + + pub fn shift() -> u32 { + unsafe { (*COMPRESSED_OPS.0.get()).shift } + } + + pub fn decode_raw(v: VMKitNarrow) -> VMKitObject { + debug_assert!(!v.is_null()); + let base = Self::base(); + VMKitObject::from_address(base + ((v.raw() as usize) << Self::shift())) + } + + pub fn decode(v: VMKitNarrow) -> VMKitObject { + if v.is_null() { + VMKitObject::NULL + } else { + Self::decode_raw(v) + } + } + + pub fn encode(v: VMKitObject) -> VMKitNarrow { + if v.is_null() { + unsafe { VMKitNarrow::from_raw(0) } + } else { + let pd = v.as_address().as_usize() - Self::base().as_usize(); + unsafe { VMKitNarrow::from_raw((pd >> Self::shift()) as u32) } + } + } + + pub const fn is_null(v: VMKitNarrow) -> bool { + v.raw() == 0 + } +} diff --git a/vmkit/src/object_model/header.rs b/vmkit/src/object_model/header.rs new file mode 100644 index 0000000..4c7ec19 --- /dev/null +++ b/vmkit/src/object_model/header.rs @@ -0,0 +1,91 @@ +use std::marker::PhantomData; + +use crate::VirtualMachine; +use easy_bitfield::*; + +/// Offset from allocation pointer to the actual object start. +pub const OBJECT_REF_OFFSET: isize = 8; +/// Object header behind object. 
+pub const OBJECT_HEADER_OFFSET: isize = -OBJECT_REF_OFFSET; +pub const HASHCODE_OFFSET: isize = -(OBJECT_REF_OFFSET + size_of::() as isize); + +pub type MetadataField = BitField; +pub type HashStateField = BitField; + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum HashState { + Unhashed, + Hashed, + HashedAndMoved, +} + +impl FromBitfield for HashState { + fn from_bitfield(value: u64) -> Self { + match value { + 0 => Self::Unhashed, + 1 => Self::Hashed, + 2 => Self::HashedAndMoved, + _ => unreachable!(), + } + } + + fn from_i64(value: i64) -> Self { + match value { + 0 => Self::Unhashed, + 1 => Self::Hashed, + 2 => Self::HashedAndMoved, + _ => unreachable!(), + } + } +} + +impl ToBitfield for HashState { + fn to_bitfield(self) -> u64 { + match self { + Self::Unhashed => 0, + Self::Hashed => 1, + Self::HashedAndMoved => 2, + } + } + + fn one() -> Self { + Self::Hashed + } + + fn zero() -> Self { + Self::Unhashed + } +} + +pub struct HeapObjectHeader { + pub metadata: AtomicBitfieldContainer, + pub marker: PhantomData, +} + +impl HeapObjectHeader { + pub fn new(metadata: VM::Metadata) -> Self { + Self { + metadata: AtomicBitfieldContainer::new(metadata.to_bitfield()), + marker: PhantomData, + } + } + + pub fn hash_state(&self) -> HashState { + self.metadata.read::() + } + + pub fn set_hash_state(&self, state: HashState) { + self.metadata.update_synchronized::(state); + } + + pub fn metadata(&self) -> VM::Metadata { + VM::Metadata::from_bitfield(self.metadata.read::() as _) + } + + pub fn set_metadata(&self, metadata: VM::Metadata) { + self.metadata.update_synchronized::(metadata.to_bitfield() as _); + } + + + +} \ No newline at end of file diff --git a/vmkit/src/object_model/metadata.rs b/vmkit/src/object_model/metadata.rs new file mode 100644 index 0000000..6e0608a --- /dev/null +++ b/vmkit/src/object_model/metadata.rs @@ -0,0 +1,149 @@ +pub use easy_bitfield::{FromBitfield, ToBitfield}; +use mmtk::{ + util::ObjectReference, + vm::{ObjectTracer, 
SlotVisitor}, +}; + +use crate::{mm::traits::ToSlot, VirtualMachine}; + +use super::object::VMKitObject; + +#[repr(C)] +pub struct GCMetadata { + pub trace: Trace, + pub instance_size: usize, + pub compute_size: Option usize>, + pub alignment: usize, +} + +#[derive(Debug)] +pub enum Trace { + ScanSlots(fn(VMKitObject, &mut dyn SlotVisitor)), + TraceObject(fn(VMKitObject, &mut dyn ObjectTracer)), + None, +} + +/// Metadata for objects in VM which uses VMKit. +/// +/// Main purpose of this trait is to provide VMKit a way to get GC metadata from the object. +/// It is also used to convert object references to metadata and vice versa if you store +/// metadata as an object field. +pub trait Metadata: ToBitfield + FromBitfield + ToSlot { + /// Size of the metadata in bits. Must be `<= 62`. + const METADATA_BIT_SIZE: usize; + + fn gc_metadata(&self) -> &'static GCMetadata; + + fn is_object(&self) -> bool; + + fn to_object_reference(&self) -> Option; + fn from_object_reference(reference: ObjectReference) -> Self; +} + +/// Maximum object size that can be handled by uncooperative GC (64GiB default). +/// +/// This is due to the fact that we store the object size in the metadata +/// and it needs to fit into 58 bits we give to runtime. By limiting the size +/// we get some free space for runtime to use as well. +pub const MAX_UNCOOPERATIVE_OBJECT_SIZE: usize = 1usize << 36; + +#[macro_export] +macro_rules! 
make_uncooperative_metadata { + ($max_obj_size: expr, $name: ident) => { + pub struct $name { + pub wsize: usize, + } + + const _: () = { + assert!($max_obj_size <= $crate::object_model::metadata::MAX_UNCOOPERATIVE_OBJECT_SIZE); + }; + + impl Metadata for $name { + const METADATA_BIT_SIZE: usize = 36; + + fn gc_metadata(&self) -> &'static GCMetadata { + &UNCOOPERATIVE_GC_META + } + + fn is_object(&self) -> bool { + false + } + + fn to_object_reference(&self) -> Option { + None + } + + fn from_object_reference(_: ObjectReference) -> Self { + unreachable!() + } + } + impl $crate::object_model::metadata::ToBitfield for $name { + fn to_bitfield(self) -> u64 { + self.wsize as u64 + } + } + + impl $crate::object_model::metadata::FromBitfield + for $name + { + fn from_bitfield(value: u64) -> Self { + Self { + wsize: value as usize, + } + } + } + }; +} + +impl ToBitfield for &'static GCMetadata { + fn to_bitfield(self) -> u64 { + let res = self as *const GCMetadata as usize as u64; + res + } + + fn one() -> Self { + unreachable!() + } + + fn zero() -> Self { + unreachable!() + } +} + +impl FromBitfield for &'static GCMetadata { + fn from_bitfield(value: u64) -> Self { + + unsafe { &*(value as usize as *const GCMetadata) } + } + + fn from_i64(_value: i64) -> Self { + unreachable!() + } +} + +impl Metadata for &'static GCMetadata { + const METADATA_BIT_SIZE: usize = 58; + + fn gc_metadata(&self) -> &'static GCMetadata { + *self + } + + fn is_object(&self) -> bool { + false + } + + fn to_object_reference(&self) -> Option { + None + } + + fn from_object_reference(_: ObjectReference) -> Self { + unreachable!() + } +} + +impl ToSlot for &'static GCMetadata { + fn to_slot(&self) -> Option { + None + } +} + diff --git a/vmkit/src/object_model/object.rs b/vmkit/src/object_model/object.rs new file mode 100644 index 0000000..4ee5d6b --- /dev/null +++ b/vmkit/src/object_model/object.rs @@ -0,0 +1,575 @@ +use crate::mm::traits::SlotExtra; +use crate::threading::Thread; +use 
crate::{mm::MemoryManager, VirtualMachine}; +use atomic::Atomic; +use mmtk::util::{ + constants::LOG_BYTES_IN_ADDRESS, conversions::raw_align_up, Address, ObjectReference, +}; + +use super::{ + compression::CompressedOps, + header::{ + HashState, HeapObjectHeader, HASHCODE_OFFSET, OBJECT_HEADER_OFFSET, OBJECT_REF_OFFSET, + }, + metadata::Metadata, +}; + +#[repr(transparent)] +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct VMKitObject(Address); + +impl From for VMKitObject { + fn from(value: ObjectReference) -> Self { + Self::from_address(value.to_raw_address()) + } +} + +impl TryInto for VMKitObject { + type Error = (); + + fn try_into(self) -> Result { + ObjectReference::from_raw_address(self.0).ok_or(()) + } +} +impl Into> for VMKitObject { + fn into(self) -> Option { + self.try_into().ok() + } +} + +impl VMKitObject { + /// The null `VMKitObject`. + pub const NULL: Self = Self(Address::ZERO); + + /// Creates a new `VMKitObject` from a given address. + /// + /// # Arguments + /// + /// * `address` - The address of the object. + /// + /// # Returns + /// + /// * `VMKitObject` - A new `VMKitObject` instance. + #[inline(always)] + pub fn from_address(address: Address) -> Self { + Self(address) + } + + /// Returns the address of the `VMKitObject`. + /// + /// # Returns + /// + /// * `Address` - The address of the object. + #[inline(always)] + pub fn as_address(self) -> Address { + self.0 + } + + pub unsafe fn as_object_unchecked(self) -> ObjectReference { + unsafe { ObjectReference::from_raw_address_unchecked(self.as_address()) } + } + + /// Creates a new `VMKitObject` from an optional object reference. + /// + /// # Arguments + /// + /// * `objref` - An optional object reference. + /// + /// # Returns + /// + /// * `VMKitObject` - A new `VMKitObject` instance. 
+ #[inline(always)] + pub fn from_objref_nullable(objref: Option) -> Self { + match objref { + Some(objref) => Self::from_address(objref.to_raw_address()), + None => Self::NULL, + } + } + + /// Checks if the `VMKitObject` is null. + /// + /// # Returns + /// + /// * `bool` - `true` if the object is null, `false` otherwise. + #[inline(always)] + pub fn is_null(self) -> bool { + self == Self::NULL + } + + /// Checks if the `VMKitObject` is not null. + /// + /// # Returns + /// + /// * `bool` - `true` if the object is not null, `false` otherwise. + #[inline(always)] + pub fn is_not_null(self) -> bool { + self != Self::NULL + } + + /// Returns a reference to the `HeapObjectHeader` of the `VMKitObject`. + /// + /// # Returns + /// + /// * `&HeapObjectHeader` - A reference to the header. + #[inline(always)] + pub fn header<'a, VM: VirtualMachine>(self) -> &'a HeapObjectHeader { + assert!(!self.is_null()); + unsafe { self.0.offset(OBJECT_HEADER_OFFSET).as_ref() } + } + + /// Returns the alignment of the `VMKitObject`. + /// + /// # Returns + /// + /// * `usize` - The alignment of the object. + pub fn alignment(&self) -> usize { + self.header::().metadata().gc_metadata().alignment + } + + /// Returns the number of bytes used by the `VMKitObject`. + /// + /// # Returns + /// + /// * `usize` - The number of bytes used. + #[inline(always)] + pub fn bytes_used(self) -> usize { + let metadata = self.header::().metadata().gc_metadata(); + let overhead = self.hashcode_overhead::(); + + if metadata.instance_size != 0 { + raw_align_up(metadata.instance_size + size_of::>(), align_of::()) + overhead + } else { + let Some(compute_size) = metadata.compute_size else { + panic!("compute_size is not set for object at {}", self.0); + }; + + raw_align_up(compute_size(self) + size_of::>(), align_of::()) + overhead + } + } + + /// Returns the number of bytes required when the `VMKitObject` is copied. + /// + /// # Returns + /// + /// * `usize` - The number of bytes required. 
+ #[inline(always)] + pub fn bytes_required_when_copied(&self) -> usize { + let metadata = self.header::().metadata().gc_metadata(); + let overhead = self.hashcode_overhead::(); + + raw_align_up(metadata.instance_size, align_of::()) + overhead + } + + /// Returns the overhead for the hashcode of the `VMKitObject`. + /// + /// # Arguments + /// + /// * `WHEN_COPIED` - A constant indicating whether the object is being copied. + /// + /// # Returns + /// + /// * `usize` - The hashcode overhead. + #[inline(always)] + pub fn hashcode_overhead(&self) -> usize { + let hash_state = self.header::().hash_state(); + + let has_hashcode = if WHEN_COPIED { + hash_state != HashState::Unhashed + } else { + hash_state == HashState::HashedAndMoved + }; + + if has_hashcode { + size_of::() + } else { + 0 + } + } + + /// Returns the real starting address of the `VMKitObject`. + /// + /// # Returns + /// + /// * `Address` - The starting address of the object. + #[inline(always)] + pub fn object_start(&self) -> Address { + let res = self + .0 + .offset(-(OBJECT_REF_OFFSET as isize + self.hashcode_overhead::() as isize)); + + res + } + + /// Returns the offset for alignment of the `VMKitObject`. + /// + /// # Returns + /// + /// * `usize` - The offset for alignment. + #[inline(always)] + pub fn get_offset_for_alignment(&self) -> usize { + size_of::>() + self.hashcode_overhead::() + } + + /// Returns the current size of the `VMKitObject`. + /// + /// # Returns + /// + /// * `usize` - The current size. + #[inline(always)] + pub fn get_current_size(&self) -> usize { + self.bytes_used::() + } + + /// Returns the size of the `VMKitObject` when it is copied. + /// + /// # Returns + /// + /// * `usize` - The size when copied. 
+ #[inline(always)] + pub fn get_size_when_copied(&self) -> usize { + self.bytes_required_when_copied::() + } + + pub fn hashcode(&self) -> usize { + let header = self.header::(); + match header.hash_state() { + HashState::HashedAndMoved => { + return unsafe { self.as_address().offset(HASHCODE_OFFSET).load() } + } + _ => (), + } + let hashcode = self.as_address().as_usize() >> LOG_BYTES_IN_ADDRESS; + header.set_hash_state(HashState::Hashed); + hashcode + } + + pub fn get_field_primitive( + &self, + offset: usize, + ) -> T + where + T: Copy + bytemuck::NoUninit + bytemuck::Pod, + { + unsafe { + debug_assert!( + offset < self.bytes_used::(), + "attempt to access field out of bounds" + ); + let ordering = if !VOLATILE { + atomic::Ordering::Relaxed + } else { + atomic::Ordering::SeqCst + }; + self.as_address() + .add(offset) + .as_ref::>() + .load(ordering) + } + } + + pub fn set_field_primitive( + &self, + offset: usize, + value: T, + ) where + T: Copy + bytemuck::NoUninit, + { + debug_assert!( + offset < self.bytes_used::(), + "attempt to access field out of bounds" + ); + unsafe { + let ordering = if !VOLATILE { + atomic::Ordering::Relaxed + } else { + atomic::Ordering::SeqCst + }; + self.as_address() + .add(offset) + .as_ref::>() + .store(value, ordering); + } + } + pub fn get_field_bool(&self, offset: usize) -> bool { + self.get_field_primitive::(offset) != 0 + } + + pub fn set_field_bool(&self, offset: usize, value: bool) { + self.set_field_primitive::(offset, if value { 1 } else { 0 }); + } + + pub fn get_field_u8(&self, offset: usize) -> u8 { + self.get_field_primitive::(offset) + } + + pub fn set_field_u8(&self, offset: usize, value: u8) { + self.set_field_primitive::(offset, value); + } + + pub fn get_field_u16(&self, offset: usize) -> u16 { + self.get_field_primitive::(offset) + } + + pub fn set_field_u16(&self, offset: usize, value: u16) { + self.set_field_primitive::(offset, value); + } + + pub fn get_field_u32(&self, offset: usize) -> u32 { + 
self.get_field_primitive::(offset) + } + + pub fn set_field_u32(&self, offset: usize, value: u32) { + self.set_field_primitive::(offset, value); + } + + pub fn get_field_u64(&self, offset: usize) -> u64 { + self.get_field_primitive::(offset) + } + + pub fn set_field_u64(&self, offset: usize, value: u64) { + self.set_field_primitive::(offset, value); + } + + pub fn get_field_i8(&self, offset: usize) -> i8 { + self.get_field_primitive::(offset) + } + + pub fn set_field_i8(&self, offset: usize, value: i8) { + self.set_field_primitive::(offset, value); + } + + pub fn get_field_i16(&self, offset: usize) -> i16 { + self.get_field_primitive::(offset) + } + + pub fn set_field_i16(&self, offset: usize, value: i16) { + self.set_field_primitive::(offset, value); + } + + pub fn get_field_i32(&self, offset: usize) -> i32 { + self.get_field_primitive::(offset) + } + + pub fn set_field_i32(&self, offset: usize, value: i32) { + self.set_field_primitive::(offset, value); + } + + pub fn get_field_i64(&self, offset: usize) -> i64 { + self.get_field_primitive::(offset) + } + + pub fn set_field_i64(&self, offset: usize, value: i64) { + self.set_field_primitive::(offset, value); + } + + pub fn get_field_f32(&self, offset: usize) -> f32 { + self.get_field_primitive::(offset) + } + + pub fn set_field_f32(&self, offset: usize, value: f32) { + self.set_field_primitive::(offset, value); + } + + pub fn get_field_f64(&self, offset: usize) -> f64 { + self.get_field_primitive::(offset) + } + + pub fn set_field_f64(&self, offset: usize, value: f64) { + self.set_field_primitive::(offset, value); + } + + pub fn get_field_isize(&self, offset: usize) -> isize { + self.get_field_primitive::(offset) + } + + pub fn set_field_isize(&self, offset: usize, value: isize) { + self.set_field_primitive::(offset, value); + } + + pub fn get_field_usize(&self, offset: usize) -> usize { + self.get_field_primitive::(offset) + } + + pub fn set_field_usize(&self, offset: usize, value: usize) { + 
self.set_field_primitive::(offset, value); + } + + pub unsafe fn set_field_object_no_write_barrier( + &self, + offset: usize, + value: VMKitObject, + ) { + self.set_field_primitive::(offset, value.as_address().as_usize()); + } + + pub fn slot_at(&self, offset: usize) -> VM::Slot { + VM::Slot::from_address(self.as_address() + offset) + } + + pub fn set_field_object( + self, + offset: usize, + value: VMKitObject, + ) { + let tls = Thread::::current(); + MemoryManager::object_reference_write_pre(tls, self, self.slot_at::(offset), value); + unsafe { + self.set_field_object_no_write_barrier::(offset, value); + } + MemoryManager::object_reference_write_post(tls, self, self.slot_at::(offset), value); + } + + /// Same as [`set_field_object`](Self::set_field_object) but sets + /// tagged value instead of object address. Accepts object address as a last + /// parameter to perform write barrier. + pub fn set_field_object_tagged( + self, + offset: usize, + value_to_set: usize, + object: VMKitObject, + ) { + let tls = Thread::::current(); + MemoryManager::object_reference_write_pre(tls, self, self.slot_at::(offset), object); + + self.set_field_primitive::(offset, value_to_set); + + MemoryManager::object_reference_write_post(tls, self, self.slot_at::(offset), object); + } + + pub fn get_field_object( + self, + offset: usize, + ) -> VMKitObject { + unsafe { + let addr = Address::from_usize(self.get_field_primitive::(offset)); + VMKitObject::from_address(addr) + } + } + + pub fn get_field_narrow( + self, + offset: usize, + ) -> VMKitNarrow { + unsafe { VMKitNarrow::from_raw(self.get_field_primitive::(offset)) } + } + + pub fn set_field_narrow( + self, + offset: usize, + value: VMKitNarrow, + ) { + let tls = Thread::::current(); + MemoryManager::object_reference_write_pre( + tls, + self, + self.slot_at::(offset), + value.to_object(), + ); + self.set_field_primitive::(offset, value.raw()); + MemoryManager::object_reference_write_post( + tls, + self, + self.slot_at::(offset), + 
value.to_object(), + ); + } + + pub unsafe fn set_field_narrow_no_write_barrier( + self, + offset: usize, + value: VMKitNarrow, + ) { + self.set_field_primitive::(offset, value.raw()); + } + + pub fn set_field_narrow_tagged( + self, + offset: usize, + value_to_set: u32, + object: VMKitNarrow, + ) { + let tls = Thread::::current(); + MemoryManager::object_reference_write_pre( + tls, + self, + self.slot_at::(offset), + object.to_object(), + ); + self.set_field_primitive::(offset, value_to_set); + MemoryManager::object_reference_write_post( + tls, + self, + self.slot_at::(offset), + object.to_object(), + ); + } +} + +/// Used as a parameter of `move_object` to specify where to move an object to. +pub enum MoveTarget { + /// Move an object to the address returned from `alloc_copy`. + ToAddress(Address), + /// Move an object to an `VMKitObject` pointing to an object previously computed from + /// `get_reference_when_copied_to`. + ToObject(VMKitObject), +} + +/// Narrow pointer to an object. This is used when pointer compression +/// is enabled. +#[repr(transparent)] +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct VMKitNarrow(u32); + +impl VMKitNarrow { + pub const NULL: Self = Self(0); + + /// Return the raw value of the narrow pointer. + pub const fn raw(self) -> u32 { + self.0 + } + + pub fn from_object(object: VMKitObject) -> Self { + Self::encode(object) + } + + pub fn encode(object: VMKitObject) -> Self { + CompressedOps::encode(object) + } + + pub fn decode(self) -> VMKitObject { + CompressedOps::decode(self) + } + + /// Create a new `VMKitNarrow` from a raw value. + /// + /// # Safety + /// + /// This function is unsafe because it assumes that the raw value is a valid narrow pointer. 
+ pub unsafe fn from_raw(raw: u32) -> Self { + Self(raw) + } + + pub const fn is_null(self) -> bool { + self.raw() == 0 + } + + pub fn to_address(self) -> Address { + CompressedOps::decode(self).as_address() + } + + pub fn to_object(self) -> VMKitObject { + CompressedOps::decode(self) + } + + pub fn header<'a, VM: VirtualMachine>(&'a self) -> &'a HeapObjectHeader { + self.to_object().header::() + } + + pub fn hashcode(&self) -> usize { + self.to_object().hashcode::() + } + + pub fn object_start(&self) -> Address { + self.to_object().object_start::() + } +} diff --git a/vmkit/src/options.rs b/vmkit/src/options.rs new file mode 100644 index 0000000..efbd31f --- /dev/null +++ b/vmkit/src/options.rs @@ -0,0 +1,33 @@ +use std::sync::LazyLock; + +use clap::Parser; + +#[derive(Parser)] +pub struct Options { + #[cfg(feature = "cooperative")] + #[clap(long, default_value_t = true)] + pub conservative_stacks: bool, + + /// Maximum amount of bytes to scan for interior pointers. + #[clap(long, default_value_t = 1024)] + pub interior_pointer_max_bytes: usize, + + #[clap(long, default_value_t = true)] + pub aslr: bool, + + #[clap(long, default_value_t = true)] + pub compressed_pointers: bool, + /// If true, the VM will guarantee two bits for the tag of compressed pointers. + /// + /// Requires MIN_ALIGNMENT to be 16. 
+ #[clap(long, default_value_t = true)] + pub tag_compressed_pointers: bool, + + #[clap(long, default_value_t = 256 * 1024 * 1024)] + pub max_heap_size: usize, + + #[clap(long, default_value_t = 64 * 1024 * 1024)] + pub min_heap_size: usize, +} + +pub static OPTIONS: LazyLock = LazyLock::new(Options::parse); diff --git a/vmkit/src/sync.rs b/vmkit/src/sync.rs new file mode 100644 index 0000000..340733f --- /dev/null +++ b/vmkit/src/sync.rs @@ -0,0 +1,195 @@ +use std::{ + mem::ManuallyDrop, + num::NonZeroU64, + ops::Deref, + sync::atomic::{AtomicU64, AtomicUsize, Ordering}, + time::Duration, +}; + +use parking_lot::{Condvar, Mutex, MutexGuard, WaitTimeoutResult}; + +use crate::{ + threading::{parked_scope, Thread, ThreadContext}, + VirtualMachine, +}; + +fn get_thread_id() -> NonZeroU64 { + thread_local! { + static KEY: u64 = 0; + } + KEY.with(|x| { + NonZeroU64::new(x as *const _ as u64).expect("thread-local variable address is null") + }) +} + +/// Implementation of a heavy lock and condition variable implemented using +/// the primitives available from the `parking_lot`. Currently we use +/// a `Mutex` and `Condvar`. +///

+/// It is perfectly safe to use this throughout the VM for locking. It is +/// meant to provide roughly the same functionality as ReentrantMutex combined with Condvar, +/// except: +///

    +///
  • This struct provides a faster slow path than ReentrantMutex.
  • +///
  • This struct provides a slower fast path than ReentrantMutex.
  • +///
  • This struct will work in the inner guts of the VM runtime because +/// it gives you the ability to lock and unlock, as well as wait and +/// notify, without using any other VM runtime functionality.
  • +///
  • This struct allows you to optionally block without letting the thread +/// system know that you are blocked. The benefit is that you can +/// perform synchronization without depending on VM thread subsystem functionality. +/// However, most of the time, you should use the methods that inform +/// the thread system that you are blocking. Methods that have the +/// `with_handshake` suffix will inform the thread system if you are blocked, +/// while methods that do not have the suffix will either not block +/// (as is the case with `unlock()` and `broadcast()`) +/// or will block without letting anyone know (like `lock_no_handshake()` +/// and `wait_no_handshake()`). Not letting the threading +/// system know that you are blocked may cause things like GC to stall +/// until you unblock.
  • +///
  • This struct does not provide mutable access to the protected data as it is unsound, +/// instead use `RefCell` to mutate the protected data.
  • +///
+pub struct Monitor { + mutex: Mutex, + cvar: Condvar, + rec_count: AtomicUsize, + holder: AtomicU64, +} + +impl Monitor { + pub fn new(value: T) -> Self { + Self { + mutex: Mutex::new(value), + cvar: Condvar::new(), + rec_count: AtomicUsize::new(0), + holder: AtomicU64::new(0), + } + } + + pub fn lock_no_handshake(&self) -> MonitorGuard { + let my_slot = get_thread_id().get(); + let guard = if self.holder.load(Ordering::Relaxed) != my_slot { + let guard = self.mutex.lock(); + self.holder.store(my_slot, Ordering::Release); + MonitorGuard { + monitor: self, + guard: ManuallyDrop::new(guard), + } + } else { + MonitorGuard { + monitor: self, + guard: unsafe { ManuallyDrop::new(self.mutex.make_guard_unchecked()) }, + } + }; + self.rec_count.fetch_add(1, Ordering::Relaxed); + guard + } + + pub fn notify(&self) { + self.cvar.notify_one(); + } + + pub fn notify_all(&self) { + self.cvar.notify_all(); + } + + pub unsafe fn relock_with_handshake( + &self, + rec_count: usize, + ) -> MonitorGuard<'_, T> { + let thread = Thread::::current(); + thread.context.save_thread_state(); + let guard = loop { + Thread::::enter_native(); + let lock = self.mutex.lock(); + if Thread::::attempt_leave_native_no_block() { + break lock; + } else { + drop(lock); + Thread::::leave_native(); + } + }; + + self.holder.store(get_thread_id().get(), Ordering::Relaxed); + self.rec_count.store(rec_count, Ordering::Relaxed); + MonitorGuard { + monitor: self, + guard: ManuallyDrop::new(guard), + } + } +} + +pub struct MonitorGuard<'a, T> { + monitor: &'a Monitor, + guard: ManuallyDrop>, +} + +impl Deref for MonitorGuard<'_, T> { + type Target = T; + + fn deref(&self) -> &Self::Target { + &self.guard + } +} + +impl<'a, T> MonitorGuard<'a, T> { + pub fn wait_no_handshake(&mut self) { + let rec_count = self.monitor.rec_count.swap(0, Ordering::Relaxed); + let holder = self.monitor.holder.swap(0, Ordering::Relaxed); + self.monitor.cvar.wait(&mut self.guard); + self.monitor.rec_count.store(rec_count, 
Ordering::Relaxed); + self.monitor.holder.store(holder, Ordering::Relaxed); + } + + pub fn wait_for_no_handshake(&mut self, timeout: Duration) -> WaitTimeoutResult { + let rec_count = self.monitor.rec_count.swap(0, Ordering::Relaxed); + let holder = self.monitor.holder.swap(0, Ordering::Relaxed); + let result = self.monitor.cvar.wait_for(&mut self.guard, timeout); + self.monitor.rec_count.store(rec_count, Ordering::Relaxed); + self.monitor.holder.store(holder, Ordering::Relaxed); + result + } + + pub fn notify(&self) { + self.monitor.cvar.notify_one(); + } + + pub fn notify_all(&self) { + self.monitor.cvar.notify_all(); + } + + pub fn monitor(&self) -> &Monitor { + self.monitor + } + + pub unsafe fn unlock_completely(&mut self) -> usize { + let result = self.monitor.rec_count.load(Ordering::Relaxed); + self.monitor.rec_count.store(0, Ordering::Relaxed); + self.monitor.holder.store(0, Ordering::Relaxed); + unsafe { + ManuallyDrop::drop(&mut self.guard); + } + result + } + + pub fn wait_with_handshake(mut self) -> Self { + let t = Thread::::current(); + t.context.save_thread_state(); + let rec_count = parked_scope::(|| { + self.wait_no_handshake(); + let rec_count = unsafe { self.unlock_completely() }; + rec_count + }); + unsafe { self.monitor.relock_with_handshake::(rec_count) } + } +} + +impl<'a, T> Drop for MonitorGuard<'a, T> { + fn drop(&mut self) { + if self.monitor.rec_count.fetch_sub(1, Ordering::Relaxed) == 1 { + self.monitor.holder.store(0, Ordering::Relaxed); + unsafe { ManuallyDrop::drop(&mut self.guard) }; + } + } +} diff --git a/vmkit/src/threading.rs b/vmkit/src/threading.rs new file mode 100644 index 0000000..197b58a --- /dev/null +++ b/vmkit/src/threading.rs @@ -0,0 +1,1547 @@ +use std::{ + cell::{Cell, OnceCell, RefCell, UnsafeCell}, + mem::{offset_of, MaybeUninit}, + panic::AssertUnwindSafe, + sync::{ + atomic::{AtomicBool, AtomicI32, AtomicI8, AtomicU64, AtomicUsize, Ordering}, + Arc, + }, + thread::JoinHandle, +}; + +use atomic::Atomic; +use 
mmtk::{ + util::{Address, VMMutatorThread, VMThread}, + vm::RootsWorkFactory, + BarrierSelector, Mutator, +}; + +use crate::{ + mm::{ + conservative_roots::ConservativeRoots, stack_bounds::StackBounds, tlab::TLAB, MemoryManager, + }, + object_model::compression::CompressedOps, + sync::{Monitor, MonitorGuard}, + VirtualMachine, +}; + +/// Threads use a state machine to indicate their current state and how they should +/// be treated in case of asynchronous requests like GC. The state machine has +/// several states and legal transitions between them. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] +#[repr(u8)] +pub enum ThreadState { + /// Thread has not yet started. This state holds right up until just before we + /// call `thread::spawn()`. + #[default] + New, + /// Thread is executing "normal" Managed code + InManaged, + /// A state used by the scheduler to mark that a thread is in privileged code + /// that does not need to synchronize with the collector. This is a special + /// state. As well, this state should only be entered unsafe code only. Typically, + /// this state is entered using a call to `enter_native()` just prior to idling + /// the thread; though it would not be wrong to enter it prior to any other + /// long-running activity that does not require interaction with the GC. + InNative, + /// Thread is in Managed code but is expected to block. The transition from InManaged + /// to InManagedToBlock happens as a result of an asynchronous call by the GC + /// or any other internal VM code that requires this thread to perform an + /// asynchronous activity (another example is the request to do an isync on RISCV64). + /// The point is that we're waiting for the thread to reach a safe point and + /// expect this to happen in bounded time; but if the thread were to escape to + /// native we want to know about it. 
Thus, transitions into native code while + /// in the InManagedToBlock state result in a notification (broadcast on the + /// thread's monitor) and a state change to BlockedInNative. Observe that it + /// is always safe to conservatively change InManaged to InManagedToBlock. + InManagedToBlock, + /// Thread is in native code, and is to block before returning to Managed code. + /// The transition from InNative to BlockedInNative happens as a result + /// of an asynchronous call by the GC or any other internal VM code that + /// requires this thread to perform an asynchronous activity (another example + /// is the request to do an isync on RISCV64). As well, code entering privileged + /// code that would otherwise go from InManaged to InNative will go to + /// BlockedInNative instead, if the state was InManagedToBlock. + ///

+ /// The point of this state is that the thread is guaranteed not to execute + /// any Managed code until: + ///

+///
+/// 1. The state changes to InNative, and
+/// 2. The thread gets a broadcast on its monitor.
+///
+ /// Observe that it is always safe to conservatively change InNative to + /// BlockedInNative. + BlockedInNative, + /// Thread has died. As in, it's no longer executing any Managed code and will + /// never do so in the future. Once this is set, the GC will no longer mark any + /// part of the thread as live; the thread's stack will be deleted. Note that + /// this is distinct from the about_to_terminate state. + Terminated, +} + +impl ThreadState { + pub fn not_running(&self) -> bool { + matches!(self, ThreadState::New | ThreadState::Terminated) + } +} + +unsafe impl bytemuck::NoUninit for ThreadState {} + +pub trait ThreadContext { + fn save_thread_state(&self); + /// Scan roots in the thread. + fn scan_roots(&self, factory: impl RootsWorkFactory); + /// Scan conservative roots in the thread. + /// + /// Note that you can also do this in `scan_roots` by constructing + /// `ConservativeRoots` manually, but this method is provided + /// to separate precise vs conservative roots code. + fn scan_conservative_roots(&self, croots: &mut ConservativeRoots); + + fn new(collector_context: bool) -> Self; + + /// Pre-yieldpoint hook. Invoked by the VM before entering [`yieldpoint`](Thread::yieldpoint) + /// and before locking the thread's monitor. + fn pre_yieldpoint(&self, where_from: i32, fp: Address) { + let _ = where_from; + let _ = fp; + } + + /// Yieldpoint hook. Invoked by the VM when the thread is at a yieldpoint. + /// + /// Thread's monitor is locked when this is called. + /// + /// `before_block` is true if this function was invoked before the thread blocks. + fn at_yieldpoint(&self, where_from: i32, fp: Address, before_block: bool) { + let _ = where_from; + let _ = fp; + let _ = before_block; + } + + /// Post-yieldpoint hook. Invoked by the VM after exiting [`yieldpoint`](Thread::yieldpoint) + /// and after unlocking the thread's monitor. 
+ fn post_yieldpoint(&self, where_from: i32, fp: Address) { + let _ = where_from; + let _ = fp; + } +} + +static ID: AtomicU64 = AtomicU64::new(0); + +/// A generic thread's execution context. +pub struct Thread { + /// Should the next executed yieldpoint be taken? Can be true for a variety of + /// reasons. See [Thread::yieldpoint]. + ///

+ /// To support efficient sampling of only prologue/epilogues we also encode + /// some extra information into this field. 0 means that the yieldpoint should + /// not be taken. >0 means that the next yieldpoint of any type should be taken + /// <0 means that the next prologue/epilogue yieldpoint should be taken + ///

+ /// Note the following rules: + ///

+///
+/// 1. If take_yieldpoint is set to 0 or -1 it is perfectly safe to set it to
+/// 1; this will have almost no effect on the system. Thus, setting
+/// take_yieldpoint to 1 can always be done without acquiring locks.
+///
+/// 2. Setting take_yieldpoint to any value other than 1 should be done while
+/// holding the thread's monitor().
+///
+/// 3. The exception to rule (2) above is that the yieldpoint itself sets
+/// take_yieldpoint to 0 without holding a lock - but this is done after it
+/// ensures that the yieldpoint is deferred by setting yieldpoint_request_pending
+/// to true.
+///
+ take_yieldpoint: AtomicI32, + + pub context: VM::ThreadContext, + pub tlab: UnsafeCell, + max_non_los_default_alloc_bytes: Cell, + barrier: Cell, + mmtk_mutator: UnsafeCell>>>, + has_collector_context: AtomicBool, + exec_status: Atomic, + ignore_handshakes_and_gc: AtomicBool, + /// Is the thread about to terminate? Protected by the thread's monitor. Note + /// that this field is not set atomically with the entering of the thread onto + /// the aboutToTerminate array - in fact it happens before that. When this + /// field is set to true, the thread's stack will no longer be scanned by GC. + /// Note that this is distinct from the [`Terminated`](ThreadState::Terminated) state. + is_about_to_terminate: AtomicBool, + /// Is this thread in the process of blocking? + is_blocking: AtomicBool, + /// Is the thread no longer executing user code? Protected by the monitor + /// associated with the Thread. + is_joinable: AtomicBool, + thread_id: u64, + index_in_manager: AtomicUsize, + + yieldpoints_enabled_count: AtomicI8, + yieldpoint_request_pending: AtomicBool, + at_yieldpoint: AtomicBool, + soft_handshake_requested: AtomicBool, + active_mutator_context: AtomicBool, + + is_blocked_for_gc: AtomicBool, + should_block_for_gc: AtomicBool, + /// The monitor of the thread. Protects access to the thread's state. + monitor: Monitor<()>, + communication_lock: Monitor<()>, + stack_bounds: OnceCell, +} + +unsafe impl Send for Thread {} +unsafe impl Sync for Thread {} + +impl Thread { + /// Create a new thread without checking threa type. + /// + /// # Safety + /// + /// This function is unsafe because it does not enforce a "thread" type + /// invariant. You can create only three types of threads: + /// 1) Collector threads: MMTk workers, they are actually created by VMKit itself. + /// 2) Mutators: created by the user (apart from main thread) + /// 3) Concurrent threads: these threads are created by the user and are not mutators nor collector threads. 
+ /// These threads do not allow access to heap objects, they can be used to perform blocking tasks. + /// + /// If thread type is mismatched, the behavior is undefined: it could lead to deadlock, leak or memory corruption + /// as we optimize thread creation. + pub unsafe fn new_unchecked( + ctx: Option, + collector_context: bool, + is_mutator: bool, + ) -> Arc { + let vm = VM::get(); + if collector_context { + assert!( + !vm.vmkit().are_collector_threads_spawned(), + "Attempt to create a collector thread after collector threads have been spawned" + ); + assert!(!is_mutator, "Collector threads cannot be mutators"); + } + + Arc::new(Self { + tlab: UnsafeCell::new(TLAB::new()), + stack_bounds: OnceCell::new(), + barrier: Cell::new(BarrierSelector::NoBarrier), + max_non_los_default_alloc_bytes: Cell::new(0), + take_yieldpoint: AtomicI32::new(0), + context: ctx.unwrap_or_else(|| VM::ThreadContext::new(collector_context)), + mmtk_mutator: UnsafeCell::new(MaybeUninit::uninit()), + has_collector_context: AtomicBool::new(collector_context), + exec_status: Atomic::new(ThreadState::New), + ignore_handshakes_and_gc: AtomicBool::new(!is_mutator || collector_context), + is_about_to_terminate: AtomicBool::new(false), + is_blocking: AtomicBool::new(false), + is_joinable: AtomicBool::new(false), + thread_id: ID.fetch_add(1, Ordering::SeqCst), + index_in_manager: AtomicUsize::new(0), + yieldpoints_enabled_count: AtomicI8::new(0), + yieldpoint_request_pending: AtomicBool::new(false), + at_yieldpoint: AtomicBool::new(false), + soft_handshake_requested: AtomicBool::new(false), + active_mutator_context: AtomicBool::new(is_mutator), + is_blocked_for_gc: AtomicBool::new(false), + should_block_for_gc: AtomicBool::new(false), + monitor: Monitor::new(()), + communication_lock: Monitor::new(()), + }) + } + + pub fn for_mutator(context: VM::ThreadContext) -> Arc { + unsafe { Self::new_unchecked(Some(context), false, true) } + } + + pub(crate) fn for_collector() -> Arc { + unsafe { 
Self::new_unchecked(None, true, false) } + } + + pub fn for_concurrent(context: VM::ThreadContext) -> Arc { + unsafe { + let thread = Self::new_unchecked(Some(context), false, false); + thread.set_ignore_handshakes_and_gc(true); + thread + } + } + + /*pub(crate) fn start_gc(self: &Arc, ctx: Box>>) { + unsafe { + self.set_exec_status(ThreadState::InNative); + let this = self.clone(); + std::thread::spawn(move || { + let vmkit = VM::get().vmkit(); + init_current_thread(this.clone()); + vmkit.thread_manager().add_thread(this.clone()); + mmtk::memory_manager::start_worker( + &vmkit.mmtk, + mmtk::util::VMWorkerThread(this.to_vm_thread()), + ctx, + ); + }); + } + }*/ + + /// Start execution of `self` by creating and starting a native thread. + pub fn start(self: &Arc, f: F) -> JoinHandle> + where + F: FnOnce() -> R + Send + 'static, + R: Send + 'static, + { + unsafe { + self.set_exec_status(ThreadState::InManaged); + let this = self.clone(); + std::thread::spawn(move || this.startoff(f)) + } + } + + pub fn to_vm_thread(&self) -> VMThread { + unsafe { std::mem::transmute(self) } + } + + pub fn stack_bounds(&self) -> &StackBounds { + self.stack_bounds.get().unwrap() + } + + pub fn barrier(&self) -> BarrierSelector { + self.barrier.get() + } + + pub fn max_non_los_default_alloc_bytes(&self) -> usize { + self.max_non_los_default_alloc_bytes.get() + } + + /// Begin execution of current thread by calling `run` method + /// on the provided context. 
+ fn startoff(self: &Arc, f: F) -> Option + where + F: FnOnce() -> R, + R: Send + 'static, + { + init_current_thread(self.clone()); + let constraints = VM::get().vmkit().mmtk.get_plan().constraints(); + self.max_non_los_default_alloc_bytes + .set(constraints.max_non_los_default_alloc_bytes); + self.barrier.set(constraints.barrier); + self.stack_bounds + .set(StackBounds::current_thread_stack_bounds()) + .unwrap(); + let vmkit = VM::get().vmkit(); + if !self.is_collector_thread() && !self.ignore_handshakes_and_gc() { + let mutator = mmtk::memory_manager::bind_mutator( + &vmkit.mmtk, + VMMutatorThread(Self::current().to_vm_thread()), + ); + unsafe { self.mmtk_mutator.get().write(MaybeUninit::new(*mutator)) }; + self.enable_yieldpoints(); + } + vmkit.thread_manager.add_thread(self.clone()); + + let _result = std::panic::catch_unwind(AssertUnwindSafe(|| f())); + + self.terminate(); + + _result.ok() + } + + fn terminate(&self) { + self.is_joinable.store(true, Ordering::Relaxed); + self.monitor.notify_all(); + self.add_about_to_terminate(); + } + + pub fn main(context: VM::ThreadContext, f: F) -> Option + where + F: FnOnce() -> R + Send + 'static, + R: Send + 'static, + { + let this = Thread::for_mutator(context); + + init_current_thread(this.clone()); + unsafe { this.set_exec_status(ThreadState::InManaged) }; + let vmkit = VM::get().vmkit(); + vmkit.thread_manager.add_main_thread(this.clone()); + mmtk::memory_manager::initialize_collection(&vmkit.mmtk, this.to_vm_thread()); + CompressedOps::init::(); + let _result = this.startoff(f); + _result + } + + /// Offset of the `take_yieldpoint` field. + /// + /// You can use this offset to emit code to check for the yieldpoint + /// inline, without calling `get_take_yieldpoint()`. 
+ pub const TAKE_YIELDPOINT_OFFSET: usize = offset_of!(Thread, take_yieldpoint); + + pub fn get_exec_status(&self) -> ThreadState { + self.exec_status.load(atomic::Ordering::Relaxed) + } + + pub fn id(&self) -> u64 { + self.thread_id + } + + /// Check if the thread is ignoring handshakes and GC. + pub fn ignore_handshakes_and_gc(&self) -> bool { + self.ignore_handshakes_and_gc + .load(atomic::Ordering::Relaxed) + } + + /// Allow thread to ignore handshakes and GC which allows it to run + /// concurrently to GC and other runtime functionality. + /// + /// # Safety + /// + /// This removes thread from root-set. All objects stored by this thread + /// must be rooted in some other way. + pub unsafe fn set_ignore_handshakes_and_gc(&self, ignore: bool) { + self.ignore_handshakes_and_gc + .store(ignore, atomic::Ordering::Relaxed); + } + + /// Set the execution status of the thread. + /// + /// # SAFETY + /// + /// This function is unsafe because it does not enforce the thread's monitor + /// nor does it notify VM of the state change. + /// + /// Use this function only when you are holding [`monitor()`](Self::monitor) lock. + pub unsafe fn set_exec_status(&self, status: ThreadState) { + self.exec_status.store(status, atomic::Ordering::Relaxed); + } + + /// Attempt to transition the execution status of the thread. + /// + /// # SAFETY + /// + /// This function is unsafe because it does not enforce the thread's monitor + /// nor does it notify VM of the state change. + /// + /// Use this function only when you are holding [`monitor()`](Self::monitor) lock. 
+ pub unsafe fn attempt_fast_exec_status_transition( + &self, + old_state: ThreadState, + new_state: ThreadState, + ) -> bool { + self.exec_status + .compare_exchange( + old_state, + new_state, + atomic::Ordering::Relaxed, + atomic::Ordering::Relaxed, + ) + .is_ok() + } + + pub fn is_about_to_terminate(&self) -> bool { + self.is_about_to_terminate.load(atomic::Ordering::Relaxed) + } + + /// Should the next executed yieldpoint be taken? Can be true for a variety of + /// reasons. See [Thread::yieldpoint]. + ///

+ /// To support efficient sampling of only prologue/epilogues we also encode + /// some extra information into this field. 0 means that the yieldpoint should + /// not be taken. >0 means that the next yieldpoint of any type should be taken + /// <0 means that the next prologue/epilogue yieldpoint should be taken + ///

+ /// Note the following rules: + ///

+///
+/// 1. If take_yieldpoint is set to 0 or -1 it is perfectly safe to set it to
+/// 1; this will have almost no effect on the system. Thus, setting
+/// take_yieldpoint to 1 can always be done without acquiring locks.
+///
+/// 2. Setting take_yieldpoint to any value other than 1 should be done while
+/// holding the thread's monitor().
+///
+/// 3. The exception to rule (2) above is that the yieldpoint itself sets
+/// take_yieldpoint to 0 without holding a lock - but this is done after it
+/// ensures that the yieldpoint is deferred by setting yieldpoint_request_pending
+/// to true.
+///
+ /// + /// # Returns + /// + /// Returns the value of `take_yieldpoint`. + pub fn take_yieldpoint(&self) -> i32 { + self.take_yieldpoint.load(atomic::Ordering::Relaxed) + } + + /// Set the take yieldpoint value. + /// + /// # SAFETY + /// + /// It is always safe to change `take_yieldpoint` to any value as [`yieldpoint()`](Self::yieldpoint) + /// will reset it to 0. Any behaviour exeucted inside `yieldpoint` requires one of block + /// adapters to acknowledge the yieldpoint request and this can't be done by solely calling `set_take_yieldpoint`. + pub fn set_take_yieldpoint(&self, value: i32) { + self.take_yieldpoint.store(value, atomic::Ordering::Relaxed); + } + + pub fn monitor(&self) -> &Monitor<()> { + &self.monitor + } + + pub fn communication_lock(&self) -> &Monitor<()> { + &self.communication_lock + } + + /// Check if the thread has block requests (for example, for suspension and GC). + /// If it does, clear the requests and marked the thread as blocked for that request. + /// If there were any block requests, do a notify_all() on the thread's monitor(). + /// + /// This is an internal method and should only be called from code that implements + /// thread blocking. The monitor() lock must be held for this method to work properly. + fn acknowledge_block_requests(&self) { + let had_some = VM::BlockAdapterList::acknowledge_block_requests(self); + if had_some { + self.monitor.notify_all(); + } + } + + /// Checks if the thread system has acknowledged that the thread is supposed + /// to be blocked. This will return true if the thread is actually blocking, or + /// if the thread is running native code but is guaranteed to block before + /// returning to Java. Only call this method when already holding the monitor(), + /// for two reasons: + /// + /// 1. This method does not acquire the monitor() lock even though it needs + /// to have it acquired given the data structures that it is accessing. + /// 2. 
You will typically want to call this method to decide if you need to + /// take action under the assumption that the thread is blocked (or not + /// blocked). So long as you hold the lock the thread cannot change state from + /// blocked to not blocked. + /// + /// # Returns + /// + /// Returns `true` if the thread is supposed to be blocked. + pub fn is_blocked(&self) -> bool { + VM::BlockAdapterList::is_blocked(self) + } + + /// Checks if the thread is executing managed code. A thread is executing managed + /// code if its `exec_status` is `InManaged` or `InManagedToBlock`, and if it is not + /// `about_to_terminate`, and if it is not blocked. Only call this method when already holding + /// the monitor(), and probably only after calling `set_blocked_exec_status()`, for two reasons: + /// 1. This method does not acquire the monitor() lock even though it needs + /// to have it acquired given the data structures that it is accessing. + /// 2. You will typically want to call this method to decide if you need to + /// take action under the assumption that the thread is running managed code (or not + /// running managed code). So long as you hold the lock - and you have called + /// `set_blocked_exec_status()` - the thread cannot change state from running-managed + /// to not-running-managed. + /// + /// # Returns + /// + /// Returns `true` if the thread is running managed code. + pub fn is_in_managed(&self) -> bool { + !self.is_blocking.load(atomic::Ordering::Relaxed) + && !self.is_about_to_terminate.load(atomic::Ordering::Relaxed) + && (self.get_exec_status() == ThreadState::InManaged + || self.get_exec_status() == ThreadState::InManagedToBlock) + } + + fn check_block_no_save_context(&self) { + let mut lock = self.monitor().lock_no_handshake(); + self.is_blocking.store(true, Ordering::Relaxed); + + loop { + // deal with block requests + self.acknowledge_block_requests(); + // are we blocked? 
+ if !self.is_blocked() { + break; + } + // what if a GC request comes while we're here for a suspend() + // request? + // answer: we get awoken, reloop, and acknowledge the GC block + // request. + lock.wait_no_handshake(); + } + + // SAFETY: We are holding the monitor lock. + unsafe { + self.set_exec_status(ThreadState::InManaged); + } + self.is_blocking.store(false, Ordering::Relaxed); + drop(lock); + } + + /// Check if the thread is supposed to block, and if so, block it. This method + /// will ensure that soft handshake requests are acknowledged or else + /// inhibited, that any blocking request is handled, that the execution state + /// of the thread (`exec_status`) is set to `InManaged` + /// once all blocking requests are cleared, and that other threads are notified + /// that this thread is in the middle of blocking by setting the appropriate + /// flag (`is_blocking`). Note that this thread acquires the + /// monitor(), though it may release it completely either by calling wait() or + /// by calling unlock_completely(). Thus, although it isn't generally a problem + /// to call this method while holding the monitor() lock, you should only do so + /// if the loss of atomicity is acceptable. + /// + /// Generally, this method should be called from the following four places: + /// + /// 1. The block() method, if the thread is requesting to block itself. + /// Currently such requests only come when a thread calls suspend(). Doing so + /// has unclear semantics (other threads may call resume() too early causing + /// a race) but must be supported because it's still part of the + /// JDK. Why it's safe: the block() method needs to hold the monitor() for the + /// time it takes it to make the block request, but does not need to continue + /// to hold it when it calls check_block(). Thus, the fact that check_block() + /// breaks atomicity is not a concern. + /// + /// 2. The yieldpoint. 
One of the purposes of a yieldpoint is to periodically + /// check if the current thread should be blocked. This is accomplished by + /// calling check_block(). Why it's safe: the yieldpoint performs several + /// distinct actions, all of which individually require the monitor() lock - + /// but the monitor() lock does not have to be held contiguously. Thus, the + /// loss of atomicity from calling check_block() is fine. + /// + /// 3. The "with_handshake" methods of HeavyCondLock. These methods allow you to + /// block on a mutex or condition variable while notifying the system that you + /// are not executing managed code. When these blocking methods return, they check + /// if there had been a request to block, and if so, they call check_block(). + /// Why it's safe: This is subtle. Two cases exist. The first case is when a + /// WithHandshake method is called on a HeavyCondLock instance that is not a thread + /// monitor(). In this case, it does not matter that check_block() may acquire + /// and then completely release the monitor(), since the user was not holding + /// the monitor(). However, this will break if the user is *also* holding + /// the monitor() when calling the WithHandshake method on a different lock. This case + /// should never happen because no other locks should ever be acquired when the + /// monitor() is held. Additionally: there is the concern that some other locks + /// should never be held while attempting to acquire the monitor(); the + /// HeavyCondLock ensures that checkBlock() is only called when that lock + /// itself is released. The other case is when a WithHandshake method is called on the + /// monitor() itself. This should only be done when using *your own* + /// monitor() - that is the monitor() of the thread your are running on. 
In + /// this case, the WithHandshake methods work because: (i) lock_with_handshake() only calls + /// check_block() on the initial lock entry (not on recursive entry), so + /// atomicity is not broken, and (ii) wait_with_handshake() and friends only call + /// check_block() after wait() returns - at which point it is safe to release + /// and reacquire the lock, since there cannot be a race with broadcast() once + /// we have committed to not calling wait() again. + pub fn check_block(&self) { + self.context.save_thread_state(); + self.check_block_no_save_context(); + } + + fn enter_native_blocked_impl(&self) { + let lock = self.monitor.lock_no_handshake(); + + unsafe { + self.set_exec_status(ThreadState::BlockedInNative); + } + + self.acknowledge_block_requests(); + + drop(lock); + } + + fn leave_native_blocked_impl(&self) { + self.check_block_no_save_context(); + } + + fn enter_native_blocked(&self) { + self.enter_native_blocked_impl(); + } + + fn leave_native_blocked(&self) { + self.leave_native_blocked_impl(); + } + + fn set_blocked_exec_status(&self) -> ThreadState { + let mut old_state; + loop { + old_state = self.get_exec_status(); + let new_state = match old_state { + ThreadState::InManaged => ThreadState::InManagedToBlock, + ThreadState::InNative => ThreadState::BlockedInNative, + _ => old_state, + }; + + if unsafe { self.attempt_fast_exec_status_transition(old_state, new_state) } { + break new_state; + } + } + } + /// Attempts to block the thread, and returns the state it is in after the attempt. + /// + /// If we're blocking ourselves, this will always return `InManaged`. If the thread + /// signals to us the intention to die as we are trying to block it, this will return + /// `Terminated`. NOTE: the thread's exec_status will not actually be `Terminated` at + /// that point yet. + /// + /// # Warning + /// This method is ridiculously dangerous, especially if you pass `asynchronous=false`. 
+ /// Waiting for another thread to stop is not in itself interruptible - so if you ask + /// another thread to block and they ask you to block, you might deadlock. + /// + /// # Safety + /// This function is unsafe because it modifies thread state without synchronization. + /// Other threads might cause a deadlock if current thread blocks itself and other thread + /// calls `resume()`. + /// + /// # Arguments + /// * `asynchronous` - If true, the request is asynchronous (i.e. the receiver is only notified). + /// If false, the caller waits for the receiver to block. + /// + /// # Returns + /// The new state of the thread after the block attempt. + pub fn block_unchecked>(&self, asynchronous: bool) -> ThreadState { + let mut result; + + let mut lock = self.monitor.lock_no_handshake(); + let token = A::request_block(self); + + if current_thread::().thread_id == self.thread_id { + self.check_block(); + result = self.get_exec_status(); + } else { + if self.is_about_to_terminate() { + result = ThreadState::Terminated; + } else { + self.take_yieldpoint.store(1, Ordering::Relaxed); + let new_state = self.set_blocked_exec_status(); + result = new_state; + + self.monitor.notify_all(); + + if new_state == ThreadState::InManagedToBlock { + if !asynchronous { + while A::has_block_request_with_token(self, token) + && !A::is_blocked(self) + && !self.is_about_to_terminate() + { + lock.wait_no_handshake(); + } + + if self.is_about_to_terminate() { + result = ThreadState::Terminated; + } else { + result = self.get_exec_status(); + } + } + } else if new_state == ThreadState::BlockedInNative { + A::clear_block_request(self); + A::set_blocked(self, true); + } + } + } + drop(lock); + result + } + /// Returns whether this thread is currently in a mutator context. + /// + /// A mutator context is one where the thread is actively performing application work, + /// as opposed to being blocked or performing GC-related activities. 
+ pub fn active_mutator_context(&self) -> bool { + self.active_mutator_context.load(atomic::Ordering::Relaxed) + } + + pub fn current() -> &'static Thread { + current_thread::() + } + + pub fn begin_pair_with<'a>( + &'a self, + other: &'a Thread, + ) -> (MonitorGuard<'a, ()>, MonitorGuard<'a, ()>) { + let guard1 = self.communication_lock.lock_no_handshake(); + let guard2 = other.communication_lock.lock_no_handshake(); + (guard1, guard2) + } + + pub fn begin_pair_with_current(&self) -> (MonitorGuard<'_, ()>, MonitorGuard<'_, ()>) { + self.begin_pair_with(current_thread::()) + } + + fn safe_block_impl>(&self, asynchronous: bool) -> ThreadState { + let (guard1, guard2) = self.begin_pair_with_current(); + // SAFETY: threads are paired, no deadlock can occur. + let result = self.block_unchecked::(asynchronous); + drop(guard1); + drop(guard2); + result + } + + /// Safe version of [`block_unchecked`] that blocks the thread synchronously. + /// + /// Prevents race with other threads by pairing current thread with the target thread. + pub fn block>(&self) -> ThreadState { + self.safe_block_impl::(false) + } + + /// Safe version of [`block_unchecked`] that blocks the thread asynchronously. + /// + /// Prevents race with other threads by pairing current thread with the target thread. 
+ pub fn async_block>(&self) -> ThreadState { + self.safe_block_impl::(true) + } + + pub fn is_blocked_for>(&self) -> bool { + A::is_blocked(self) + } + + pub fn enter_native() { + let t = Self::current(); + + let mut old_state; + loop { + old_state = t.get_exec_status(); + let new_state = if old_state == ThreadState::InManaged { + ThreadState::InNative + } else { + t.enter_native_blocked(); + return; + }; + + if unsafe { t.attempt_fast_exec_status_transition(old_state, new_state) } { + break; + } + } + } + + #[must_use = "If thread can't leave native state without blocking, call [leave_native](Thread::leave_native) instead"] + pub fn attempt_leave_native_no_block() -> bool { + let t = Self::current(); + + let mut old_state; + loop { + old_state = t.get_exec_status(); + let new_state = if old_state == ThreadState::InNative { + ThreadState::InManaged + } else { + return false; + }; + + if unsafe { t.attempt_fast_exec_status_transition(old_state, new_state) } { + break true; + } + } + } + + pub fn leave_native() { + if !Self::attempt_leave_native_no_block() { + Self::current().leave_native_blocked(); + } + } + + pub fn unblock>(&self) { + let lock = self.monitor.lock_no_handshake(); + A::clear_block_request(self); + A::set_blocked(self, false); + self.monitor.notify_all(); + drop(lock); + } + + pub fn yieldpoints_enabled(&self) -> bool { + self.yieldpoints_enabled_count.load(Ordering::Relaxed) == 1 + } + + pub fn enable_yieldpoints(&self) { + let val = self + .yieldpoints_enabled_count + .fetch_add(1, Ordering::Relaxed); + debug_assert!(val <= 1); + + if self.yieldpoints_enabled() && self.yieldpoint_request_pending.load(Ordering::Relaxed) { + self.take_yieldpoint.store(1, Ordering::Relaxed); + self.yieldpoint_request_pending + .store(false, Ordering::Relaxed); + } + } + + pub fn disable_yieldpoints(&self) { + self.yieldpoints_enabled_count + .fetch_sub(1, Ordering::Relaxed); + } + + pub fn from_vm_thread(vmthread: mmtk::util::VMThread) -> &'static Thread { + unsafe { 
std::mem::transmute(vmthread) } + } + + pub fn from_vm_mutator_thread(vmthread: mmtk::util::VMMutatorThread) -> &'static Thread { + unsafe { std::mem::transmute(vmthread) } + } + + pub fn from_vm_worker_thread(vmthread: mmtk::util::VMWorkerThread) -> &'static Thread { + unsafe { std::mem::transmute(vmthread) } + } + + pub unsafe fn mutator_unchecked(&self) -> &'static mut Mutator> { + unsafe { + let ptr = self.mmtk_mutator.get(); + (*ptr).assume_init_mut() + } + } + + pub fn mutator(&self) -> &'static mut Mutator> { + unsafe { + assert!(Thread::::current().thread_id == self.thread_id); + self.mutator_unchecked() + } + } + + pub fn is_collector_thread(&self) -> bool { + self.has_collector_context.load(atomic::Ordering::Relaxed) + } + + fn add_about_to_terminate(&self) { + let lock = self.monitor.lock_no_handshake(); + self.is_about_to_terminate + .store(true, atomic::Ordering::Relaxed); + self.active_mutator_context + .store(false, atomic::Ordering::Relaxed); + lock.notify_all(); + + mmtk::memory_manager::destroy_mutator(self.mutator()); + // WARNING! DANGER! Since we've set is_about_to_terminate to true, when we + // release this lock the GC will: + // 1) No longer scan the thread's stack (though it will *see* the + // thread's stack and mark the stack itself as live, without scanning + // it). + // 2) No longer include the thread in any mutator phases ... hence the + // need to ensure that the mutator context is flushed above. + // 3) No longer attempt to block the thread. + // Moreover, we can no longer do anything that leads to write barriers + // or allocation. 
+ drop(lock); + + let vmkit = VM::get().vmkit(); + unsafe { + self.mutator_unchecked().on_destroy(); + let x = self.mmtk_mutator.get(); + (*x).assume_init_drop(); + } + vmkit.thread_manager().add_about_to_terminate_current(); + } + + pub fn snapshot_handshake_threads(vm: &VM, visitor: &V) -> usize + where + V: SoftHandshakeVisitor, + { + let vmkit = vm.vmkit(); + let inner = vmkit.thread_manager.inner.lock_no_handshake(); + let inner = inner.borrow(); + let mut num_to_handshake = 0; + let handshake_threads = vmkit.thread_manager.handshake_lock.lock_no_handshake(); + let mut handshake_threads = handshake_threads.borrow_mut(); + for i in 0..inner.threads.len() { + if let Some(thread) = inner.threads[i].as_ref() { + if !thread.is_collector_thread() && visitor.include_thread(thread) { + num_to_handshake += 1; + handshake_threads.push(thread.clone()); + } + } + } + + drop(inner); + + num_to_handshake + } + + /// Tell each thread to take a yieldpoint and wait until all of them have done + /// so at least once. Additionally, call the visitor on each thread when making + /// the yieldpoint request; the purpose of the visitor is to set any additional + /// fields as needed to make specific requests to the threads that yield. Note + /// that the visitor's visit() method is called with both the + /// thread's monitor held, and the soft_handshake_data_lock held. + ///

+ pub fn soft_handshake(visitor: V) + where + V: SoftHandshakeVisitor, + { + let vm = VM::get(); + + let num_to_handshake = Self::snapshot_handshake_threads(vm, &visitor); + + let handshake_lock = vm.vmkit().thread_manager.handshake_lock.lock_no_handshake(); + let mut handshake_lock = handshake_lock.borrow_mut(); + + for thread in handshake_lock.drain(..num_to_handshake) { + let lock = thread.monitor().lock_no_handshake(); + let mut wait_for_this_thread = false; + if !thread.is_about_to_terminate() && visitor.check_and_signal(&thread) { + thread.set_blocked_exec_status(); + // Note that at this point if the thread tries to either enter or + // exit managed code, it will be diverted into either + // enter_native_blocked() or check_block(), both of which cannot do + // anything until they acquire the monitor() lock, which we now + // hold. Thus, the code below can, at its leisure, examine the + // thread's state and make its decision about what to do, fully + // confident that the thread's state is blocked from changing. + if thread.is_in_managed() { + // the thread is currently executing managed code, so we must ensure + // that it either: + // 1) takes the next yieldpoint and rendezvous with this soft + // handshake request (see yieldpoint), or + // 2) performs the rendezvous when leaving managed code + // (see enter_native_blocked, check_block, and add_about_to_terminate) + // either way, we will wait for it to get there before exiting + // this call, since the caller expects that after softHandshake() + // returns, no thread will be running Java code without having + // acknowledged. + thread + .soft_handshake_requested + .store(true, atomic::Ordering::Relaxed); + thread.take_yieldpoint.store(1, Ordering::Relaxed); + wait_for_this_thread = true; + } else { + // the thread is not in managed code (it may be blocked or it may be + // in native), so we don't have to wait for it since it will + // do the Right Thing before returning to managed code. 
essentially, + // the thread cannot go back to running managed code without doing whatever + // was requested because: + // A) we've set the execStatus to blocked, and + // B) we're holding its lock. + visitor.notify_stuck_in_native(&thread); + } + + drop(lock); + + // NOTE: at this point the thread may already decrement the + // softHandshakeLeft counter, causing it to potentially go negative. + // this is unlikely and completely harmless. + if wait_for_this_thread { + let lock = vm + .vmkit() + .thread_manager + .soft_handshake_data_lock + .lock_no_handshake(); + vm.vmkit() + .thread_manager + .soft_handshake_left + .fetch_add(1, atomic::Ordering::Relaxed); + drop(lock); + } + } + + // wait for all threads to reach the handshake + let mut lock = vm + .vmkit() + .thread_manager + .soft_handshake_data_lock + .lock_no_handshake(); + while vm + .vmkit() + .thread_manager + .soft_handshake_left + .load(atomic::Ordering::Relaxed) + > 0 + { + lock = lock.wait_with_handshake::(); + } + drop(lock); + + vm.vmkit().thread_manager().process_about_to_terminate(); + } + } + + /// Process a taken yieldpoint. + /// + /// # Parameters + /// + /// - `where_from`: The source of the yieldpoint (e.g. backedge) + /// - `fp`: The frame pointer of the service method that called this method + /// + /// Exposed as `extern "C-unwind"` to allow directly invoking it from JIT/AOT code. + pub extern "C-unwind" fn yieldpoint(where_from: i32, fp: Address) { + let thread = Thread::::current(); + let _was_at_yieldpoint = thread.at_yieldpoint.load(atomic::Ordering::Relaxed); + thread.at_yieldpoint.store(true, atomic::Ordering::Relaxed); + + // If thread is in critical section we can't do anything right now, defer + // until later + // we do this without acquiring locks, since part of the point of disabling + // yieldpoints is to ensure that locks are not "magically" acquired + // through unexpected yieldpoints. As well, this makes code running with + // yieldpoints disabled more predictable. 
Note furthermore that the only + // race here is setting takeYieldpoint to 0. But this is perfectly safe, + // since we are guaranteeing that a yieldpoint will run after we emerge from + // the no-yieldpoints code. At worst, setting takeYieldpoint to 0 will be + // lost (because some other thread sets it to non-0), but in that case we'll + // just come back here and reset it to 0 again. + if !thread.yieldpoints_enabled() { + thread + .yieldpoint_request_pending + .store(true, atomic::Ordering::Relaxed); + thread.take_yieldpoint.store(0, atomic::Ordering::Relaxed); + thread.at_yieldpoint.store(false, atomic::Ordering::Relaxed); + return; + } + + thread.context.pre_yieldpoint(where_from, fp); + + let lock = thread.monitor().lock_no_handshake(); + + if thread.take_yieldpoint() != 0 { + thread.set_take_yieldpoint(0); + thread.context.at_yieldpoint(where_from, fp, true); + // do two things: check if we should be blocking, and act upon + // handshake requests. + thread.check_block(); + } + + thread.context.at_yieldpoint(where_from, fp, false); + + drop(lock); + thread.context.post_yieldpoint(where_from, fp); + thread.at_yieldpoint.store(false, atomic::Ordering::Relaxed); + } +} + +thread_local! { + static CURRENT_THREAD: RefCell

= RefCell::new(Address::ZERO); +} + +pub fn current_thread() -> &'static Thread { + let addr = CURRENT_THREAD.with(|t| *t.borrow()); + + assert!(!addr.is_zero()); + unsafe { addr.as_ref() } +} + +pub fn try_current_thread() -> Option<&'static Thread> { + let addr = CURRENT_THREAD.with(|t| *t.borrow()); + + if addr.is_zero() { + None + } else { + Some(unsafe { addr.as_ref() }) + } +} + +pub(crate) fn init_current_thread(thread: Arc>) { + let thread = Arc::into_raw(thread); + CURRENT_THREAD.with(|t| *t.borrow_mut() = Address::from_ptr(thread)); +} + +pub(crate) fn deinit_current_thread() { + CURRENT_THREAD.with(|t| { + let mut threadptr = t.borrow_mut(); + { + let thread: Arc> = unsafe { Arc::from_raw(threadptr.to_ptr()) }; + drop(thread); + } + *threadptr = Address::ZERO; + }) +} + +pub struct ThreadManager { + inner: Monitor>>, + soft_handshake_left: AtomicUsize, + soft_handshake_data_lock: Monitor<()>, + handshake_lock: Monitor>>>>, +} + +struct ThreadManagerInner { + threads: Vec>>>, + free_thread_indices: Vec, + about_to_terminate: Vec>>, +} + +impl ThreadManagerInner { + pub fn new() -> Self { + Self { + threads: Vec::new(), + free_thread_indices: Vec::new(), + about_to_terminate: Vec::new(), + } + } +} + +impl ThreadManager { + pub fn new() -> Self { + Self { + inner: Monitor::new(RefCell::new(ThreadManagerInner::new())), + soft_handshake_left: AtomicUsize::new(0), + soft_handshake_data_lock: Monitor::new(()), + handshake_lock: Monitor::new(RefCell::new(Vec::new())), + } + } + + pub(crate) fn add_thread(&self, thread: Arc>) { + self.process_about_to_terminate(); + parked_scope::<_, VM>(|| { + let inner = self.inner.lock_no_handshake(); + let mut inner = inner.borrow_mut(); + + let idx = inner + .free_thread_indices + .pop() + .unwrap_or(inner.threads.len()); + thread + .index_in_manager + .store(idx, atomic::Ordering::Relaxed); + if idx >= inner.threads.len() { + inner.threads.push(Some(thread)); + } else { + inner.threads[idx] = Some(thread); + } + }); + } + 
+ pub(crate) fn add_main_thread(&self, thread: Arc>) { + // no need for parked scope, there's no VM activity yet. + let inner = self.inner.lock_no_handshake(); + let mut inner = inner.borrow_mut(); + assert!(inner.threads.is_empty()); + assert!(inner.free_thread_indices.is_empty()); + assert!(inner.about_to_terminate.is_empty()); + thread.index_in_manager.store(0, atomic::Ordering::Relaxed); + inner.threads.push(Some(thread)); + } + + pub(crate) fn add_about_to_terminate_current(&self) { + let inner = self.inner.lock_no_handshake(); + let mut inner = inner.borrow_mut(); + let index = Thread::::current() + .index_in_manager + .load(atomic::Ordering::Relaxed); + // do not remove thread from threads vector, GC may still + // scan some of thread state (apart from stack). + let thread_rc = inner.threads[index].clone().unwrap(); + inner.about_to_terminate.push(thread_rc); + deinit_current_thread::(); + } + + pub(crate) fn process_about_to_terminate(&self) { + 'restart: loop { + let lock = self.inner.lock_no_handshake(); + let mut lock = lock.borrow_mut(); + for i in 0..lock.about_to_terminate.len() { + let t = lock.threads[i].clone().unwrap(); + if t.get_exec_status() == ThreadState::Terminated { + lock.about_to_terminate.swap_remove(i); + let index = t.index_in_manager.load(atomic::Ordering::Relaxed); + lock.free_thread_indices.push(index); + lock.threads[index] = None; + + drop(lock); + continue 'restart; + } + } + + drop(lock); + + break; + } + } + + pub fn threads(&self) -> impl Iterator>> { + let inner = self.inner.lock_no_handshake(); + let inner = inner.borrow(); + inner + .threads + .clone() + .into_iter() + .flat_map(|t| t.into_iter()) + } + + /// Stop all mutator threads. This is currently intended to be run by a single thread. + /// + /// Fixpoint until there are no threads that we haven't blocked. Fixpoint is needed to + /// catch the (unlikely) case that a thread spawns another thread while we are waiting. 
+ pub fn block_all_mutators_for_gc(&self) -> Vec>> { + let mut handshake_threads = Vec::with_capacity(4); + loop { + let lock = self.inner.lock_no_handshake(); + let lock = lock.borrow(); + // (1) find all threads that need to be blocked for GC + + for i in 0..lock.threads.len() { + if let Some(t) = lock.threads[i].clone() { + if !t.is_collector_thread() && !t.ignore_handshakes_and_gc() { + handshake_threads.push(t.clone()); + } + } + } + + drop(lock); + // (2) Remove any threads that have already been blocked from the list. + handshake_threads.retain(|t| { + let lock = t.monitor().lock_no_handshake(); + if t.is_blocked_for::() + || t.block_unchecked::(true).not_running() + { + drop(lock); + false + } else { + drop(lock); + true + } + }); + + // (3) Quit trying to block threads if all threads are either blocked + // or not running (a thread is "not running" if it is NEW or TERMINATED; + // in the former case it means that the thread has not had start() + // called on it while in the latter case it means that the thread + // is either in the TERMINATED state or is about to be in that state + // real soon now, and will not perform any heap-related work before + // terminating). + if handshake_threads.is_empty() { + break; + } + // (4) Request a block for GC from all other threads. + while let Some(thread) = handshake_threads.pop() { + let lock = thread.monitor().lock_no_handshake(); + thread.block_unchecked::(false); + drop(lock); + } + } + // Deal with terminating threads to ensure that all threads are either dead to MMTk or stopped above. + self.process_about_to_terminate(); + + self.inner + .lock_no_handshake() + .borrow() + .threads + .iter() + .flatten() + .filter(|t| t.is_blocked_for::()) + .cloned() + .collect::>() + } + + /// Unblock all mutators blocked for GC. 
+ pub fn unblock_all_mutators_for_gc(&self) { + let mut handshake_threads = Vec::with_capacity(4); + let lock = self.inner.lock_no_handshake(); + let lock = lock.borrow(); + + for thread in lock.threads.iter() { + if let Some(thread) = thread { + if !thread.is_collector_thread() { + handshake_threads.push(thread.clone()); + } + } + } + + drop(lock); + + while let Some(thread) = handshake_threads.pop() { + let lock = thread.monitor().lock_no_handshake(); + thread.unblock::(); + drop(lock); + } + } +} + +pub trait BlockAdapter { + type Token: Copy + PartialEq; + fn is_blocked(thread: &Thread) -> bool; + fn set_blocked(thread: &Thread, value: bool); + + fn request_block(thread: &Thread) -> Self::Token; + fn has_block_request(thread: &Thread) -> bool; + fn has_block_request_with_token(thread: &Thread, token: Self::Token) -> bool; + fn clear_block_request(thread: &Thread); +} + +pub struct GCBlockAdapter; + +impl BlockAdapter for GCBlockAdapter { + type Token = bool; + + fn is_blocked(thread: &Thread) -> bool { + thread.is_blocked_for_gc.load(atomic::Ordering::Relaxed) + } + + fn set_blocked(thread: &Thread, value: bool) { + thread + .is_blocked_for_gc + .store(value, atomic::Ordering::Relaxed); + } + + fn request_block(thread: &Thread) -> Self::Token { + thread + .should_block_for_gc + .store(true, atomic::Ordering::Relaxed); + true + } + + fn has_block_request(thread: &Thread) -> bool { + thread.should_block_for_gc.load(atomic::Ordering::Relaxed) + } + + fn has_block_request_with_token(thread: &Thread, token: Self::Token) -> bool { + let _ = token; + thread.should_block_for_gc.load(atomic::Ordering::Relaxed) + } + + fn clear_block_request(thread: &Thread) { + thread + .should_block_for_gc + .store(false, atomic::Ordering::Relaxed); + } +} + +pub trait BlockAdapterList: Sized { + fn acknowledge_block_requests(thread: &Thread) -> bool; + fn is_blocked(thread: &Thread) -> bool; +} + +macro_rules! 
block_adapter_list { + ($(($($t: ident),*))*) => { + $( + impl),*> BlockAdapterList for ($($t),*) { + fn acknowledge_block_requests(thread: &Thread) -> bool { + let mut had_some = false; + $( + if $t::has_block_request(thread) { + $t::set_blocked(thread, true); + $t::clear_block_request(thread); + had_some = true; + } + )* + + had_some + } + + fn is_blocked(thread: &Thread) -> bool { + let mut is_blocked = false; + + $( + is_blocked |= $t::is_blocked(thread); + )* + + is_blocked + } + } + + )* + }; +} + +impl BlockAdapter for () { + type Token = (); + fn clear_block_request(thread: &Thread) { + let _ = thread; + } + + fn has_block_request(thread: &Thread) -> bool { + let _ = thread; + false + } + + fn has_block_request_with_token(thread: &Thread, token: Self::Token) -> bool { + let _ = thread; + let _ = token; + false + } + + fn is_blocked(thread: &Thread) -> bool { + let _ = thread; + false + } + + fn request_block(thread: &Thread) -> Self::Token { + let _ = thread; + () + } + + fn set_blocked(thread: &Thread, value: bool) { + let _ = thread; + let _ = value; + } +} + +block_adapter_list!((X0, X1)(X0, X1, X2)(X0, X1, X2, X3)(X0, X1, X2, X3, X4)( + X0, X1, X2, X3, X4, X5 +)(X0, X1, X2, X3, X4, X5, X6)(X0, X1, X2, X3, X4, X5, X6, X7)( + X0, X1, X2, X3, X4, X5, X6, X7, X8 +)(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9)( + X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10 +)(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11)( + X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12 +)(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13)( + X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14 +)( + X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15 +)( + X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16 +)( + X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17 +)( + X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18 +)( + X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, 
X10, X11, X12, X13, X14, X15, X16, X17, X18, X19 +)( + X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20 +)( + X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, + X21 +)( + X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, + X21, X22 +)( + X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, + X21, X22, X23 +)( + X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, + X21, X22, X23, X24 +)( + X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, + X21, X22, X23, X24, X25 +)); + +/// Execute the given function in a parked scope. +/// +/// Parked scope puts current thread to `InNative` state and then puts it back to `InManaged` state after the function returns. +/// Code inside `f` must not access any managed objects nor allocate any objects. +pub fn parked_scope(f: impl FnOnce() -> R) -> R { + Thread::::enter_native(); + let result = f(); + Thread::::leave_native(); + result +} + +/// Provides a skeleton implementation for use in soft handshakes. +///

+/// During a soft handshake, the requesting thread waits for all mutator threads +/// (i.e. non-gc threads) to perform a requested action. +pub trait SoftHandshakeVisitor { + /// Sets whatever flags need to be set to signal that the given thread should + /// perform some action when it acknowledges the soft handshake. + ///

+ /// This method is only called for threads for which {@link #includeThread(RVMThread)} + /// is {@code true}. + ///

+ /// This method is called with the thread's monitor held, but while the + /// thread may still be running. This method is not called on mutators that + /// have indicated that they are about to terminate. + /// + /// # Returns + /// `false` if not interested in this thread, `true` otherwise. + /// Returning `true` will cause a soft handshake request to be put through. + fn check_and_signal(&self, t: &Arc>) -> bool; + + /// Called when it is determined that the thread is stuck in native. While + /// this method is being called, the thread cannot return to running managed + /// code. As such, it is safe to perform actions "on the thread's behalf". + ///

+ /// This implementation does nothing. + fn notify_stuck_in_native(&self, t: &Arc>) { + let _ = t; + } + + /// Checks whether to include the specified thread in the soft handshake. + ///

+ /// This method will never see any threads from the garbage collector because + /// those are excluded from the soft handshake by design. + ///

+ /// This implementation always returns `true`. + /// + /// # Parameters + /// * `t` - The thread to check for inclusion + /// + /// # Returns + /// `true` if the thread should be included. + fn include_thread(&self, t: &Arc>) -> bool { + let _ = t; + true + } +} + +impl>) -> bool> SoftHandshakeVisitor for F { + fn check_and_signal(&self, t: &Arc>) -> bool { + self(t) + } +}