[rt] Split the runtime into multiple modules
[ivy] / rt / src / lib.rs
use crate::{int::ObjInt, lam::ObjLam};
use std::sync::atomic::{AtomicU32, Ordering};

pub mod int;
pub mod lam;
pub mod sys;

// File descriptors for the process's standard streams; only stderr is used here.
const _STDIN: i32 = 0;
const _STDOUT: i32 = 1;
const STDERR: i32 = 2;

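/// Logs a formatted trace line to stderr when the `IVY_RT_TRACE` environment variable is
/// set, e.g. `trace!("COPY {:016x} {:016x}", obj.int, box_hdr as usize);` as in `ivy_clone`.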
#[macro_export]
macro_rules! trace {
    ($fmt:literal $(, $arg:expr)* $(,)?) => {
        if std::env::var("IVY_RT_TRACE").is_ok() {
            eprintln!($fmt, $($arg),*);
        }
    }
}

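/// Discriminant stored in every heap object's header: a lambda or a boxed integer.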
#[repr(u8)]
#[derive(PartialEq, Eq)]
pub enum ObjTag {
    Lam = 0,
    Int = 1,
}

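/// Common header at the start of every heap allocation: the tag byte, padding to align the
/// reference count, and the atomic reference count itself.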
#[repr(C)]
pub struct ObjHeader {
    tag: ObjTag,
    _pad: [u8; 3],
    rc: AtomicU32,
}

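/// A single machine word holding either an immediate value or a pointer to a heap object.
/// The encoding (see the `is_*` predicates below): an all-zero word is null, a word with the
/// low bit set is an unboxed integer, and any other even value is a pointer to an allocation
/// that begins with an `ObjHeader`.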
#[derive(Clone, Copy)]
#[repr(C)]
pub union Obj {
    int: i64,
    header: *mut ObjHeader,
    box_lam: *mut ObjLam,
    box_int: *mut ObjInt,
}

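// The `#[no_mangle]` `extern "C"` functions below are the runtime's entry points, presumably
// invoked from code generated by the Ivy compiler.

/// Prints an object's raw 64-bit representation and passes the object through unchanged.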
#[no_mangle]
pub unsafe extern "C" fn ivy_debug(obj: Obj) -> Obj {
    println!("DEBUG {:016x}", obj.int);
    obj
}

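/// Writes `msg` to stderr and terminates the process with exit code 1.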
#[no_mangle]
pub unsafe extern "C" fn ivy_abort(msg: *const u8, len: usize) -> ! {
    sys::write(STDERR, msg, len);
    sys::exit(1);
}

#[no_mangle]
pub unsafe extern "C" fn ivy_exit(code: i32) -> ! {
    sys::exit(code)
}

/// Frees a boxed object's allocation directly, without decrementing any references it holds;
/// immediates and null are ignored.
#[no_mangle]
pub unsafe extern "C" fn ivy_free(obj: Obj) {
    if !obj.is_box() {
        return;
    }
    sys::free(obj.header as *mut u8)
}

#[no_mangle]
pub unsafe extern "C" fn ivy_incref(obj: Obj) {
    obj.incref();
}

#[no_mangle]
pub unsafe extern "C" fn ivy_decref(obj: Obj) {
    obj.decref();
}

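/// Returns a shallow copy of a boxed lambda with its reference count reset; null and
/// immediate integers are returned as-is, and copying boxed integers is not implemented yet.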
#[no_mangle]
pub unsafe extern "C" fn ivy_clone(obj: Obj) -> Obj {
    if obj.is_null() || !obj.is_box() {
        return obj;
    }
    if obj.is_int() {
        unimplemented!("copying boxed integers")
    }
    let lam = &*obj.box_lam;
    let size = lam.size();
    let data = sys::malloc(size);
    core::ptr::copy(obj.box_lam as *const u8, data, size);
    let box_hdr = data as *mut ObjHeader;
    *(*box_hdr).rc.get_mut() = 0;
    trace!("COPY {:016x} {:016x}", obj.int, box_hdr as usize);
    let box_lam = data as *mut ObjLam;
    Obj { box_lam }
}

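// Tag-bit predicates and reference-counting helpers used by the entry points above.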
impl Obj {
    fn is_null(self) -> bool {
        unsafe { self.int == 0 }
    }

    /// A non-null value with the low bit clear is a pointer to a heap object.
    fn is_box(self) -> bool {
        !self.is_null() && unsafe { self.int & 1 == 0 }
    }

    /// Either an immediate (unboxed) integer or a boxed value whose header tag is `Int`.
    unsafe fn is_int(self) -> bool {
        !self.is_null() && (!self.is_box() || (*self.header).tag == ObjTag::Int)
    }

    unsafe fn is_lam(self) -> bool {
        self.is_box() && (*self.header).tag == ObjTag::Lam
    }

    unsafe fn incref(self) {
        if !self.is_box() {
            return;
        }
        // Ordering::Relaxed is appropriate here, since we assume that each thread with access
        // to a reference owns at least one reference (rather than simply borrowing it).
        // Therefore, another thread cannot decrement it to 0 while we are performing this
        // increment (since we own a reference), so we only need consistency and not ordering.
        (*self.header).rc.fetch_add(1, Ordering::Relaxed);
    }

    unsafe fn decref(self) {
        if !self.is_box() {
            return;
        }
        // Ordering::AcqRel is appropriate here. We need the Release so that all of our prior
        // writes to the object are ordered before the reference count drops, and we need the
        // Acquire on the final decrement so that we observe every other thread's writes to
        // the object before we deallocate it.
        // (Check against 1 instead of 0 since fetch_sub returns the old refcount.)
        if (*self.header).rc.fetch_sub(1, Ordering::AcqRel) == 1 {
            self.dealloc();
        }
    }

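    /// Releases the references held by a lambda's bound params and captured upvars, then
    /// frees the allocation itself.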
    unsafe fn dealloc(self) {
        if !self.is_box() {
            return;
        }
        if self.is_lam() {
            let lam = &mut *self.box_lam;
            for param in lam.params_mut() {
                param.decref();
            }
            for upvar in lam.upvars_mut() {
                upvar.decref();
            }
        }
        sys::free(self.header as *mut u8);
    }
}