Witch of Git - ivy/blob - rt/src/lib.rs
[rt] Change is_<type> predicates to tag checks
use crate::{int::ObjInt, lam::ObjLam};
use std::sync::atomic::{AtomicU32, Ordering};

pub mod int;
pub mod lam;
pub mod sys;

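// POSIX file descriptors for the standard streams; only STDERR is used here
// (by ivy_abort), and the leading underscores mark the others as unused.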
const _STDIN: i32 = 0;
const _STDOUT: i32 = 1;
const STDERR: i32 = 2;

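/// Prints a formatted trace line to stderr when the `IVY_RT_TRACE`
/// environment variable is set; otherwise does nothing.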
#[macro_export]
macro_rules! trace {
    ($fmt:literal $(, $arg:expr)* $(,)?) => {
        if std::env::var("IVY_RT_TRACE").is_ok() {
            eprintln!($fmt, $($arg),*);
        }
    }
}

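/// Type tag stored in the `ObjHeader` of every boxed object. Unboxed
/// (immediate) integers carry no header; `Obj::tag` reports them as `Int`
/// directly.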
#[repr(u8)]
#[derive(PartialEq, Eq, Clone, Copy)]
pub enum ObjTag {
    Lam = 0,
    Int = 1,
}

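/// Header at the start of every boxed object: a one-byte type tag, explicit
/// padding, and an atomic reference count.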
#[repr(C)]
pub struct ObjHeader {
    tag: ObjTag,
    _pad: [u8; 3],
    rc: AtomicU32,
}

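/// A runtime value is a single 64-bit word: the all-zero null value, an
/// immediate integer (low bit set), or a pointer to a boxed object that
/// begins with an `ObjHeader`.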
#[derive(Clone, Copy)]
#[repr(C)]
pub union Obj {
    int: i64,
    header: *mut ObjHeader,
    box_lam: *mut ObjLam,
    box_int: *mut ObjInt,
}

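/// Prints the raw 64-bit representation of a value and returns it unchanged.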
#[no_mangle]
pub unsafe extern "C" fn ivy_debug(obj: Obj) -> Obj {
    println!("DEBUG {:016x}", obj.int);
    obj
}

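/// Writes `msg` to stderr and terminates the process with exit code 1.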
#[no_mangle]
pub unsafe extern "C" fn ivy_abort(msg: *const u8, len: usize) -> ! {
    sys::write(STDERR, msg, len);
    sys::exit(1);
}

#[no_mangle]
pub unsafe extern "C" fn ivy_exit(code: i32) -> ! {
    sys::exit(code)
}

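/// Frees a boxed object's allocation directly, without adjusting reference
/// counts or releasing the objects it refers to; null and immediates are
/// ignored.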
#[no_mangle]
pub unsafe extern "C" fn ivy_free(obj: Obj) {
    if !obj.is_box() {
        return;
    }
    sys::free(obj.header as *mut u8)
}

#[no_mangle]
pub unsafe extern "C" fn ivy_incref(obj: Obj) {
    obj.incref();
}

#[no_mangle]
pub unsafe extern "C" fn ivy_decref(obj: Obj) {
    obj.decref();
}

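/// Returns null and immediates as-is. For a boxed lambda, allocates a new
/// block of the same size, copies the object's bytes into it, and resets the
/// copy's reference count; copying boxed integers is not implemented yet.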
#[no_mangle]
pub unsafe extern "C" fn ivy_clone(obj: Obj) -> Obj {
    if obj.is_null() || !obj.is_box() {
        return obj;
    }
    match obj.tag() {
        None => unreachable!(),
        Some(ObjTag::Int) => {
            unimplemented!("copying boxed integers")
        }
        Some(ObjTag::Lam) => {
            let lam = &*obj.box_lam;
            let size = lam.size();
            let data = sys::malloc(size);
            core::ptr::copy(obj.box_lam as *const u8, data, size);
            let box_hdr = data as *mut ObjHeader;
            *(*box_hdr).rc.get_mut() = 0;
            trace!("COPY {:016x} {:016x}", obj.int, box_hdr as usize);
            let box_lam = data as *mut ObjLam;
            Obj { box_lam }
        }
    }
}

impl Obj {
    fn is_null(self) -> bool {
        unsafe { self.int == 0 }
    }

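    /// A value is boxed when it is non-null and its low bit is clear;
    /// immediate integers always have their low bit set.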
    fn is_box(self) -> bool {
        !self.is_null() && unsafe { self.int & 1 == 0 }
    }

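    /// Null has no tag; boxed values read their tag from the header, and any
    /// other word is an immediate integer.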
    unsafe fn tag(self) -> Option<ObjTag> {
        if self.is_null() {
            None
        } else if self.is_box() {
            Some((*self.header).tag)
        } else {
            Some(ObjTag::Int)
        }
    }

    unsafe fn incref(self) {
        if !self.is_box() {
            return;
        }
        // Ordering::Relaxed is appropriate here, since we assume that each thread with access to a
        // reference owns at least one reference (rather than simply borrowing it). Therefore,
        // another thread cannot decrement it to 0 while we are performing this increment (since we
        // own a reference), so we only need consistency and not ordering.
        (*self.header).rc.fetch_add(1, Ordering::Relaxed);
    }

    unsafe fn decref(self) {
        if !self.is_box() {
            return;
        }
        // Ordering::AcqRel is appropriate here. I believe we need the Acquire in order to ensure
        // we see all previous increments/decrements, so we can properly see that the decref is
        // decrementing to 0, and we need the Release in order to ensure that we see all writes to
        // the memory before we deallocate.
        // (Check against 1 instead of 0 since we're loading the old refcount.)
        if (*self.header).rc.fetch_sub(1, Ordering::AcqRel) == 1 {
            self.dealloc();
        }
    }

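    /// Releases the references a boxed object holds (a lambda's params and
    /// upvars), then frees its allocation. Reached when `decref` drops the
    /// count to zero.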
    unsafe fn dealloc(self) {
        if !self.is_box() {
            return;
        }
        match self.tag() {
            None | Some(ObjTag::Int) => (),
            Some(ObjTag::Lam) => {
                let lam = &mut *self.box_lam;
                for param in lam.params() {
                    param.decref();
                }
                for upvar in lam.upvars() {
                    upvar.decref();
                }
            }
        }
        sys::free(self.header as *mut u8);
    }
}