// rt/src/lib.rs: the Ivy language runtime.
use std::sync::atomic::{AtomicU32, Ordering};

const _STDOUT: i32 = 1;
const STDERR: i32 = 2;

// Print a runtime trace line to stderr, but only when the IVY_RT_TRACE
// environment variable is set.
macro_rules! trace {
    ($fmt:literal $(, $arg:expr)* $(,)?) => {
        if std::env::var("IVY_RT_TRACE").is_ok() {
            eprintln!($fmt, $($arg),*);
        }
    };
}

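// For example, `trace!("APP {:016x} {:016x}", fun.int, arg.int)` logs an
// application event when tracing is enabled; with the variable unset, the
// call only performs the environment lookup and prints nothing.
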
// The tag distinguishes the kinds of heap objects.
#[derive(PartialEq, Eq)]
#[repr(u8)]
enum ObjTag {
    Lam,
    Int,
}

// Common prefix of every heap object: refcount, then tag. ObjLam repeats
// this prefix so a *mut ObjHeader and a *mut ObjLam can alias.
#[repr(C)]
pub struct ObjHeader {
    rc: AtomicU32,
    tag: ObjTag,
}

#[repr(C)]
pub struct ObjLam {
    rc: AtomicU32,
    tag: ObjTag,
    params: u16,
    filled: u16,
    upvars: u16,
    func: extern "C" fn(&ObjLam) -> Obj,
}

// A value is a single machine word, viewed either as a raw integer or as a
// pointer to a heap object.
#[derive(Clone, Copy)]
#[repr(C)]
pub union Obj {
    int: i64,
    header: *mut ObjHeader,
    box_lam: *mut ObjLam,
}

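// Encoding sketch (follows from is_null/is_box/is_int below, plus the fact
// that malloc returns word-aligned pointers):
//   all zero bits        null
//   low bit set          immediate integer, value in the upper 63 bits
//   low bit clear        pointer to an ObjHeader-prefixed heap object
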
mod sys {
    // Raw libc bindings; the runtime does its own allocation with
    // malloc/free and writes directly to file descriptors.
    extern "C" {
        pub fn write(fd: i32, buf: *const u8, len: usize) -> isize;
        pub fn exit(code: i32) -> !;
        pub fn malloc(size: usize) -> *mut u8;
        pub fn free(ptr: *mut u8);
    }
}

#[no_mangle]
pub unsafe extern "C" fn ivy_debug(obj: Obj) -> Obj {
    println!("DEBUG {:016x}", obj.int);
    obj
}

#[no_mangle]
pub unsafe extern "C" fn ivy_abort(msg: *const u8, len: usize) -> ! {
    sys::write(STDERR, msg, len);
    // Any nonzero status works here; 1 is the conventional failure code.
    sys::exit(1)
}

#[no_mangle]
pub unsafe extern "C" fn ivy_exit(code: i32) -> ! {
    sys::exit(code)
}

#[no_mangle]
pub unsafe extern "C" fn ivy_check_int(obj: Obj) {
    if !obj.is_int() {
        panic!("ivy_check_int called with non-integer object {:016x}.", obj.int);
    }
}

#[no_mangle]
pub unsafe extern "C" fn ivy_check_lam(obj: Obj) {
    if !obj.is_lam() {
        panic!("ivy_check_lam called with non-lambda object {:016x}.", obj.int);
    }
}

// This should probably be a macro rather than a call?
// But it might be good to have it for completeness.
// Or maybe it's valuable if we want to support big integers.
#[no_mangle]
pub unsafe extern "C" fn ivy_make_int(value: i64) -> Obj {
    // Shift the value up and set the low tag bit: is_box treats a clear low
    // bit as a heap pointer, so immediates must keep the bit set.
    Obj { int: (value << 1) | 1 }
}

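// For example, ivy_make_int(21) produces 0x2b: 21 shifted left is 0x2a, and
// the tag bit makes it odd, so it can never be mistaken for a pointer or for
// the all-zero null value.
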
#[no_mangle]
pub unsafe extern "C" fn ivy_make_lam(
    func: extern "C" fn(&ObjLam) -> Obj,
    params: u16,
    upvars: u16,
) -> Obj {
    let size = ObjLam::size_of(params, upvars);
    let box_lam = sys::malloc(size) as *mut ObjLam;
    box_lam.write(ObjLam {
        rc: AtomicU32::new(0),
        tag: ObjTag::Lam,
        params,
        filled: 0,
        upvars,
        func,
    });
    // Null out the param and upvar slots that follow the header.
    (*box_lam)
        .raw_fields_mut()
        .write_bytes(0, (params + upvars) as usize);
    trace!("MAKE {:016x} {:016x}", box_lam as usize, func as usize);
    Obj { box_lam }
}

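// A lambda is one contiguous malloc'd block: the ObjLam header, then
// `params` argument slots, then `upvars` capture slots, each one Obj wide
// (see raw_fields_mut/params_mut/upvars_mut below).
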
#[no_mangle]
pub unsafe extern "C" fn ivy_free(obj: Obj) {
    if obj.is_null() || !obj.is_box() {
        return;
    }
    sys::free(obj.header as *mut u8)
}

#[no_mangle]
pub unsafe extern "C" fn ivy_incref(obj: Obj) {
    obj.incref();
}

#[no_mangle]
pub unsafe extern "C" fn ivy_decref(obj: Obj) {
    obj.decref();
}

#[no_mangle]
pub unsafe extern "C" fn ivy_clone(obj: Obj) -> Obj {
    // Nulls and immediates are plain words; "cloning" them is a copy.
    if obj.is_null() || !obj.is_box() {
        return obj;
    }
    if (*obj.header).tag == ObjTag::Int {
        unimplemented!("copying boxed integers")
    }
    let lam = &*obj.box_lam;
    let size = lam.size();
    let data = sys::malloc(size);
    core::ptr::copy(obj.box_lam as *const u8, data, size);
    let box_lam = data as *mut ObjLam;
    // The copy starts with its own fresh refcount.
    *(*box_lam).rc.get_mut() = 0;
    trace!("COPY {:016x} {:016x}", obj.int, box_lam as usize);
    Obj { box_lam }
}

// Apply without mutating the original: copy the lambda, then fill the next
// argument slot in the copy.
#[no_mangle]
pub unsafe extern "C" fn ivy_app(fun: Obj, arg: Obj) -> Obj {
    ivy_app_mut(ivy_clone(fun), arg)
}

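// Copy-on-apply is what lets a partial application be shared: applying the
// same `fun` to two different arguments clones it twice and leaves the
// original's filled slots untouched.
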
#[no_mangle]
pub unsafe extern "C" fn ivy_app_mut(fun: Obj, arg: Obj) -> Obj {
    trace!("APP {:016x} {:016x}", fun.int, arg.int);
    if !fun.is_lam() {
        panic!("ivy_app called with a non-lam as the function: {:016x}.", fun.int);
    }
    let lam = &mut *fun.box_lam;
    if lam.filled < lam.params {
        // Expecting a real argument for the next unfilled slot.
        if arg.is_null() {
            trace!(
                "Lam @ {:016x} ({:016x}) has {} of {} arguments filled.",
                fun.int, lam.func as usize, lam.filled, lam.params
            );
            panic!("ivy_app called with a null arg.");
        }
        let idx = lam.filled as usize;
        lam.params_mut()[idx] = arg;
        lam.filled += 1;
    } else if lam.params == lam.filled {
        // An already-saturated lambda (e.g. a 0-arity thunk) is forced by
        // applying a null arg.
        if !arg.is_null() {
            panic!("ivy_app called for a 0-arity application with a non-null arg.");
        }
    }
    if lam.params == lam.filled {
        // Every slot is filled: run the body.
        trace!("RUN {:016x}", fun.int);
        (lam.func)(lam)
    } else {
        // Still partial: hand the updated lambda back.
        trace!("UPD8 {:016x}", fun.int);
        fun
    }
}

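// Worked example, assuming a two-parameter lambda `f`:
//   let g = ivy_app(f, a);   // stores a in a copy of f, traces UPD8
//   let r = ivy_app(g, b);   // fills the last slot, traces RUN, calls func
// A zero-parameter thunk is instead forced by applying it to a null Obj.
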
impl Obj {
    fn is_null(self) -> bool {
        unsafe { self.int == 0 }
    }

    fn is_box(self) -> bool {
        // Heap pointers come from malloc and are word-aligned, so a set low
        // bit can only be an immediate integer.
        !self.is_null() && unsafe { self.int & 1 == 0 }
    }

    unsafe fn is_int(self) -> bool {
        // True for immediates (every non-box is an integer) and for boxed
        // integers, which carry ObjTag::Int.
        !self.is_null() && (!self.is_box() || (*self.header).tag == ObjTag::Int)
    }

    unsafe fn is_lam(self) -> bool {
        self.is_box() && (*self.header).tag == ObjTag::Lam
    }

    unsafe fn incref(self) {
        if !self.is_box() {
            return;
        }
        // Ordering::Relaxed is appropriate here, since we assume that each thread with access to a
        // reference owns at least one reference (rather than simply borrowing it). Therefore,
        // another thread cannot decrement it to 0 while we are performing this increment (since we
        // own a reference), so we only need consistency and not ordering.
        (*self.header).rc.fetch_add(1, Ordering::Relaxed);
    }

    unsafe fn decref(self) {
        if !self.is_box() {
            return;
        }
        // Ordering::AcqRel is appropriate here. I believe we need the Acquire in order to ensure
        // we see all previous increments/decrements, so we can properly see that the decref is
        // decrementing to 0, and we need the Release in order to ensure that we see all writes to
        // the memory before we deallocate.
        // (Check against 1 instead of 0 since we're loading the old refcount.)
        if (*self.header).rc.fetch_sub(1, Ordering::AcqRel) == 1 {
            self.dealloc();
        }
    }

    unsafe fn dealloc(self) {
        if (*self.header).tag == ObjTag::Lam {
            // Release everything the lambda still holds before freeing it.
            let lam = &mut *self.box_lam;
            for param in lam.params_mut() {
                param.decref();
            }
            for upvar in lam.upvars_mut() {
                upvar.decref();
            }
        }
        sys::free(self.header as *mut u8);
    }
}

impl ObjLam {
    fn size_of(params: u16, upvars: u16) -> usize {
        // Header plus one 8-byte Obj slot per param and per upvar.
        core::mem::size_of::<ObjLam>() + params as usize * 8 + upvars as usize * 8
    }

    fn size(&self) -> usize {
        ObjLam::size_of(self.params, self.upvars)
    }

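    // For example, a lambda with 2 params and 1 upvar occupies
    // size_of::<ObjLam>() + 3 * 8 bytes.
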
    unsafe fn raw_fields_mut(&mut self) -> *mut Obj {
        // The field slots start immediately after the ObjLam header itself.
        (self as *mut ObjLam).add(1) as *mut Obj
    }

    unsafe fn params_mut(&mut self) -> &mut [Obj] {
        let ptr = self.raw_fields_mut();
        core::slice::from_raw_parts_mut(ptr, self.params as usize)
    }

    unsafe fn upvars_mut(&mut self) -> &mut [Obj] {
        let ptr = self.raw_fields_mut().add(self.params as usize);
        core::slice::from_raw_parts_mut(ptr, self.upvars as usize)
    }
}