// rt/src/lib.rs: the ivy language runtime.
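//! Runtime support for compiled ivy programs.
//!
//! An `Obj` is a single 64-bit word: zero is null, an odd word is an
//! immediate integer (the value shifted left one bit with the low tag bit
//! set), and an even, nonzero word is a pointer to a reference-counted heap
//! box that begins with an `ObjHeader` (a refcount plus a tag).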
use std::sync::atomic::{Ordering, AtomicU32};
const _STDOUT: i32 = 1;
const STDERR: i32 = 2;
macro_rules! trace {
    ($fmt:literal $(, $arg:expr)* $(,)?) => {
        if std::env::var("IVY_RT_TRACE").is_ok() {
            eprintln!($fmt, $($arg),*);
        }
    };
}
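// Any value of `IVY_RT_TRACE` (for example `IVY_RT_TRACE=1 ./program`) turns
// tracing on; the entry points below log a short tag ("MAKE", "COPY", "APP",
// "RUN", "UPD8") plus the raw object words involved.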
#[derive(PartialEq, Eq)]
#[repr(u8)]
enum ObjTag {
    Int,
    Lam,
}
#[repr(C)]
pub struct ObjHeader {
    rc: AtomicU32,
    tag: ObjTag,
}

// An `ObjLam` starts with the same fields as `ObjHeader`, so a box can be
// inspected through either view; its `params` and `upvars` `Obj` slots are
// stored inline immediately after the struct.
#[repr(C)]
pub struct ObjLam {
    rc: AtomicU32,
    tag: ObjTag,
    func: extern "C" fn(&ObjLam) -> Obj,
    params: u16,
    upvars: u16,
    filled: u16,
}
#[derive(Clone, Copy)]
#[repr(C)]
pub union Obj {
    int: i64,
    header: *mut ObjHeader,
    box_lam: *mut ObjLam,
}
mod sys {
    extern "C" {
        pub fn write(fd: i32, buf: *const u8, len: usize) -> isize;
        pub fn exit(code: i32) -> !;
        pub fn malloc(size: usize) -> *mut u8;
        pub fn free(ptr: *mut u8);
    }
}
63 pub unsafe extern "C" fn ivy_debug(obj
: Obj
) -> Obj
{
64 println
!("DEBUG {:016x}", obj
.int
);
69 pub unsafe extern "C" fn ivy_abort(msg
: *const u8, len
: usize) -> ! {
70 sys
::write(STDERR
, msg
, len
);
75 pub unsafe extern "C" fn ivy_exit(code
: i32) -> ! {
80 pub unsafe extern "C" fn ivy_check_int(obj
: Obj
) {
82 panic
!("ivy_check_int called with non-integer object {:016x}.", obj
.int
);
87 pub unsafe extern "C" fn ivy_check_lam(obj
: Obj
) {
89 panic
!("ivy_check_lam called with non-lambda object {:016x}.", obj
.int
);
// This should probably be a macro rather than a call?
// But it might be good to have it for completeness.
// Or maybe it's valuable if we want to support big integers.
97 pub unsafe extern "C" fn ivy_make_int(value
: i64) -> Obj
{
98 Obj
{ int
: value
<< 1 }
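// Worked example of the tagging above: ivy_make_int(5) stores
// (5 << 1) | 1 == 0x000000000000000b, so `ivy_debug` prints
// "DEBUG 000000000000000b" and `is_box` reports false for it.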
102 pub unsafe extern "C" fn ivy_make_lam(func
: extern "C" fn(&ObjLam
) -> Obj
, params
: u16, upvars
: u16) -> Obj
{
103 let size
= ObjLam
::size_of(params
, upvars
);
104 let box_lam
= sys
::malloc(size
) as *mut ObjLam
;
105 box_lam
.write(ObjLam
{
108 rc
: AtomicU32
::new(0),
115 .write_bytes(0, (params
+ upvars
) as usize);
116 trace
!("MAKE {:016x} {:016x}", box_lam
as usize, func
as usize);
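// Resulting heap layout (every slot is one 8-byte `Obj`):
//
//     [ rc | tag | func | params | upvars | filled ]   <- ObjLam header
//     [ param 0 ] ... [ param params-1 ]
//     [ upvar 0 ] ... [ upvar upvars-1 ]
//
// which is exactly what `ObjLam::size_of` below accounts for.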
121 pub unsafe extern "C" fn ivy_free(obj
: Obj
) {
125 sys
::free(obj
.header
as *mut u8)
129 pub unsafe extern "C" fn ivy_incref(obj
: Obj
) {
134 pub unsafe extern "C" fn ivy_decref(obj
: Obj
) {
139 pub unsafe extern "C" fn ivy_clone(obj
: Obj
) -> Obj
{
140 if obj
.is
_n
ull
() || !obj
.is
_box
() {
144 unimplemented
!("copying boxed integers")
146 let lam
= &*obj
.box_lam
;
147 let size
= lam
.size();
148 let data
= sys
::malloc(size
);
149 core
::ptr
::copy(obj
.box_lam
as *const u8, data
, size
);
150 let box_lam
= data
as *mut ObjLam
;
151 *(*box_lam
).rc
.get_mut() = 0;
152 trace
!("COPY {:016x} {:016x}", obj
.int
, box_lam
as usize);
157 pub unsafe extern "C" fn ivy_app(fun
: Obj
, arg
: Obj
) -> Obj
{
158 ivy_app_mut(ivy_clone(fun
), arg
)
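// `ivy_app` fills arguments into a copy, so the original lam is left intact
// and can be applied to a different argument later; `ivy_app_mut` below is
// the in-place variant it delegates to.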
162 pub unsafe extern "C" fn ivy_app_mut(fun
: Obj
, arg
: Obj
) -> Obj
{
163 trace
!("APP {:016x} {:016x}", fun
.int
, arg
.int
);
165 panic
!("ivy_app called with a non-lam as the function: {:016x}.", fun
.int
);
167 let lam
= &mut *fun
.box_lam
;
168 if lam
.filled
< lam
.params
{
171 "Lam @ {:016x} ({:016x}) has {} of {} arguments filled.",
172 fun
.int
, lam
.func
as usize, lam
.filled
, lam
.params
174 panic
!("ivy_app called with a null arg.");
177 let idx
= lam
.filled
as usize;
178 lam
.params_mut()[idx
] = arg
;
180 } else if lam
.params
== lam
.filled
{
182 panic
!("ivy_app called for a 0-arity application with a non-null arg.");
186 if lam
.params
== lam
.filled
{
187 trace
!("RUN {:016x}", fun
.int
);
190 trace
!("UPD8 {:016x}", fun
.int
);
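// Call-protocol sketch (the names here are illustrative, not part of this
// file): for a code-generated two-parameter function `add_impl`,
//
//     let add = ivy_make_lam(add_impl, 2, 0);
//     let inc = ivy_app(add, ivy_make_int(1)); // fills slot 0, stays a lam
//     let sum = ivy_app(inc, ivy_make_int(2)); // fills slot 1, runs add_impl
//
// A lam whose parameters are all filled (including a 0-arity thunk) is
// forced by applying a null `Obj`.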
impl Obj {
    fn is_null(self) -> bool {
        unsafe { self.int == 0 }
    }

    fn is_box(self) -> bool {
        !self.is_null() && unsafe { self.int & 1 == 0 }
    }
    unsafe fn is_int(self) -> bool {
        !self.is_null() && (!self.is_box() || (*self.header).tag == ObjTag::Int)
    }

    unsafe fn is_lam(self) -> bool {
        self.is_box() && (*self.header).tag == ObjTag::Lam
    }
    unsafe fn incref(self) {
        if !self.is_box() {
            return;
        }
        // Ordering::Relaxed is appropriate here, since we assume that each thread with access to a
        // reference owns at least one reference (rather than simply borrowing it). Therefore,
        // another thread cannot decrement it to 0 while we are performing this increment (since we
        // own a reference), so we only need consistency and not ordering.
        (*self.header).rc.fetch_add(1, Ordering::Relaxed);
    }
    unsafe fn decref(self) {
        if !self.is_box() {
            return;
        }
        // Ordering::AcqRel is appropriate here. I believe we need the Acquire in order to ensure
        // we see all previous increments/decrements, so we can properly see that the decref is
        // decrementing to 0, and we need the Release in order to ensure that we see all writes to
        // the memory before we deallocate.
        // (Check against 1 instead of 0 since we're loading the old refcount.)
        if (*self.header).rc.fetch_sub(1, Ordering::AcqRel) == 1 {
            self.dealloc();
        }
    }
    unsafe fn dealloc(self) {
        if (*self.header).tag == ObjTag::Int {
            unimplemented!("deallocating boxed integers")
        }
        // Release the lam's children before freeing its own allocation.
        let lam = &mut *self.box_lam;
        for param in lam.params_mut() {
            param.decref();
        }
        for upvar in lam.upvars_mut() {
            upvar.decref();
        }
        sys::free(self.header as *mut u8);
    }
}
impl ObjLam {
    fn size_of(params: u16, upvars: u16) -> usize {
        core::mem::size_of::<ObjLam>() + params as usize * 8 + upvars as usize * 8
    }

    fn size(&self) -> usize {
        ObjLam::size_of(self.params, self.upvars)
    }

    unsafe fn raw_fields_mut(&mut self) -> *mut Obj {
        (self as *mut ObjLam).add(1) as *mut Obj
    }

    unsafe fn params_mut(&mut self) -> &mut [Obj] {
        let ptr = self.raw_fields_mut();
        core::slice::from_raw_parts_mut(ptr, self.params as usize)
    }

    unsafe fn upvars_mut(&mut self) -> &mut [Obj] {
        let ptr = self.raw_fields_mut().add(self.params as usize);
        core::slice::from_raw_parts_mut(ptr, self.upvars as usize)
    }
}
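
// A minimal sanity-check sketch, not part of the original runtime: it
// assumes a target where `malloc`/`free` come from libc, and exercises the
// integer tagging and the `ObjLam::size_of` arithmetic above.
#[cfg(test)]
mod tests {
    use super::*;

    // Trivial stand-in for a code-generated entry point.
    extern "C" fn nop(_lam: &ObjLam) -> Obj {
        Obj { int: 0 }
    }

    #[test]
    fn int_tagging_round_trips() {
        unsafe {
            let five = ivy_make_int(5);
            assert!(five.is_int());
            assert!(!five.is_box());
            assert_eq!(five.int >> 1, 5);
        }
    }

    #[test]
    fn lam_size_accounts_for_inline_slots() {
        // Two params and one upvar add three 8-byte slots after the header.
        assert_eq!(
            ObjLam::size_of(2, 1),
            core::mem::size_of::<ObjLam>() + 3 * 8
        );
    }

    #[test]
    fn make_lam_is_a_boxed_lam() {
        unsafe {
            let lam = ivy_make_lam(nop, 2, 0);
            assert!(lam.is_lam());
            ivy_free(lam);
        }
    }
}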