va_list/impl-x86_64-elf.rs

// x86_64 ELF - the System V AMD64 ABI
//
use ::core::{mem, ptr};
use super::VaPrimitive; // Note: uses `super` for testing purposes

#[repr(transparent)]
pub struct VaList<'a>(&'a mut VaListInner);

#[repr(C)]
#[derive(Debug)]
#[doc(hidden)]
pub struct VaListInner {
    /// Byte offset of the next general-purpose register argument in `reg_save_area`
    gp_offset: u32,
    /// Byte offset of the next floating-point register argument in `reg_save_area`
    fp_offset: u32,
    /// Pointer to the on-stack arguments
    overflow_arg_area: *const u64,
    /// Save area for register arguments
    reg_save_area: *const u64,
}
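
// For reference: this struct mirrors the C `va_list` layout that the System V
// AMD64 ABI specifies for this target (reproduced from the spec as a sketch,
// not used directly by the code below):
//
//     typedef struct {
//         unsigned int gp_offset;
//         unsigned int fp_offset;
//         void *overflow_arg_area;
//         void *reg_save_area;
//     } va_list[1];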

impl<'a> VaList<'a> {
    fn inner(&mut self) -> &mut VaListInner {
        &mut *self.0
    }
}

#[doc(hidden)]
impl VaListInner {
    /// Checks that `num_gp` more 8-byte general-purpose register slots can be
    /// read from the save area (six GP registers are saved: %rdi..%r9)
    fn check_space_gp(&self, num_gp: u32) -> bool {
        self.gp_offset / 8 + num_gp <= 6
    }
    /// Checks that `num_fp` more 16-byte SSE register slots can be read from
    /// the save area
    fn check_space_fp(&self, num_fp: u32) -> bool {
        self.fp_offset + num_fp * 16 <= 304
    }
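
    // Register save area layout per the System V AMD64 ABI:
    //   bytes   0 ..  48: %rdi, %rsi, %rdx, %rcx, %r8, %r9  (8 bytes each)
    //   bytes  48 .. 304: %xmm0 .. %xmm15                    (16 bytes each)
    // `gp_offset` and `fp_offset` are byte offsets into this area, which is
    // why the bounds above are 6*8 = 48 and 48 + 16*16 = 304.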

    /// Read an argument from the general-purpose register save area
    unsafe fn get_gp<T>(&mut self) -> T {
        let n_gp = (mem::size_of::<T>() + 7) / 8;
        assert!(self.check_space_gp(n_gp as u32));
        let rv = ptr::read(self.reg_save_area.offset(self.gp_offset as isize / 8) as *const _);
        self.gp_offset += (8 * n_gp) as u32;
        rv
    }
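
    /// Read an argument from the floating-point (SSE) register save area.
    ///
    /// NOTE: this method is an added sketch; the original code fetched FP
    /// arguments through `get_gp`, which indexes by `gp_offset` and so reads
    /// the wrong slots. It mirrors `get_gp`, except that XMM slots are 16
    /// bytes wide and are indexed by `fp_offset`.
    unsafe fn get_fp<T>(&mut self) -> T {
        let n_fp = (mem::size_of::<T>() + 15) / 16;
        assert!(self.check_space_fp(n_fp as u32));
        let rv = ptr::read(((self.reg_save_area as usize) + self.fp_offset as usize) as *const _);
        self.fp_offset += (16 * n_fp) as u32;
        rv
    }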

    /// Read an argument from the overflow (on-stack) region.
    /// The numbered comments follow the `va_arg` algorithm steps in the
    /// System V AMD64 ABI.
    unsafe fn get_overflow<T>(&mut self) -> T {
        let align = mem::align_of::<T>();
        // 7. Align overflow_arg_area upwards to a 16-byte boundary if the
        //    alignment needed by T exceeds 8 bytes
        let addr = self.overflow_arg_area as usize;
        if align > 8 {
            if addr % 16 != 0 {
                self.overflow_arg_area = ((addr + 15) & !(16 - 1)) as *const _;
            }
        } else {
            // Performs step 10 of the previous fetch lazily: re-align to the
            // next 8-byte boundary before reading
            if addr % 8 != 0 {
                self.overflow_arg_area = ((addr + 7) & !(8 - 1)) as *const _;
            }
        }
        // 8. Fetch from the overflow area
        let rv = ptr::read(self.overflow_arg_area as *const _);
        // 9. Advance overflow_arg_area past the fetched argument
        self.overflow_arg_area =
            ((self.overflow_arg_area as usize) + mem::size_of::<T>()) as *const _;
        rv
    }
}

impl<T: 'static> VaPrimitive for *const T {
    unsafe fn get(list: &mut VaList) -> Self {
        <usize>::get(list) as *const T
    }
}

macro_rules! impl_va_prim_gp {
    ($u: ty, $s: ty) => {
        impl VaPrimitive for $u {
            unsafe fn get(list: &mut VaList) -> Self {
                let inner = list.inner();
                // See the ELF AMD64 ABI document for a description of how this should act
                if !inner.check_space_gp(1) {
                    inner.get_overflow()
                } else {
                    inner.get_gp()
                }
            }
        }
        impl VaPrimitive for $s {
            unsafe fn get(list: &mut VaList) -> Self {
                mem::transmute(<$u>::get(list))
            }
        }
    };
}

impl_va_prim_gp!{ usize, isize }
impl_va_prim_gp!{ u64, i64 }
impl_va_prim_gp!{ u32, i32 }
//impl_va_prim_gp!{ u16, i16 }
//impl_va_prim_gp!{ u8, i8 }
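// The 16- and 8-bit impls above are left disabled: C's default argument
// promotions widen such values to `int` before they reach the `va_list`,
// so callers should fetch them as `u32`/`i32` and narrow afterwards.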

macro_rules! impl_va_prim_fp {
    ($t: ty) => {
        impl VaPrimitive for $t {
            unsafe fn get(list: &mut VaList) -> Self {
                let inner = list.inner();
                // See the ELF AMD64 ABI document for a description of how this should act
                if !inner.check_space_fp(1) {
                    inner.get_overflow()
                } else {
                    // Was `inner.get_gp()`, which reads from the GP slots;
                    // FP arguments live in the XMM part of the save area
                    inner.get_fp()
                }
            }
        }
    }
}
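// Caveat: in C varargs, `float` is promoted to `double`, so an `f32` fetch
// reads only the low half of a `double` slot; callers will usually want to
// fetch `f64` and narrow.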
impl_va_prim_fp!{ f32 }
impl_va_prim_fp!{ f64 }
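
// Hypothetical usage sketch (the function name and signature are assumed, not
// part of this module): a C caller invokes a variadic function and the
// arguments are pulled off the `VaList` in declaration order:
//
//     unsafe fn sum_u64s(count: u32, mut ap: VaList) -> u64 {
//         (0..count).map(|_| <u64>::get(&mut ap)).sum()
//     }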