async def compile_as_arr(self, ctx: CompileContext) -> Register:
    """Compile an array literal but inline the inner values.

    Returns a register holding a pointer to the start of the backing
    variable's storage.
    """
    # If the element type is itself an array but the first element was not
    # written as a literal, the values cannot be inlined.
    if (isinstance((await self.type).to, types.Array)
            and (not isinstance(self.first_elem, ArrayLiteral))):
        # Maybe just cast the internal type to a pointer.
        raise self.error(
            "Internal type is of array type but is not a literal.")
    if self.var is None:
        # Lazily declare backing storage; the literal's lvalue is its rvalue.
        self.var = ctx.declare_unique_variable(await self.type)
        self.var.lvalue_is_rvalue = True
    base = ctx.get_register(types.Pointer.size)
    index = ctx.get_register(types.Pointer.size)
    ctx.emit(LoadVar(self.var, base))
    # `index` is the write cursor advanced by the per-element helper;
    # `base` stays pointing at the start and is the return value.
    ctx.emit(Mov(index, base))
    elem_size = (await self.type).to.size
    for i in self.exprs:
        await i.compile_as_arr_helper(ctx, index)
    # NOTE: will we ever hit this?
    # NOTE(review): this Immediate omits the size argument used at every
    # other call site — confirm the default size is acceptable here.
    if self.float_size:
        ctx.emit(Binary.add(index, Immediate(elem_size * self.float_size)))
    return base
async def compile(self, ctx: CompileContext) -> Register:
    """Compile a bitwise binary operation (``|``, ``^``, ``&``)."""
    operation_names = {"|": "or", "^": "xor", "&": "and"}
    left, right = await self.compile_meta(ctx)
    result = ctx.get_register(left.size, self.sign)
    ctx.emit(Binary(left, right, operation_names[self.op], result))
    return result
def emit_savevar(cls, ctx: CompileContext, save: ir_object.SaveVar):
    """Yield instructions that store ``save.from_`` into ``save.variable``.

    Computes the destination address (stack-relative or global) into a
    scratch register, then stores through it.
    """
    var = save.variable
    # scratch register used to materialise the destination address
    addr = ctx.get_register(2)
    if var.stack_offset is not None:
        # stack variable: address = base pointer +/- offset
        yield ir_object.Mov(addr, encoder.SpecificRegisters.bas)
        combine = (ir_object.Binary.sub if var.stack_offset < 0
                   else ir_object.Binary.add)
        yield combine(
            addr, ir_object.Immediate(abs(var.stack_offset), addr.size))
    elif var.global_offset is not None:
        # global variable: address is a known constant offset
        yield ir_object.Mov(addr, var.global_offset)
    else:
        raise InternalCompileException(
            f"Variable had no stack or global offset: {var}")
    # store the value through the computed address
    yield ir_object.Mov(
        ir_object.Dereference(addr, save.from_.size), save.from_)
async def compile(self, ctx: CompileContext) -> Register:
    """Compile a short-circuiting boolean operator (``and`` / ``or``).

    The left operand is always evaluated; the right operand is skipped
    via a conditional jump when the left side decides the result.
    """
    lhs_type, rhs_type = await self.left.type, await self.right.type
    if not rhs_type.implicitly_casts_to(lhs_type):
        raise self.error(
            f"Right argument to boolean operator: '{self.right.matched_region}'\n"
            f"of type: {rhs_type} cannot be casted to left argument: '{self.left.matched_region}'\n"
            f"of type {lhs_type}")
    if isinstance(lhs_type, Void):
        raise self.left.error("Void type argument to boolean operator")
    if isinstance(rhs_type, Void):
        raise self.right.error("Void type argument to boolean operator")
    left_reg: Register = await self.left.compile(ctx)
    ctx.emit(Compare(left_reg, Immediate(0, left_reg.size)))
    skip = JumpTarget()
    # 'or' short-circuits when the LHS is non-zero, 'and' when it is zero
    comparison = {'or': CompType.neq, 'and': CompType.eq}[self.op]
    flag = ctx.get_register(1)
    ctx.emit(SetCmp(flag, comparison))
    ctx.emit(Jump(skip, flag))
    right_reg: Register = await self.right.compile(ctx)
    # coerce the RHS to the width/sign of the LHS before the move
    if right_reg.size != left_reg.size:
        resized = right_reg.resize(left_reg.size, left_reg.sign)
        ctx.emit(Resize(right_reg, resized))
        right_reg = resized
    ctx.emit(Mov(left_reg, right_reg))
    ctx.emit(skip)
    return left_reg
def emit_loadvar(cls, ctx: CompileContext, load: ir_object.LoadVar):  # pylint: disable=unused-argument
    """Yield instructions loading a variable into ``load.to``.

    ``load.lvalue`` requests the variable's address; otherwise the stored
    value is loaded through a dereference.
    """
    var = load.variable
    # a variable whose lvalue *is* its rvalue has no separate address to give out
    if var.lvalue_is_rvalue and load.lvalue:
        raise InternalCompileException(
            f"Variable: {var} is marked that it's rvalue "
            "is it's lvalue and a lvalue load was requested.")
    # we need an extra register to store the temporary address
    temp_reg = ctx.get_register(2)
    if var.stack_offset is not None:
        # load from a stack address
        yield ir_object.Mov(
            temp_reg, encoder.SpecificRegisters.bas)  # grab base pointer
        # load offset off of the base pointer
        if var.stack_offset < 0:
            instr = ir_object.Binary.sub
        else:
            instr = ir_object.Binary.add
        yield instr(
            temp_reg, ir_object.Immediate(abs(var.stack_offset), temp_reg.size))
    elif var.global_offset is not None:
        # global variable: address is a constant offset
        yield ir_object.Mov(temp_reg, var.global_offset)
    else:
        raise InternalCompileException(
            f"Variable had no stack or global offset: {var}")
    if var.lvalue_is_rvalue or load.lvalue:
        # the computed address itself is the requested value
        yield ir_object.Mov(load.to, temp_reg)
    else:
        # dereference the address to fetch the stored value
        yield ir_object.Mov(load.to, ir_object.Dereference(temp_reg, load.to.size))
async def compile(self, ctx: CompileContext) -> Register:
    """Compile a comparison, selecting signed or unsigned IR comparisons."""
    lhs, rhs = await self.compile_meta(ctx)
    # equality/inequality are sign-agnostic; the orderings are not
    signed_ops = {
        '<=': CompType.leqs, '<': CompType.lts, '==': CompType.eq,
        '!=': CompType.neq, '>': CompType.gts, '>=': CompType.geqs,
    }
    unsigned_ops = {
        '<=': CompType.leq, '<': CompType.lt, '==': CompType.eq,
        '!=': CompType.neq, '>': CompType.gt, '>=': CompType.geq,
    }
    comparison = (signed_ops if self.sign else unsigned_ops)[self.op]
    result = ctx.get_register(1)
    ctx.emit(Compare(lhs, rhs))
    ctx.emit(SetCmp(result, comparison))
    return result
async def compile(self, ctx: CompileContext) -> Register:
    """Compile an additive operation ('+'/'-'), including pointer arithmetic."""
    lhs, rhs = await self.compile_meta(ctx)
    res = ctx.get_register(lhs.size, self.sign)
    op = {"+": "add", "-": "sub"}[self.op]
    if isinstance(await self.type, Pointer):
        # adding a pointer with an integer multiplies the integer side by the pointed to type
        (ptr_type, non_ptr) = ((self.left_type, rhs) if isinstance(
            self.left_type, Pointer) else (self.right_type, lhs))
        if isinstance(ptr_type.to, Void):
            raise self.error("Cannot perform pointer arithmetic on pointer to void.")
        # scale the integer operand by the pointee size before the add/sub below
        ctx.emit(Binary.mul(non_ptr, Immediate(ptr_type.to.size, non_ptr.size)))
    elif (op == "sub" and isinstance(self.left_type, Pointer)
          and isinstance(self.right_type, Pointer)):
        if isinstance(self.left_type.to, Void):
            raise self.error("Cannot perform pointer arithmetic on pointer to void.")
        if self.left_type != self.right_type:
            raise self.error("Both sides of pointer subtraction must be the same type.")
        # subtracting two pointers of equal type yields the number of elements between them
        ctx.emit(Binary(lhs, rhs, op, res))
        ctx.emit(Binary.udiv(res, Immediate(self.left_type.to.size, res.size)))
        return res
    # plain integer add/sub, or the scaled pointer +/- integer case from above
    ctx.emit(Binary(lhs, rhs, op, res))
    return res
async def compile(self, ctx: CompileContext) -> Register:
    """Compile a postfix increment/decrement, returning the *original* value."""
    result_type = await self.type
    address: Register = await self.arg.load_lvalue(ctx)
    width = result_type.size
    old_value = ctx.get_register(width, result_type.signed)
    new_value = ctx.get_register(width)
    # pointers step by the size of their pointee, everything else by one
    step = result_type.to.size if isinstance(result_type, Pointer) else 1
    ctx.emit(Mov(old_value, Dereference(address, old_value.size)))
    ctx.emit(Binary(old_value, Immediate(step, width), self.op, new_value))
    # write the updated value back; hand the caller the pre-update value
    ctx.emit(Mov(Dereference(address, new_value.size), new_value))
    return old_value
async def compile(self, ctx: CompileContext) -> Register:
    """Load this variable's value into a fresh register."""
    variable = await self.retrieve_variable()
    # explicitly use the type size not the var size
    destination = ctx.get_register(variable.type.size, variable.type.signed)
    ctx.emit(LoadVar(variable, destination))
    return destination
async def compile_as_ref(self, ctx: CompileContext) -> Register:
    """Compile to the array literal and return a reference to the start.

    This function is for when an array of references is the wanted outcome.
    This function will not work on array types.
    """
    if self.var is None:
        # lazily allocate backing storage; the literal's lvalue is its rvalue
        self.var = ctx.declare_unique_variable(await self.type)
        self.var.lvalue_is_rvalue = True
    if (isinstance(self.first_elem, ArrayLiteral) and (not isinstance(
            (await self.type).to, types.Pointer))):
        raise self.error(
            "Cannot compile to references if internal array type is an array and not a pointer"
        )
    base = ctx.get_register(types.Pointer.size)
    index = ctx.get_register(types.Pointer.size)
    ctx.emit(LoadVar(self.var, base))
    # `index` is the write cursor; `base` keeps pointing at the start
    ctx.emit(Mov(index, base))
    elem_type = (await self.type).to
    for i in self.exprs:
        r = await i.compile(ctx)
        # coerce each element to the declared element width
        if r.size != elem_type.size:
            r0 = r.resize(elem_type.size, elem_type.signed)
            ctx.emit(Resize(r, r0))
            r = r0
        # store the element and advance the cursor by one element
        ctx.emit(Mov(Dereference(index, r.size), r))
        ctx.emit(Binary.add(index, Immediate(r.size, types.Pointer.size)))
    if self.float_size:
        # fill in missing values
        ctx.emit(
            Binary.add(
                index,
                Immediate(elem_type.size * self.float_size, index.size)))
    return base
async def compile(self, ctx: CompileContext) -> Register:
    """Dereference a pointer expression, yielding the pointed-to value."""
    result_type = await self.type
    address = await self.load_lvalue(ctx)
    if isinstance(result_type, Void):
        raise self.error("Cannot dereference a void pointer.")
    value = ctx.get_register(result_type.size, result_type.signed)
    ctx.emit(Mov(value, Dereference(address, value.size)))
    return value
async def load_lvalue(self, ctx: CompileContext) -> Register:
    """Load the address (lvalue) of this variable into a register.

    Raises a compile error if the variable has no distinct lvalue
    (its lvalue is its rvalue, e.g. inlined literals).
    """
    var = await self.retrieve_variable()
    # if we load the lvalue when requested, error here since this is disallowed
    if var.lvalue_is_rvalue:
        raise self.error(
            f"Variable '{self.name}' has no lvalue information.")
    # Bug fix: use the variable returned by retrieve_variable() rather than
    # self.var — the rest of this method (and sibling methods) operate on
    # `var`, and self.var may be unset or stale relative to it.
    reg = ctx.get_register(types.Pointer(var.type).size)
    ctx.emit(LoadVar(var, reg, lvalue=True))
    return reg
async def compile(self, ctx: CompileContext) -> Register:
    """Compile a function call: typecheck the arguments, then emit a Call.

    Returns the register holding the call result (void calls return None).
    """
    fun_typ: Function = await self.fun.type
    if not isinstance(fun_typ, Function):
        raise self.error("Called object is not a function.")
    # if we have varargs, just make sure we have enough arguments to fill the required arguments
    if fun_typ.varargs:
        invalid_len = len(self.args) < len(fun_typ.args)
    else:
        invalid_len = len(self.args) != len(fun_typ.args)
    if invalid_len:
        raise self.error(
            "Incorrect number of args to function.\n"
            f"Expected {len(fun_typ.args)} got {len(self.args)}")
    # check that the argument types are valid
    # If this is a varargs function then the extra args wont be typechecked
    arg_types = [(i, (await i.type)) for i in self.args]
    # zip the types from the declaration with the types in the call expression
    # (zip stops at the declared args, so vararg extras are skipped)
    arg_iterator = enumerate(zip(fun_typ.args, arg_types))
    for arg_n, (lhs_type, (rhs_obj, rhs_type)) in arg_iterator:
        if not rhs_type.implicitly_casts_to(lhs_type):
            raise rhs_obj.error(
                f"Argument {arg_n} to call '{self.fun.identifier}' was of "
                f"type {rhs_type} instead of expected {lhs_type} and cannot be casted."
            )
    params = []
    # compile every argument; resize those that have a declared parameter type
    # (typ is None for vararg extras, which are passed as-is)
    for arg, typ in zip_longest(self.args, fun_typ.args):
        arg_reg = await arg.compile(ctx)
        if typ is not None and arg_reg.size != typ.size:
            arg_reg0 = arg_reg.resize(typ.size, typ.signed)
            ctx.emit(Resize(arg_reg, arg_reg0))
            arg_reg = arg_reg0
        params.append(arg_reg)
    fun: Register = await self.fun.compile(ctx)
    if isinstance(fun_typ.returns, Void):
        # void call: no result register, nothing to return
        ctx.emit(Call(params, fun))
    else:
        # NOTE(review): the awaited value is discarded here — presumably
        # awaited for a side effect (caching?); confirm whether it is needed.
        await self.size
        result_reg = ctx.get_register(fun_typ.returns.size, fun_typ.returns.signed)
        ctx.emit(Call(params, fun, result_reg))
        return result_reg
async def compile(self, ctx: CompileContext) -> Register:
    """Compile a prefix increment/decrement, returning the *updated* value."""
    result_type = await self.type
    address: Register = await self.load_lvalue(ctx)
    value = ctx.get_register(result_type.size, result_type.signed)
    # in the case of pointer increments, increment by the size of the pointer's underlying type
    step = result_type.to.size if isinstance(result_type, Pointer) else 1
    ctx.emit(Mov(value, Dereference(address, value.size)))
    ctx.emit(Binary(value, Immediate(step, value.size), self.op))
    # write back the updated value and hand it to the caller
    ctx.emit(Mov(Dereference(address, value.size), value))
    return value
async def compile(self, ctx: CompileContext) -> Register:
    """Compile an index expression; dereferences unless it yields an array."""
    address: Register = await self.load_lvalue(ctx)
    # normalise the computed address to pointer width
    if address.size != Pointer.size:
        widened = address.resize(Pointer.size)
        ctx.emit(Resize(address, widened))
        address = widened
    # indexes that leave an array type dont dereference
    if isinstance(await self.type, Array):
        return address
    result_type = await self.type
    value = ctx.get_register(result_type.size, result_type.signed)
    ctx.emit(Mov(value, Dereference(address, value.size)))
    return value
async def compile(self, ctx: CompileContext) -> Register:
    """Compile a bit-shift; right shifts are arithmetic for signed operands."""
    lhs, rhs = await self.compile_meta(ctx)
    if rhs.sign:
        raise self.right.error(
            "RHS operand to a binary shift op must be unsigned.")
    result = ctx.get_register(lhs.size, self.sign)
    # shl for left shifts; sar (sign-preserving) only for signed right shifts
    operation = ("shl" if self.op == "<<"
                 else "sar" if (self.op == ">>" and lhs.sign)
                 else "shr")
    ctx.emit(Binary(lhs, rhs, operation, result))
    return result
async def compile(self, ctx: CompileContext) -> Register:
    """Compile a multiplicative op; div/mod dispatch on signedness."""
    lhs, rhs = await self.compile_meta(ctx)
    result = ctx.get_register(lhs.size, self.sign)
    if self.op == "*":
        operation = "mul"
    elif self.op == "%":
        operation = "imod" if self.sign else "umod"
    else:
        # division: signed uses idiv, everything else falls back to udiv
        operation = "idiv" if (self.op == "/" and self.sign) else "udiv"
    ctx.emit(Binary(lhs, rhs, operation, result))
    return result
async def load_lvalue(self, ctx: CompileContext) -> Register:
    """Compute the address of an indexed element: base + index * element size."""
    atype = await self.arg.type
    if not isinstance(atype, (Pointer, Array)):
        raise self.error(f"Incompatible type to array index base {atype}")
    # don't allow void pointer arithmetic
    # This 'would' fail later with an error about requesting a register of size 0
    # But that would be less informative than catching the error now.
    if isinstance(atype.to, Void):
        raise self.error("Cannot perform pointer arithemetic on void pointers.")
    argument = await self.arg.compile(ctx)
    offset = await self.offset.compile(ctx)
    # get the size of the inner type if type.to is an array,
    # this will be the size of the internal array
    size = await self.size
    # make sure both the offset and the arguments are the correct size (size of a pointer)
    if argument.size != Pointer.size:
        argument0 = argument.resize(Pointer.size)
        ctx.emit(Resize(argument, argument0))
        argument = argument0
    if offset.size != Pointer.size:
        offset0 = offset.resize(Pointer.size)
        ctx.emit(Resize(offset, offset0))
        offset = offset0
    result = ctx.get_register(Pointer.size)
    # multiply to the size of the inner type of the pointer/ array
    ctx.emit(Binary.mul(offset, Immediate(size, offset.size)))
    ctx.emit(Binary.add(argument, offset, result))
    return result
async def compile(self, ctx: CompileContext) -> Register:
    """Materialise an integer literal into a register of its declared type."""
    destination = ctx.get_register(self._type.size, self._type.signed)
    ctx.emit(Mov(destination, Immediate(self.lit, self._type.size)))
    return destination