def trans_all(self):
    """Translate every queued TAC instruction, then wrap the emitted body
    with the function label and register-saving prologue.

    If no function name is set this is a macro body and the raw
    translation is left as-is.
    """
    # dispatch each intermediate-code instruction to its handler
    for instr in self.codes:
        self.handlers[instr.code_t](self, instr)
    if not hasattr(self, 'funcname') or not self.funcname:
        return  # a macro
    # handle the function start: stash the translated body and emit the
    # header/prologue into a fresh buffer, then re-append the body
    body = self.out
    self.out = []
    self.emit('# function %s begins' % self.funcname)
    if self.funcname == 'main':
        # is main, dont mangle
        self.emit('main:')
    else:
        self.emit('%s:' % util.mangle(self.funcname))
    self.emit('# prologue')
    if self.used_regs:
        # only save reg if required: grow the stack and spill each one
        self.emit('addi', '$sp', '$sp', len(self.used_regs) * -4)
        for idx, reg in enumerate(self.used_regs):
            offset = (idx - len(self.used_regs) + 1) * -4
            self.emit('sw', reg, '%d($sp)' % offset)
    # and the pop of the vars: copy incoming $a registers into the
    # registers assigned to the formal arguments
    for idx, arg in enumerate(reversed(self.arg_used)):
        self.emit('addi', self[arg.x], '$a%d' % idx, 0)
    self.out.extend(body)
def _call(vm, code):
    """Translate a TAC call: load pending arguments into the $a registers,
    jump-and-link to the mangled target, and copy $v0 into the result
    register when the call's value is used.
    """
    vm.has_called_f = True
    # move each queued argument into its argument register
    for idx, arg in enumerate(vm.arg_to_push):
        dest = '$a%d' % idx
        if immp(arg.y):
            # immediate operand: materialize it via addi from $zero
            vm.emit('addi', dest, '$zero', vm[arg.y])
        else:
            # register operand: copy it with an addi of 0
            vm.emit('addi', dest, vm[arg.y], 0)
    # arguments consumed — clear the queue in place
    del vm.arg_to_push[:]
    vm.emit('jal', util.mangle(code.f))
    if code.x is not None:  # else: dont return
        vm.emit('addi', vm[code.x], '$v0', 0)
def _defmacro(env, args):
    """Compile a `defmacro` form: emit begin/entry-label/end markers around
    the macro body and return its (omni-typed) result value.

    no type checking

    Raises CompileTimeError if any body form is not a raw `asm` form.
    """
    mac_name = env.curr_evaling[1][0].n  # get the macro name
    # macro body is evaluated in its own child environment
    env = Env(env)
    env.emit(tac.mac_begin('%s' % mac_name))
    env.emit(tac.label(util.mangle(mac_name)))  # entrance
    fbody = args[2:]
    # a macro body may only consist of raw asm forms
    for mac in fbody:
        if mac[0].n != 'asm':
            # FIX: use the call form of raise — the old comma form
            # (`raise X, 'msg'`) is Python-2-only syntax
            raise CompileTimeError('defmacro -- body must be all asm')
    ret_val = env.exec_block(fbody)  # must return one val
    ret_val.vt = 'omni'  # can be cast to any type
    env.emit(tac.mac_end('%s' % mac_name))
    return ret_val
def set_scan_items(self):
    """Prepare a scan run: optionally create a data-vault dataset,
    instantiate the input/output driver objects, reset the data arrays,
    and (re)build the plot curves.

    NOTE(review): this is a generator used in Twisted inlineCallbacks
    style — the `yield`s await deferreds from labrad; presumably the
    caller wraps it accordingly.
    """
    scan = self.scan
    outputs = scan[OUTPUT]
    # normalize a single output spec to a one-element list
    # (FIX: isinstance instead of `type(...) is dict`)
    if isinstance(outputs, dict):
        outputs = [outputs]
    if self.is_saving():
        # get labrad connection
        cxn = yield connectAsync()
        # get handle to data vault
        data_vault = cxn.data_vault
        # list of folders (trunk to leaf) for dataset directory
        save_dir = scan.get(SAVE_DIR, [])
        # go to scans dir in dv (create missing folders)
        yield data_vault.cd(data_vault_dir + save_dir, True)
        independent_variables = [
            '%s [%s]' % (
                scan[INPUT].get(NAME, 'input'),
                scan[INPUT].get(UNITS, 'arb'),
            )
        ]
        dependent_variables = [
            '%s [%s]' % (
                output.get(NAME, 'output'),
                output.get(UNITS, 'arb'),
            )
            for output in outputs
        ]
        save_name = scan.get(SAVE_NAME, None)
        # FIX: was `default_name = text=scan.get(NAME,'')`, a chained
        # assignment leaving a stray `text` local (paste error from the
        # `text=` kwarg below)
        default_name = scan.get(NAME, '')
        if save_name is None:
            # no configured name — ask the user, prefilled with default
            save_name, result = QtGui.QInputDialog.getText(
                self.__parent,
                'enter dataset name',
                'enter title for data vault dataset',
                text=default_name,
            )
            if not result:
                # dialog cancelled — fall back to the default name
                save_name = default_name
        # create new dataset
        yield data_vault.new(
            str(save_name),
            independent_variables,
            dependent_variables,
        )
        # make note of dataset creation
        yield data_vault.add_parameter('time', get_datetime())
        self.data_vault = data_vault
    # instantiate scan input object
    self.input = INPUTS[scan[INPUT][CLASS]](
        **mangle(scan[INPUT].get(ARGS, {}))
    )
    # note initial position if we are planning to return to it after scan
    if self.is_returning():
        self._return_input = yield self.input._get_input()
    # instantiate scan output object
    self.outputs = [
        OUTPUTS[output[CLASS]](**mangle(output.get(ARGS, {})))
        for output in outputs
    ]
    # intialize x and y values
    self.x_data = []
    self.y_datas = []
    # clear any stuff that happens to be in plot
    # (TODO: do we ever scan the same object twice?)
    for item in self.allChildItems():
        self.removeItem(item)
    # if optimizing or have multiple sources,
    # add legend to distinguish different sources
    if self.is_optimizing() or len(outputs) > 1:
        self.addLegend()
    if self.is_optimizing():
        # initialize fit curve
        self.fit_curve = PlotDataItem(
            name='fit (%s)' % outputs[self.optimize_axis].get(
                NAME, 'output %d' % (self.optimize_axis + 1)
            ),
            pen={'color': 'BBB', 'width': 2},
        )
    # initialize data curves, cycling through six symbol colors
    self.curves = [
        self.plot(
            name=output.get(NAME, 'output %d' % (index + 1)),
            pen=None,
            symbolSize=5,
            symbolPen=None,
            symbolBrush=('F55', '5F5', '55F', 'FF5', '5FF', 'F5F')[index % 6],
        )
        for index, output in enumerate(outputs)
    ]