def is_calibrated(board, block, loc, config, calib_obj):
    """Return True if a calibrated delta model exists for this block instance.

    Queries the device database for experimental delta models matching the
    block, location, static configuration and calibration objective.

    :param board: device/database handle passed through to the model library
    :param block: block specification to look up
    :param loc: location of the block instance on the device
    :param config: static block configuration to match against
    :param calib_obj: calibration objective the model was calibrated for
    :return: True when at least one matching model is stored
    """
    models = exp_delta_model_lib.get_models(board,
                                            ['block', 'loc', 'static_config', 'calib_obj'],
                                            block=block, loc=loc, config=config,
                                            calib_obj=calib_obj)
    # any matching stored model means this instance has been calibrated
    return len(models) > 0
def get_empirical_relation(self, instance, mode, port):
    """Return the measured (delta-corrected) relation for an output port.

    Falls back to the ideal relation when scaling is ideal or the block
    does not require calibration. Returns None (with a warning) when no
    experimental model has been stored for this instance/mode.
    """
    blk = self.dev.get_block(instance.block)

    # blocks that need no calibration are served by the ideal model
    if self.scale_method == ScaleMethod.IDEAL or not blk.requires_calibration():
        return self.get_ideal_relation(instance, mode, port)

    output_port = blk.outputs[port]
    delta_spec = output_port.deltas[mode]

    # build a single-mode config keyed exactly to this instance
    block_cfg = adplib.BlockConfig(instance)
    block_cfg.modes = [mode]

    candidates = exp_delta_model_lib.get_models(
        self.dev,
        ['block', 'loc', 'output', 'static_config', 'calib_obj'],
        block=blk,
        loc=instance.loc,
        output=output_port,
        config=block_cfg,
        calib_obj=self.calib_obj)

    if len(candidates) == 0:
        print("[[WARN]] no experimental model %s (%s)" \
              % (instance, mode))
        return None

    chosen = candidates[0]
    if not chosen.complete:
        print(block_cfg)
        print(chosen)
        raise Exception("experimental model must be complete")

    # substitute the fitted parameters into the correctable delta model
    return delta_spec.get_correctable_model(chosen.params, low_level=False)
def write_model_to_database(logger, pool, board, char_board):
    """Persist the pool's best hidden-code assignment to the main database.

    Takes the best-scoring hidden-code combination from the pool, rebuilds
    the block configuration with those codes, then copies the matching
    profiling datasets and delta models from the characterization database
    (`char_board`) into the main database (`board`), re-tagged with the
    MODELBASED calibration objective.

    NOTE(review): `logger` is not used in this body — presumably kept for
    signature consistency with sibling routines; confirm before removing.
    """
    # dump every measured score, ordered by dominance, for inspection
    print("------- ")
    for idx, score in pool.meas_view.order_by_dominance():
        print("%d] %s" % (idx, str(score)))

    # best-scoring entry in the pool; code_values aligns with pool.variables
    idx, score = pool.meas_view.get_best()
    code_values = pool.pool[idx]
    hidden_codes = dict(zip(pool.variables,
                            code_values))

    # clone the predictor's config and fill in the winning hidden codes
    new_config = pool.predictor.config.copy()
    for var, value in hidden_codes.items():
        new_config[var].value = value

    # drop any stale MODELBASED models for this instance before re-writing
    exp_delta_model_lib.remove_models(board,
                                      ['block', 'loc', 'static_config', 'calib_obj'],
                                      block=pool.predictor.block,
                                      loc=pool.predictor.loc,
                                      config=new_config,
                                      calib_obj=llenums.CalibrateObjective.MODELBASED)

    # copy the profiling datasets for this hidden config into the main database
    for dataset in exp_profile_dataset_lib.get_datasets(char_board,
                                                        ['block', 'loc', 'static_config', 'hidden_config'],
                                                        block=pool.predictor.block,
                                                        loc=pool.predictor.loc,
                                                        config=new_config):
        exp_profile_dataset_lib.update(board, dataset)

    print("##### BEST DELTAS ######")
    print(new_config)
    print("---------- codes and score ------")
    print(hidden_codes)
    print(score)
    print("-------- delta models ----------------")

    # re-tag the characterization models as MODELBASED and store them
    for model in exp_delta_model_lib.get_models(char_board,
                                                ['block', 'loc', 'static_config', 'hidden_config'],
                                                block=pool.predictor.block, loc=pool.predictor.loc,
                                                config=new_config):
        model.calib_obj = llenums.CalibrateObjective.MODELBASED
        print(model)
        print("------------")
        exp_delta_model_lib.update(board, model)

    print("###################")
    print("")
    print("")
def profile_kernel(runtime, board, blk, cfg, calib_obj,
                   min_points, max_points,
                   grid_size, force=False, adp=None):
    """Collect profiling data for every delta model of a block instance.

    For each experimental delta model matching (blk, cfg, calib_obj), walk
    the profiling steps for its output and run the profiler for any
    output/method combination that does not yet have enough stored data.

    :param runtime: runtime handle used to execute profiling on hardware
    :param board: device/database handle
    :param blk: block specification
    :param cfg: block configuration (instance location taken from cfg.inst.loc)
    :param calib_obj: calibration objective used to select delta models
    :param min_points: minimum dataset size required to skip re-profiling
    :param max_points: cap on the number of points per profiling step
    :param grid_size: granularity of the profiling sweep
    :param force: when True, re-profile even if enough data already exists
    :param adp: optional ADP passed through to the profiler
    """
    for exp_delta_model in delta_model_lib.get_models(board,
                                                      ['block', 'loc', 'static_config', 'calib_obj'],
                                                      block=blk,
                                                      loc=cfg.inst.loc,
                                                      config=cfg,
                                                      calib_obj=calib_obj):
        for method, n, m, reps in runtime_util.get_profiling_steps(exp_delta_model.output,
                                                                   exp_delta_model.config,
                                                                   grid_size,
                                                                   max_points=max_points):
            # previously collected data for this output/method, if any
            dataset = prof_dataset_lib.load(board, blk, cfg.inst.loc,
                                            exp_delta_model.output,
                                            exp_delta_model.config,
                                            method)
            print("<===========")
            print(cfg)
            print("===========>")
            print("output=%s method=%s" % (exp_delta_model.output.name, method))
            print("relation=%s" % (exp_delta_model.output.relation[cfg.mode]))
            print("dataset npts=%d" % (len(dataset) if dataset is not None else 0))
            print("n=%d m=%d reps=%d" % (n, m, reps))
            print("---------")
            # skip if enough points were already collected, unless forced
            if dataset is not None and \
               len(dataset) >= min_points and \
               len(dataset) >= n * m * reps and \
               not force:
                print("===> <%s> already profiled" % method)
                continue

            planner = planlib.SingleDefaultPointPlanner(blk,
                                                        cfg.inst.loc,
                                                        exp_delta_model.output,
                                                        method,
                                                        exp_delta_model.config,
                                                        n=n, m=m,
                                                        reps=reps)
            proflib.profile_all_hidden_states(runtime, board, planner, adp=adp)
def get_experimental_model(board, blk, loc, cfg, calib_obj):
    """Fetch the calibrated model for a block instance and apply its codes.

    Looks up the stored delta model matching (blk, loc, cfg, calib_obj) and
    copies its calibration-code values into `cfg` **in place**.

    :param board: device/database handle
    :param blk: block specification; its state fields are scanned for
        calibration codes (BCCalibImpl)
    :param loc: location of the block instance
    :param cfg: block configuration — mutated: calibration-code fields are
        overwritten with the stored calibrated values
    :param calib_obj: calibration objective the model was calibrated for
    :return: the calibrated model, or None if the block has no calibration
        codes at all
    :raises Exception: if the block has calibration codes but no model is
        stored for this objective
    """
    # state fields that are implemented as calibration codes
    calib_codes = [st for st in blk.state
                   if isinstance(st.impl, blocklib.BCCalibImpl)]
    if len(calib_codes) == 0:
        # nothing to calibrate for this block
        return None

    calib_cfgs = deltalib.get_models(board,
                                     ['block', 'loc', 'static_config', 'calib_obj'],
                                     block=blk, loc=loc, config=cfg,
                                     calib_obj=calib_obj)
    if len(calib_cfgs) == 0:
        print(cfg)
        raise Exception("not calibrated model_number=%s calib=%s" % (board.model_number,
                                                                     calib_obj))

    calib_cfg = calib_cfgs[0]
    # write the stored calibrated code values into the caller's config
    for st in calib_codes:
        cfg[st.name].value = calib_cfg.config[st.name].value

    return calib_cfg
def query_hidden_codes(logger, pool, board, blk, loc, cfg, hidden_codes, grid_size=9):
    """Profile a block at a specific hidden-code assignment and label the pool.

    Snaps the requested hidden-code values to their nearest legal settings,
    profiles the block at that configuration, fits delta models, computes
    the measured objective values and attaches them to the matching code
    entry in the pool. Prints predicted vs. measured objectives.

    :param pool: code pool holding the predictor, objectives and code labels
    :param board: device/database handle
    :param blk: block specification
    :param loc: block instance location
    :param cfg: base block configuration (copied, not mutated)
    :param hidden_codes: mapping of hidden-code variable name -> raw value
    :param grid_size: profiling sweep granularity
    """
    # snap requested values to the nearest legal integer code settings
    new_cfg = cfg.copy()
    for var, value in hidden_codes.items():
        int_value = blk.state[var].nearest_value(value)
        new_cfg[var].value = int_value

    # seed uncalibrated placeholder models so profiling/fitting has a target
    for out in blk.outputs:
        exp_model = exp_delta_model_lib.ExpDeltaModel(blk, loc, out, new_cfg,
                                                      calib_obj=llenums.CalibrateObjective.NONE)
        exp_delta_model_lib.update(board, exp_model)

    profile_block(logger, board, blk, loc, new_cfg, grid_size)
    update_model(logger, board, blk, loc, new_cfg)

    mdls = exp_delta_model_lib.get_models(board,
                                          ['block', 'loc', 'static_config', 'hidden_config'],
                                          block=blk, loc=loc, config=new_cfg)
    # profiling+fitting above should always produce at least one model
    assert(len(mdls) > 0)

    vs = {}
    for mdl in mdls:
        vs[mdl.output.name] = mdl.variables()
        # every model shares new_cfg, so `codes` is identical each iteration;
        # the last assignment is the one used below
        codes = dict(mdl.hidden_codes())

    actual_obj = pool.objectives.compute(vs)
    pred_deltavars, _ = pool.predictor.predict(codes)
    pred_obj = pool.objectives.compute(pred_deltavars)

    print("samp %s" % (codes))
    for expr, pred, act in zip(pool.objectives, pred_obj, actual_obj):
        print(" obj=%s pred=%f meas=%f" % (expr, pred, act))

    assert(pool.has_code(codes))
    pool.affix_label_to_code(codes, vs)
def is_calibrated(board, blk, loc, cfg, calib_obj):
    """Report whether a stored delta model exists for this block instance.

    A block instance counts as calibrated when the database holds at least
    one model matching its block, location, static configuration and
    calibration objective.
    """
    matching_models = delta_model_lib.get_models(
        board,
        ['block', 'loc', 'static_config', 'calib_obj'],
        block=blk, loc=loc, config=cfg, calib_obj=calib_obj)
    # non-empty result set <=> calibrated
    return bool(matching_models)
def _build_model(self, adp):
    """Build the integrator's initial-condition model for this output port.

    Starts from the ideal relation, then — when physical modeling is
    enabled — overlays the fitted delta model and, if a profiling dataset
    exists, an initial-condition error surface.
    """
    def set_to_ideal_expr():
        # fall back to the block's ideal relation for this mode:
        # concretize both the initial condition and the derivative
        expr = self.block.outputs[self.port.name].relation[self.cfg.mode]
        integ_expr = mathutils.canonicalize_integration_operation(expr)
        self._init_cond = self._concretize(integ_expr.init_cond)
        self._deriv = self._concretize(integ_expr.deriv)
        self.error_model = None
        self.ic_error_model = None

    #expr = blk.outputs[port.name].relation[cfg.mode]
    out = self.block.outputs[self.port.name]
    # stored delta models for this exact block/loc/output/config/objective
    models = deltalib.get_models(self.board,
                                 ['block', 'loc', 'output', 'static_config', 'calib_obj'],
                                 block=self.block,
                                 loc=self.loc,
                                 output=out,
                                 config=self.cfg,
                                 calib_obj=self.calib_obj)
    # always initialize to the ideal model first
    set_to_ideal_expr()
    if not self.enable_phys:
        # physical modeling disabled: the ideal model is the final answer
        return

    if len(models) == 0:
        print(self.cfg)
        raise Exception("no delta models for block")

    model = models[0]
    llcmdcomp.compute_expression_fields(self.board,
                                        adp,
                                        self.cfg,
                                        compensate=self.enable_compensate)
    llcmdcomp.compute_constant_fields(self.board,
                                      adp,
                                      self.cfg,
                                      compensate=self.enable_compensate)
    # NOTE(review): model is always non-None here (we raised above) and
    # enable_phys is True (early return above); condition kept for safety
    if not model is None and self.enable_phys:
        # profiling data for the initial-condition operation, if collected
        dataset = proflib.load(self.board,
                               self.block,
                               self.loc,
                               out,
                               model.config,
                               method=llenums.ProfileOpType.INTEG_INITIAL_COND)
        spec = self.block.outputs[self.port.name].deltas[self.cfg.mode]
        # correctable variant strips out the correctable error terms
        expr = spec.get_model(model.params) \
               if not self.correctable else spec.get_correctable_model(model.params,
                                                                       low_level=self.ll_correctable)
        if not dataset is None and self.enable_model_error:
            # fit an error surface over the initial-condition residuals
            errors = model.errors(dataset, init_cond=True)
            surf = parsurflib.build_surface(block=self.block,
                                            cfg=self.cfg,
                                            port=out,
                                            dataset=dataset,
                                            output=errors,
                                            npts=self.npts)
            self.ic_error_model = surf
            validate_model(self, expr.init_cond, surf, dataset)
        else:
            print("[warn] no dataset for %s %s"
                  % (self.block.name, self.loc))

        # replace the ideal initial condition with the delta-model version
        integ_expr = mathutils.canonicalize_integration_operation(expr)
        self._init_cond = self._concretize(integ_expr.init_cond)
        #self._deriv = self._concretize(integ_expr.deriv)
        self.error_model = None
def _build_model(self, adp):
    """Build this output port's expression, physical if possible.

    Uses the fitted delta model (plus an error surface when a profiling
    dataset exists) if physical modeling is enabled and a model is stored;
    otherwise falls back to the block's ideal relation.

    :raises Exception: if self.port is not an output of self.block
    """
    if not self.block.outputs.has(self.port.name):
        raise Exception("port <%s> is not an output for <%s>" \
                        % (self.port.name, self.block.name))

    out = self.block.outputs[self.port.name]
    # stored delta models for this exact block/loc/output/config/objective
    models = deltalib.get_models(self.board,
                                 ['block', 'loc', 'output', 'static_config', 'calib_obj'],
                                 block=self.block,
                                 loc=self.loc,
                                 output=out,
                                 config=self.cfg,
                                 calib_obj=self.calib_obj)
    if len(models) == 0:
        model = None
    else:
        model = models[0]

    # reset; replaced below when an error surface can be fitted
    self.error_model = None
    llcmdcomp.compute_expression_fields(self.board, adp, self.cfg,
                                        compensate=self.enable_compensate)
    llcmdcomp.compute_constant_fields(self.board,
                                      adp,
                                      self.cfg,
                                      compensate=self.enable_compensate,
                                      debug=True)
    if not model is None and self.enable_phys:
        # profiling data for the input/output operation, if collected
        dataset = proflib.load(self.board,
                               self.block,
                               self.loc,
                               out,
                               cfg=model.config,
                               method=llenums.ProfileOpType.INPUT_OUTPUT)
        spec = self.block.outputs[self.port.name].deltas[self.cfg.mode]
        # correctable variant strips out the correctable error terms
        expr = spec.get_model(model.params) if \
               not self.correctable else spec.get_correctable_model(model.params,
                                                                    low_level=self.ll_correctable)
        if not dataset is None:
            # fit an error surface over the model residuals
            errors = model.errors(dataset, init_cond=False)
            surf = parsurflib.build_surface(block=self.block,
                                            cfg=self.cfg,
                                            port=out,
                                            dataset=dataset,
                                            output=errors,
                                            npts=self.npts)
            self.error_model = surf
            validate_model(self, expr, surf, dataset)
        else:
            print("[warn] no dataset for %s %s"
                  % (self.block.name, self.loc))
    else:
        # no physical model available: use the ideal relation
        expr = self.block.outputs[self.port.name].relation[self.cfg.mode]

    self._expr = self._concretize(expr)