def main(config):
    logger = config.get_logger('train')

    # setup data_loader instances
    data_loader = config.init_obj('data_loader', module_data)
    valid_data_loader = data_loader.valid_data_loader
    test_data_loader = data_loader.test_data_loader

    # build model architecture, then print to console
    global model
    for file, types in files_models.items():
        for type in types:
            if config["arch"]["type"] == type:
                model = config.init_obj('arch', eval("module_arch_" + file))
    logger.info(model)

    # get function handles of loss and metrics
    if config['loss']['type'] == 'FocalLoss2d':
        count = data_loader.count
        indices = data_loader.indices
        # inverse-frequency class weights, normalized by the most frequent class
        # among the selected indices; weights at those indices are then zeroed
        w = np.max(count[indices]) / count
        w[indices] = 0
        only_scored_classes = config['trainer'].get('only_scored_class', False)
        if only_scored_classes:
            w = w[indices]
        # use explicit weights from the config if given, otherwise the computed ones
        weight = config['loss'].get('args', w)
        criterion = getattr(module_loss, 'FocalLoss2d')(weight=weight)
    else:
        criterion = getattr(module_loss, config['loss']['type'])

    # get function handles of metrics
    challenge_metrics = ChallengeMetric(config['data_loader']['args']['label_dir'])
    # challenge_metrics = ChallengeMetric2(num_classes=9)
    metrics = [getattr(challenge_metrics, met) for met in config['metrics']]

    # prepare model for testing
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = model.to(device)

    # Evaluater
    checkpoint_dir = config["arch"]["checkpoint_dir"]
    result_dir = config["arch"]["result_dir"]
    evaluater = Evaluater(model, criterion, metrics,
                          config=config,
                          test_data_loader=test_data_loader,
                          checkpoint_dir=checkpoint_dir,
                          result_dir=result_dir)
    evaluater.evaluate()
    challenge_metrics.return_metric_list()
    evaluater.analyze(challenge_metrics)
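
# A minimal entry-point sketch, not part of the original file. It assumes the
# pytorch-template layout this code appears to follow (a ConfigParser class in
# parse_config with a from_args constructor); the CLI flags are illustrative
# assumptions, not the project's confirmed interface.
if __name__ == '__main__':
    import argparse
    from parse_config import ConfigParser  # assumed module, per pytorch-template

    args = argparse.ArgumentParser(description='Evaluate a trained model')
    args.add_argument('-c', '--config', default=None, type=str,
                      help='config file path (default: None)')
    args.add_argument('-r', '--resume', default=None, type=str,
                      help='path to latest checkpoint (default: None)')
    args.add_argument('-d', '--device', default=None, type=str,
                      help='indices of GPUs to enable (default: all)')
    main(ConfigParser.from_args(args))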
def random_test(nn=None):
    """Fix a network structure, sample random settings for it, and return the score of each trial."""
    if nn is None:
        nn = NetworkUnit()  # avoid a shared mutable default argument
    spl = Sampler()
    eva = Evaluater()
    spl.renewp(CONFIG_RAND_VECTOR)
    scores = []
    for i in range(TEST_MAX_EPOCH):
        nn.set_cell(spl.sample(len(nn.graph_part)))
        score = eva.evaluate(nn)
        scores.append(score)
    return scores
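
# Usage sketch (illustrative, not in the original source): assumes
# CONFIG_RAND_VECTOR and TEST_MAX_EPOCH are module-level constants, as the
# function above implies. Runs the random baseline once and reports the best score.
def run_random_baseline():
    scores = random_test(NetworkUnit())
    print('best random score:', max(scores))
    return scores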
class Experiment_struct:
    def __init__(self, nn, sample_size=5, budget=20, positive_num=2,
                 r_p=0.99, uncertain_bit=3, add_num=20000):
        self.nn = nn
        self.spl = Sampler_struct(nn)
        self.opt = Optimizer(self.spl.get_dim(), self.spl.get_parametets_subscript())
        # sample_size   - the number of instances sampled in an iteration
        # budget        - the budget in online style
        # positive_num  - the size of the positive set (PosPop)
        # r_p           - the probability of sampling from the model
        # uncertain_bit - the number of dimensions that are sampled randomly
        # set hyper-parameters for optimization; budget is unused for single-step optimization
        self.opt.set_parameters(ss=sample_size, bud=budget, pn=positive_num,
                                rp=r_p, ub=uncertain_bit)
        # clear the optimization model
        self.opt.clear()
        self.budget = budget
        pros = self.opt.sample()
        self.spl.renewp(pros)
        self.eva = Evaluater()
        self.eva.add_data(add_num)
        self.opt_p_log = []
        print(self.eva.max_steps)
        print(len(pros))
        for i in range(self.budget):
            self.opt_p_log.append(pros)
            spl_list = self.spl.sample()
            self.nn.cell_list.append(spl_list)
            # score = np.random.random()
            # time_tmp = time.time()
            # score = self.eva.evaluate(self.nn, i, time_tmp)
            score = self.eva.evaluate(self.nn)
            # update the optimization model with the obtained score
            # and refresh the sampling probabilities in spl
            self.opt.update_model(pros, -score)
            pros = self.opt.sample()
            self.spl.renewp(pros)
        self.res_fea = self.opt.get_optimal().get_features()
        self.res_fit = self.opt.get_optimal().get_fitness()
        print('best:')
        print('features', self.res_fea)  # pros
        print('fitness', self.res_fit)   # scores
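
# Usage sketch (illustrative, not in the original source): the search runs
# entirely inside __init__, so instantiating the class starts it. Assumes
# NetworkUnit() supplies the fixed graph skeleton (graph_part) that
# Sampler_struct expects.
if __name__ == '__main__':
    exp = Experiment_struct(NetworkUnit(), sample_size=5, budget=20,
                            positive_num=2, r_p=0.99, uncertain_bit=3)
    print('optimal features:', exp.res_fea)
    print('optimal fitness:', exp.res_fit)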