import logging

import torch

# Project-specific helpers (get_config, Supernet, LookUpTable, PriorPool,
# get_generator, min_max_normalize, Model, cal_model_efficient) come from the
# repository's own modules.


def get_model(config_path, target_flops, num_classes=1000, in_chans=3,
              activation="relu", se=False, bn_momentum=0.1):
    CONFIG = get_config(config_path)

    if CONFIG.cuda:
        device = torch.device("cuda" if (torch.cuda.is_available() and
                                         CONFIG.ngpu > 0) else "cpu")
    else:
        device = torch.device("cpu")

    lookup_table = LookUpTable(CONFIG)

    supernet = Supernet(CONFIG)
    arch_param_nums = supernet.get_arch_param_nums()

    generator = get_generator(CONFIG, arch_param_nums)
    if CONFIG.generator_pretrained is not None:
        generator.load_state_dict(
            torch.load(CONFIG.generator_pretrained)["model"])
    generator.to(device)

    prior_pool = PriorPool(lookup_table, arch_param_nums, None, None, None,
                           CONFIG)

    # Sample architecture parameter =======================
    prior = prior_pool.get_prior(target_flops)
    prior = prior.to(device)

    hardware_constraint = torch.tensor(target_flops).to(device)
    normalize_hardware_constraint = min_max_normalize(
        CONFIG.high_flops, CONFIG.low_flops, hardware_constraint)

    # Generate an architecture parameter for the target FLOPs and decode it
    # into a per-layer configuration.
    arch_param = generator(prior, normalize_hardware_constraint)
    arch_param = lookup_table.get_validation_arch_param(arch_param)

    gen_flops = lookup_table.get_model_flops(arch_param)
    logging.info("Generated FLOPs: {}".format(gen_flops))

    layers_config = lookup_table.decode_arch_param(arch_param)
    model = Model(l_cfgs=layers_config,
                  dataset=CONFIG.dataset,
                  classes=CONFIG.classes,
                  activation=activation,
                  se=se,
                  bn_momentum=bn_momentum)
    cal_model_efficient(model, CONFIG)

    return model
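# For reference, a minimal sketch of the min-max normalization used above.
# This assumes min_max_normalize(high, low, value) linearly maps value into
# [0, 1] given the FLOPs bounds; the repository's implementation may differ.
def min_max_normalize_sketch(high, low, value):
    return (value - low) / (high - low)

# Example: a 400-MFLOPs target with bounds [300, 500] normalizes to 0.5:
# min_max_normalize_sketch(500, 300, torch.tensor(400.0))  # tensor(0.5000)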
if CONFIG.cuda:
    device = torch.device("cuda" if (torch.cuda.is_available() and
                                     CONFIG.ngpu > 0) else "cpu")
else:
    device = torch.device("cpu")

get_logger(CONFIG.log_dir)
writer = get_writer(CONFIG.write_dir)
#set_random_seed(CONFIG.seed)

train_transform, val_transform, test_transform = get_transforms(CONFIG)
train_dataset, val_dataset, test_dataset = get_dataset(
    train_transform, val_transform, test_transform, CONFIG)
train_loader, val_loader, test_loader = get_dataloader(
    train_dataset, val_dataset, test_dataset, CONFIG)

model = Supernet(CONFIG)
lookup_table = LookUpTable(CONFIG)
arch_param_nums = model.get_arch_param_nums()

#generator = ConvGenerator(CONFIG.hc_dim, 1, CONFIG.hidden_dim)
generator = get_generator(CONFIG, arch_param_nums)

criterion = cross_encropy_with_label_smoothing

if CONFIG.model_pretrained is not None:
    logging.info("Loading pretrained supernet")
    model.load_state_dict(torch.load(CONFIG.model_pretrained)["model"])
if CONFIG.generator_pretrained is not None:
    logging.info("Loading pretrained generator")
    generator.load_state_dict(
        torch.load(CONFIG.generator_pretrained)["model"])

generator.to(device)
model.to(device)
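# A self-contained sketch of label-smoothed cross-entropy, approximating what
# cross_encropy_with_label_smoothing (sic) computes; the eps value and mean
# reduction here are assumptions, not the repository's exact settings.
import torch.nn.functional as F

def label_smoothing_ce_sketch(logits, target, eps=0.1):
    # Blend the one-hot NLL term with a uniform distribution over classes.
    log_probs = F.log_softmax(logits, dim=-1)
    nll = -log_probs.gather(dim=-1, index=target.unsqueeze(1)).squeeze(1)
    uniform = -log_probs.mean(dim=-1)
    return ((1.0 - eps) * nll + eps * uniform).mean()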
nodes_num = calculate_nodes(CONFIG)
adj_matrix = get_adj_matrix(nodes_num, CONFIG)
adj_matrix = get_random_architecture(adj_matrix, CONFIG)

if args.train_data:
    for i in range(100):
        total_top1 = 0
        for j in range(2):
            if args.load_architecture:
                # Reuse a previously saved architecture (a flattened
                # adjacency matrix) instead of sampling a new one.
                adj_matrix = architectures.iloc[i].values
                adj_matrix = adj_matrix.reshape(nodes_num, nodes_num)
            else:
                adj_matrix = get_adj_matrix(nodes_num, CONFIG)
                adj_matrix = get_random_architecture(adj_matrix, CONFIG)

            model = Supernet(adj_matrix, CONFIG)
            model = model.to(device)
            if device.type == "cuda" and CONFIG.ngpu >= 1:
                model = nn.DataParallel(model, list(range(CONFIG.ngpu)))

            criterion = cross_encropy_with_label_smoothing
            cal_model_efficient(model, CONFIG)

            optimizer = get_optimizer(model, CONFIG.optim_state)
            scheduler = get_lr_scheduler(optimizer, len(train_loader), CONFIG)

            start_time = time.time()
            trainer = Trainer(criterion, optimizer, scheduler, None, device,
                              CONFIG)
            best_top1 = trainer.train_loop(train_loader, test_loader, model)
            logging.info("Total training time: {:.2f}".format(
                time.time() - start_time))
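# Hypothetical sketch of the random sampling performed by get_adj_matrix /
# get_random_architecture above: draw a strictly upper-triangular adjacency
# matrix so the sampled graph is a DAG. The repository may apply further
# constraints (edge or node budgets) beyond this.
import numpy as np

def random_dag_adj_sketch(nodes_num, edge_prob=0.5):
    mask = np.random.rand(nodes_num, nodes_num) < edge_prob
    return np.triu(mask, k=1).astype(np.float32)  # zero on and below diagonal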
if CONFIG.cuda:
    device = torch.device("cuda" if (torch.cuda.is_available() and
                                     CONFIG.ngpu > 0) else "cpu")
else:
    device = torch.device("cpu")

get_logger(CONFIG.log_dir)
writer = get_writer(args.title, CONFIG.write_dir)
logging.info(
    "=================================== Experiment title : {} Start ==========================="
    .format(args.title))

train_transform, val_transform, test_transform = get_transforms(CONFIG)
train_dataset, val_dataset, test_dataset = get_dataset(
    train_transform, val_transform, test_transform, CONFIG)
train_loader, val_loader, test_loader = get_dataloader(
    train_dataset, val_dataset, test_dataset, CONFIG)

lookup_table = LookUpTable(CONFIG)

supernet = Supernet(CONFIG)
arch_param_nums = supernet.get_arch_param_nums()

generator = get_generator(CONFIG, arch_param_nums)

criterion = cross_encropy_with_label_smoothing

if CONFIG.generator_pretrained is not None:
    generator.load_state_dict(
        torch.load(CONFIG.generator_pretrained)["model"])
generator.to(device)

prior_pool = PriorPool(lookup_table, arch_param_nums, None, None, None,
                       CONFIG)

# Sample architecture parameter =======================
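# The sampling that follows this banner mirrors get_model above; target_flops
# is an illustrative value here, not one fixed by the repository:
# target_flops = 400
# prior = prior_pool.get_prior(target_flops).to(device)
# hardware_constraint = torch.tensor(target_flops).to(device)
# normalize_hardware_constraint = min_max_normalize(
#     CONFIG.high_flops, CONFIG.low_flops, hardware_constraint)
# arch_param = generator(prior, normalize_hardware_constraint)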