import argparse
import math
import os
import pickle
from collections import defaultdict

# Project-level helpers (setup_logger, random_id, random_id_int, set_random_seed,
# data, predictor_retrain_compare, nas_bench_101_all_data) are assumed to be
# imported from this repository's utility modules.


def main(args):
    file_name = f'log_gpus_{args.gpu}'
    logger = setup_logger(file_name, args.save_dir, args.gpu,
                          log_level='DEBUG', filename=f'{file_name}.txt')
    logger.info(args)

    if args.search_space == 'nasbench_101':
        with open(nas_bench_101_all_data, 'rb') as fpkl:
            all_data = pickle.load(fpkl)
    else:
        raise NotImplementedError(
            f'The search space {args.search_space} is not supported yet!')

    for k in range(args.trails):
        seed = random_id_int(4)
        set_random_seed(seed)
        s_results_dict = defaultdict(list)  # Spearman correlations per setting
        k_results_dict = defaultdict(list)  # Kendall's tau correlations per setting
        logger.info(
            f'====================== Trial {k} begins, setting seed to {seed} '
            f'===========================')

        for budget in args.search_budget:
            train_data, test_data = data.dataset_split_idx(all_data, budget)
            print(f'budget: {budget}, train data size: {len(train_data)}, '
                  f'test data size: {len(test_data)}')

            for epochs in args.train_iterations:
                # Baseline: train the predictor from scratch, without pretrained weights.
                if args.compare_supervised == 'T':
                    logger.info(
                        f'==== predictor type: SUPERVISED, load pretrain model False, '
                        f'search budget is {budget}. Training epoch is {epochs} ====')
                    spearman_corr, kendalltau_corr, duration = predictor_retrain_compare(
                        args, 'SS_RL', train_data, test_data,
                        flag=False, train_epochs=epochs, logger=logger)
                    # Guard against NaN correlations (e.g. from constant predictions).
                    if math.isnan(spearman_corr):
                        spearman_corr = 0
                    if math.isnan(kendalltau_corr):
                        kendalltau_corr = 0
                    s_results_dict[f'supervised#{budget}#{epochs}'].append(spearman_corr)
                    k_results_dict[f'supervised#{budget}#{epochs}'].append(kendalltau_corr)

                # Fine-tune each pretrained predictor from its checkpoint.
                for predictor_type, model_dir in zip(args.predictor_list, args.load_dir):
                    logger.info(
                        f'==== predictor type: {predictor_type}, load pretrain model True. '
                        f'Search budget is {budget}. Training epoch is {epochs}. '
                        f'The model save dir is {model_dir.split("/")[-1][:-3]} ====')
                    spearman_corr, kendalltau_corr, duration = predictor_retrain_compare(
                        args, predictor_type, train_data, test_data,
                        flag=True, load_dir=model_dir, train_epochs=epochs, logger=logger)
                    if math.isnan(spearman_corr):
                        spearman_corr = 0
                    if math.isnan(kendalltau_corr):
                        kendalltau_corr = 0
                    s_results_dict[f'{predictor_type}#{budget}#{epochs}'].append(spearman_corr)
                    k_results_dict[f'{predictor_type}#{budget}#{epochs}'].append(kendalltau_corr)

        # Persist both result dicts for this trial as two sequential pickles.
        file_id = random_id(6)
        save_path = os.path.join(
            args.save_dir,
            f'{file_id}_{args.predictor_list[0]}_'
            f'{args.search_space.split("_")[-1]}_{args.gpu}_{k}.pkl')
        with open(save_path, 'wb') as fp:
            pickle.dump(s_results_dict, fp)
            pickle.dump(k_results_dict, fp)
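# Since main() writes the Spearman and Kendall dicts as two back-to-back
# pickles in one .pkl file, reading them back takes two sequential
# pickle.load calls on the same file handle. A minimal sketch: load_results
# is a hypothetical helper, not part of this script, and 'results/*.pkl'
# is a placeholder pattern.

# import glob
# import pickle
# from statistics import mean
#
# def load_results(pkl_path):
#     # main() pickles the Spearman dict first, then the Kendall dict,
#     # so they are read back in the same order.
#     with open(pkl_path, 'rb') as fp:
#         s_results = pickle.load(fp)
#         k_results = pickle.load(fp)
#     return s_results, k_results
#
# # Example: mean Spearman correlation per '<predictor>#<budget>#<epochs>'
# # setting across all per-trial result files in a directory.
# scores = {}
# for path in glob.glob('results/*.pkl'):
#     s_results, _ = load_results(path)
#     for key, values in s_results.items():
#         scores.setdefault(key, []).extend(values)
# summary = {key: mean(values) for key, values in scores.items()}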
parser = argparse.ArgumentParser()
parser.add_argument('--search_space', type=str, default='nasbench_101',
                    choices=['nasbench_101'],
                    help='The search space.')
# Note: argparse's `type=bool` treats any non-empty string as True, so a
# store_true flag is the idiomatic way to expose a boolean switch.
parser.add_argument('--with_g_func', action='store_true', default=False,
                    help='Use the g function after the backbone.')
parser.add_argument('--trails', type=int, default=40,
                    help='How many trials to carry out.')
parser.add_argument('--seed', type=int, default=random_id_int(4),
                    help='The seed value.')
parser.add_argument('--dataname', type=str, default='cifar10-valid',
                    choices=['cifar10-valid', 'cifar10', 'cifar100', 'ImageNet16-120'],
                    help='The evaluation dataset of NAS-Bench-201.')
# Note: `type=list` would split a CLI argument into single characters;
# `nargs='+'` with `type=int` accepts a space-separated list of budgets.
parser.add_argument('--search_budget', nargs='+', type=int,
                    default=[20, 50, 100, 150, 200],
                    help='How many architectures are selected to train the neural predictor.')
parser.add_argument('--train_iterations',