def main(args):
    file_name = f'log_gpus_{args.gpu}'
    logger = setup_logger(file_name,
                          args.save_dir,
                          args.gpu,
                          log_level='DEBUG',
                          filename=f'{file_name}.txt')
    logger.info(args)
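    # Load the pickled NAS-Bench-101 data (architecture/accuracy records) for the chosen search space.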
    if args.search_space == 'nasbench_101':
        with open(nas_bench_101_all_data, 'rb') as fpkl:
            all_data = pickle.load(fpkl)
    else:
        raise NotImplementedError(
            f'The search space {args.search_space} is not supported yet!')

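    # Run args.trails independent trials, each with a freshly drawn random seed.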
    for k in range(args.trails):
        seed = random_id_int(4)
        set_random_seed(seed)
        s_results_dict = defaultdict(list)
        k_results_dict = defaultdict(list)
        logger.info(
            f'======================  Trial {k} begins, setting seed to {seed}  ==========================='
        )
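        # For each search budget, split the data into a training pool of `budget` architectures and a held-out test set.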
        for budget in args.search_budget:
            train_data, test_data = data.dataset_split_idx(all_data, budget)
            print(
                f'budget: {budget}, train data size: {len(train_data)}, test data size: {len(test_data)}'
            )
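            # For each training-epoch setting, optionally evaluate a supervised baseline trained from scratch, then every pre-trained predictor.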
            for epochs in args.train_iterations:
                if args.compare_supervised == 'T':
                    logger.info(
                        f'====  predictor type: SUPERVISED, load pretrained model: False, '
                        f'search budget is {budget}. Training epoch is {epochs} ===='
                    )
                    spearman_corr, kendalltau_corr, duration = predictor_retrain_compare(
                        args,
                        'SS_RL',
                        train_data,
                        test_data,
                        flag=False,
                        train_epochs=epochs,
                        logger=logger)
                    if math.isnan(spearman_corr):
                        spearman_corr = 0
                    if math.isnan(kendalltau_corr):
                        kendalltau_corr = 0
                    s_results_dict[f'supervised#{budget}#{epochs}'].append(
                        spearman_corr)
                    k_results_dict[f'supervised#{budget}#{epochs}'].append(
                        kendalltau_corr)
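                # Retrain each pre-trained predictor from its saved checkpoint and record its rank correlations on the test set.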
                for predictor_type, model_dir in zip(args.predictor_list,
                                                     args.load_dir):
                    logger.info(
                        f'====  predictor type: {predictor_type}, load pretrained model: True. '
                        f'Search budget is {budget}. Training epoch is {epochs}. '
                        f'The model save dir is {model_dir.split("/")[-1][:-3]}  ===='
                    )
                    spearman_corr, kendalltau_corr, duration = predictor_retrain_compare(
                        args,
                        predictor_type,
                        train_data,
                        test_data,
                        flag=True,
                        load_dir=model_dir,
                        train_epochs=epochs,
                        logger=logger)
                    if math.isnan(spearman_corr):
                        spearman_corr = 0
                    if math.isnan(kendalltau_corr):
                        kendalltau_corr = 0
                    s_results_dict[f'{predictor_type}#{budget}#{epochs}'].append(
                        spearman_corr)
                    k_results_dict[f'{predictor_type}#{budget}#{epochs}'].append(
                        kendalltau_corr)
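        # Persist both correlation dictionaries for this trial to a randomly named pickle file.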
        file_id = random_id(6)
        save_path = os.path.join(
            args.save_dir,
            f'{file_id}_{args.predictor_list[0]}_{args.search_space.split("_")[-1]}_{args.gpu}_{k}.pkl'
        )
        with open(save_path, 'wb') as fp:
            pickle.dump(s_results_dict, fp)
            pickle.dump(k_results_dict, fp)
     default='nasbench_case2',
     choices=['nasbench_case1', 'nasbench_case2', 'nasbench_201'],
     help='nasbench')
 parser.add_argument('--algo_params',
                     type=str,
                     default='nasbench101_case2',
                     choices=[
                         'nasbench101_case1', 'nasbench101_case2',
                         'nasbench_201', 'scalar_prior',
                         'evaluation_compare', 'box_compare_case1',
                         'box_compare_case2', 'experiment'
                     ],
                     help='which algorithms to compare')
 parser.add_argument('--output_filename',
                     type=str,
                     default=random_id(64),
                     help='name of output files')
 parser.add_argument('--gpus',
                     type=int,
                     default=1,
                     help='Number of GPUs used for search.')
 parser.add_argument('--loss_type',
                     type=str,
                     default="mae",
                     help='Loss function used to train the architecture predictor.')
 parser.add_argument('--with_details',
                     type=str,
                     default="F",
                     help='Record detailed training procedure.')
 parser.add_argument(
     '--save_dir',
from nas_lib.utils.comm import random_id, setup_logger
from nas_lib.utils.utils_darts import compute_best_test_losses, compute_darts_test_losses
from nas_lib.algos_darts.build_open_algos import build_open_algos
from nas_lib.data_darts.build_ss import build_open_search_space_dataset
from nas_lib.configs import algo_params_open_domain
import pickle


if __name__ == "__main__":
    loss_type = 'mae'
    parser = argparse.ArgumentParser(description='Args for darts search space.')
    parser.add_argument('--search_space', type=str, default='darts', help='darts')
    parser.add_argument('--gpus', type=int, default=1, help='Number of gpus')
    parser.add_argument('--algorithm', type=str, default='gin_predictor',
                        choices=['gin_uncertainty_predictor', 'gin_predictor'], help='which algorithm to use')
    parser.add_argument('--output_filename', type=str, default=random_id(64), help='name of output files')
    parser.add_argument('--node_nums', type=int, default=4, help='number of nodes per cell')
    parser.add_argument('--log_level', type=str, default='DEBUG', help='information logging level')
    parser.add_argument('--seed', type=int, default=22, help='seed')
    parser.add_argument('--budget', type=int, default=100, help='search budget.')
    parser.add_argument('--save_dir', type=str,
                        default='/home/albert_wei/Disk_A/train_output_npenas/npenas_open_domain_darts_2/',
                        help='name of save directory')
    args = parser.parse_args()

    # make save directory
    save_dir = args.save_dir
    os.makedirs(save_dir, exist_ok=True)
    os.makedirs(os.path.join(save_dir, 'model_pkl'), exist_ok=True)