params.dict['loss_fn'] = args.loss_fn
 params.dict['finetune'] = args.finetune
 params.dict['collect'] = False
 params.dict['use_kfac'] = args.use_kfac
 params.dict['data_dir'] = args.data_dir
 if args.rl:
     params.dict['rl'] = args.rl
 params.dict['num_clusters'] = int(args.num_clusters)
 # Load the dataset parameters (sizes, etc.) from dataset_params.json into params
 json_path = os.path.join(args.data_dir, 'dataset_params.json')
 assert os.path.isfile(
     json_path
 ), "No json file found at {}, please generate tfrecords".format(json_path)
 params.update(json_path)
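 # train_size covers only the shards selected by args.train_range; sample_size
 # is the size of a single shard (presumably used for the sample set added later).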
 params.dict['train_size'] = cal_train_size(params.train_size,
                                            args.train_range)
 last_global_epoch, global_epoch = 0, 0
 params.dict['sample_size'] = cal_train_size(params.train_size, '1')
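 # With args.combine set, a training pipeline is also built here from the train
 # shards of the selected range (a matching validation path is prepared as well,
 # though it is overwritten by the test paths below).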
 if args.combine:
     # train from scratch
     path_train_tfrecords = os.path.join(
         args.data_dir,
         'train-{}'.format(args.train_range) + args.tfrecords_filename)
     path_eval_tfrecords = os.path.join(
         args.data_dir,
         'validation-{}'.format(args.train_range) + args.tfrecords_filename)
     # Create the input data pipeline
     logging.info("Creating the datasets...")
     train_dataset = load_dataset_from_tfrecords(
         glob.glob(path_train_tfrecords))
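 # Route all log output below to a per-run test log inside the model directory.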
 set_logger(os.path.join(args.model_dir, 'test{}.log'.format(args.log)))
 # Get paths for tfrecords
 dataset = 'test'
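 # Pick the evaluation shards: with args.combine the 'test*' glob matches every
 # test file and test_size is doubled (two test files are assumed); otherwise
 # only the test shard(s) for args.train_range are used.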
 if args.combine:
     params.dict['test_size'] = params.dict['test_size'] * 2
     print('Using both test sets')
     logging.info("test size: {}".format(params.test_size))
     dataset += '*'  # wildcard so the glob below picks up every test file
     path_eval_tfrecords = os.path.join(args.data_dir,
                                        dataset + args.tfrecords_filename)
     # Create the input data pipeline
     logging.info("Creating the dataset...")
     eval_dataset = load_dataset_from_tfrecords(
         glob.glob(path_eval_tfrecords))
 else:
     params.dict['test_size'] = cal_train_size(params.test_size,
                                               args.train_range)
     path_eval_tfrecords = os.path.join(
         args.data_dir,
         'test-{}'.format(args.train_range) + args.tfrecords_filename)
     # Create the input data pipeline
     logging.info("Creating the dataset...")
     eval_dataset = load_dataset_from_tfrecords(
         glob.glob(path_eval_tfrecords))
 # Create iterator over the test set
 eval_inputs = input_fn('test', eval_dataset, params)
 logging.info("- done.")
 # Define the model
 logging.info("Creating the model...")
 # weak_learner_id = load_learner_id(os.path.join(args.model_dir, args.restore_from, 'learner.json'))[0]
 eval_model_spec = model_fn('test', eval_inputs, params, reuse=False)
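 # The dataset parameters are re-loaded below before building the training
 # pipeline (this mirrors the setup above; presumably the restore/fine-tune
 # pass of the script).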
 params.dict['use_kfac'] = args.use_kfac
 # Load the parameters from the dataset, that gives the size etc. into params
 json_path = os.path.join(args.data_dir, 'dataset_params.json')
 assert os.path.isfile(
     json_path
 ), "No json file found at {}, please generate tfrecords".format(json_path)
 params.update(json_path)
 global_epoch = 0
 args.restore_dir = 'best_weights'  # restore the best checkpoint saved during training
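 # Paths to the training shards and to the standalone sample shard.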
 path_train_tfrecords = os.path.join(
     args.data_dir,
     'train-{}'.format(args.train_range) + args.tfrecords_filename)
 path_sample_train_tfrecords = os.path.join(
     args.data_dir, 'sample' + args.tfrecords_filename)
 training_files = glob.glob(path_train_tfrecords)
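 # '[0-4]' is taken to be the full training range; for a partial range the
 # sample shard is appended to the training files and its size is added to
 # train_size.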
 if args.train_range == '[0-4]':
     params.dict['train_size'] = cal_train_size(params.train_size,
                                                args.train_range)
 else:
     params.dict['train_size'] = cal_train_size(
         params.train_size,
         args.train_range)  # sample.tfrecords is assumed to be roughly the size of one shard
     params.dict['train_size'] += cal_train_size(params.train_size, '1')
     training_files.append(path_sample_train_tfrecords)
     # params.dict['train_size'] = cal_train_size(params.train_size, '[0-' + args.train_range + ']')
 # Create the input data pipeline
 logging.info("Creating the datasets...")
 #########################################################
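 # Dropout is effectively disabled (keep probability 1.0), presumably because
 # this pass evaluates / collects statistics rather than trains.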
 params.dict['training_keep_prob'] = 1.0
 start_time = time.time()
 train_dataset = load_dataset_from_tfrecords(training_files)
 # Specify other parameters for the dataset and the model
 # Create the two iterators over the two datasets