def save(self,
         save_path='./data/',
         file_name='population',
         file_format='csv',
         mode='EXPERIMENT'):
    create_exp_dir(save_path)
    table = {'ID': list(), 'Encoding string': list()}
    if mode in ['EXPERIMENT', 'DEBUG']:
        # One fitness column per objective.
        for i in range(self.obj_number):
            table['Fitness{0}'.format(i)] = list()
        for ID, ind in self.individuals.items():
            table['ID'].append(ID)
            table['Encoding string'].append(ind.to_string())
            fitness = ind.get_fitness()
            for i in range(self.obj_number):
                table['Fitness{0}'.format(i)].append(fitness[i])
    elif mode == 'SURROGATE':
        # Surrogate mode stores the predicted value instead of the true fitness.
        table['SG_value'] = list()
        for ID, ind in self.individuals.items():
            table['ID'].append(ID)
            table['Encoding string'].append(ind.to_string())
            table['SG_value'].append(ind.get_fitnessSG()[0])
    table = DataFrame(table)
    if file_format == 'csv':
        table.to_csv(os.path.join(save_path, '{0}.csv'.format(file_name)),
                     index=False)
    elif file_format == 'json':
        # The default orient rejects index=False, so write row records instead.
        table.to_json(os.path.join(save_path, '{0}.json'.format(file_name)),
                      orient='records')
    else:
        raise ValueError('Unsupported file format: {0}'.format(file_format))
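The EXPERIMENT branch lays the population out with one fitness column per objective before writing it. A minimal, self-contained sketch of that table layout follows; the individuals, encoding strings, and fitness values are invented for illustration and do not come from the project.

# Hypothetical data standing in for Population.individuals.
import os
from pandas import DataFrame

individuals = {0: ('0 1 2 3', [0.91, 1.2e6]), 1: ('0 2 1 3', [0.89, 0.8e6])}
obj_number = 2

table = {'ID': [], 'Encoding string': []}
for i in range(obj_number):
    table['Fitness{0}'.format(i)] = []
for ID, (encoding, fitness) in individuals.items():
    table['ID'].append(ID)
    table['Encoding string'].append(encoding)
    for i in range(obj_number):
        table['Fitness{0}'.format(i)].append(fitness[i])

os.makedirs('./data/', exist_ok=True)
DataFrame(table).to_csv(os.path.join('./data/', 'population.csv'), index=False)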
def save(self, s, results):
    create_exp_dir(os.path.join(self.save_root, 'models'))
    string, suffix = self.to_string(individual=s, results=results)
    with open(
            os.path.join(self.save_root, 'models',
                         '{0}.{1}'.format(str(s.get_Id()), suffix)),
            'w') as file:
        file.write(string)
def __init__(self, save_root='./Experiments/', mode='EXPERIMENT', **kwargs):
    create_exp_dir(save_root)
    self.save_root = save_root
    self.mode = mode
    # Expose any extra keyword arguments as attributes.
    for name, value in kwargs.items():
        setattr(self, name, value)
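A short usage sketch of the kwargs-to-attributes behaviour; the class name and the extra keyword arguments here are hypothetical, chosen only to show that anything passed through **kwargs becomes an attribute.

# Hypothetical minimal class mirroring the __init__ above.
class Recorder:
    def __init__(self, save_root='./Experiments/', mode='EXPERIMENT', **kwargs):
        self.save_root = save_root
        self.mode = mode
        for name, value in kwargs.items():
            setattr(self, name, value)

r = Recorder(obj_number=2, dataset='cifar10')
print(r.obj_number, r.dataset)  # -> 2 cifar10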
def train(self,
          dataset=None,
          train_epoch=50,
          newModel=False,
          run_time=0,
          batch_size=32,
          num_workers=0):
    if newModel:
        # Re-initialise the ranking model and its optimizer from scratch.
        self.model = RankNet(self.model_size)
        parameters = filter(lambda p: p.requires_grad, self.model.parameters())
        self.optimizer = torch.optim.Adam(parameters)
    create_exp_dir(self.save_model_path)
    if dataset is None:
        dataset = self.data_set
    data_queue = torch.utils.data.DataLoader(dataset=dataset,
                                             batch_size=batch_size,
                                             shuffle=True,
                                             num_workers=num_workers)

    def rate_fun(outputs, labels, topk=(1, 1)):
        # Threshold the pairwise outputs at 0.5 and report plain accuracy in
        # both of the Top-1/Top-5 slots expected by the shared train loop.
        outputs = outputs.cpu()
        predicted = outputs.detach().numpy()
        predicted[predicted >= 0.5] = 1
        predicted[predicted < 0.5] = 0
        total = labels.size(0)
        correct = np.sum(
            predicted.reshape(1, -1) == labels.cpu().numpy().reshape(1, -1))
        rate = torch.from_numpy(np.array([correct / total]))
        return rate, rate

    # train model
    step = 0
    for epoch in range(train_epoch):
        train_loss, train_top1, train_top5, step = train(
            data_queue,
            self.model,
            self.optimizer,
            step,
            self.criterion,
            rate_static=rate_fun)
        logging.info(
            "[Epoch {0:>4d}] [Train] loss {1:.3f} error Top1 {2:.2f} error Top5 {3:.2f}"
            .format(epoch, train_loss, train_top1, train_top5))
    # save model
    torch.save(
        self.model.state_dict(),
        os.path.join(self.save_model_path,
                     "Seq2Rank_run_{0:>2d}.ckpt".format(run_time)))
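rate_fun treats each pairwise output as the probability that the first architecture ranks above the second, thresholds it at 0.5, and reports accuracy. A self-contained sketch of that computation with toy tensors (not project data):

import numpy as np
import torch

outputs = torch.tensor([[0.8], [0.3], [0.6]])  # toy P(a ranks above b) values
labels = torch.tensor([[1.0], [0.0], [0.0]])   # ground-truth pairwise order

predicted = outputs.detach().numpy().copy()
predicted[predicted >= 0.5] = 1
predicted[predicted < 0.5] = 0
correct = np.sum(predicted.reshape(1, -1) == labels.numpy().reshape(1, -1))
print(correct / labels.size(0))  # 2/3 of the pairs ordered correctly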
def save(self,
         save_path='./data/',
         file_name='population',
         file_format='csv',
         epsilon=None):
    create_exp_dir(save_path)
    if epsilon is not None:
        # Only export the cached models recorded under this epsilon value.
        table = self.existed_model[self.existed_model['epsilon'] == epsilon]
    else:
        table = self.existed_model
    if file_format == 'csv':
        table.to_csv(os.path.join(save_path, '{0}.csv'.format(file_name)),
                     index=False)
    elif file_format == 'json':
        # The default orient rejects index=False, so write row records instead.
        table.to_json(os.path.join(save_path, '{0}.json'.format(file_name)),
                      orient='records')
    else:
        raise ValueError('Unsupported file format: {0}'.format(file_format))
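A toy illustration of the epsilon filter on the cached-model table; the column names echo the population export above, but every value below is made up.

from pandas import DataFrame

existed_model = DataFrame({
    'epsilon': [0.1, 0.1, 0.3],
    'Encoding string': ['0 1 2', '0 2 1', '1 0 2'],
    'Fitness0': [0.91, 0.88, 0.90],
})
# Keep only the rows recorded under epsilon == 0.3.
print(existed_model[existed_model['epsilon'] == 0.3])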
def save(self, save_path, file_name='q_table', file_format='csv'):
    create_exp_dir(save_path)
    start_action = []
    start_p0 = []
    start_p1 = []
    end_action = []
    end_p0 = []
    end_p1 = []
    q_values = []
    # Flatten the nested Q structure into one row per state transition.
    for start_action_list in self.q.keys():
        start_state = Q_State(param_list=start_action_list)
        for end_action_ix in range(len(self.q[start_action_list]['action'])):
            end_state = Q_State(
                param_list=self.q[start_action_list]['action'][end_action_ix])
            q_values.append(
                self.q[start_action_list]['q_value'][end_action_ix])
            start_action.append(start_state.action)
            start_p0.append(start_state.param0)
            start_p1.append(start_state.param1)
            end_action.append(end_state.action)
            end_p0.append(end_state.param0)
            end_p1.append(end_state.param1)
    table = pd.DataFrame({
        'Start Action': start_action,
        'Start Parameter 0': start_p0,
        'Start Parameter 1': start_p1,
        'End Action': end_action,
        'End Parameter 0': end_p0,
        'End Parameter 1': end_p1,
        'Q Value': q_values
    })
    if file_format == 'csv':
        table.to_csv(os.path.join(save_path, '{0}.csv'.format(file_name)),
                     index=False)
    elif file_format == 'json':
        # The default orient rejects index=False, so write row records instead.
        table.to_json(os.path.join(save_path, '{0}.json'.format(file_name)),
                      orient='records')
    else:
        raise ValueError('Unsupported file format: {0}'.format(file_format))
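The loop above flattens a nested Q structure, one dictionary entry per start state with parallel 'action' and 'q_value' lists, into one DataFrame row per transition. A self-contained toy version of that flattening (the keys and values are invented):

import pandas as pd

q = {(0, 1, 2): {'action': [(1, 0, 2), (2, 1, 0)], 'q_value': [0.5, 0.2]}}

rows = []
for start, entry in q.items():
    for end, value in zip(entry['action'], entry['q_value']):
        rows.append({'Start state': start, 'End state': end, 'Q Value': value})
print(pd.DataFrame(rows))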
parser.add_argument('--label_smooth', type=float, default=0.1, help='label smoothing')
parser.add_argument('--gamma', type=float, default=0.97, help='learning rate decay')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--lr_max', type=float, default=0.025, help='init learning rate')
parser.add_argument('--keep_prob', type=float, default=0.6)
parser.add_argument('--drop_path_keep_prob', type=float, default=0.8)
parser.add_argument('--l2_reg', type=float, default=3e-5)
parser.add_argument('--decay_period', type=int, default=1,
                    help='epochs between two learning rate decays')
parser.add_argument('--use_aux_head', action='store_true', default=True,
                    help='use auxiliary tower')
# You can train ImageNet on N GPUs using the train_NAONet_V2_imagenet.sh script
# with --batch_size=128*$N and --lr=0.1*$N.
args = parser.parse_args()

create_exp_dir(args.save_root)
args.save_root = os.path.join(
    args.save_root,
    'FINAL_{0}_{1}'.format(args.dataset, time.strftime("%Y%m%d-%H")))
create_exp_dir(args.save_root, scripts_to_save=glob.glob('*_ImageNet.*'))

# logging setting
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s %(message)s")
fh = logging.FileHandler(os.path.join(args.save_root, 'experiments.log'))
fh.setLevel(logging.INFO)
fh.setFormatter(formatter)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
logger.addHandler(ch)
parser.add_argument('--surrogate_allowed', type=recoder.args_bool, default='True')
parser.add_argument('--surrogate_path', type=str, default='./Res/PretrainModel/')
parser.add_argument('--surrogate_premodel', type=str, default='2020_03_10_09_29_34')
parser.add_argument('--surrogate_step', type=int, default=5)
parser.add_argument('--surrogate_search_times', type=int, default=10)
parser.add_argument('--surrogate_preserve_topk', type=int, default=5)
args = parser.parse_args()

recoder.create_exp_dir(args.save_root)
args.save_root = os.path.join(
    args.save_root, 'EA_SEARCH_{0}'.format(time.strftime("%Y%m%d-%H-%S")))
recoder.create_exp_dir(args.save_root, scripts_to_save=glob.glob('*_EA.*'))

torch.cuda.set_device(args.device)
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)

logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s %(message)s")
fh = logging.FileHandler(os.path.join(args.save_root, 'experiments.log'))
import sys
sys.path.append('./')
import argparse
import numpy as np
import logging
import os

from Evaluator.Utils.surrogate import auto_seq2seq
from Evaluator.Utils.recoder import create_exp_dir

create_exp_dir('./Res/')
create_exp_dir('./Res/PretrainModel/')

logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s %(message)s")
fh = logging.FileHandler(
    os.path.join('./Res/PretrainModel/', 'experiments.log'))
fh.setLevel(logging.INFO)
fh.setFormatter(formatter)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
logger.addHandler(ch)
logger.addHandler(fh)

model = auto_seq2seq('./Res/PretrainModel/', './Res/PretrainModel/',
                     './Res/PretrainModel/')
model.train()
                    help='num of training epochs')
parser.add_argument('--device', type=str, default="cuda")
parser.add_argument('--lr_max', type=float, default=0.025, help='init learning rate')
parser.add_argument('--lr_min', type=float, default=0.0, help='minimum learning rate')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--keep_prob', type=float, default=0.6)
parser.add_argument('--drop_path_keep_prob', type=float, default=0.8)
parser.add_argument('--use_aux_head', action='store_true', default=True,
                    help='use auxiliary tower')
parser.add_argument('--l2_reg', type=float, default=3e-4)
args = parser.parse_args()

create_exp_dir(args.save_root)
args.save_root = os.path.join(
    args.save_root,
    'FINAL_{0}_{1}'.format(args.dataset, time.strftime("%Y%m%d-%H%M")))
create_exp_dir(args.save_root, scripts_to_save=glob.glob('*_CIFAR.*'))

# logging setting
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s %(message)s")
fh = logging.FileHandler(os.path.join(args.save_root, 'experiments.log'))
fh.setLevel(logging.INFO)
fh.setFormatter(formatter)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
logger.addHandler(ch)
import sys
sys.path.append('./')
from nasbench import api
import random
from pandas import DataFrame
import os

from Evaluator.Utils import recoder

# This script always runs from the project root.
nasbench_data_path = './Res/nasbench_full.tfrecord.1'
train_set_portion = [0.001, 0.01, 0.1, 0.3, 0.5, 0.7, 0.9]
total_samples = 423000
save_path = './Res/nasbench/'
recoder.create_exp_dir(save_path)

# Token table used to encode NASBench operations as sequence symbols.
operation_token = {
    'input': '2',
    'output': '3',
    'conv3x3-bn-relu': '4',
    'conv1x1-bn-relu': '5',
    'maxpool3x3': '6'
}

# load data
nasbench423k = api.NASBench(nasbench_data_path)
# hash values
hash_models = list(nasbench423k.hash_iterator())
value_list = []
for hash_str in hash_models:
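The operation_token table above maps NASBench cell operations to sequence symbols. One plausible way it could be applied, shown purely as an assumption about the encoding (the op list and the space-joined format below are invented, not taken from the project):

operation_token = {
    'input': '2', 'output': '3',
    'conv3x3-bn-relu': '4', 'conv1x1-bn-relu': '5', 'maxpool3x3': '6',
}
ops = ['input', 'conv3x3-bn-relu', 'maxpool3x3', 'output']  # hypothetical cell
print(' '.join(operation_token[op] for op in ops))  # -> "2 4 6 3"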
def save_q_table(self, save_path, file_name, file_format='csv'):
    save_path = os.path.join(save_path, 'Q_tables')
    create_exp_dir(save_path)
    # Suffix the file name with the update count so snapshots are not overwritten.
    self.q_table.save(save_path,
                      file_name + '{0}'.format(self.q_value_update_times),
                      file_format=file_format)
import sys
sys.path.append('./')
import argparse
import numpy as np
import logging
import os

from Evaluator.Utils.surrogate import auto_seq2seq
from Evaluator.Utils.recoder import create_exp_dir

create_exp_dir('./Res/')
create_exp_dir('./Res/nasbench/')
create_exp_dir('./Res/nasbench/seq2seq/')

logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s %(message)s")
fh = logging.FileHandler(
    os.path.join('./Res/nasbench/seq2seq/', 'experiments.log'))
fh.setLevel(logging.INFO)
fh.setFormatter(formatter)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
logger.addHandler(ch)
logger.addHandler(fh)

model = auto_seq2seq('./Res/nasbench/seq2seq/', './Res/nasbench/seq2seq/',
                     './Res/nasbench/seq2seq/')
model.train(epoch=100)
parser.add_argument('--lr_max', type=float, default=0.025)
parser.add_argument('--epochs', type=int, default=25)
# surrogate
parser.add_argument('--surrogate_allowed', type=recoder.args_bool, default='True')
parser.add_argument('--surrogate_path', type=str, default='./Res/PretrainModel/')
parser.add_argument('--surrogate_premodel', type=str, default='2020_03_10_09_29_34')
parser.add_argument('--surrogate_step', type=int, default=5)
parser.add_argument('--surrogate_search_times', type=int, default=10)
parser.add_argument('--surrogate_preserve_topk', type=int, default=5)
args = parser.parse_args()

recoder.create_exp_dir(args.save_root)
args.save_root = os.path.join(
    args.save_root, 'EA_SEARCH_{0}'.format(time.strftime("%Y%m%d-%H-%S")))
recoder.create_exp_dir(args.save_root, scripts_to_save=glob.glob('*_EA.*'))

# logging settings
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s %(message)s")
fh = logging.FileHandler(os.path.join(args.save_root, 'experiments.log'))
fh.setLevel(logging.INFO)
fh.setFormatter(formatter)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
logger.addHandler(ch)