def build_parser(self):
    """Build the CLI parser for the H^s-DAS final-model augment (retrain) stage.

    Extends the project-level ``get_parser`` with the training
    hyper-parameters used when retraining the searched architecture.

    Returns:
        The configured ``argparse.ArgumentParser``-like object from
        ``get_parser``.
    """
    def _str2bool(v):
        # argparse pitfall: ``type=bool`` maps EVERY non-empty string
        # (including "False") to True.  Parse the common spellings instead.
        # A ValueError from a ``type=`` callable is reported by argparse as
        # a normal usage error.
        if isinstance(v, bool):
            return v
        s = v.strip().lower()
        if s in ('yes', 'true', 't', 'y', '1'):
            return True
        if s in ('no', 'false', 'f', 'n', '0'):
            return False
        raise ValueError('boolean value expected, got {!r}'.format(v))

    parser = get_parser("Augment final model of H^s-DAS config")
    parser.add_argument('--name', required=True)
    parser.add_argument('--dataset', type=str, default='imagenet', help='Imagenet')
    parser.add_argument('--batch_size', type=int, default=64, help='batch size')
    parser.add_argument('--lr', type=float, default=0.1, help='lr for weights')
    parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
    parser.add_argument('--weight_decay', type=float, default=3e-5, help='weight decay')
    parser.add_argument('--grad_clip', type=float, default=5.,
                        help='gradient clipping for weights')
    parser.add_argument('--print_freq', type=int, default=200, help='print frequency')
    parser.add_argument('--gpus', default='0', help='gpu device ids separated by comma. '
                        '`all` indicates use all gpus.')
    parser.add_argument('--epochs', type=int, default=250, help='# of training epochs')  # 600
    parser.add_argument('--init_channels', type=int, default=36)
    parser.add_argument('--layers', type=int, default=14, help='# of layers')
    parser.add_argument('--seed', type=int, default=2, help='random seed')
    parser.add_argument('--workers', type=int, default=4, help='# of workers')
    parser.add_argument('--aux_weight', type=float, default=0.4, help='auxiliary loss weight')
    parser.add_argument('--cutout_length', type=int, default=16, help='cutout length')
    parser.add_argument('--drop_path_prob', type=float, default=0, help='drop path prob')
    parser.add_argument('--gamma', type=float, default=0.97, help='learning rate decay')
    parser.add_argument('--decay_period', type=int, default=1,
                        help='epochs between two learning rate decays')
    parser.add_argument('--genotype', required=True, help='Cell genotype')
    parser.add_argument('--DAG', required=True, help='DAG genotype')
    parser.add_argument('--dist', action='store_true',
                        help='use multiprocess_distributed training')
    # ``type=int`` added: distributed launchers pass --local_rank as a string,
    # which would otherwise break comparisons such as ``args.local_rank == 0``.
    parser.add_argument('--local_rank', type=int, default=0)
    parser.add_argument('--resume_path', type=str, default=None)
    # was ``type=bool`` — always True for any non-empty string; see _str2bool.
    parser.add_argument('--exclude_bias_and_bn', type=_str2bool, default=True)
    parser.add_argument('--warmup_epochs', type=int, default=10)
    return parser
        # NOTE(review): this fragment starts mid-method — the enclosing ``def``
        # (which owns ``self``) is outside this chunk; indentation below is
        # reconstructed and should be confirmed against the full file.
        # Build the learnable density estimator(s) selected by the CLI flags.
        if self.args.likelihoodLearning:
            self.likelihood = neural_likelihood(self)
        if self.args.posteriorLearning:
            self.posterior = neural_posterior(self)
        # Define prior over simulator parameters.
        self.prior = Prior(self.sim)
        # Main loop: one simulate-then-fit round per iteration.
        # NOTE(review): ``round`` shadows the builtin of the same name.
        for round in range(self.args.numRound):
            print(str(round)+"-th round of training start")
            self.train(round)

    def train(self, round):
        """Run one round: sample inputs, simulate, then fit the estimator.

        ``round == 0`` presumably triggers sampling from the prior rather than
        the current likelihood/posterior — TODO confirm in sample_from_*.
        """
        # Batch of simulation input sampling
        if self.args.likelihoodLearning:
            self.thetas = sample_from_likelihood(self, self.args.simulation_budget_per_round, round == 0).detach().to(self.args.device)
        else:
            self.thetas = sample_from_posterior(self, self.args.simulation_budget_per_round, round == 0).detach().to(self.args.device)
        # Simulation execution
        self.simulated_output = self.sim.get_simulation_result(self.thetas)
        print("simulation input, output shape : ", self.thetas.shape, self.simulated_output.shape)
        # Posterior inference.
        # NOTE(review): this calls a MODULE-LEVEL ``train`` (imported elsewhere),
        # not this method — easy to confuse; consider renaming one of them.
        train(self, round)

# Script entry point: ``get_parser``/``main`` are defined outside this chunk.
if __name__ == "__main__":
    args = get_parser()
    inference = main(args)
def build_parser(self):
    # ======== cifar10 ============
    """Build the CLI parser for the H-DAS cell-search stage (CIFAR-10).

    Extends the project-level ``get_parser`` with the search-phase
    hyper-parameters (weight optimizer ``w_*`` and architecture ``alpha_*``).

    Returns:
        The configured ``argparse.ArgumentParser``-like object from
        ``get_parser``.
    """
    parser = get_parser("Search cells of H-DAS config")
    parser.add_argument('--name', required=True)
    parser.add_argument('--dataset', type=str, default='cifar10', help='CIFAR10')
    parser.add_argument('--batch_size', type=int, default=64, help='batch size')
    parser.add_argument('--w_lr', type=float, default=0.025, help='lr for weights')
    parser.add_argument('--w_lr_min', type=float, default=0.001,
                        help='minimum lr for weights')
    parser.add_argument('--w_momentum', type=float, default=0.9, help='momentum for weights')
    parser.add_argument('--w_weight_decay', type=float, default=3e-4,
                        help='weight decay for weights')
    parser.add_argument('--w_grad_clip', type=float, default=5.,
                        help='gradient clipping for weights')
    parser.add_argument('--print_freq', type=int, default=50, help='print frequency')
    parser.add_argument('--gpus', default='0', help='gpu device ids separated by comma. '
                        '`all` indicates use all gpus.')
    parser.add_argument('--epochs', type=int, default=50, help='# of training epochs')
    parser.add_argument('--init_channels', type=int, default=16)
    parser.add_argument('--layers', type=int, default=8, help='# of layers')
    parser.add_argument('--seed', type=int, default=2, help='random seed')
    parser.add_argument('--workers', type=int, default=4, help='# of workers')
    parser.add_argument('--alpha_lr', type=float, default=3e-4, help='lr for alpha')
    parser.add_argument('--alpha_weight_decay', type=float, default=1e-3,
                        help='weight decay for alpha')
    # ``type=int`` added: distributed launchers pass --local_rank as a string,
    # which would otherwise break comparisons such as ``args.local_rank == 0``.
    parser.add_argument('--local_rank', type=int, default=0)
    parser.add_argument('--resume_path', type=str, default=None)
    return parser
        # NOTE(review): this fragment starts mid-method — ``labels_list``,
        # ``score``, ``loss_value``, ``ln`` and ``islog`` are bound by unseen
        # code above (presumably a per-loader eval loop); indentation below is
        # reconstructed and should be confirmed against the full file.
        labels_np = np.concatenate(labels_list)
        # Predicted class = argmax over the last (class-score) axis.
        predict = np.argmax(score, axis=-1)
        # Top-1 accuracy: fraction of positions where prediction == label.
        acc = np.sum(np.equal(labels_np, predict).astype(np.int32))/predict.shape[0]
        # Route the summary either through the logger or plain stdout.
        if islog:
            self._print_log('\tMean {} loss of {} batches: {}.'.format(
                ln, len(self.data_loader[ln]), np.mean(loss_value)))
            self._print_log('The accuracy over {} samples is {}'.format(predict.shape[0], acc))
        else:
            print('\tMean {} loss of {} batches: {}.'.format(
                ln, len(self.data_loader[ln]), np.mean(loss_value)))
            print('The accuracy over {} samples is {}'.format(predict.shape[0], acc))

if __name__ == '__main__':
    parserfn = parser.get_parser()
    # Load args from the config file.
    print('Begin here')
    p = parserfn.parse_args()
    if p.config is not None:
        # Parse the YAML config and verify every key matches a known CLI arg.
        # NOTE(review): ``yaml.load(f)`` without an explicit ``Loader=`` is
        # deprecated and unsafe on untrusted files — prefer ``yaml.safe_load``.
        with open(p.config, 'r') as f:
            default_args = yaml.load(f)
        key = vars(p).keys()
        for k in default_args.keys():
            if k not in key:
                print('WRONG ARG: {}'.format(k))
                # NOTE(review): asserts are stripped under ``python -O``;
                # raising would be more robust.  Chunk may continue past here.
                assert (k in key)