def main():
    """Fine-tune a generator sub-network loaded from a checkpoint and train the GAN.

    Loads initial (rewind) weights, restores a pruned generator via
    ``load_subnet``, then runs the standard train/validate/checkpoint loop.
    """
    args = cfg.parse_args()

    # Seed all RNGs used below for reproducibility.
    random.seed(args.random_seed)
    torch.manual_seed(args.random_seed)
    torch.cuda.manual_seed(args.random_seed)

    # set tf env
    _init_inception()
    inception_path = check_or_download_inception(None)
    create_inception_graph(inception_path)

    # weight init
    def weights_init(m):
        classname = m.__class__.__name__
        if classname.find('Conv2d') != -1:
            if args.init_type == 'normal':
                nn.init.normal_(m.weight.data, 0.0, 0.02)
            elif args.init_type == 'orth':
                nn.init.orthogonal_(m.weight.data)
            elif args.init_type == 'xavier_uniform':
                # FIX: nn.init.xavier_uniform is the deprecated (and in new
                # torch versions removed) alias; use the in-place variant.
                nn.init.xavier_uniform_(m.weight.data, 1.)
            else:
                raise NotImplementedError('{} unknown inital type'.format(
                    args.init_type))
        elif classname.find('BatchNorm2d') != -1:
            nn.init.normal_(m.weight.data, 1.0, 0.02)
            nn.init.constant_(m.bias.data, 0.0)

    gen_net = Generator(bottom_width=args.bottom_width, gf_dim=args.gf_dim,
                        latent_dim=args.latent_dim).cuda()
    dis_net = eval('models.' + args.model + '.Discriminator')(args=args).cuda()
    gen_net.apply(weights_init)
    dis_net.apply(weights_init)

    # Rewind weights saved at initialization time (lottery-ticket style).
    initial_gen_net_weight = torch.load(
        os.path.join(args.init_path, 'initial_gen_net.pth'), map_location="cpu")
    initial_dis_net_weight = torch.load(
        os.path.join(args.init_path, 'initial_dis_net.pth'), map_location="cpu")

    # NOTE(review): setting CUDA_VISIBLE_DEVICES *after* the .cuda() calls
    # above has no effect on device selection — confirm intent / move earlier.
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    exp_str = args.dir
    args.load_path = os.path.join('output', exp_str, 'pth',
                                  'epoch{}.pth'.format(args.load_epoch))

    # state dict:
    assert os.path.exists(args.load_path)
    checkpoint = torch.load(args.load_path)
    print('=> loaded checkpoint %s' % args.load_path)
    state_dict = checkpoint['generator']
    gen_net = load_subnet(args, state_dict, initial_gen_net_weight).cuda()
    avg_gen_net = deepcopy(gen_net)

    # set optimizer
    gen_optimizer = torch.optim.Adam(
        filter(lambda p: p.requires_grad, gen_net.parameters()),
        args.g_lr, (args.beta1, args.beta2))
    dis_optimizer = torch.optim.Adam(
        filter(lambda p: p.requires_grad, dis_net.parameters()),
        args.d_lr, (args.beta1, args.beta2))
    gen_scheduler = LinearLrDecay(gen_optimizer, args.g_lr, 0.0, 0,
                                  args.max_iter * args.n_critic)
    dis_scheduler = LinearLrDecay(dis_optimizer, args.d_lr, 0.0, 0,
                                  args.max_iter * args.n_critic)

    # set up data_loader
    dataset = datasets.ImageDataset(args)
    train_loader = dataset.train

    # fid stat
    if args.dataset.lower() == 'cifar10':
        fid_stat = 'fid_stat/fid_stats_cifar10_train.npz'
    else:
        raise NotImplementedError('no fid stat for %s' % args.dataset.lower())
    assert os.path.exists(fid_stat)

    # epoch number for dis_net (generator steps once per n_critic dis steps)
    args.max_epoch = args.max_epoch * args.n_critic
    if args.max_iter:
        args.max_epoch = np.ceil(args.max_iter * args.n_critic / len(train_loader))

    # initial
    np.random.seed(args.random_seed)
    fixed_z = torch.cuda.FloatTensor(
        np.random.normal(0, 1, (25, args.latent_dim)))
    start_epoch = 0
    best_fid = 1e4

    args.path_helper = set_log_dir('logs', args.exp_name)
    logger = create_logger(args.path_helper['log_path'])
    logger.info(args)
    writer_dict = {
        'writer': SummaryWriter(args.path_helper['log_path']),
        'train_global_steps': start_epoch * len(train_loader),
        'valid_global_steps': start_epoch // args.val_freq,
    }

    gen_avg_param = copy_params(gen_net)

    # train loop
    for epoch in tqdm(range(int(start_epoch), int(args.max_epoch)),
                      desc='total progress'):
        lr_schedulers = (gen_scheduler, dis_scheduler) if args.lr_decay else None
        train(args, gen_net, dis_net, gen_optimizer, dis_optimizer,
              gen_avg_param, train_loader, epoch, writer_dict, lr_schedulers)

        # Validate on the EMA weights every val_freq epochs and on the last one.
        if epoch and epoch % args.val_freq == 0 or epoch == int(args.max_epoch) - 1:
            backup_param = copy_params(gen_net)
            load_params(gen_net, gen_avg_param)
            inception_score, fid_score = validate(args, fixed_z, fid_stat,
                                                  gen_net, writer_dict)
            logger.info(
                'Inception score: %.4f, FID score: %.4f || @ epoch %d.'
                % (inception_score, fid_score, epoch))
            load_params(gen_net, backup_param)
            if fid_score < best_fid:
                best_fid = fid_score
                is_best = True
            else:
                is_best = False
        else:
            is_best = False

        avg_gen_net.load_state_dict(gen_net.state_dict())
        load_params(avg_gen_net, gen_avg_param)
        save_checkpoint({
            'epoch': epoch + 1,
            'model': args.model,
            'gen_state_dict': gen_net.state_dict(),
            'dis_state_dict': dis_net.state_dict(),
            'avg_gen_state_dict': avg_gen_net.state_dict(),
            'gen_optimizer': gen_optimizer.state_dict(),
            'dis_optimizer': dis_optimizer.state_dict(),
            'best_fid': best_fid,
            'path_helper': args.path_helper
        }, is_best, args.path_helper['ckpt_path'])
''' Classifying '''
''' Getting results on testing set '''
# FIX: this fragment used Python 2 print statements, which are syntax errors
# under Python 3 (the rest of this file uses f-strings); converted to print().
print('\n\n ## Testing...')
y_pred = net.predict(dataset['x']['testing'])

# Accuracy / per-class report / confusion matrix on the held-out test split.
c_accuracy = accuracy_score(y_true=np.array(dataset['y']['testing']),
                            y_pred=y_pred)
c_report = classification_report(np.array(dataset['y']['testing']), y_pred)
cm = confusion_matrix(y_true=np.array(dataset['y']['testing']),
                      y_pred=y_pred, labels=net.labels)
# Row-normalize so each row sums to 1 (per-true-class distribution).
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]

print('\n # Kitt net accuracy score on testing data:',
      colored(str(c_accuracy), 'green'))
print('\n # Kitt net classification report on testing data:\n',
      colored(str(c_report), 'cyan'))
print('\n # Kitt net confusion matrix on testing data:\n')
print_cm(cm=cm, labels=net.labels)
print('\n')
print_cm(cm=cm_normalized, labels=net.labels, normed=True)

# Terrain id 9 is deliberately absent from the set below — 14 classes total.
terrain_ids = (1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 14, 15)
terrain_types = load_params('terrain_types')[0]
terrains = [terrain_types[str(t_id)] for t_id in terrain_ids]

# Plot the normalized confusion matrix with cell annotations >= 1%.
plt.matshow(cm_normalized, vmin=0, vmax=1)
plt.colorbar()
plt.xticks(range(14), terrains, rotation=45)
plt.yticks(range(14), terrains)
for t1_i, terrain1 in enumerate(terrains):
    for t2_i, terrain2 in enumerate(terrains):
        if cm_normalized[t1_i][t2_i] >= 0.01:
            plt.text(t2_i, t1_i, round(cm_normalized[t1_i][t2_i], 2),
                     va='center', ha='center', fontsize=12)
plt.show()
dataset.close()
def main():
    """Train a GAN from scratch or resume from ``args.load_path``.

    Builds generator/discriminator from ``args.model``, validates the EMA
    generator on a FID schedule, and checkpoints every epoch.
    """
    args = cfg.parse_args()
    torch.cuda.manual_seed(args.random_seed)

    # set tf env
    _init_inception()
    inception_path = check_or_download_inception(None)
    create_inception_graph(inception_path)

    # import network
    gen_net = eval('models.' + args.model + '.Generator')(args=args).cuda()
    dis_net = eval('models.' + args.model + '.Discriminator')(args=args).cuda()

    # weight init
    def weights_init(m):
        classname = m.__class__.__name__
        if classname.find('Conv2d') != -1:
            if args.init_type == 'normal':
                nn.init.normal_(m.weight.data, 0.0, 0.02)
            elif args.init_type == 'orth':
                nn.init.orthogonal_(m.weight.data)
            elif args.init_type == 'xavier_uniform':
                # FIX: use xavier_uniform_ — the non-underscore alias is
                # deprecated and removed in recent torch releases.
                nn.init.xavier_uniform_(m.weight.data, 1.)
            else:
                raise NotImplementedError('{} unknown inital type'.format(
                    args.init_type))
        elif classname.find('BatchNorm2d') != -1:
            nn.init.normal_(m.weight.data, 1.0, 0.02)
            nn.init.constant_(m.bias.data, 0.0)

    gen_net.apply(weights_init)
    dis_net.apply(weights_init)

    # set optimizer
    gen_optimizer = torch.optim.Adam(
        filter(lambda p: p.requires_grad, gen_net.parameters()),
        args.g_lr, (args.beta1, args.beta2))
    dis_optimizer = torch.optim.Adam(
        filter(lambda p: p.requires_grad, dis_net.parameters()),
        args.d_lr, (args.beta1, args.beta2))
    gen_scheduler = LinearLrDecay(gen_optimizer, args.g_lr, 0.0, 0,
                                  args.max_iter * args.n_critic)
    dis_scheduler = LinearLrDecay(dis_optimizer, args.d_lr, 0.0, 0,
                                  args.max_iter * args.n_critic)

    # set up data_loader
    dataset = datasets.ImageDataset(args)
    train_loader = dataset.train

    # fid stat
    if args.dataset.lower() == 'cifar10':
        fid_stat = 'fid_stat/fid_stats_cifar10_train.npz'
    elif args.dataset.lower() == 'stl10':
        fid_stat = 'fid_stat/stl10_train_unlabeled_fid_stats_48.npz'
    else:
        raise NotImplementedError(f'no fid stat for {args.dataset.lower()}')
    assert os.path.exists(fid_stat)

    # epoch number for dis_net
    args.max_epoch = args.max_epoch * args.n_critic
    if args.max_iter:
        args.max_epoch = np.ceil(args.max_iter * args.n_critic / len(train_loader))

    # initial
    fixed_z = torch.cuda.FloatTensor(
        np.random.normal(0, 1, (25, args.latent_dim)))
    gen_avg_param = copy_params(gen_net)
    start_epoch = 0
    best_fid = 1e4

    # set writer
    if args.load_path:
        print(f'=> resuming from {args.load_path}')
        assert os.path.exists(args.load_path)
        checkpoint_file = os.path.join(args.load_path, 'Model', 'checkpoint.pth')
        assert os.path.exists(checkpoint_file)
        checkpoint = torch.load(checkpoint_file)
        start_epoch = checkpoint['epoch']
        best_fid = checkpoint['best_fid']
        gen_net.load_state_dict(checkpoint['gen_state_dict'])
        dis_net.load_state_dict(checkpoint['dis_state_dict'])
        gen_optimizer.load_state_dict(checkpoint['gen_optimizer'])
        dis_optimizer.load_state_dict(checkpoint['dis_optimizer'])
        # Restore EMA params via a throwaway copy of the generator.
        avg_gen_net = deepcopy(gen_net)
        avg_gen_net.load_state_dict(checkpoint['avg_gen_state_dict'])
        gen_avg_param = copy_params(avg_gen_net)
        del avg_gen_net
        args.path_helper = checkpoint['path_helper']
        logger = create_logger(args.path_helper['log_path'])
        logger.info(
            f'=> loaded checkpoint {checkpoint_file} (epoch {start_epoch})')
    else:
        # create new log dir
        assert args.exp_name
        args.path_helper = set_log_dir('logs', args.exp_name)
        logger = create_logger(args.path_helper['log_path'])

    logger.info(args)
    writer_dict = {
        'writer': SummaryWriter(args.path_helper['log_path']),
        'train_global_steps': start_epoch * len(train_loader),
        'valid_global_steps': start_epoch // args.val_freq,
    }

    # train loop
    for epoch in tqdm(range(int(start_epoch), int(args.max_epoch)),
                      desc='total progress'):
        lr_schedulers = (gen_scheduler, dis_scheduler) if args.lr_decay else None
        train(args, gen_net, dis_net, gen_optimizer, dis_optimizer,
              gen_avg_param, train_loader, epoch, writer_dict, lr_schedulers)

        if epoch and epoch % args.val_freq == 0 or epoch == int(args.max_epoch) - 1:
            backup_param = copy_params(gen_net)
            load_params(gen_net, gen_avg_param)
            inception_score, fid_score = validate(args, fixed_z, fid_stat,
                                                  gen_net, writer_dict)
            logger.info(
                f'Inception score: {inception_score}, FID score: {fid_score} || @ epoch {epoch}.'
            )
            load_params(gen_net, backup_param)
            if fid_score < best_fid:
                best_fid = fid_score
                is_best = True
            else:
                is_best = False
        else:
            is_best = False

        avg_gen_net = deepcopy(gen_net)
        load_params(avg_gen_net, gen_avg_param)
        save_checkpoint({
            'epoch': epoch + 1,
            'model': args.model,
            'gen_state_dict': gen_net.state_dict(),
            'dis_state_dict': dis_net.state_dict(),
            'avg_gen_state_dict': avg_gen_net.state_dict(),
            'gen_optimizer': gen_optimizer.state_dict(),
            'dis_optimizer': dis_optimizer.state_dict(),
            'best_fid': best_fid,
            'path_helper': args.path_helper
        }, is_best, args.path_helper['ckpt_path'])
        del avg_gen_net
def main():
    """Train a progressive-stage GAN under DataParallel, with resume support.

    Fixes applied: ``best_fid_score`` was read before any assignment (both in
    ``return_states`` and in the ``fid_score < best_fid_score`` comparison),
    raising NameError when resuming past epoch 1 — the already-initialized
    ``best_fid`` is used throughout instead. ``range()`` bounds are cast to
    ``int`` because ``args.max_epoch`` may be a float after ``np.ceil``.
    """
    args = cfg.parse_args()

    # Seed every RNG and force deterministic cuDNN for reproducibility.
    torch.cuda.manual_seed(args.random_seed)
    torch.cuda.manual_seed_all(args.random_seed)
    np.random.seed(args.random_seed)
    random.seed(args.random_seed)
    torch.backends.cudnn.deterministic = True

    # set tf env
    _init_inception()
    inception_path = check_or_download_inception(None)
    create_inception_graph(inception_path)

    # epoch number for dis_net
    dataset = datasets.ImageDataset(args, cur_img_size=8)
    train_loader = dataset.train
    if args.max_iter:
        args.max_epoch = np.ceil(args.max_iter / len(train_loader))
    else:
        args.max_iter = args.max_epoch * len(train_loader)
    args.max_epoch = args.max_epoch * args.n_critic

    # import network
    gen_net = eval('models.' + args.gen_model + '.Generator')(args=args).cuda()
    dis_net = eval('models.' + args.dis_model + '.Discriminator')(args=args).cuda()
    gen_net.set_arch(args.arch, cur_stage=2)

    # weight init
    def weights_init(m):
        classname = m.__class__.__name__
        if classname.find('Conv2d') != -1:
            if args.init_type == 'normal':
                nn.init.normal_(m.weight.data, 0.0, 0.02)
            elif args.init_type == 'orth':
                nn.init.orthogonal_(m.weight.data)
            elif args.init_type == 'xavier_uniform':
                nn.init.xavier_uniform_(m.weight.data, 1.)
            else:
                raise NotImplementedError('{} unknown inital type'.format(
                    args.init_type))
        elif classname.find('BatchNorm2d') != -1:
            nn.init.normal_(m.weight.data, 1.0, 0.02)
            nn.init.constant_(m.bias.data, 0.0)

    gen_net.apply(weights_init)
    dis_net.apply(weights_init)

    # Wrap in DataParallel across all visible GPUs; progressive-growing state
    # lives on .module.
    gpu_ids = [i for i in range(int(torch.cuda.device_count()))]
    gen_net = torch.nn.DataParallel(gen_net.to("cuda:0"), device_ids=gpu_ids)
    dis_net = torch.nn.DataParallel(dis_net.to("cuda:0"), device_ids=gpu_ids)
    gen_net.module.cur_stage = 0
    dis_net.module.cur_stage = 0
    gen_net.module.alpha = 1.
    dis_net.module.alpha = 1.

    # set optimizer
    if args.optimizer == "adam":
        gen_optimizer = torch.optim.Adam(
            filter(lambda p: p.requires_grad, gen_net.parameters()),
            args.g_lr, (args.beta1, args.beta2))
        dis_optimizer = torch.optim.Adam(
            filter(lambda p: p.requires_grad, dis_net.parameters()),
            args.d_lr, (args.beta1, args.beta2))
    elif args.optimizer == "adamw":
        gen_optimizer = AdamW(
            filter(lambda p: p.requires_grad, gen_net.parameters()),
            args.g_lr, weight_decay=args.wd)
        # NOTE(review): dis optimizer uses args.g_lr here, not args.d_lr —
        # looks like a copy-paste; confirm before changing.
        dis_optimizer = AdamW(
            filter(lambda p: p.requires_grad, dis_net.parameters()),
            args.g_lr, weight_decay=args.wd)

    gen_scheduler = LinearLrDecay(gen_optimizer, args.g_lr, 0.0, 0,
                                  args.max_iter * args.n_critic)
    dis_scheduler = LinearLrDecay(dis_optimizer, args.d_lr, 0.0, 0,
                                  args.max_iter * args.n_critic)

    # fid stat
    if args.dataset.lower() == 'cifar10':
        fid_stat = 'fid_stat/fid_stats_cifar10_train.npz'
    elif args.dataset.lower() == 'stl10':
        fid_stat = 'fid_stat/stl10_train_unlabeled_fid_stats_48.npz'
    elif args.fid_stat is not None:
        fid_stat = args.fid_stat
    else:
        raise NotImplementedError(f'no fid stat for {args.dataset.lower()}')
    assert os.path.exists(fid_stat)

    # initial
    fixed_z = torch.cuda.FloatTensor(
        np.random.normal(0, 1, (64, args.latent_dim)))
    gen_avg_param = copy_params(gen_net)
    start_epoch = 0
    best_fid = 1e4

    # set writer
    if args.load_path:
        print(f'=> resuming from {args.load_path}')
        assert os.path.exists(args.load_path)
        checkpoint_file = os.path.join(args.load_path)
        assert os.path.exists(checkpoint_file)
        checkpoint = torch.load(checkpoint_file)
        start_epoch = checkpoint['epoch']
        best_fid = checkpoint['best_fid']
        gen_net.load_state_dict(checkpoint['gen_state_dict'])
        dis_net.load_state_dict(checkpoint['dis_state_dict'])
        gen_optimizer.load_state_dict(checkpoint['gen_optimizer'])
        dis_optimizer.load_state_dict(checkpoint['dis_optimizer'])
        gen_avg_param = checkpoint['gen_avg_param']
        # Restore the progressive-growing stage for the resumed epoch.
        cur_stage = cur_stages(start_epoch, args)
        gen_net.module.cur_stage = cur_stage
        dis_net.module.cur_stage = cur_stage
        gen_net.module.alpha = 1.
        dis_net.module.alpha = 1.
        args.path_helper = checkpoint['path_helper']
    else:
        # create new log dir
        assert args.exp_name
        args.path_helper = set_log_dir('logs', args.exp_name)

    logger = create_logger(args.path_helper['log_path'])
    logger.info(args)
    writer_dict = {
        'writer': SummaryWriter(args.path_helper['log_path']),
        'train_global_steps': start_epoch * len(train_loader),
        'valid_global_steps': start_epoch // args.val_freq,
    }

    def return_states():
        """Snapshot everything needed to resume training (closes over loop vars)."""
        states = {}
        states['epoch'] = epoch
        # FIX: was 'best_fid_score', which is never defined in this scope.
        states['best_fid'] = best_fid
        states['gen_state_dict'] = gen_net.state_dict()
        states['dis_state_dict'] = dis_net.state_dict()
        states['gen_optimizer'] = gen_optimizer.state_dict()
        states['dis_optimizer'] = dis_optimizer.state_dict()
        states['gen_avg_param'] = gen_avg_param
        states['path_helper'] = args.path_helper
        return states

    # train loop
    for epoch in range(int(start_epoch) + 1, int(args.max_epoch)):
        train(
            args,
            gen_net,
            dis_net,
            gen_optimizer,
            dis_optimizer,
            gen_avg_param,
            train_loader,
            epoch,
            writer_dict,
            fixed_z,
        )

        # Validate EMA weights, then restore the live (backup) weights.
        backup_param = copy_params(gen_net)
        load_params(gen_net, gen_avg_param)
        fid_score = validate(
            args,
            fixed_z,
            fid_stat,
            epoch,
            gen_net,
            writer_dict,
        )
        logger.info(f'FID score: {fid_score} || @ epoch {epoch}.')
        load_params(gen_net, backup_param)

        is_best = False
        # FIX: compare against the initialized best_fid (was best_fid_score,
        # a NameError whenever resuming past epoch 1).
        if epoch == 1 or fid_score < best_fid:
            best_fid = fid_score
            is_best = True
        # NOTE(review): `epoch % 1 == 0` is always true, so a checkpoint is
        # written every epoch — confirm the intended save frequency.
        if is_best or epoch % 1 == 0:
            states = return_states()
            save_checkpoint(states, is_best, args.path_helper['ckpt_path'],
                            filename=f'checkpoint_epoch_{epoch}.pth')
def main():
    """Magnitude-prune the discriminator of a trained GAN, rewind the
    generator (lottery-ticket style), and fine-tune with fixed masks."""
    args = cfg.parse_args()

    # Seed all RNGs for reproducibility.
    random.seed(args.random_seed)
    torch.manual_seed(args.random_seed)
    torch.cuda.manual_seed(args.random_seed)
    np.random.seed(args.random_seed)

    # set tf env
    _init_inception()
    inception_path = check_or_download_inception(None)
    create_inception_graph(inception_path)

    # weight init
    def weights_init(m):
        classname = m.__class__.__name__
        if classname.find('Conv2d') != -1:
            if args.init_type == 'normal':
                nn.init.normal_(m.weight.data, 0.0, 0.02)
            elif args.init_type == 'orth':
                nn.init.orthogonal_(m.weight.data)
            elif args.init_type == 'xavier_uniform':
                # FIX: deprecated nn.init.xavier_uniform -> xavier_uniform_.
                nn.init.xavier_uniform_(m.weight.data, 1.)
            else:
                raise NotImplementedError('{} unknown inital type'.format(
                    args.init_type))
        elif classname.find('BatchNorm2d') != -1:
            nn.init.normal_(m.weight.data, 1.0, 0.02)
            nn.init.constant_(m.bias.data, 0.0)

    gen_net = eval('models.' + args.model + '.Generator')(args=args).cuda()
    dis_net = eval('models.' + args.model + '.Discriminator')(args=args).cuda()
    gen_net.apply(weights_init)
    dis_net.apply(weights_init)
    avg_gen_net = deepcopy(gen_net)

    initial_gen_net_weight = torch.load(
        os.path.join(args.init_path, 'initial_gen_net.pth'), map_location="cpu")
    initial_dis_net_weight = torch.load(
        os.path.join(args.init_path, 'initial_dis_net.pth'), map_location="cpu")
    # NOTE(review): these id() asserts are trivially true (state_dict()
    # returns a fresh object each call) — they guard nothing.
    assert id(initial_dis_net_weight) != id(dis_net.state_dict())
    assert id(initial_gen_net_weight) != id(gen_net.state_dict())

    # set optimizer
    gen_optimizer = torch.optim.Adam(
        filter(lambda p: p.requires_grad, gen_net.parameters()),
        args.g_lr, (args.beta1, args.beta2))
    dis_optimizer = torch.optim.Adam(
        filter(lambda p: p.requires_grad, dis_net.parameters()),
        args.d_lr, (args.beta1, args.beta2))
    gen_scheduler = LinearLrDecay(gen_optimizer, args.g_lr, 0.0, 0,
                                  args.max_iter * args.n_critic)
    dis_scheduler = LinearLrDecay(dis_optimizer, args.d_lr, 0.0, 0,
                                  args.max_iter * args.n_critic)

    # set up data_loader
    dataset = datasets.ImageDataset(args)
    train_loader = dataset.train

    # fid stat
    if args.dataset.lower() == 'cifar10':
        fid_stat = 'fid_stat/fid_stats_cifar10_train.npz'
    elif args.dataset.lower() == 'stl10':
        fid_stat = 'fid_stat/fid_stats_stl10_train.npz'
    else:
        raise NotImplementedError('no fid stat for %s' % args.dataset.lower())
    assert os.path.exists(fid_stat)

    # epoch number for dis_net
    args.max_epoch = args.max_epoch * args.n_critic
    if args.max_iter:
        args.max_epoch = np.ceil(args.max_iter * args.n_critic / len(train_loader))

    # initial
    fixed_z = torch.cuda.FloatTensor(
        np.random.normal(0, 1, (25, args.latent_dim)))
    start_epoch = 0
    best_fid = 1e4

    print('=> resuming from %s' % args.load_path)
    assert os.path.exists(args.load_path)
    checkpoint_file = args.load_path
    assert os.path.exists(checkpoint_file)
    checkpoint = torch.load(checkpoint_file)
    pruning_generate(gen_net, checkpoint['gen_state_dict'])
    dis_net.load_state_dict(checkpoint['dis_state_dict'])

    # Count total conv weights and those still non-zero in the discriminator.
    total = 0
    total_nonzero = 0
    for m in dis_net.modules():
        if isinstance(m, nn.Conv2d):
            total += m.weight_orig.data.numel()
            mask = m.weight_orig.data.abs().clone().gt(0).float().cuda()
            total_nonzero += torch.sum(mask)

    # Flatten all conv magnitudes to find the pruning threshold.
    conv_weights = torch.zeros(total)
    index = 0
    for m in dis_net.modules():
        if isinstance(m, nn.Conv2d):
            size = m.weight_orig.data.numel()
            conv_weights[index:(index + size)] = \
                m.weight_orig.data.view(-1).abs().clone()
            index += size

    y, i = torch.sort(conv_weights)
    # thre_index = int(total * args.percent)
    # only care about the non zero weights
    # e.g: total = 100, total_nonzero = 80, percent = 0.2, thre_index = 36, that means keep 64
    thre_index = total - total_nonzero
    thre = y[int(thre_index)]
    pruned = 0
    print('Pruning threshold: {}'.format(thre))
    zero_flag = False
    masks = OrderedDict()
    for k, m in enumerate(dis_net.modules()):
        if isinstance(m, nn.Conv2d):
            weight_copy = m.weight_orig.data.abs().clone()
            mask = weight_copy.gt(thre).float()
            masks[k] = mask
            pruned = pruned + mask.numel() - torch.sum(mask)
            m.weight_orig.data.mul_(mask)
            if int(torch.sum(mask)) == 0:
                zero_flag = True  # a layer was pruned to nothing
            print('layer index: {:d} \t total params: {:d} \t remaining params: {:d}'
                  .format(k, mask.numel(), int(torch.sum(mask))))
    print('Total conv params: {}, Pruned conv params: {}, Pruned ratio: {}'.
          format(total, pruned, pruned / total))

    pruning_generate(avg_gen_net, checkpoint['gen_state_dict'])
    see_remain_rate(gen_net)

    # Unless fine-tuning G in place, rewind unpruned weights to initialization.
    if not args.finetune_G:
        gen_weight = gen_net.state_dict()
        gen_orig_weight = rewind_weight(initial_gen_net_weight,
                                        gen_weight.keys())
        gen_weight.update(gen_orig_weight)
        gen_net.load_state_dict(gen_weight)
    gen_avg_param = copy_params(gen_net)

    if args.finetune_D:
        dis_net.load_state_dict(checkpoint['dis_state_dict'])
    else:
        # Rewind D to its initial weights, then re-apply the pruning masks.
        dis_net.load_state_dict(initial_dis_net_weight)
        for k, m in enumerate(dis_net.modules()):
            if isinstance(m, nn.Conv2d):
                m.weight_orig.data.mul_(masks[k])

    # Frozen copy of the trained discriminator for knowledge distillation.
    orig_dis_net = eval('models.' + args.model + '.Discriminator')(args=args).cuda()
    orig_dis_net.load_state_dict(checkpoint['dis_state_dict'])
    orig_dis_net.eval()

    args.path_helper = set_log_dir('logs',
                                   args.exp_name + "_{}".format(args.percent))
    logger = create_logger(args.path_helper['log_path'])
    logger.info(args)
    writer_dict = {
        'writer': SummaryWriter(args.path_helper['log_path']),
        'train_global_steps': start_epoch * len(train_loader),
        'valid_global_steps': start_epoch // args.val_freq,
    }

    # train loop
    for epoch in tqdm(range(int(start_epoch), int(args.max_epoch)),
                      desc='total progress'):
        lr_schedulers = (gen_scheduler, dis_scheduler) if args.lr_decay else None
        see_remain_rate(gen_net)
        see_remain_rate_orig(dis_net)
        if not args.use_kd_D:
            train_with_mask(args, gen_net, dis_net, gen_optimizer,
                            dis_optimizer, gen_avg_param, train_loader, epoch,
                            writer_dict, masks, lr_schedulers)
        else:
            train_with_mask_kd(args, gen_net, dis_net, orig_dis_net,
                               gen_optimizer, dis_optimizer, gen_avg_param,
                               train_loader, epoch, writer_dict, masks,
                               lr_schedulers)

        if epoch and epoch % args.val_freq == 0 or epoch == int(args.max_epoch) - 1:
            backup_param = copy_params(gen_net)
            load_params(gen_net, gen_avg_param)
            inception_score, fid_score = validate(args, fixed_z, fid_stat,
                                                  gen_net, writer_dict, epoch)
            logger.info(
                'Inception score: %.4f, FID score: %.4f || @ epoch %d.'
                % (inception_score, fid_score, epoch))
            load_params(gen_net, backup_param)
            if fid_score < best_fid:
                best_fid = fid_score
                is_best = True
            else:
                is_best = False
        else:
            is_best = False

        avg_gen_net.load_state_dict(gen_net.state_dict())
        load_params(avg_gen_net, gen_avg_param)
        save_checkpoint({
            'epoch': epoch + 1,
            'model': args.model,
            'gen_state_dict': gen_net.state_dict(),
            'dis_state_dict': dis_net.state_dict(),
            'avg_gen_state_dict': avg_gen_net.state_dict(),
            'gen_optimizer': gen_optimizer.state_dict(),
            'dis_optimizer': dis_optimizer.state_dict(),
            'best_fid': best_fid,
            'path_helper': args.path_helper
        }, is_best, args.path_helper['ckpt_path'])
def main():
    """Prune the generator to ``1 - args.percent`` density, rewind its
    surviving weights to initialization, and fine-tune (optionally with KD)."""
    args = cfg.parse_args()

    # Seed all RNGs for reproducibility.
    random.seed(args.random_seed)
    torch.manual_seed(args.random_seed)
    torch.cuda.manual_seed(args.random_seed)

    # set tf env
    _init_inception()
    inception_path = check_or_download_inception(None)
    create_inception_graph(inception_path)

    gen_net = eval('models.' + args.model + '.Generator')(args=args)
    dis_net = eval('models.' + args.model + '.Discriminator')(args=args)

    # weight init
    def weights_init(m):
        if isinstance(m, nn.Conv2d):
            if args.init_type == 'normal':
                nn.init.normal_(m.weight.data, 0.0, 0.02)
            elif args.init_type == 'orth':
                nn.init.orthogonal_(m.weight.data)
            elif args.init_type == 'xavier_uniform':
                # FIX: deprecated nn.init.xavier_uniform -> xavier_uniform_.
                nn.init.xavier_uniform_(m.weight.data, 1.)
            else:
                raise NotImplementedError('{} unknown inital type'.format(
                    args.init_type))
        elif isinstance(m, nn.BatchNorm2d):
            nn.init.normal_(m.weight.data, 1.0, 0.02)
            nn.init.constant_(m.bias.data, 0.0)

    gen_net.apply(weights_init)
    dis_net.apply(weights_init)
    gen_net = gen_net.cuda()
    dis_net = dis_net.cuda()
    avg_gen_net = deepcopy(gen_net)

    # Snapshot initialization weights for the later rewind step.
    initial_gen_net_weight = deepcopy(gen_net.state_dict())
    initial_dis_net_weight = deepcopy(dis_net.state_dict())
    assert id(initial_dis_net_weight) != id(dis_net.state_dict())
    assert id(initial_gen_net_weight) != id(gen_net.state_dict())

    # set optimizer
    gen_optimizer = torch.optim.Adam(
        filter(lambda p: p.requires_grad, gen_net.parameters()),
        args.g_lr, (args.beta1, args.beta2))
    dis_optimizer = torch.optim.Adam(
        filter(lambda p: p.requires_grad, dis_net.parameters()),
        args.d_lr, (args.beta1, args.beta2))
    gen_scheduler = LinearLrDecay(gen_optimizer, args.g_lr, 0.0, 0,
                                  args.max_iter * args.n_critic)
    dis_scheduler = LinearLrDecay(dis_optimizer, args.d_lr, 0.0, 0,
                                  args.max_iter * args.n_critic)

    # set up data_loader
    dataset = datasets.ImageDataset(args)
    train_loader = dataset.train

    # fid stat
    if args.dataset.lower() == 'cifar10':
        fid_stat = 'fid_stat/fid_stats_cifar10_train.npz'
    elif args.dataset.lower() == 'stl10':
        fid_stat = 'fid_stat/fid_stats_stl10_train.npz'
    else:
        raise NotImplementedError('no fid stat for %s' % args.dataset.lower())
    assert os.path.exists(fid_stat)

    # epoch number for dis_net
    args.max_epoch = args.max_epoch * args.n_critic
    if args.max_iter:
        args.max_epoch = np.ceil(args.max_iter * args.n_critic / len(train_loader))

    # initial
    np.random.seed(args.random_seed)
    fixed_z = torch.cuda.FloatTensor(
        np.random.normal(0, 1, (25, args.latent_dim)))
    start_epoch = 0
    best_fid = 1e4

    args.path_helper = set_log_dir('logs',
                                   args.exp_name + "_{}".format(args.percent))
    logger = create_logger(args.path_helper['log_path'])
    logger.info(args)
    writer_dict = {
        'writer': SummaryWriter(args.path_helper['log_path']),
        'train_global_steps': start_epoch * len(train_loader),
        'valid_global_steps': start_epoch // args.val_freq,
    }

    print('=> resuming from %s' % args.load_path)
    assert os.path.exists(args.load_path)
    checkpoint_file = os.path.join(args.load_path, 'Model', 'checkpoint.pth')
    assert os.path.exists(checkpoint_file)
    checkpoint = torch.load(checkpoint_file)
    gen_net.load_state_dict(checkpoint['gen_state_dict'])

    # Re-seed before each pruning call so gen_net and avg_gen_net receive
    # identical (possibly random) pruning decisions.
    torch.manual_seed(args.random_seed)
    pruning_generate(gen_net, (1 - args.percent), args.pruning_method)
    torch.manual_seed(args.random_seed)
    pruning_generate(avg_gen_net, (1 - args.percent), args.pruning_method)
    see_remain_rate(gen_net)

    if args.second_seed:
        dis_net.apply(weights_init)
    if args.finetune_D:
        dis_net.load_state_dict(checkpoint['dis_state_dict'])
    else:
        dis_net.load_state_dict(initial_dis_net_weight)

    # Rewind the surviving generator weights back to their initial values.
    gen_weight = gen_net.state_dict()
    gen_orig_weight = rewind_weight(initial_gen_net_weight, gen_weight.keys())
    assert id(gen_weight) != id(gen_orig_weight)
    gen_weight.update(gen_orig_weight)
    gen_net.load_state_dict(gen_weight)
    gen_avg_param = copy_params(gen_net)

    if args.use_kd_D:
        # Frozen trained discriminator used as the KD teacher.
        orig_dis_net = eval('models.' + args.model +
                            '.Discriminator')(args=args).cuda()
        # FIX: nn.Module has no .load(); this raised AttributeError at runtime.
        orig_dis_net.load_state_dict(checkpoint['dis_state_dict'])
        orig_dis_net.eval()

    # train loop
    for epoch in tqdm(range(int(start_epoch), int(args.max_epoch)),
                      desc='total progress'):
        lr_schedulers = (gen_scheduler, dis_scheduler) if args.lr_decay else None
        see_remain_rate(gen_net)
        if not args.use_kd_D:
            train(args, gen_net, dis_net, gen_optimizer, dis_optimizer,
                  gen_avg_param, train_loader, epoch, writer_dict,
                  lr_schedulers)
        else:
            train_kd(args, gen_net, dis_net, orig_dis_net, gen_optimizer,
                     dis_optimizer, gen_avg_param, train_loader, epoch,
                     writer_dict, lr_schedulers)

        if epoch and epoch % args.val_freq == 0 or epoch == int(args.max_epoch) - 1:
            backup_param = copy_params(gen_net)
            load_params(gen_net, gen_avg_param)
            inception_score, fid_score = validate(args, fixed_z, fid_stat,
                                                  gen_net, writer_dict, epoch)
            logger.info(
                'Inception score: %.4f, FID score: %.4f || @ epoch %d.'
                % (inception_score, fid_score, epoch))
            load_params(gen_net, backup_param)
            if fid_score < best_fid:
                best_fid = fid_score
                is_best = True
            else:
                is_best = False
        else:
            is_best = False

        avg_gen_net.load_state_dict(gen_net.state_dict())
        load_params(avg_gen_net, gen_avg_param)
        save_checkpoint({
            'epoch': epoch + 1,
            'model': args.model,
            'gen_state_dict': gen_net.state_dict(),
            'dis_state_dict': dis_net.state_dict(),
            'avg_gen_state_dict': avg_gen_net.state_dict(),
            'gen_optimizer': gen_optimizer.state_dict(),
            'dis_optimizer': dis_optimizer.state_dict(),
            'best_fid': best_fid,
            'path_helper': args.path_helper
        }, is_best, args.path_helper['ckpt_path'])
:return: normalized signal with a signal noise """ global signal_noise_std ''' First, normalize the signal ''' normed_signal = norm_signal(signal=signal, the_min=sensors_ranges[sen][0], the_max=sensors_ranges[sen][1]) ''' Adding signal noise of defined std ''' noised_signal = add_signal_noise(signal=normed_signal, std=signal_noise_std) return noised_signal if __name__ == '__main__': terrain_types, all_sensors, sensors_ranges, noise_types, noise_params = \ load_params('terrain_types', 'sensors', 'sensors_ranges', 'noise_types', 'noise_params') args = parse_arguments() terrains_to_use = [terrain_types[str(i)] for i in [1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 14, 15] if i not in args.rem_terrains] if not args.rem_terrains: terrains_flag = 'allt' else: terrains_flag = 'rt' for t_i in sorted(args.rem_terrains): terrains_flag += str(t_i) sensors_to_use = all_sensors if args.sensors == 'angle': sensors_to_use = all_sensors[:18] elif args.sensors == 'foot': sensors_to_use = all_sensors[18:]
help='Number of simulation runs') parser.add_argument('-t', '--terrains', type=int, default=range(1, 16), nargs='+', choices=range(1, 16), help='Terrains to be generated (integers)') parser.add_argument('-n', '--noise', type=str, default='no_noise', choices=noise_types, help='Terrain noise type') parser.add_argument('-nt', '--n_timesteps', type=int, default=100, help='Number of simulation steps') parser.add_argument('-g', '--gait', type=str, default='tripod', choices=['tripod'], help='Amos II gait') parser.add_argument('-sn', '--sim_noise', type=float, default=0.0, help='Amos II simulation noise') return parser.parse_args() if __name__ == '__main__': terrain_types, noise_types, noise_params = load_params('terrain_types', 'noise_types', 'noise_params') args = parse_arguments() noise_type, (noise_prefix, noise_param) = args.noise, noise_params[args.noise] gait = args.gait n_jobs = args.n_jobs n_timesteps = args.n_timesteps sim_noise = args.sim_noise terrains_i = sorted(args.terrains) terrains_to_use = [terrain_types[str(i)] for i in terrains_i] os.chdir('../../simulation/mbulinai22015-gorobots_edu-fork/practices/amosii') for i_terrain, terrain_type in zip(terrains_i, terrains_to_use): destination_dir = '../../../../data/'+noise_type+'/'+noise_prefix+terrain_type+'/' if not os.path.exists(destination_dir):
def main():
    """Fully train a searched (NAS-derived) GAN and track IS/FID per epoch.

    Builds G (and optionally D) from a searched genotype, trains with an
    EMA copy of the generator weights, periodically validates IS/FID, saves
    sample grids and a checkpoint every epoch, and logs the best scores.
    """
    args = cfg_train.parse_args()
    torch.cuda.manual_seed(args.random_seed)

    # set tf env (Inception graph is required for IS/FID computation)
    _init_inception()
    inception_path = check_or_download_inception(None)
    create_inception_graph(inception_path)

    # import network: the generator is always built from a searched genotype;
    # the discriminator only when args.dis is not a plain 'Discriminator'.
    genotype_gen = eval('genotypes.%s' % args.arch_gen)
    gen_net = eval('models.' + args.gen_model + '.' + args.gen)(
        args, genotype_gen).cuda()
    if 'Discriminator' not in args.dis:
        genotype_dis = eval('genotypes.%s' % args.arch_dis)
        dis_net = eval('models.' + args.dis_model + '.' + args.dis)(
            args, genotype_dis).cuda()
    else:
        dis_net = eval('models.' + args.dis_model + '.' + args.dis)(args=args).cuda()

    # weight init applied module-by-module via nn.Module.apply
    def weights_init(m):
        classname = m.__class__.__name__
        if classname.find('Conv2d') != -1:
            if args.init_type == 'normal':
                nn.init.normal_(m.weight.data, 0.0, 0.02)
            elif args.init_type == 'orth':
                nn.init.orthogonal_(m.weight.data)
            elif args.init_type == 'xavier_uniform':
                # FIX: nn.init.xavier_uniform is deprecated; xavier_uniform_
                # is the in-place replacement with identical semantics.
                nn.init.xavier_uniform_(m.weight.data, 1.)
            else:
                raise NotImplementedError('{} unknown inital type'.format(
                    args.init_type))
        elif classname.find('BatchNorm2d') != -1:
            nn.init.normal_(m.weight.data, 1.0, 0.02)
            nn.init.constant_(m.bias.data, 0.0)

    gen_net.apply(weights_init)
    dis_net.apply(weights_init)

    # set up data_loader
    dataset = datasets.ImageDataset(args)
    train_loader = dataset.train
    val_loader = dataset.valid

    # set optimizer (only trainable parameters)
    gen_optimizer = torch.optim.Adam(
        filter(lambda p: p.requires_grad, gen_net.parameters()), args.g_lr,
        (args.beta1, args.beta2))
    dis_optimizer = torch.optim.Adam(
        filter(lambda p: p.requires_grad, dis_net.parameters()), args.d_lr,
        (args.beta1, args.beta2))
    # linear decay to 1% of the initial LR, starting after 260 epochs of steps
    gen_scheduler = LinearLrDecay(gen_optimizer, args.g_lr, args.g_lr * 0.01,
                                  260 * len(train_loader),
                                  args.max_iter * args.n_critic)
    dis_scheduler = LinearLrDecay(dis_optimizer, args.d_lr, args.d_lr * 0.01,
                                  260 * len(train_loader),
                                  args.max_iter * args.n_critic)

    # fid stat
    if args.dataset.lower() == 'cifar10':
        fid_stat = 'fid_stat/fid_stats_cifar10_train.npz'
    elif args.dataset.lower() == 'stl10':
        fid_stat = 'fid_stat/stl10_train_unlabeled_fid_stats_48.npz'
    elif args.dataset.lower() == 'mnist':
        # NOTE(review): mnist reuses the stl10 statistics file, so FID numbers
        # for mnist are not meaningful — confirm or supply real mnist stats.
        fid_stat = 'fid_stat/stl10_train_unlabeled_fid_stats_48.npz'
    else:
        raise NotImplementedError(f'no fid stat for {args.dataset.lower()}')
    assert os.path.exists(fid_stat)

    # epoch number for dis_net: generator epochs times n_critic dis steps
    args.max_epoch = args.max_epoch * args.n_critic
    if args.max_iter:
        args.max_epoch = np.ceil(args.max_iter * args.n_critic / len(train_loader))

    # initial: fixed noise for validation and for the sample image grid
    fixed_z = torch.cuda.FloatTensor(
        np.random.normal(0, 1, (25, args.latent_dim)))
    fixed_z_sample = torch.cuda.FloatTensor(
        np.random.normal(0, 1, (args.eval_batch_size, args.latent_dim)))
    gen_avg_param = copy_params(gen_net)  # EMA copy of generator weights
    start_epoch = 0
    best_fid = 1e4
    best_fid_epoch = 0
    is_with_fid = 0
    std_with_fid = 0.
    best_is = 0
    # FIX: best_std was only assigned when the best IS improved, but the final
    # summary log reads it unconditionally → possible NameError. Initialize.
    best_std = 0.
    best_is_epoch = 0
    fid_with_is = 0
    best_dts = 0
    # FIX: is_best could be read at checkpoint time before any validation
    # epoch had run; default to False.
    is_best = False

    # set writer (resume from checkpoint when load_path is given)
    if args.load_path:
        print(f'=> resuming from {args.load_path}')
        assert os.path.exists(args.load_path)
        checkpoint_file = os.path.join(args.load_path, 'Model', 'checkpoint.pth')
        assert os.path.exists(checkpoint_file)
        checkpoint = torch.load(checkpoint_file)
        start_epoch = checkpoint['epoch']
        best_fid = checkpoint['best_fid']
        gen_net.load_state_dict(checkpoint['gen_state_dict'])
        dis_net.load_state_dict(checkpoint['dis_state_dict'])
        gen_optimizer.load_state_dict(checkpoint['gen_optimizer'])
        dis_optimizer.load_state_dict(checkpoint['dis_optimizer'])
        # rebuild the EMA parameter list from the checkpointed averaged net
        avg_gen_net = deepcopy(gen_net)
        avg_gen_net.load_state_dict(checkpoint['avg_gen_state_dict'])
        gen_avg_param = copy_params(avg_gen_net)
        del avg_gen_net
        args.path_helper = checkpoint['path_helper']
        logger = create_logger(args.path_helper['log_path'])
        logger.info(
            f'=> loaded checkpoint {checkpoint_file} (epoch {start_epoch})')
    else:
        # create new log dir
        assert args.exp_name
        args.path_helper = set_log_dir('logs', args.exp_name)
        logger = create_logger(args.path_helper['log_path'])

    logger.info(args)
    writer_dict = {
        'writer': SummaryWriter(args.path_helper['log_path']),
        'train_global_steps': start_epoch * len(train_loader),
        'valid_global_steps': start_epoch // args.val_freq,
    }

    # calculate the FLOPs and param count of G
    input = torch.randn(args.gen_batch_size, args.latent_dim).cuda()
    flops, params = profile(gen_net, inputs=(input, ))
    flops, params = clever_format([flops, params], "%.3f")
    logger.info('FLOPs is {}, param count is {}'.format(flops, params))

    # train loop
    dg_list = []
    worst_lr = 1e-5
    for epoch in tqdm(range(int(start_epoch), int(args.max_epoch)),
                      desc='total progress'):
        lr_schedulers = (gen_scheduler, dis_scheduler) if args.lr_decay else None
        train(args, gen_net, dis_net, gen_optimizer, dis_optimizer,
              gen_avg_param, train_loader, epoch, writer_dict, args.consistent,
              lr_schedulers)

        if epoch and epoch % args.val_freq == 0 or epoch == int(
                args.max_epoch) - 1:
            # evaluate with the EMA weights, then restore the live ones
            backup_param = copy_params(gen_net)
            load_params(gen_net, gen_avg_param)
            inception_score, std, fid_score = validate(args, fixed_z, fid_stat,
                                                       gen_net, writer_dict,
                                                       args.path_helper,
                                                       search=False)
            logger.info(
                f'Inception score: {inception_score}, FID score: {fid_score}+-{std} || @ epoch {epoch}.'
            )
            load_params(gen_net, backup_param)
            if fid_score < best_fid:
                best_fid = fid_score
                best_fid_epoch = epoch
                is_with_fid = inception_score
                std_with_fid = std
                is_best = True
            else:
                is_best = False
            if inception_score > best_is:
                best_is = inception_score
                best_std = std
                fid_with_is = fid_score
                best_is_epoch = epoch
            # FIX: the original had `else: is_best = False` here, which
            # clobbered the FID-based decision above whenever IS did not
            # improve — the "best" checkpoint was then only tagged when FID
            # and IS improved in the same epoch. is_best tracks best FID only.

        # save generated images
        if epoch % args.image_every == 0:
            gen_noise = torch.cuda.FloatTensor(
                np.random.normal(0, 1,
                                 (args.eval_batch_size, args.latent_dim)))
            # [-1, 1] float images -> HWC uint8 for plotting
            gen_images = gen_net(fixed_z_sample).mul_(127.5).add_(
                127.5).clamp_(0.0, 255.0).permute(0, 2, 3, 1).to(
                    'cpu', torch.uint8).numpy()
            fig = plt.figure()
            grid = ImageGrid(fig, 111, nrows_ncols=(10, 10), axes_pad=0)
            for x in range(args.eval_batch_size):
                grid[x].imshow(gen_images[x])
                grid[x].set_xticks([])
                grid[x].set_yticks([])
            plt.savefig(
                os.path.join(args.path_helper['sample_path'],
                             "epoch_{}.png".format(epoch)))
            plt.close()

        # checkpoint both the live and the EMA generator every epoch
        avg_gen_net = deepcopy(gen_net)
        load_params(avg_gen_net, gen_avg_param)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'gen_model': args.gen_model,
                'dis_model': args.dis_model,
                'gen_state_dict': gen_net.state_dict(),
                'dis_state_dict': dis_net.state_dict(),
                'avg_gen_state_dict': avg_gen_net.state_dict(),
                'gen_optimizer': gen_optimizer.state_dict(),
                'dis_optimizer': dis_optimizer.state_dict(),
                'best_fid': best_fid,
                'path_helper': args.path_helper
            }, is_best, args.path_helper['ckpt_path'])
        del avg_gen_net

    logger.info(
        'best_is is {}+-{}@{} epoch, fid is {}, best_fid is {}@{}, is is {}+-{}'
        .format(best_is, best_std, best_is_epoch, fid_with_is, best_fid,
                best_fid_epoch, is_with_fid, std_with_fid))
def main():
    """Fully train a GAN from pre-saved initial weights and track the best FID.

    Seeds all RNGs, loads fixed initial weights for G and D (so independent
    runs start identically), trains with an EMA copy of the generator,
    periodically validates IS/FID, and checkpoints every epoch.
    """
    args = cfg.parse_args()

    # Seed python, torch CPU/GPU, numpy and hashing for reproducibility.
    # cudnn.benchmark=True trades bit-exact determinism for speed.
    random.seed(args.random_seed)
    torch.manual_seed(args.random_seed)
    torch.cuda.manual_seed(args.random_seed)
    np.random.seed(args.random_seed)
    torch.backends.cudnn.deterministic = False
    torch.backends.cudnn.benchmark = True
    os.environ['PYTHONHASHSEED'] = str(args.random_seed)

    # set tf env (Inception graph is used by the IS/FID validation)
    _init_inception()
    inception_path = check_or_download_inception(None)
    create_inception_graph(inception_path)

    # import network: classes are resolved by name from args.model
    gen_net = eval('models.'+args.model+'.Generator')(args=args)
    dis_net = eval('models.'+args.model+'.Discriminator')(args=args)
    # start both nets from pre-saved initial weights so runs are comparable
    initial_gen_net_weight = torch.load(os.path.join(args.init_path, 'initial_gen_net.pth'), map_location="cpu")
    initial_dis_net_weight = torch.load(os.path.join(args.init_path, 'initial_dis_net.pth'), map_location="cpu")
    gen_net = gen_net.cuda()
    dis_net = dis_net.cuda()
    gen_net.load_state_dict(initial_gen_net_weight)
    dis_net.load_state_dict(initial_dis_net_weight)

    # set optimizer (only trainable parameters)
    gen_optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, gen_net.parameters()), args.g_lr, (args.beta1, args.beta2))
    dis_optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, dis_net.parameters()), args.d_lr, (args.beta1, args.beta2))
    # linear decay from the initial LR down to 0 over max_iter * n_critic steps
    gen_scheduler = LinearLrDecay(gen_optimizer, args.g_lr, 0.0, 0, args.max_iter * args.n_critic)
    dis_scheduler = LinearLrDecay(dis_optimizer, args.d_lr, 0.0, 0, args.max_iter * args.n_critic)

    # set up data_loader
    dataset = datasets.ImageDataset(args)
    train_loader = dataset.train

    # fid stat: precomputed real-data statistics per dataset
    if args.dataset.lower() == 'cifar10':
        fid_stat = 'fid_stat/fid_stats_cifar10_train.npz'
    elif args.dataset.lower() == 'stl10':
        fid_stat = 'fid_stat/fid_stats_stl10_train.npz'
    else:
        raise NotImplementedError('no fid stat for %s' % args.dataset.lower())
    assert os.path.exists(fid_stat)

    # epoch number for dis_net: generator epochs times n_critic dis steps
    args.max_epoch = args.max_epoch * args.n_critic
    if args.max_iter:
        args.max_epoch = np.ceil(args.max_iter * args.n_critic / len(train_loader))

    # fixed noise batch reused for every validation call
    fixed_z = torch.cuda.FloatTensor(np.random.normal(0, 1, (25, args.latent_dim)))
    gen_avg_param = copy_params(gen_net)  # EMA copy of the generator weights
    start_epoch = 0
    best_fid = 1e4

    # set writer (resume from checkpoint when load_path is given)
    if args.load_path:
        print('=> resuming from %s' % args.load_path)
        assert os.path.exists(args.load_path)
        checkpoint_file = os.path.join(args.load_path, 'Model', 'checkpoint.pth')
        assert os.path.exists(checkpoint_file)
        checkpoint = torch.load(checkpoint_file)
        start_epoch = checkpoint['epoch']
        best_fid = checkpoint['best_fid']
        gen_net.load_state_dict(checkpoint['gen_state_dict'])
        dis_net.load_state_dict(checkpoint['dis_state_dict'])
        gen_optimizer.load_state_dict(checkpoint['gen_optimizer'])
        dis_optimizer.load_state_dict(checkpoint['dis_optimizer'])
        # rebuild the EMA parameter list from the checkpointed averaged net
        avg_gen_net = deepcopy(gen_net)
        avg_gen_net.load_state_dict(checkpoint['avg_gen_state_dict'])
        gen_avg_param = copy_params(avg_gen_net)
        del avg_gen_net
        args.path_helper = checkpoint['path_helper']
        logger = create_logger(args.path_helper['log_path'])
        logger.info('=> loaded checkpoint %s (epoch %d)' % (checkpoint_file, start_epoch))
    else:
        # create new log dir
        assert args.exp_name
        args.path_helper = set_log_dir('logs', args.exp_name)
        logger = create_logger(args.path_helper['log_path'])

    logger.info(args)
    writer_dict = {
        'writer': SummaryWriter(args.path_helper['log_path']),
        'train_global_steps': start_epoch * len(train_loader),
        'valid_global_steps': start_epoch // args.val_freq,
    }

    # train loop
    switch = False  # NOTE(review): never read in the visible loop — confirm dead
    for epoch in range(int(start_epoch), int(args.max_epoch)):
        lr_schedulers = (gen_scheduler, dis_scheduler) if args.lr_decay else None
        train(args, gen_net, dis_net, gen_optimizer, dis_optimizer, gen_avg_param, train_loader,
              epoch, writer_dict, lr_schedulers)

        if epoch and epoch % args.val_freq == 0 or epoch == int(args.max_epoch)-1:
            # validate with the EMA weights, then restore the live weights
            backup_param = copy_params(gen_net)
            load_params(gen_net, gen_avg_param)
            inception_score, fid_score = validate(args, fixed_z, fid_stat, gen_net, writer_dict, epoch)
            logger.info('Inception score: %.4f, FID score: %.4f || @ epoch %d.'
                        % (inception_score, fid_score, epoch))
            load_params(gen_net, backup_param)
            if fid_score < best_fid:
                best_fid = fid_score
                is_best = True
            else:
                is_best = False
        else:
            is_best = False

        # checkpoint both the live and the EMA generator every epoch
        avg_gen_net = deepcopy(gen_net)
        load_params(avg_gen_net, gen_avg_param)
        save_checkpoint({
            'epoch': epoch + 1,
            'model': args.model,
            'gen_state_dict': gen_net.state_dict(),
            'dis_state_dict': dis_net.state_dict(),
            'avg_gen_state_dict': avg_gen_net.state_dict(),
            'gen_optimizer': gen_optimizer.state_dict(),
            'dis_optimizer': dis_optimizer.state_dict(),
            'best_fid': best_fid,
            'path_helper': args.path_helper,
            'seed': args.random_seed
        }, is_best, args.path_helper['ckpt_path'])
        del avg_gen_net
def main():
    """Fully train a GAN whose architecture was found by search (args.arch).

    Builds G and D from models_search by name, fixes the searched architecture
    at stage 2, trains with an EMA generator copy, periodically validates
    IS/FID, and checkpoints every epoch (tagging the best-FID model).
    """
    args = cfg.parse_args()
    torch.cuda.manual_seed(args.random_seed)

    # set tf env (Inception graph is required for IS/FID computation)
    _init_inception()
    inception_path = check_or_download_inception(None)
    create_inception_graph(inception_path)

    # import network: classes resolved by name; then pin the searched arch
    gen_net = eval("models_search." + args.gen_model +
                   ".Generator")(args=args).cuda()
    dis_net = eval("models_search." + args.dis_model +
                   ".Discriminator")(args=args).cuda()
    gen_net.set_arch(args.arch, cur_stage=2)
    dis_net.cur_stage = 2

    # weight init applied module-by-module via nn.Module.apply
    def weights_init(m):
        classname = m.__class__.__name__
        if classname.find("Conv2d") != -1:
            if args.init_type == "normal":
                nn.init.normal_(m.weight.data, 0.0, 0.02)
            elif args.init_type == "orth":
                nn.init.orthogonal_(m.weight.data)
            elif args.init_type == "xavier_uniform":
                # FIX: nn.init.xavier_uniform is deprecated; xavier_uniform_
                # is the in-place replacement with identical semantics.
                nn.init.xavier_uniform_(m.weight.data, 1.0)
            else:
                raise NotImplementedError("{} unknown inital type".format(
                    args.init_type))
        elif classname.find("BatchNorm2d") != -1:
            nn.init.normal_(m.weight.data, 1.0, 0.02)
            nn.init.constant_(m.bias.data, 0.0)

    gen_net.apply(weights_init)
    dis_net.apply(weights_init)

    # set optimizer (only trainable parameters)
    gen_optimizer = torch.optim.Adam(
        filter(lambda p: p.requires_grad, gen_net.parameters()),
        args.g_lr,
        (args.beta1, args.beta2),
    )
    dis_optimizer = torch.optim.Adam(
        filter(lambda p: p.requires_grad, dis_net.parameters()),
        args.d_lr,
        (args.beta1, args.beta2),
    )
    # linear decay from the initial LR to 0 over max_iter * n_critic steps
    gen_scheduler = LinearLrDecay(gen_optimizer, args.g_lr, 0.0, 0,
                                  args.max_iter * args.n_critic)
    dis_scheduler = LinearLrDecay(dis_optimizer, args.d_lr, 0.0, 0,
                                  args.max_iter * args.n_critic)

    # set up data_loader
    dataset = datasets.ImageDataset(args)
    train_loader = dataset.train

    # fid stat: precomputed real-data statistics per dataset
    if args.dataset.lower() == "cifar10":
        fid_stat = "fid_stat/fid_stats_cifar10_train.npz"
    elif args.dataset.lower() == "stl10":
        fid_stat = "fid_stat/stl10_train_unlabeled_fid_stats_48.npz"
    else:
        raise NotImplementedError(f"no fid stat for {args.dataset.lower()}")
    assert os.path.exists(fid_stat)

    # epoch number for dis_net: generator epochs times n_critic dis steps
    args.max_epoch = args.max_epoch * args.n_critic
    if args.max_iter:
        args.max_epoch = np.ceil(args.max_iter * args.n_critic /
                                 len(train_loader))

    # initial: fixed noise batch reused for every validation call
    fixed_z = torch.cuda.FloatTensor(
        np.random.normal(0, 1, (25, args.latent_dim)))
    gen_avg_param = copy_params(gen_net)  # EMA copy of generator weights
    start_epoch = 0
    best_fid = 1e4

    # set writer (resume from checkpoint when load_path is given)
    if args.load_path:
        print(f"=> resuming from {args.load_path}")
        assert os.path.exists(args.load_path)
        checkpoint_file = os.path.join(args.load_path, "Model",
                                       "checkpoint.pth")
        assert os.path.exists(checkpoint_file)
        checkpoint = torch.load(checkpoint_file)
        start_epoch = checkpoint["epoch"]
        best_fid = checkpoint["best_fid"]
        gen_net.load_state_dict(checkpoint["gen_state_dict"])
        dis_net.load_state_dict(checkpoint["dis_state_dict"])
        gen_optimizer.load_state_dict(checkpoint["gen_optimizer"])
        dis_optimizer.load_state_dict(checkpoint["dis_optimizer"])
        # rebuild the EMA parameter list from the checkpointed averaged net
        avg_gen_net = deepcopy(gen_net)
        avg_gen_net.load_state_dict(checkpoint["avg_gen_state_dict"])
        gen_avg_param = copy_params(avg_gen_net)
        del avg_gen_net
        args.path_helper = checkpoint["path_helper"]
        logger = create_logger(args.path_helper["log_path"])
        logger.info(
            f"=> loaded checkpoint {checkpoint_file} (epoch {start_epoch})")
    else:
        # create new log dir
        assert args.exp_name
        args.path_helper = set_log_dir("logs", args.exp_name)
        logger = create_logger(args.path_helper["log_path"])

    logger.info(args)
    writer_dict = {
        "writer": SummaryWriter(args.path_helper["log_path"]),
        "train_global_steps": start_epoch * len(train_loader),
        "valid_global_steps": start_epoch // args.val_freq,
    }

    # train loop
    for epoch in tqdm(range(int(start_epoch), int(args.max_epoch)),
                      desc="total progress"):
        lr_schedulers = (gen_scheduler, dis_scheduler) if args.lr_decay else None
        train(
            args,
            gen_net,
            dis_net,
            gen_optimizer,
            dis_optimizer,
            gen_avg_param,
            train_loader,
            epoch,
            writer_dict,
            lr_schedulers,
        )

        if epoch and epoch % args.val_freq == 0 or epoch == int(
                args.max_epoch) - 1:
            # evaluate with the EMA weights, then restore the live ones
            backup_param = copy_params(gen_net)
            load_params(gen_net, gen_avg_param)
            inception_score, fid_score = validate(args, fixed_z, fid_stat,
                                                  gen_net, writer_dict)
            logger.info(
                f"Inception score: {inception_score}, FID score: {fid_score} || @ epoch {epoch}."
            )
            load_params(gen_net, backup_param)
            if fid_score < best_fid:
                best_fid = fid_score
                is_best = True
            else:
                is_best = False
        else:
            is_best = False

        # checkpoint both the live and the EMA generator every epoch
        avg_gen_net = deepcopy(gen_net)
        load_params(avg_gen_net, gen_avg_param)
        save_checkpoint(
            {
                "epoch": epoch + 1,
                "gen_model": args.gen_model,
                "dis_model": args.dis_model,
                "gen_state_dict": gen_net.state_dict(),
                "dis_state_dict": dis_net.state_dict(),
                "avg_gen_state_dict": avg_gen_net.state_dict(),
                "gen_optimizer": gen_optimizer.state_dict(),
                "dis_optimizer": dis_optimizer.state_dict(),
                "best_fid": best_fid,
                "path_helper": args.path_helper,
            },
            is_best,
            args.path_helper["ckpt_path"],
        )
        del avg_gen_net
""" import argparse import matplotlib.pyplot as plt from functions import load_params, norm def parse_arguments(): parser = argparse.ArgumentParser(description='Plots chosen terrains parameters and makes a simple analysis.') parser.add_argument('-t', '--terrains', type=int, default=range(1, 16), nargs='+', choices=range(1, 16), help='Terrains to be plotted (integers)') return parser.parse_args() if __name__ == '__main__': terrains, qualities, ranges, env = load_params('terrain_types', 'terrain_qualities', 'qualities_ranges', 'env') args = parse_arguments() terrains_to_use = [terrains[str(i)] for i in sorted(args.terrains)] ''' Terrains Parameters ''' plt.matshow([[norm(env[terrain][quality], ranges[quality][1]) for terrain in terrains_to_use] for quality in qualities], vmin=0.0, vmax=1.0) plt.xticks(range(len(terrains_to_use)), terrains_to_use, rotation=45) plt.yticks(range(len(qualities)), qualities) plt.colorbar() plt.suptitle('Chosen Terrains Parameters') for t_i, terrain in enumerate(terrains_to_use): for q_i, quality in enumerate(qualities): plt.text(t_i, q_i, norm(env[terrain][quality], ranges[quality][1]), va='center', ha='center') plt.savefig('../../results/png/terrains_parameters.png', bbox_inches='tight')
#!/usr/bin/env python3 # -*- coding: utf-8 -*- ''' Run our simulation of the system. ''' from functions import load_params from node import Node from service_provider import ServiceProvider from adversaries import load_adversary from observer import Observer if __name__ == '__main__': params = load_params() T = [[] for _ in params['percent_adv']] for p, p_adv in enumerate(params['percent_adv']): print(f"Running sim with {p_adv:%} adversaries...") nodes = [] for i in range(params['number_nodes']): if i < p_adv * params['number_nodes']: nodes.append(load_adversary(params['adv_type'])(i, params)) else: nodes.append(Node(i, params)) observer = Observer(params['number_nodes'] + 1, params) nodes.append(observer) sp = ServiceProvider() for e in range(params['epochs']): for n in nodes[:-1]: n.transact(nodes, sp)
for i, structure in enumerate(kitt_mnist['structure_mean']): kitt_mnist['structure_mean'][i] = [int(nn) for nn in structure.tolist()] print '\n ## Loaded mnist pruned nets ('+str(n_obs)+' obs) ## --------------------' print '@ Pruning steps:\t', max_len print '@ Mean n synapses:\t', kitt_mnist['n_syn_mean'][-1] print '@ Mean structure:\t', kitt_mnist['structure_mean'][-1] print '@ Mean accuracy:\t', kitt_mnist['acc_mean'][-1] print '----------------------------------------------------' return kitt_mnist if __name__ == '__main__': sensors_ranges, terrain_types, all_sensors = load_params('sensors_ranges', 'terrain_types', 'sensors') # loading examples terrains = [terrain_types[str(i)] for i in [6, 8, 15]] data = read_data(noises=['no_noise'], terrains=terrains, sensors=all_sensors, n_samples=10) samples = dict() for terrain in terrains: samples[terrain] = [[] for i in range(len(data['no_noise'][terrain][all_sensors[0]]))] for sensor in all_sensors: for i_sample, sample_terrain in enumerate(data['no_noise'][terrain][sensor]): samples[terrain][i_sample] += prepare_signal(signal=sample_terrain[10:40 + 10], sen=sensor) nets = load_amter(na='nn') #nets = load_mnist() net = nets['net'][-1] structure = net[0]