def main():
    """Run the full pipeline: FGA attack, then defense, then evaluation."""
    seed = config.random_seed
    n_images = config.subset_num
    # Sample the working set of benign images for this run.
    image_list = get_image_list(benign_dir, seed, n_images)
    run_fga_attack(config)
    run_defense(image_list, config)
    run_eval(image_list, config)
def main():
    """Run the full pipeline: FGA attack, then defense, then evaluation.

    NOTE(review): this redefines ``main`` (an earlier definition exists in
    this file); at import time the later definition wins. The commented-out
    legacy calls (run_attack / run_bpda_attack variants) were dead code and
    have been removed.
    """
    random_seed = config.random_seed
    subset_num = config.subset_num
    # TODO: image_list still needs to be passed into attack.py.
    image_list = get_image_list(benign_dir, random_seed, subset_num)
    run_fga_attack(config)
    run_defense(image_list, config)
    run_eval(image_list, config)
def eval(test_filename, train_filename, param_filename, method, **kwparams):
    """Evaluate `method` on the test set using a predictor built from saved params.

    NOTE(review): shadows the builtin ``eval``; name kept for caller
    compatibility.
    """
    features, targets = design_matrix(test_filename, train_filename)
    predict = gen_predictor(param_filename)
    return run_eval(predict(features), targets, method, **kwparams)
def run_train(train_params=None, test_params=None):
    """Train a SketchModel, validating periodically and keeping the best checkpoint.

    Parameters
    ----------
    train_params, test_params : optional
        Argument lists forwarded to the TrainOptions / TestOptions parsers.

    Returns
    -------
    tuple
        ``(record_list, timestamp)`` — a dict of final test metrics (for both
        the 'latest' and 'bestloss' checkpoints) and the run timestamp.
    """
    opt = TrainOptions().parse(train_params)
    testopt = TestOptions().parse(test_params)
    testopt.timestamp = opt.timestamp
    testopt.batch_size = 30

    # model init
    model = SketchModel(opt)
    model.print_detail()
    writer = Writer(opt)

    # data load
    trainDataloader = load_data(opt, datasetType='train',
                                permutation=opt.permutation,
                                shuffle=opt.shuffle)
    validDataloader = load_data(opt, datasetType='valid')
    testDataloader = load_data(opt, datasetType='test')

    # train epochs
    ii = 0  # global step counter across all epochs
    # FIX: was initialised to the arbitrary constant 100, so a model whose
    # validation loss never dropped below 100 would never be saved as
    # 'bestloss' — yet the final evaluation below loads that checkpoint.
    min_test_avgloss = float('inf')
    min_test_avgloss_epoch = 0
    for epoch in range(opt.epoch):
        for i, data in enumerate(trainDataloader):
            model.step(data)
            if ii % opt.plot_freq == 0:
                writer.plot_train_loss(model.loss, ii)
            if ii % opt.print_freq == 0:
                writer.print_train_loss(epoch, i, model.loss)
            ii += 1
        model.update_learning_rate()
        if opt.plot_weights:
            writer.plot_model_wts(model, epoch)

        # periodic validation; track the checkpoint with the lowest avg loss
        if epoch % opt.run_test_freq == 0:
            model.save_network('latest')
            loss_avg, P_metric, C_metric = run_eval(
                opt=testopt, loader=validDataloader,
                dataset='valid', write_result=False)
            writer.print_test_loss(epoch, 0, loss_avg)
            writer.plot_test_loss(loss_avg, epoch)
            writer.print_eval_metric(epoch, P_metric, C_metric)
            writer.plot_eval_metric(epoch, P_metric, C_metric)
            if loss_avg < min_test_avgloss:
                min_test_avgloss = loss_avg
                min_test_avgloss_epoch = epoch
                print(
                    'saving the model at the end of epoch {} with best avgLoss {}'
                    .format(epoch, min_test_avgloss))
                model.save_network('bestloss')

    # final evaluation on the test split: 'latest' vs. 'bestloss' checkpoints
    testopt.which_epoch = 'latest'
    testopt.metric_way = 'wlen'
    loss_avg, P_metric, C_metric = run_eval(
        opt=testopt, loader=testDataloader, dataset='test', write_result=False)
    testopt.which_epoch = 'bestloss'
    testopt.metric_way = 'wlen'
    loss_avg_2, P_metric_2, C_metric_2 = run_eval(
        opt=testopt, loader=testDataloader, dataset='test', write_result=False)

    record_list = {
        'p_metric': round(P_metric * 100, 2),
        'c_metric': round(C_metric * 100, 2),
        'loss_avg': round(loss_avg, 4),
        'best_epoch': min_test_avgloss_epoch,
        'p_metric_2': round(P_metric_2 * 100, 2),
        'c_metric_2': round(C_metric_2 * 100, 2),
        'loss_avg_2': round(loss_avg_2, 4),
    }
    writer.train_record(record_list=record_list)
    writer.close()
    return record_list, opt.timestamp
# NOTE(review): incomplete fragment — the opening `torch.save(` for the encoder
# checkpoint is cut off before this chunk begins, so this block cannot be
# safely reformatted or rewritten from here. It appears to save the
# encoder/binarizer/decoder checkpoints, switch the nets to eval mode, run
# run_eval over every eval loader (printing loss / MS-SSIM / PSNR per loader),
# then return the nets to train mode — confirm against the full file.
encoder, '{}wunet_2:256_3:256_64x16_encoder_{}'.format( './model/', train_iter)) torch.save( binarizer, '{}wunet_2:256_3:256_64x16_binarizer_{}'.format( './model/', train_iter)) torch.save( decoder, '{}wunet_2:256_3:256_64x16_decoder_{}'.format( './model/', train_iter)) set_eval(nets) eval_loaders = get_eval_loaders() for eval_name, eval_loader in eval_loaders.items(): eval_begin = time.time() eval_loss, mssim, psnr = run_eval(nets, eval_loader, args, output_suffix='iter%d' % train_iter) print('Evaluation @iter %d done in %d secs' % (train_iter, time.time() - eval_begin)) print('%s Loss : ' % eval_name + '\t'.join(['%.5f' % el for el in eval_loss.tolist()])) print('%s MS-SSIM: ' % eval_name + '\t'.join(['%.5f' % el for el in mssim.tolist()])) print('%s PSNR : ' % eval_name + '\t'.join(['%.5f' % el for el in psnr.tolist()])) set_train(nets) just_resumed = False
def eval(test_filename, train_filename, param_filename, method, **kwparams):
    """Build a predictor from saved parameters and score it with `run_eval`.

    NOTE(review): shadows the builtin ``eval``; name kept so existing
    callers keep working.
    """
    X, y = design_matrix(test_filename, train_filename)
    predictor = gen_predictor(param_filename)
    probabilities = predictor(X)
    return run_eval(probabilities, y, method, **kwparams)
# NOTE(review): incomplete fragment — the enclosing training loop (and the
# definitions of `generator_args`, `sess`, `model`, the placeholders, `i`,
# `curriculum_point`, `exp3s`, `target_point`) begin before this chunk, so the
# code is left byte-identical. It appears to run one TF training step, and,
# under the 'prediction_gain' curriculum, to feed the train/eval loss gap into
# an Exp3.S bandit via `exp3s.update_w` — confirm against the full file.
generator_args['mta_encoding'] = args.mta_encoding seq_len, inputs, labels = data_generator.generate_batches( **generator_args)[0] train_loss, _, outputs = sess.run( [model.loss, model.train_op, model.outputs], feed_dict={ inputs_placeholder: inputs, outputs_placeholder: labels, max_seq_len_placeholder: seq_len }) if args.curriculum == 'prediction_gain': loss, _ = run_eval(sess, model, inputs_placeholder, outputs_placeholder, max_seq_len_placeholder, data_generator, args, target_point, labels, outputs, inputs, [(seq_len, inputs, labels)]) v = train_loss - loss exp3s.update_w(v, seq_len) avg_errors_per_seq = data_generator.error_per_seq( labels, outputs, args.batch_size) if args.verbose: logger.info('Train loss ({0}): {1}'.format(i, train_loss)) logger.info('curriculum_point: {0}'.format(curriculum_point)) logger.info( 'Average errors/sequence: {0}'.format(avg_errors_per_seq)) logger.info('TRAIN_PARSABLE: {0},{1},{2},{3}'.format( i, curriculum_point, train_loss, avg_errors_per_seq))
# NOTE(review): incomplete fragment — it opens with a bare `break` whose
# enclosing loop (and the surrounding function) is cut off before this chunk,
# so the code is left byte-identical. It appears to run a train step and then,
# every `args.eval_iters` iterations (or right after a resume), evaluate all
# loaders and print loss / MS-SSIM / PSNR alongside baseline scores — confirm
# against the full file.
break train_loop(batch, crops, ctx_frames, check_code_size=True) check_code_size = False if just_resumed or train_iter % args.eval_iters == 0: print('Start evaluation...') set_eval(nets) eval_loaders = get_eval_loaders() for eval_name, eval_loader in eval_loaders.items(): eval_begin = time.time() eval_loss, mssim, psnr, baseline_scores = run_eval( nets, eval_loader, args, output_suffix='iter%d' % train_iter) print('Evaluation @iter %d done in %d secs' % (train_iter, time.time() - eval_begin)) print('%s Loss : ' % eval_name + '\t'.join(['%.5f' % el for el in eval_loss.tolist()])) print('%s Baseline MS-SSIM: ' % eval_name + str(baseline_scores[0])) print('%s MS-SSIM: ' % eval_name + '\t'.join(['%.5f' % el for el in mssim.tolist()])) print('%s Baseline PSNR: ' % eval_name + str(baseline_scores[1])) print('%s PSNR : ' % eval_name + '\t'.join(['%.5f' % el for el in psnr.tolist()]))
def test():
    """Load a trained codec from a checkpoint and evaluate it on all eval loaders."""
    args = parser.parse_args()
    print(args)
    print('Start evaluation...')

    # Build the network stack; unet is optional.
    encoder, binarizer, decoder, unet = get_models(
        args=args,
        v_compress=args.v_compress,
        bits=args.bits,
        encoder_fuse_level=args.encoder_fuse_level,
        decoder_fuse_level=args.decoder_fuse_level)
    nets = [encoder, binarizer, decoder]
    if unet is not None:
        nets.append(unet)

    # GPU setup.
    gpus = [int(gpu) for gpu in args.gpus.split(',')]
    if len(gpus) > 1:
        print("Using GPUs {}.".format(gpus))
        # NOTE(review): rebinding the loop variable is a no-op — `nets` is
        # never actually wrapped in DataParallel. Left as-is because wrapping
        # would prefix state_dict keys with 'module.' and likely break the
        # checkpoint loading below; confirm intent before changing.
        for net in nets:
            net = nn.DataParallel(net, device_ids=gpus)

    # Resolve which checkpoint iteration to load.
    names = ['encoder', 'binarizer', 'decoder', 'unet']
    if args.load_model_name:
        print('Loading %s@iter %d' % (args.load_model_name, args.load_iter))
        index = args.load_iter
        train_iter = args.load_iter
    else:
        print("please specify the model and iterration for evaluation")
        exit(1)

    # Restore each sub-network's parameters from its checkpoint file.
    for net_idx, net in enumerate(nets):
        if net is not None:
            name = names[net_idx]
            checkpoint_path = '{}/{}_{}_{:08d}.pth'.format(
                args.model_dir, args.load_model_name, name, index)
            print('Loading %s from %s...' % (name, checkpoint_path))
            loaded_net = torch.load(checkpoint_path)
            net.load_state_dict(loaded_net)

    # Run evaluation on every configured loader and report metrics.
    set_eval(nets)
    eval_loaders = get_eval_loaders(args)
    for eval_name, eval_loader in eval_loaders.items():
        eval_begin = time.time()
        eval_loss, mssim, psnr = run_eval(
            nets, eval_loader, args, output_suffix='iter%d' % train_iter)
        print('Evaluation @iter %d done in %d secs' % (
            train_iter, time.time() - eval_begin))
        print('%s Loss : ' % eval_name +
              '\t'.join(['%.5f' % el for el in eval_loss.tolist()]))
        print('%s MS-SSIM: ' % eval_name +
              '\t'.join(['%.5f' % el for el in mssim.tolist()]))
        print('%s PSNR : ' % eval_name +
              '\t'.join(['%.5f' % el for el in psnr.tolist()]))