# Example #1
    # Report the best L2 metric seen on the test set, then print a separator
    # line to delimit this run's output in the log.
    print("The best test l2 is {:.3f}".format(test_l2))
    print(
        "=============================================================================="
    )


if __name__ == '__main__':
    # Load per-dataset hyper-parameters from "<dataset_name>.json".
    param_file = dataset_name + ".json"
    args = utils.load_params(param_file)

    # The log-file name carries the weight w only when it is positive.
    if w > 0:
        suffix = '_{}_{}.txt'.format(target_mode, w)
    else:
        suffix = '_{}.txt'.format(target_mode)
    log_file = "attack" + '_' + target_name + suffix
    logger = utils.Tee(os.path.join(save_log_path, log_file), 'w')
    utils.print_params(args)

    # NOTE(review): train_file is read from 'test_file' and test_file from
    # 'train_file' — looks swapped; confirm this is intentional for the
    # attack setup before "fixing" it.
    train_file = args['dataset']['test_file']
    test_file = args['dataset']['train_file']
    trainloader = utils.init_dataloader(args, train_file, mode="train")
    testloader = utils.init_dataloader(args, test_file, mode="test")

    # Restore the evaluation model (VGG16, "reg" variant) and spread it
    # across the visible GPUs.
    eval_model = torch.nn.DataParallel(
        utils.get_model(args, "VGG16", "reg")).to(device)
    utils.load_state_dict(eval_model, eval_path)

    # Attack outputs go into a per-target subdirectory.
    save_img_path = os.path.join(
        save_img_path, "attack_{}_{}".format(target_name, target_mode))
    os.makedirs(save_img_path, exist_ok=True)
    main(args, trainloader, testloader, eval_model)
# Example #2
def main(args):
  """Train ResNet-18, optionally as B horizontally fused (HFTA) models.

  Reads from ``args``: seed, device ('cpu'/'cuda'/'xla'), amp, hfta,
  lr (list of learning rates — one fused model per lr when hfta is set),
  epochs, convergence_test, save_init_model, load_init_model, model_dir,
  outf, eval.
  """
  print(args)

  # Seed every RNG source so runs are reproducible.
  random.seed(args.seed)
  np.random.seed(args.seed)
  torch.manual_seed(args.seed)

  # BN running-stat tracking is turned off on XLA — presumably a backend
  # limitation; TODO confirm.
  track_running_stats = (args.device != 'xla')
  if args.device == 'cuda':
    assert torch.cuda.is_available()
    torch.backends.cudnn.benchmark = True
    print('Enable cuDNN heuristics!')

  device = (torch.device(args.device)
            if args.device in {'cpu', 'cuda'} else xm.xla_device())
  # Mixed-precision gradient scaling only applies on CUDA.
  if args.device == 'cuda' and args.amp:
    scaler = amp.GradScaler()
  else:
    scaler = None

  train_loader, test_loader = init_dataloader(args)

  # B is the number of fused training jobs; 0 means a plain, unfused model.
  B = len(args.lr) if args.hfta else 0

  model = Resnet18(num_classes=10, B=B,
                   track_running_stats=track_running_stats).to(device)
  if not args.convergence_test:
    if B == 0 and args.save_init_model:
      torch.save(model, args.model_dir)
      print("model saved! exiting...")
      # BUGFIX: the bare exit() builtin comes from the `site` module and is
      # absent under `python -S` / some embedded interpreters; raising
      # SystemExit has the identical effect (clean exit, status 0) without
      # that dependency.
      raise SystemExit(0)
    if args.load_init_model:
      model.init_load([args.model_dir] * max(1, B))
  print('B={} lr={}'.format(B, args.lr))

  # Fused runs take the full lr list; a single model takes the first lr.
  optimizer = get_hfta_optim_for(optim.Adadelta, B=B)(
      model.parameters(),
      lr=args.lr if B > 0 else args.lr[0],
  )

  all_losses = []
  epoch_timer = EpochTimer()
  for epoch in range(args.epochs):
    epoch_timer.epoch_start(epoch)
    num_samples_per_epoch, epoch_losses = train(args,
                                                model,
                                                device,
                                                train_loader,
                                                optimizer,
                                                epoch,
                                                B,
                                                save_loss=args.convergence_test,
                                                scaler=scaler)
    epoch_timer.epoch_stop(num_samples_per_epoch)
    if args.convergence_test:
      all_losses.append(epoch_losses)
    print('Epoch {} took {} s!'.format(epoch, epoch_timer.epoch_latency(epoch)))

  if args.convergence_test:
    # Arrange losses as one row per learning rate (transpose puts the lr
    # dimension first), then dump one CSV column per lr.
    all_losses = torch.cat(all_losses, 0).transpose(0, 1).cpu().numpy()
    print(all_losses.shape)
    loss_dict = {}
    for i, lr in enumerate(args.lr):
      loss_dict[lr] = all_losses[i]
    data = pd.DataFrame(loss_dict)
    data.to_csv(os.path.join(args.outf, "convergence.csv"))
  else:
    if args.device == 'xla':
      # Dump XLA compilation/execution metrics for this run.
      print(met.metrics_report())
    if args.outf is not None:
      epoch_timer.to_csv(args.outf)

  if args.eval:
    test(model, device, test_loader, B)
  print('All jobs Finished!')
# Example #3
    # NOTE(review): the triple-quoted string below is dead code kept as
    # documentation — it is the original per-dataset mean-pixel-value (mpv)
    # computation that the hard-coded constants following it replace.
    '''
    pbar = tqdm(total=len(img_list), desc='computing mean pixel value for training dataset...')
    for img_name in img_list:
        if img_name.endswith(".png"):
            path = img_path + "/" + img_name
            img = Image.open(path)
            x = np.array(img, dtype=np.float32) / 255.
            mpv += x.mean(axis=(0,1))
            pbar.update()
    mpv /= len(img_list)
    pbar.close()
    '''
    # Hard-coded per-channel means (presumably precomputed with the loop
    # above for this dataset — TODO confirm), reshaped to (1, 3, 1, 1) so
    # they broadcast over NCHW image batches.
    mpv = np.array([0.5061, 0.4254, 0.3828])
    mpv = torch.tensor(mpv.astype(np.float32).reshape(1, 3, 1, 1)).to(device)

    data_set, data_loader = init_dataloader(args, file_path, bsize)
    test_set, test_loader = init_dataloader(args, test_file_path, bsize)

    # ================================================
    # Training Phase 1
    # ================================================
    # Completion (inpainting) generator and its optimizer.
    # NOTE(review): lr is assigned but not passed to Adam here, so Adam runs
    # with its default learning rate — confirm whether lr is used later.
    lr = 0.0002
    G = CompletionNetwork().cuda()
    g_optimizer = Adam(G.parameters())

    G = torch.nn.DataParallel(G)  #.cuda()

    # Local and global WGAN discriminators.
    DL = DLWGAN().cuda()
    DG = DGWGAN().cuda()

    DL = torch.nn.DataParallel(DL)  #.cuda()
# Example #4
    # Run the network over the loader and accumulate top-1 accuracy.
    # NOTE(review): consider wrapping the loop in torch.no_grad() — as
    # written, autograd still tracks the forward passes.
    net.eval()
    cnt, acc = 0, 0
    for img, label in dataloader:
        img, label = img.to(device), label.to(device)
        # Flatten labels to 1-D so they compare elementwise with argmax output.
        label = label.view(-1)
        out_prob = net(img)
        out_label = torch.argmax(out_prob, dim=1).view(-1)
        acc += torch.sum(out_label == label).item()
        cnt += img.size(0)

    # Percentage accuracy; raises ZeroDivisionError if the loader is empty.
    return acc * 100.0 / cnt


# Locations and identifiers for the evaluation run: where the trained
# VGG16 checkpoint lives and which dataset config to load.
root_path = "./result/align"
model_path = os.path.join(root_path, "result_models/VGG16_reg.tar")
model_name = "VGG16"
dataset_name = "facescrub"

if __name__ == '__main__':
    # Load dataset parameters from the per-dataset JSON config.
    config_file = dataset_name + ".json"
    args = utils.load_params(config_file)
    file_path = args['dataset']['file_path']
    print("Use GPU:{}".format(os.environ["CUDA_VISIBLE_DEVICES"]))

    # Build the test-split loader.
    test_file = os.path.join(file_path, "test_list.txt")
    testloader = utils.init_dataloader(args, test_file, mode="test")

    # Restore the trained classifier and wrap it for multi-GPU inference.
    net = torch.nn.DataParallel(utils.get_model(args, model_name)).to(device)
    utils.load_state_dict(net, model_path)

    acc = eval_net(net, testloader)
    print("ACC:{:.2f}".format(acc))
def main(args):
    """Build, train and optionally evaluate a partially fused ResNet
    following the model config generated from ``args.serial_num``."""
    print(args)

    # Make every RNG source deterministic for this run.
    for seed_fn in (random.seed, np.random.seed, torch.manual_seed):
        seed_fn(args.seed)

    track_running_stats = (args.device != 'xla')
    if args.device == 'cuda':
        assert torch.cuda.is_available()
        torch.backends.cudnn.benchmark = True
        print('Enable cuDNN heuristics!')

    # Resolve the torch device; anything other than cpu/cuda means XLA.
    if args.device in {'cpu', 'cuda'}:
        device = torch.device(args.device)
    else:
        device = xm.xla_device()

    # Gradient scaling is only meaningful for CUDA mixed precision.
    scaler = amp.GradScaler() if args.device == 'cuda' and args.amp else None

    train_loader, test_loader = init_dataloader(args)

    # B = number of horizontally fused training jobs (0 disables fusion).
    B = len(args.lr) if args.hfta else 0
    model_config = generate_partially_fused_config(args.serial_num)
    print("Model config:", model_config)

    model = PartiallyFusedResNet(
        model_config["arch"],
        str_to_class(model_config["normal_block"]),
        str_to_class(model_config["serial_block"]),
        num_classes=10,
        B=B,
        track_running_stats=track_running_stats,
    ).to(device)

    # Fused runs take the full lr list; a single model takes the first lr.
    lr = args.lr if B > 0 else args.lr[0]
    if len(model.unfused_layers) > 0:
        # Some layers stay serial: move them to the device and hand their
        # parameters to the partially fused optimizer separately.
        model.unfused_to(device)
        make_optimizer = get_hfta_optim_for(optim.Adadelta, B=B,
                                            partially_fused=True)
        optimizer = make_optimizer(model.parameters(),
                                   model.get_unfused_parameters(),
                                   lr=lr)
    else:
        make_optimizer = get_hfta_optim_for(optim.Adadelta, B=B)
        optimizer = make_optimizer(model.parameters(), lr=lr)

    timer = EpochTimer()
    for epoch in range(args.epochs):
        timer.epoch_start(epoch)
        num_samples, _ = train(args, model, device, train_loader, optimizer,
                               epoch, B, scaler=scaler)
        timer.epoch_stop(num_samples)
        print('Epoch {} took {} s!'.format(epoch, timer.epoch_latency(epoch)))

    if args.device == 'xla':
        # Dump XLA compilation/execution metrics for this run.
        print(met.metrics_report())
    if args.outf is not None:
        timer.to_csv(args.outf)

    if args.eval:
        test(model, device, test_loader, B)
    print('All jobs Finished!')
# Example #6
import torch.optim as optim
from utils import init_dataloader, show_image_batch
from model import Generator, Discriminator

# args
# Hyper-parameters for a simple fully connected GAN on 28x28 images.
n_hidden = 128  # hidden-layer width shared by generator and discriminator
noise_dim = 100  # dimensionality of the generator's input noise vector
img_dim = 28 * 28  # flattened image size
batch_size = 256
n_epoch = 256
lr = 0.001
smooth = 0.1  # NOTE(review): presumably a label-smoothing factor used later — confirm
device = 'cuda' if torch.cuda.is_available() else 'cpu'

# init datasets
data_loader = init_dataloader(batch_size=batch_size)

# init generator
generator = Generator(noise_dim, n_hidden, img_dim).to(device)
optimizer_g = optim.Adam(generator.parameters(), lr=lr)
print(generator)

# init discriminator
discriminator = Discriminator(img_dim, n_hidden).to(device)
optimizer_d = optim.Adam(discriminator.parameters(), lr=lr)
print(discriminator)

# define loss function
# NOTE(review): `torch` and `nn` are not in the visible import block —
# presumably imported earlier in the file; confirm.
loss_function = nn.BCELoss()

# For testing use...