Example #1
def main():

    global opt, model
    opt = parser.parse_args()
    print(opt)

    cuda = opt.cuda
    if cuda and not torch.cuda.is_available():
        raise Exception("No GPU found, please run without --cuda")

    opt.seed = random.randint(1, 10000)
    print("Random Seed: ", opt.seed)
    torch.manual_seed(opt.seed)
    if cuda:
        torch.cuda.manual_seed(opt.seed)

    cudnn.benchmark = True

    print("===> Loading datasets")
    train_set = DatasetFromHdf5("../lapsrn/data/data.h5")
    training_data_loader = DataLoader(dataset=train_set,
                                      num_workers=opt.threads,
                                      batch_size=opt.batchSize,
                                      shuffle=True)
    print("===> Building model")
    model = Net()
    criterion = L1_Charbonnier_loss()
    print("===> Setting GPU")
    if cuda:
        model = model.cuda()
        criterion = criterion.cuda()
    else:
        model = model.cpu()

    # optionally resume from a checkpoint
    if opt.resume:
        if os.path.isfile(opt.resume):
            print("=> loading checkpoint '{}'".format(opt.resume))
            checkpoint = torch.load(opt.resume)
            opt.start_epoch = checkpoint["epoch"] + 1
            model.load_state_dict(checkpoint["model"].state_dict())
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    # optionally copy weights from a checkpoint
    if opt.pretrained:
        if os.path.isfile(opt.pretrained):
            print("=> loading model '{}'".format(opt.pretrained))
            weights = torch.load(opt.pretrained)
            model.load_state_dict(weights['model'].state_dict())
        else:
            print("=> no model found at '{}'".format(opt.pretrained))

    print("===> Setting Optimizer")
    optimizer = optim.Adam(model.parameters(), lr=opt.lr)

    print("===> Training")
    for epoch in range(opt.start_epoch, opt.nEpochs + 1):
        train(training_data_loader, optimizer, model, criterion, epoch)
        save_checkpoint(model, epoch)
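
The snippet above calls a save_checkpoint helper that is not shown. A minimal sketch, assuming the checkpoint stores the whole model under a "model" key and the epoch index under "epoch" (which is what the resume logic above expects; the folder name is arbitrary), could look like:

def save_checkpoint(model, epoch, folder="checkpoint/"):
    # Persist the full model object and the epoch index so that --resume
    # can later restore them via checkpoint["model"] and checkpoint["epoch"].
    if not os.path.exists(folder):
        os.makedirs(folder)
    model_out_path = os.path.join(folder, "model_epoch_{}.pth".format(epoch))
    torch.save({"epoch": epoch, "model": model}, model_out_path)
    print("Checkpoint saved to {}".format(model_out_path))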
Example #2
def train(train_loader_path,
          test_loader_path,
          epochs,
          lr=1e-2,
          momentum=0.5,
          seed=1):
    import os
    os.system("nvidia-smi")
    writer = SummaryWriter("./logs/{}_{}".format(epochs, lr))
    torch.manual_seed(seed)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    with open(train_loader_path, 'rb') as f:
        train_dataloader = pickle.loads(f.read())
    with open(test_loader_path, 'rb') as f:
        test_dataloader = pickle.loads(f.read())
    model = Net().to(device)
    if torch.cuda.device_count() > 1:
        model = torch.nn.DataParallel(model)

    optimizer = optim.SGD(model.parameters(), lr=lr, momentum=momentum)

    for epoch in range(1, epochs + 1):
        train_epoch(model, train_dataloader, optimizer, epoch, device, writer)
        test_epoch(model, test_dataloader, epoch, device, writer)

    model_path = "mnist.pt"
    torch.save(model.cpu().state_dict(), model_path)
    with open('output.txt', 'w') as f:
        f.write(model_path)
        print(f'Model written to: {model_path}')
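
For reference, the pickled DataLoader files consumed above could be produced along these lines; this is an assumption, since the original data-preparation step is not shown and MNIST is only inferred from the "mnist.pt" filename:

import pickle
from torch.utils.data import DataLoader
from torchvision import datasets, transforms

# Hypothetical preparation script: build the loaders once and pickle them
# so that train() can simply unpickle them from disk.
train_ds = datasets.MNIST("./data", train=True, download=True,
                          transform=transforms.ToTensor())
test_ds = datasets.MNIST("./data", train=False, download=True,
                         transform=transforms.ToTensor())
with open("train_loader.pkl", "wb") as f:
    pickle.dump(DataLoader(train_ds, batch_size=64, shuffle=True), f)
with open("test_loader.pkl", "wb") as f:
    pickle.dump(DataLoader(test_ds, batch_size=1000), f)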
Example #3
def main():
  args = parser_args()
  print('Batch size: %d' % args.batchsize)
  print('Initial learning rate: %.5f' % args.lr)
  print('Weight decay: %.6f' % args.wd)

  device = torch.device('cuda:' + str(args.gpu) 
    if torch.cuda.is_available() else 'cpu')
  cudnn.benchmark = True

  # fix random seed
  torch.manual_seed(args.seed)
  torch.cuda.manual_seed_all(args.seed)
  np.random.seed(args.seed)

  net = Net(nclasses=args.nclass)
  fnet = torch.load(args.fnet_path)  # feature net (theta_1)
  hnet = torch.load(args.hnet_path)  # head net (theta_2)
  clf = torch.load(args.clf_path)    # classifier (omega)
  
  # load parameters (random vs. pre-trained) as appropriate
  net.load_fnet(fnet, freeze=True)
  net.load_hnet(hnet, reinit_idx=(), 
    freeze_idx=args.freeze_hnet, linearize_idx=args.linearize_hnet)
  net.load_clf(clf, reinit=False, linearize=args.linearize_clf)
  net.to(device)

  params = list(filter(lambda p: p.requires_grad, net.parameters()))
  if args.optim == 'sgd':
    optimizer = optim.SGD(
      params, lr=args.lr, weight_decay=args.wd, momentum=0.9)
  elif args.optim == 'adam':
    optimizer = optim.Adam(
      params, lr=args.lr, betas=(0.5, 0.999), weight_decay=args.wd)

  # check trainable parameters
  for p in params:
    print(p.size()) 

  # load training and test data
  train_loader, test_loader = load_data(
    args.dataset, args.data_path, args.batchsize, args.normalize)
  
  print('----- Training phase -----')
  it = 0
  losses = AverageMeter()

  while it < args.niter:
    it = train(
      device, train_loader, net, optimizer, 
      args.niter, args.stepsize, losses, it=it)

  print('----- Evaluation phase -----')
  print('> test accuracy:')
  evaluate(device, test_loader, net)
  torch.save(net.cpu(), args.model_path)
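
AverageMeter is used above but not defined in the snippet; a common implementation (assumed here) simply tracks a running average of the loss:

class AverageMeter(object):
    """Tracks the latest value, running sum, count and average."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0.0
        self.avg = 0.0
        self.sum = 0.0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count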
Example #4
import torch
import sys
import numpy as np
from model import Net
import coremltools as ct
from coremltools.models.neural_network import quantization_utils

model_in = sys.argv[1]
label_count = sys.argv[2]

model = Net(output_label_count=int(label_count))
model.load_state_dict(torch.load(model_in))

model.cpu()  # convert model to cpu
model.eval()  # switch to eval mode

random_input = torch.rand(1, 1, 98, 40)
traced_model = torch.jit.trace(model, random_input, check_trace=False)

print("converting pymodl to coreml model")
converted_model = ct.convert(
    traced_model,  # convert using Unified Conversion API
    inputs=[ct.TensorType(shape=random_input.shape)])
print("convertion is completed saving to disk f{}")

# allowed values of nbits = 16, 8, 7, 6, ...., 1
quantized_model = quantization_utils.quantize_weights(converted_model, 8)
converted_model.save(model_in.replace(".pymodel", "") + ".mlmodel")
quantized_model.save(model_in.replace(".pymodel", "_quantized") + ".mlmodel")
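
As a side note, one way to sanity-check the trace before conversion is to compare the traced graph's output with the eager model's output on the same random input (a sketch, not part of the original script; it assumes the network returns a single tensor):

# Optional sanity check: the traced graph should reproduce the eager output.
with torch.no_grad():
    eager_out = model(random_input)
    traced_out = traced_model(random_input)
print("max abs difference between eager and traced outputs:",
      (eager_out - traced_out).abs().max().item())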
Example #5
def train(args):
    check_paths(args)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)

    if args.cuda:
        torch.cuda.manual_seed(args.seed)
        kwargs = {'num_workers': 0, 'pin_memory': False}
    else:
        kwargs = {}

    transform = transforms.Compose([transforms.Scale(args.image_size),
                                    transforms.CenterCrop(args.image_size),
                                    transforms.ToTensor(),
                                    transforms.Lambda(lambda x: x.mul(255))])
    train_dataset = datasets.ImageFolder(args.dataset, transform)
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size, **kwargs)

    style_model = Net(ngf=args.ngf)
    if args.resume is not None:
        print('Resuming, initializing using weight from {}.'.format(args.resume))
        style_model.load_state_dict(torch.load(args.resume))
    print(style_model)
    optimizer = Adam(style_model.parameters(), args.lr)
    mse_loss = torch.nn.MSELoss()

    vgg = Vgg16()
    utils.init_vgg16(args.vgg_model_dir)
    vgg.load_state_dict(torch.load(os.path.join(args.vgg_model_dir, "vgg16.weight")))

    if args.cuda:
        style_model.cuda()
        vgg.cuda()

    style_loader = utils.StyleLoader(args.style_folder, args.style_size)

    tbar = trange(args.epochs)
    for e in tbar:
        style_model.train()
        agg_content_loss = 0.
        agg_style_loss = 0.
        count = 0
        for batch_id, (x, _) in enumerate(train_loader):
            n_batch = len(x)
            count += n_batch
            optimizer.zero_grad()
            x = Variable(utils.preprocess_batch(x))
            if args.cuda:
                x = x.cuda()

            style_v = style_loader.get(batch_id)
            style_model.setTarget(style_v)

            style_v = utils.subtract_imagenet_mean_batch(style_v)
            features_style = vgg(style_v)
            gram_style = [utils.gram_matrix(y) for y in features_style]

            y = style_model(x)
            xc = Variable(x.data.clone())

            y = utils.subtract_imagenet_mean_batch(y)
            xc = utils.subtract_imagenet_mean_batch(xc)

            features_y = vgg(y)
            features_xc = vgg(xc)

            f_xc_c = Variable(features_xc[1].data, requires_grad=False)

            content_loss = args.content_weight * mse_loss(features_y[1], f_xc_c)

            style_loss = 0.
            for m in range(len(features_y)):
                gram_y = utils.gram_matrix(features_y[m])
                gram_s = Variable(gram_style[m].data, requires_grad=False).repeat(args.batch_size, 1, 1, 1)
                style_loss += args.style_weight * mse_loss(gram_y, gram_s[:n_batch, :, :])

            total_loss = content_loss + style_loss
            total_loss.backward()
            optimizer.step()

            agg_content_loss += content_loss.data[0]
            agg_style_loss += style_loss.data[0]

            if (batch_id + 1) % args.log_interval == 0:
                mesg = "{}\tEpoch {}:\t[{}/{}]\tcontent: {:.6f}\tstyle: {:.6f}\ttotal: {:.6f}".format(
                    time.ctime(), e + 1, count, len(train_dataset),
                                agg_content_loss / (batch_id + 1),
                                agg_style_loss / (batch_id + 1),
                                (agg_content_loss + agg_style_loss) / (batch_id + 1)
                )
                tbar.set_description(mesg)

            
            if (batch_id + 1) % (4 * args.log_interval) == 0:
                # save model
                style_model.eval()
                style_model.cpu()
                save_model_filename = "Epoch_" + str(e) + "iters_" + str(count) + "_" + \
                    str(time.ctime()).replace(' ', '_') + "_" + str(
                    args.content_weight) + "_" + str(args.style_weight) + ".model"
                save_model_path = os.path.join(args.save_model_dir, save_model_filename)
                torch.save(style_model.state_dict(), save_model_path)
                style_model.train()
                style_model.cuda()
                tbar.set_description("\nCheckpoint, trained model saved at {}".format(save_model_path))

    # save model
    style_model.eval()
    style_model.cpu()
    save_model_filename = "Final_epoch_" + str(args.epochs) + "_" + \
        str(time.ctime()).replace(' ', '_') + "_" + str(
        args.content_weight) + "_" + str(args.style_weight) + ".model"
    save_model_path = os.path.join(args.save_model_dir, save_model_filename)
    torch.save(style_model.state_dict(), save_model_path)

    print("\nDone, trained model saved at", save_model_path)
Example #6
loader_tr = DataLoader(MyDataset(X_tr, Y_tr), shuffle=True, **args['loader_tr_args'])

model = Net().to(device)
optimizer = optim.SGD(model.parameters(), **args['optimizer_args'])
train(model, device, loader_tr, optimizer, args['n_epoch'])

attacker = FGSM(eps=0.15, clip_max=CLIP_MAX, clip_min=CLIP_MIN)
# attacker = BIM(eps=0.15, eps_iter=0.01, n_iter=50, clip_max=CLIP_MAX, clip_min=CLIP_MIN)
# attacker = DeepFool(max_iter=50, clip_max=CLIP_MAX, clip_min=CLIP_MIN)
print('attacker: {}'.format(type(attacker).__name__))

demo_idxs = [545, 107, 38, 142, 65, 15, 21, 171, 257, 20]
X_te_cln = X_te[demo_idxs]
Y_te_cln = Y_te[demo_idxs]
X_te_adv = torch.zeros(X_te_cln.shape)
model.cpu()
for i in range(len(X_te_cln)):
    X_te_adv[i] = attacker.generate(model, X_te_cln[i], Y_te_cln[i])

loader_te_cln = DataLoader(MyDataset(X_te_cln, Y_te_cln), shuffle=False, **args['loader_te_args'])
loader_te_adv = DataLoader(MyDataset(X_te_adv, Y_te_cln), shuffle=False, **args['loader_te_args'])

model.cuda()
P_cln = predict(model, device, loader_te_cln)
P_adv = predict(model, device, loader_te_adv)

print('labels of clean images:       {}'.format(P_cln.numpy()))
print('labels of adversarial images: {}'.format(P_adv.numpy()))

for i in range(10):
    recover_image(X_te_cln.numpy()[i][0]).save('results/Clean/{}.png'.format(i))
Example #7
                             './outputs', 'original', False, logger)

    df = data_gen.generate_data()

    df.drop(['timestamp'], axis=1, inplace=True)

    scaler = transform_scale(df.drop(['labels'], axis=1))

    train_loader = _get_train_data_loader(args.batch_size, df)

    model = Net(args.hidden_dim)

    criterion = nn.CrossEntropyLoss()

    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

    model = train(model, scaler, train_loader, args.epochs, optimizer,
                  criterion, device)

    print(args.model_dir)

    model_path = os.path.join(args.model_dir, 'model_main.pt')
    with open(model_path, 'wb') as f:
        torch.save(model.cpu().state_dict(), f)

    dump(scaler, open(os.path.join(args.model_dir, 'scaler.pkl'), 'wb'))

    print(model)

    print("Model generated successfully")
Example #8
    # serialize the scalar data to a .json
    scalar_path = "results/" + str(time_stamp) + "_all_scalars.json"
    if not os.path.exists("results/"):
        os.makedirs("results/")

    writer.export_scalars_to_json(scalar_path)
    writer.close()

    # pickle the model and save to a file
    model_path = "models/" + str(time_stamp) + "_saved_state.pkl"
    if not os.path.exists("models/"):
        os.makedirs("models/")
    torch.save(model.state_dict(), model_path)

    # evaluate on the testing data
    model.cpu().eval()
    test_data = KinaseDataset(data_path=args.data,
                              oversample=None,
                              split="test",
                              features_list=features_list,
                              protein_name_list=protein_name_list)
    test_y_probs = model(Variable(test_data.data.float(), requires_grad=False))
    test_y_preds = np.argmax(test_y_probs.data.numpy(), axis=1)
    y_test = np.argmax(test_data.labels.numpy(), axis=1)
    print()
    print("testing data # examples: {}".format(len(test_data)))
    print("Unweighted performance metrics: ")
    print("Accuracy: {} \t F1-Score: {} \t Precision: {} \t Recall: {}".format(
        accuracy_score(y_test, test_y_preds), f1_score(y_test, test_y_preds),
        precision_score(y_test, test_y_preds),
        recall_score(y_test, test_y_preds)))
Example #9
            loss = loss_fn(output, target)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            print("Train epoch: {} batch: {} loss: {}".format(
                epoch_, batch_idx, loss))

            if batch_idx % 100 == 0:
                net.eval()
                with torch.no_grad():
                    data, target = test_data_helper()
                    data, target = torch.FloatTensor(
                        data).cuda(), torch.LongTensor(target).cuda()
                    output = net(data)
                    pred_t = target
                    pred_p = torch.argmax(output, 1)
                    num = pred_t.size(0)
                    correct = torch.sum(torch.eq(pred_t, pred_p))
                    accuracy = correct.item() / num
                    print('accuracy : {}'.format(accuracy))
                    if accuracy > best_acc:
                        net.cpu()
                        torch.save(net, 'best.pth')
                        best_acc = accuracy
                        print(
                            "save model. best accuracy is {}".format(best_acc))
                        net.cuda()
                net.train()
Example #10
def train(args):

    check_paths(args)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)

    try:
        with open(param_path, 'r') as tc:

            trainingParams = json.load(tc)
            ngf = int(trainingParams.get('ngf', args.ngf))
            epochs = int(trainingParams.get('epochs', args.epochs))
            batch_size = int(trainingParams.get('batch_size', args.batch_size))
            log_interval = int(
                trainingParams.get('log_interval', args.log_interval))
            learning_rate = float(
                trainingParams.get('learning_rate', args.learning_rate))
            cuda = int(trainingParams.get('cuda', args.cuda))

            if cuda:
                logger.info("Using CUDA")
                torch.cuda.manual_seed(args.seed)
                kwargs = {'num_workers': 8, 'pin_memory': True}
                logger.info("Using kwarguments: \n" + str(kwargs))
            else:
                kwargs = {}

            transform = transforms.Compose([
                transforms.Scale(args.image_size),
                transforms.CenterCrop(args.image_size),
                transforms.ToTensor(),
                transforms.Lambda(lambda x: x.mul(255))
            ])
            train_dataset = datasets.ImageFolder(args.dataset, transform)
            train_loader = DataLoader(train_dataset,
                                      batch_size=args.batch_size,
                                      **kwargs)
            style_model = Net(ngf=ngf)

            print(style_model)

            optimizer = Adam(style_model.parameters(), learning_rate)
            mse_loss = torch.nn.MSELoss()

            vgg = Vgg16()

            utils.mod_utils.init_vgg16(args.vgg_model_dir)
            vgg.load_state_dict(
                torch.load(os.path.join(args.vgg_model_dir, "vgg16.weight")))

            if cuda:
                style_model.cuda()
                vgg.cuda()

            style_loader = StyleLoader(args.style_folder, args.style_size)

            for e in range(epochs):
                style_model.train()
                agg_content_loss = 0.
                agg_style_loss = 0.
                count = 0

                for batch_id, (x, _) in enumerate(train_loader):
                    n_batch = len(x)
                    count += n_batch
                    optimizer.zero_grad()
                    x = Variable(preprocess_batch(x))
                    if cuda:
                        x = x.cuda()

                    style_v = style_loader.get(batch_id)
                    style_model.setTarget(style_v)

                    style_v = utils.img_utils.subtract_imagenet_mean_batch(
                        style_v)
                    features_style = vgg(style_v)
                    gram_style = [
                        utils.img_utils.gram_matrix(y) for y in features_style
                    ]

                    y = style_model(x.cuda())
                    xc = Variable(x.data.clone(), volatile=True)

                    y = utils.img_utils.subtract_imagenet_mean_batch(y)
                    xc = utils.img_utils.subtract_imagenet_mean_batch(xc)

                    features_y = vgg(y)
                    features_xc = vgg(xc.cuda())

                    f_xc_c = Variable(features_xc[1].data, requires_grad=False)

                    content_loss = args.content_weight * \
                        mse_loss(features_y[1], f_xc_c)

                    style_loss = 0.
                    for m in range(len(features_y)):
                        gram_y = utils.img_utils.gram_matrix(features_y[m])
                        gram_s = Variable(gram_style[m].data,
                                          requires_grad=False).repeat(
                                              args.batch_size, 1, 1, 1)
                        style_loss += args.style_weight * mse_loss(
                            gram_y, gram_s[:n_batch, :, :])

                    total_loss = content_loss + style_loss
                    total_loss.backward()
                    optimizer.step()

                    agg_content_loss += content_loss.data[0]
                    agg_style_loss += style_loss.data[0]

                    if (batch_id + 1) % log_interval == 0:
                        msg = "{}\tEpoch {}:\t[{}/{}]\tcontent: {:.6f}\tstyle: {:.6f}\ttotal: {:.6f}".format(
                            time.ctime(), e + 1, count, len(train_dataset),
                            agg_content_loss / (batch_id + 1),
                            agg_style_loss / (batch_id + 1),
                            (agg_content_loss + agg_style_loss) /
                            (batch_id + 1))
                        print(msg)

                    if (batch_id + 1) % (20 * log_interval) == 0:
                        # save model
                        style_model.eval()
                        style_model.cpu()
                        save_model_filename = "Epoch_" + str(e) + "_" +\
                                              "iters_" + str(count) + \
                                              "_" + str(time.ctime()).replace(' ','_') + \
                                               "_" + str(args.content_weight) + "_" + \
                                               str(args.style_weight) + ".model"
                        save_model_path = os.path.join(temp_save_model_dir,
                                                       save_model_filename)

                        torch.save(style_model.state_dict(), save_model_path)
                        style_model.train()
                        style_model.cuda()
                        logger.info("Checkpoint, trained model saved at " +
                                    str(save_model_path))

            # save the final model

            style_model.eval()
            style_model.cpu()
            save_final_model_path = os.path.join(model_path,
                                                 final_model_filename)
            torch.save(style_model.state_dict(), save_final_model_path)

            logger.info("Done, trained model saved at " +
                        save_final_model_path)

            # Write out the success file
            with open(os.path.join(output_path, 'success'), 'w') as s:
                s.write('Done')

    except Exception as e:
        with open(os.path.join(output_path, 'failure'), 'w') as s:
            trc = traceback.format_exc()
            logger.info('Exception during training: ' + str(e) + '\n' + trc)
            s.write('Exception during training: ' + str(e) + '\n' + trc)