def main():
    global args
    net = UNet(3, 1)
    net.load(opt.ckpt_path)
    loss = Loss('soft_dice_loss')
    torch.cuda.set_device(0)
    net = net.cuda()
    loss = loss.cuda()

    if args.phase == 'train':
        # train
        dataset = NucleiDetector(opt, phase=args.phase)
        train_loader = DataLoader(dataset,
                                  batch_size=opt.batch_size,
                                  shuffle=True,
                                  num_workers=opt.num_workers,
                                  pin_memory=opt.pin_memory)
        lr = opt.lr
        optimizer = torch.optim.Adam(net.parameters(),
                                     lr=lr,
                                     weight_decay=opt.weight_decay)
        previous_loss = None  # no loss recorded yet
        for epoch in range(opt.epoch + 1):
            now_loss = train(train_loader, net, loss, epoch, optimizer,
                             opt.model_save_freq, opt.model_save_path)
            if previous_loss is not None and now_loss > previous_loss:
                lr *= opt.lr_decay
                for param_group in optimizer.param_groups:
                    param_group['lr'] = lr
                save_lr(net.model_name, opt.lr_save_path, lr)
            previous_loss = now_loss
    elif args.phase == 'val':
        # val phase
        dataset = NucleiDetector(opt, phase='val')
        val_loader = DataLoader(dataset,
                                batch_size=opt.batch_size,
                                shuffle=True,
                                num_workers=opt.num_workers,
                                pin_memory=opt.pin_memory)
        val(val_loader, net, loss)
    else:
        # test phase
        dataset = NucleiDetector(opt, phase='test')
        test_loader = DataLoader(dataset,
                                 batch_size=1,
                                 shuffle=True,
                                 num_workers=opt.num_workers,
                                 pin_memory=opt.pin_memory)
        test(test_loader, net, opt)
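The manual decay-on-plateau logic in the train branch above can also be expressed with PyTorch's built-in scheduler; a minimal sketch, reusing this example's net, loss, train_loader, and train():

import torch

optimizer = torch.optim.Adam(net.parameters(), lr=opt.lr,
                             weight_decay=opt.weight_decay)
# factor plays the role of opt.lr_decay; patience=0 decays as soon as
# the epoch loss fails to improve, matching the manual version above
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
    optimizer, mode='min', factor=opt.lr_decay, patience=0)

for epoch in range(opt.epoch + 1):
    now_loss = train(train_loader, net, loss, epoch, optimizer,
                     opt.model_save_freq, opt.model_save_path)
    scheduler.step(now_loss)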
Example #2
def test(dataset_name,
         model_name,
         metric_name,
         path_history="checkpoints/",
         path_best_model=""):

    history = ut.load_json(path_history)

    transformer = ut.ComposeJoint([[transforms.ToTensor(), None],
                                   [transforms.Normalize(*ut.mean_std), None],
                                   [None, ut.ToLong()]])
    test_set = dataset_dict[dataset_name](split="test",
                                          transform_function=transformer)

    model = model_dict[model_name](n_classes=test_set.n_classes).cuda()
    # path_best_model = "/mnt/home/issam/LCFCNSaves/pascal/State_Dicts/best_model.pth"
    model.load_state_dict(torch.load(path_best_model))

    model.trained_images = set(history["trained_images"])

    testDict = ut.val(model=model,
                      dataset=test_set,
                      epoch=history["best_val_epoch"],
                      metric_name=metric_name)

    print(pd.DataFrame([testDict]))
Example #3
def train(dataset_name, model_name, metric_name, path_history, path_model, path_opt, path_best_model, reset=False):  
  # SET SEED
  np.random.seed(1)
  torch.manual_seed(1) 
  torch.cuda.manual_seed_all(1)

  # Train datasets
  transformer = ut.ComposeJoint(
                    [ut.RandomHorizontalFlipJoint(),            
                    [transforms.ToTensor(), None],
                    [transforms.Normalize(*ut.mean_std), None],
                    [None,  ut.ToLong() ]
                    ])

  train_set = dataset_dict[dataset_name](split="train", 
                                         transform_function=transformer)
  
  trainloader = torch.utils.data.DataLoader(train_set, batch_size=1, 
                                            num_workers=0,
                                            drop_last=False,
                                            sampler=ut.RandomSampler(train_set))
  # Val datasets
  transformer = ut.ComposeJoint(
                    [
                         [transforms.ToTensor(), None],
                         [transforms.Normalize(*ut.mean_std), None],
                         [None,  ut.ToLong() ]
                    ])  

  val_set = dataset_dict[dataset_name](split="val", 
                                       transform_function=transformer)

  test_set = dataset_dict[dataset_name](split="test", 
                                       transform_function=transformer)

 

  # Model 
  model = model_dict[model_name](train_set.n_classes).cuda()
  opt = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),
                         lr=1e-5, weight_decay=0.0005)

  # Train
  if os.path.exists(path_history) and not reset:
    history = ut.load_json(path_history)
    model.load_state_dict(torch.load(path_model))
    opt.load_state_dict(torch.load(path_opt))
    s_epoch = history["train"][-1]["epoch"]
    print("Resuming epoch...{}".format(s_epoch))

  else:
    history = {"train":[], "val":[], "test":[],
               "model_name":model_name,
               "dataset_name":dataset_name, 
               "path_model":path_model,
               "path_opt":path_opt,
               "path_best_model":path_best_model,
               "best_val_epoch":-1, "best_val_mae":np.inf}
    s_epoch = 0
    print("Starting from scratch...")
  

  for epoch in range(s_epoch + 1, 1000):    
    train_dict = ut.fit(model, trainloader, opt, 
                        loss_function=losses.lc_loss,
                        epoch=epoch)
    
    # Update history
    history["trained_images"] = list(model.trained_images)
    history["train"] += [train_dict]

    # Save model, opt and history
    torch.save(model.state_dict(), path_model)
    torch.save(opt.state_dict(), path_opt)
    ut.save_json(path_history, history)

    # %%%%%%%%%%% 2. VALIDATION PHASE %%%%%%%%%%%%
    with torch.no_grad():      
      val_dict = ut.val(model=model, dataset=val_set, epoch=epoch, 
                        metric_name=metric_name)

      # Update history
      history["val"] += [val_dict]

      # Lower is better
      if val_dict[metric_name] <= history["best_val_mae"]:
        history["best_val_epoch"] = epoch
        history["best_val_mae"] = val_dict[metric_name]
        torch.save(model.state_dict(), path_best_model)

        # Test Model
        if not (dataset_name == "penguins" and epoch < 50):
          testDict = ut.val(model=model, 
                                dataset=test_set, 
                                epoch=epoch, metric_name=metric_name)
          history["test"] += [testDict]
        
      ut.save_json(path_history, history)
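train() above persists the model, optimizer, and history as three separate files; a common alternative is a single checkpoint dict (a sketch; path_ckpt is a hypothetical path, everything else reuses the example's names):

# saving: one file holds everything needed to resume
checkpoint = {"epoch": epoch,
              "model": model.state_dict(),
              "opt": opt.state_dict(),
              "history": history}
torch.save(checkpoint, path_ckpt)

# resuming: restore model, optimizer, and bookkeeping together
checkpoint = torch.load(path_ckpt)
model.load_state_dict(checkpoint["model"])
opt.load_state_dict(checkpoint["opt"])
history = checkpoint["history"]
s_epoch = checkpoint["epoch"]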
Example #4
# the snippet opens mid-call; a hypothetical constructor head (`Model`) is
# restored here so the fragment parses; the real class name is cut off
net = Model(nhead=args.nhead,
            nlayer=args.nlayer,
            norm_mode=args.norm_mode,
            norm_scale=args.norm_scale,
            residual=args.residual)
net = net.cuda()
optimizer = torch.optim.Adam(net.parameters(), args.lr, weight_decay=args.wd)
criterion = torch.nn.CrossEntropyLoss()
logging.info(net)

# train
best_acc = 0 
best_loss = 1e10
for epoch in range(args.epochs):
    train_loss, train_acc = train(net, optimizer, criterion, data)
    val_loss, val_acc = val(net, criterion, data)
    logging.debug('Epoch %d: train loss %.3f train acc: %.3f, val loss: %.3f val acc %.3f.'%
                (epoch, train_loss, train_acc, val_loss, val_acc))
    # save model 
    if best_acc < val_acc:
        best_acc = val_acc
        torch.save(net.state_dict(), OUT_PATH+'checkpoint-best-acc.pkl')
    if best_loss > val_loss:
        best_loss = val_loss
        torch.save(net.state_dict(), OUT_PATH+'checkpoint-best-loss.pkl')

# pick up the best model based on val_acc, then do test

net.load_state_dict(torch.load(OUT_PATH+'checkpoint-best-acc.pkl'))
val_loss, val_acc = val(net, criterion, data)
test_loss, test_acc = test(net, criterion, data)
Example #5
        checkpoint_folder = experiment_folder + '/checkpoint/'

        if not os.path.exists(experiment_folder):
            os.makedirs(experiment_folder)

        if not os.path.exists(checkpoint_folder):
            os.makedirs(checkpoint_folder)

    for epoch in range(100):

        # updating model weights
        train_loss, train_time = train(model, optimizer, criterion,
                                       train_loader)

        # validation
        val_loss, val_time, f1 = val(model, optimizer, criterion, val_loader)

        # logging message
        log = "Epoch : {} |".format(epoch + 1), \
              "LR    : {} |".format(get_learning_rate(optimizer)), \
              "Train Loss : {:.3f} |".format(train_loss), \
              "Val   Loss : {:.3f} |".format(val_loss), \
              "Val   F1   : {:.3f} |".format(f1), \
              "Time  Train: {:.3f} |".format(train_time), \
              "Time  Val  : {:.3f} |".format(val_time),

        print(log)

        if not params['debug']:
            # save model state; the original destination path is cut off in
            # the source, so a hypothetical checkpoint filename is used here
            torch.save(model.module.state_dict(),
                       checkpoint_folder + 'epoch_{}.pth'.format(epoch + 1))
Example #6
    checkpoint = torch.load('./checkpoint/{}.t7'.format(args.model_name))
    net.load_state_dict(checkpoint['net'])
    best_F2 = checkpoint['F2']

train_loader = Data.DataLoader(loader('datafile/train.txt'),
                               batch_size=args.batch_size,
                               shuffle=True,
                               num_workers=8,
                               drop_last=True)
test_loader = Data.DataLoader(loader('datafile/val.txt', test=True),
                              batch_size=args.batch_size,
                              num_workers=8)

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(),
                      lr=args.lr,
                      momentum=0.9,
                      weight_decay=5e-4)

best_acc = 0  # initialised here; the snippet's earlier definition is cut off
for epoch in range(args.num_epoch):
    print('Epoch %d' % epoch)
    train(train_loader, net, criterion, optimizer)
    acc = val(test_loader, net, criterion)
    if acc > best_acc:
        print('Saving..')
        state = {'net': net.state_dict(), 'acc': acc}
        if not os.path.isdir('checkpoint'):
            os.mkdir('checkpoint')
        torch.save(state, './checkpoint/{}.t7'.format(args.model_name))
        best_acc = acc  # update only on improvement, so the saved model stays the best
Example #7
    return student


teacher = getTeacherModel()
student = getStudentModel()
teacher = teacher.cuda()
student = student.cuda()

# Baseline
max_acc_baseline = 0.0
for i in range(epochs):

    print("Training epoch {}".format(i))
    train(student, teacher, T, train_load, 1.5, 0, 0, lr)
    print("Validating epoch {}".format(i))
    micro_auprc = val(student, val_load, i)
    print("Micro AUPRC: {}".format(micro_auprc))

    if (micro_auprc > max_acc_baseline):
        torch.save(student.state_dict(), 'weights/dcase_small_baseline')
        max_acc_baseline = micro_auprc

student = getStudentModel()
student = student.cuda()

# KD
max_acc_kd = 0.0
for i in range(epochs):

    print("Training epoch {}".format(i))
    train(student, teacher, T, train_load, 1.5, 1.5, 0, lr)
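The positional weights passed to train() above are not defined in the snippet; for context, a minimal sketch of the standard knowledge-distillation loss (Hinton et al., 2015) that such a train() typically combines, with hypothetical weighting alpha:

import torch.nn.functional as F

def kd_loss(student_logits, teacher_logits, labels, T=1.5, alpha=0.5):
    # soft term: KL divergence between temperature-softened distributions,
    # scaled by T^2 to keep gradient magnitudes comparable across T
    soft = F.kl_div(F.log_softmax(student_logits / T, dim=1),
                    F.softmax(teacher_logits / T, dim=1),
                    reduction='batchmean') * (T * T)
    # hard term: ordinary cross-entropy against the ground-truth labels
    hard = F.cross_entropy(student_logits, labels)
    return alpha * soft + (1.0 - alpha) * hard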
Example #8
# for plotting
mvn = torch.distributions.Normal(0, 1)
z_norm = mvn.sample([num_samples, np.ceil(n_in).astype(int)])
val_batch = next(iter(val_loader)).float()

start = time.time()
# for early stopping
patience = 0
best_val_loss = np.inf
epochs_list = []
train_losses = []
val_losses = []
for epoch in range(1, epochs):
    epochs_list.append(epoch)
    train_loss = train_one_epoch(model, epoch, optimizer, train_loader)
    val_loss = val(model, train, val_loader)
    train_losses.append(train_loss)
    val_losses.append(val_loss)
    if val_loss < best_val_loss:
        best_val_loss = val_loss
        patience = 0
        torch.save(model, path + "model.pt")
    else:
        patience += 1
    if patience >= 30:
        break
    print("Patience counter: {}/30".format(patience))
Example #9
import torch
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import os

from rnvp import RealNVP
from new_model import NewFlow
from utils import train_one_epoch, val, test
from data.hepmass import train
from data.hepmass import val_loader as test_loader

from gif import make_gif
import time

model = torch.load("/Users/edvardhulten/real_nvp_2d/model.pt")
v = val(model, train, test_loader)
test_loss = test(model, train, test_loader)
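Loading a fully pickled model, as above, only works when the defining class (RealNVP here) is importable at the same module path. A sketch of the more portable state_dict pattern; init_args stands in for the model's original constructor arguments, which are not shown in the snippet:

import torch
from rnvp import RealNVP

# save only the parameters, not the pickled class
torch.save(model.state_dict(), "model_state.pt")

# rebuild the architecture, then restore the parameters
init_args = ()  # hypothetical: the constructor arguments used originally
model = RealNVP(*init_args)
model.load_state_dict(torch.load("model_state.pt"))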
Example #10
                entropy[rank[args.k:]].mean().item(), i)

        i = conf.size(0)
        record['s_all_confidence'].update(conf.mean().item(), i)
        record['s_all_margin'].update(margin.mean().item(), i)
        record['s_all_entropy'].update(entropy.mean().item(), i)

        i = t_conf.size(0)
        record['t_confidence'].update(t_conf.mean().item(), i)
        record['t_margin'].update(t_margin.mean().item(), i)
        record['t_entropy'].update(t_entropy.mean().item(), i)

    for item in items:
        logger.add_scalar('train/{}'.format(item), record[item].avg, epoch + 1)

    # val
    acc = val(val_loader, args, t_model, s_model, logger, epoch)

    if acc > best_acc:
        best_acc = acc
        state_dict = dict(state_dict=s_model.state_dict(), best_acc=best_acc)
        name = osp.join(exp_path, 'ckpt/student_best.pth')
        os.makedirs(osp.dirname(name), exist_ok=True)
        torch.save(state_dict, name)

    scheduler.step()

if args.seed == 0:
    counter = counter.cpu().numpy()
    np.save(osp.join(exp_path, 'counter.npy'), counter)
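The conf, margin, and entropy tensors recorded above are computed before this fragment begins; a sketch of how such statistics are typically derived from a batch of logits (function name and details are illustrative):

import torch.nn.functional as F

def softmax_statistics(logits):
    prob = F.softmax(logits, dim=1)
    top2 = prob.topk(2, dim=1).values
    conf = top2[:, 0]                 # probability of the predicted class
    margin = top2[:, 0] - top2[:, 1]  # gap between top-1 and top-2 classes
    entropy = -(prob * prob.clamp_min(1e-12).log()).sum(dim=1)
    return conf, margin, entropy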
Example #11
def train(args, nci_id):
    # fixed seeds for reproducibility
    torch.manual_seed(0)
    np.random.seed(0)

    # CUDA setup; is_cuda must be defined even when no GPU is available
    is_cuda = torch.cuda.is_available()
    if is_cuda:
        torch.cuda.manual_seed_all(0)

    # load dataset
    train_set, test_set = loadNCI.load_nci_data(nci_id, cuda=is_cuda)

    # Use PyTorch's DataLoader and the collate function
    train_data_loader = DataLoader(train_set,
                                   batch_size=args.batch_size,
                                   shuffle=True,
                                   collate_fn=loadNCI.collate)
    test_data_loader = DataLoader(test_set,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  collate_fn=loadNCI.collate)

    input_dim = train_set[0][0].ndata['feature'].size(1)
    output_dim = 2
    # create model
    model = GINClassifier(args.num_layers, args.num_mlp_layers, input_dim,
                          args.hidden_dim, output_dim, args.feat_drop,
                          args.learn_eps, args.graph_pooling_type,
                          args.neigh_pooling_type, args.final_drop, is_cuda)
    if is_cuda:
        model.cuda()
    loss_func = nn.CrossEntropyLoss()  # define loss function
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    epoch_losses = []
    for epoch in range(args.num_epochs):
        model.train()
        epoch_loss = 0
        train_acc = 0
        for step, (bg, label) in enumerate(
                train_data_loader):  # bg is a batch of graphs
            prediction = model(bg)
            if is_cuda:
                label = label.cuda()
            loss = loss_func(prediction, label)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            epoch_loss += loss.detach().item()
            train_acc += utils.accuracy(prediction, label)
        epoch_loss /= (step + 1)
        train_acc /= (step + 1)

        test_acc = utils.val(model, test_data_loader, is_cuda)
        print('Task {}: Epoch {}, loss {:.4f}, TrainACC {:.4f}, TestACC {:.4f}'.
              format(nci_id, epoch, epoch_loss, train_acc, test_acc))
        epoch_losses.append(epoch_loss)
    torch.save(model.state_dict(),
               MODEL_PATH + 'Model-NCI-' + str(nci_id) + '.pkl')
    test_acc = utils.val(model, test_data_loader, is_cuda)
    train_acc = utils.val(model, train_data_loader, is_cuda)

    return train_acc, test_acc
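The seeding at the top of train() is the standard reproducibility recipe; a sketch gathering it into one helper (the helper itself is illustrative; it also seeds Python's built-in RNG, which the example omits):

import random
import numpy as np
import torch

def seed_everything(seed=0):
    random.seed(seed)                      # Python's built-in RNG
    np.random.seed(seed)                   # NumPy RNG
    torch.manual_seed(seed)                # CPU RNG
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)   # all GPU RNGs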