Example #1
def __init__(self, neural_network, lr, lowerbound, upperbound, lamda=1, sigma=0.02, kernelsize=5):
    super(ADMM_networks, self).__init__()
    self.lowbound = lowerbound
    self.upbound = upperbound
    self.neural_net = neural_network
    self.reset()
    self.optimiser = torch.optim.Adam(self.neural_net.parameters(), lr=lr)
    self.CEloss_criterion = CrossEntropyLoss2d()
    # Penalty coefficients for the two ADMM auxiliary variables.
    self.p_u = 10
    self.p_v = 10
    self.lamda = lamda
    self.sigma = sigma
    self.kernelsize = kernelsize
    self.initial_kernel()
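
Every example on this page assumes a CrossEntropyLoss2d helper, which is not part of torch.nn. A minimal sketch of the usual wrapper (the exact definition varies between the source repositories):

import torch.nn as nn
import torch.nn.functional as F

class CrossEntropyLoss2d(nn.Module):
    """Cross-entropy over dense predictions: logits (N, C, H, W) vs. targets (N, H, W)."""

    def __init__(self, weight=None):
        super().__init__()
        # NLLLoss accepts spatial targets, so a log-softmax over the
        # channel dimension gives a per-pixel cross-entropy.
        self.nll_loss = nn.NLLLoss(weight)

    def forward(self, outputs, targets):
        return self.nll_loss(F.log_softmax(outputs, dim=1), targets)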
Example #2
def pretrain(train_dataloader, val_dataloader_, network, path=None, split_ratio=0.1):
    highest_iou = -1

    class config:
        lr = 1e-3
        epochs = 100
        path = 'checkpoint'

    pretrain_config = config()
    if path:
        pretrain_config.path = path
    network.to(device)
    criterion_ = CrossEntropyLoss2d()
    optimiser_ = torch.optim.Adam(network.parameters(), lr=pretrain_config.lr)
    loss_meter = AverageValueMeter()
    fiou_tables = []

    for iteration in range(pretrain_config.epochs):
        loss_meter.reset()

        # Pretrain on the fully supervised masks; the weak masks are unused here.
        for i, (img, mask, weak_mask, _) in tqdm(enumerate(train_dataloader)):
            img, mask = img.to(device), mask.to(device)
            optimiser_.zero_grad()
            output = network(img)
            loss = criterion_(output, mask.squeeze(1))
            loss.backward()
            optimiser_.step()
            loss_meter.add(loss.item())
        print('train_loss: %.6f' % loss_meter.value()[0])

        # Halve the learning rate every 50 epochs.
        if (iteration + 1) % 50 == 0:
            for param_group in optimiser_.param_groups:
                param_group['lr'] = param_group['lr'] * 0.5
                print('learning rate:', param_group['lr'])

        val_iou = val(val_dataloader_, network)
        fiou_tables.append(val_iou)
        if val_iou > highest_iou:
            highest_iou = val_iou
            torch.save(network.state_dict(),
                       os.path.join(pretrain_config.path, 'model_%.4f_split_%.3f.pth' % (val_iou, split_ratio)))
            print('pretrained model saved with %.4f.' % highest_iou)
    return fiou_tables
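
pretrain averages the epoch loss with an AverageValueMeter, following the torchnet meter API (reset / add / value). If torchnet is not available, a minimal stand-in covering the usage above:

class AverageValueMeter:
    """Running mean; minimal stand-in for torchnet's meter of the same name."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.sum = 0.0
        self.n = 0

    def add(self, value):
        self.sum += value
        self.n += 1

    def value(self):
        # torchnet returns (mean, std); only the mean is read above.
        mean = self.sum / self.n if self.n else float('nan')
        return (mean, None)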
Example #3
def main(lr, loss_function):
    from datetime import datetime
    writer = SummaryWriter('log/' + str(lr) + '_' + str(loss_function) + '_' +
                           datetime.now().strftime('%b%d_%H-%M-%S'))

    neural_net = Enet(2)
    neural_net.to(device)
    criterion = CrossEntropyLoss2d(weight=torch.Tensor([0.5, 2])).to(
        device) if loss_function == 'CE' else MSE_2D()
    optimizer = torch.optim.Adam(params=neural_net.parameters(),
                                 lr=lr,
                                 weight_decay=1e-5)
    scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer, milestones=[50, 100, 150, 200, 250], gamma=0.25)
    highest_iou = -1
    train_iou_tables, val_iou_tables = [], []

    plt.ion()
    for epoch in range(max_epoch):
        # Stepping at the top of the epoch follows the pre-1.1 PyTorch
        # convention; on current versions, step after the optimizer updates.
        scheduler.step()
        for param_group in optimizer.param_groups:
            _lr = param_group['lr']
        for i, (img, full_mask, _, _) in tqdm(enumerate(train_loader)):
            if full_mask.sum() == 0:
                continue
            img, full_mask = img.to(device), full_mask.to(device)
            optimizer.zero_grad()
            output = neural_net(img)
            loss = criterion(output, full_mask.squeeze(1))
            loss.backward()
            optimizer.step()

        # Evaluate the model on the train and validation sets.
        [train_ious, train_grid] = val(train_loader, neural_net, save=True)
        writer.add_scalars('data/train_dice',
                           {'bdice': train_ious[0], 'fdice': train_ious[1]},
                           global_step=epoch)
        writer.add_image('train_grid', train_grid, epoch)
        train_ious.insert(0, _lr)
        train_iou_tables.append(train_ious)
        [val_ious, val_grid] = val(val_loader, neural_net, save=True)
        writer.add_scalars('data/test_dice',
                           {'bdice': val_ious[0], 'fdice': val_ious[1]},
                           global_step=epoch)
        writer.add_image('val_grid', val_grid, epoch)
        val_ious.insert(0, _lr)
        val_iou_tables.append(val_ious)
        print('%d epoch: training fiou is: %.5f and val fiou is %.5f, with learning rate of %.6f'
              % (epoch, train_ious[2], val_ious[2], _lr))
        try:
            pd.DataFrame(train_iou_tables,
                         columns=['learning rate', 'background', 'foreground']
                         ).to_csv('results/%s/train_lr_%f_%s.csv' %
                                  (filename, lr, loss_function))
            pd.DataFrame(val_iou_tables,
                         columns=['learning rate', 'background', 'foreground']
                         ).to_csv('results/%s/val_lr_%f_%s.csv' %
                                  (filename, lr, loss_function))
        except Exception as e:
            print(e)

        if val_ious[2] > highest_iou:
            print('The highest val fiou is %f' % val_ious[2])
            highest_iou = val_ious[2]
            os.makedirs('full_checkpoint', exist_ok=True)
            torch.save(neural_net.state_dict(),
                       'full_checkpoint/pretrained_%.5f_%s.pth' %
                       (val_ious[2], loss_function))
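
As the comment in the epoch loop notes, scheduler.step() at the top of the epoch is the pre-1.1 PyTorch convention. On current versions the scheduler is stepped once per epoch after the optimizer updates; a minimal sketch (the two-tuple loader is illustrative, the loader above yields four values):

for epoch in range(max_epoch):
    for img, mask in train_loader:  # illustrative loader
        optimizer.zero_grad()
        loss = criterion(neural_net(img), mask)
        loss.backward()
        optimizer.step()
    scheduler.step()  # after the epoch's optimizer updates (PyTorch >= 1.1)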
Example #4
def __init__(self, neural_network, lr, lowerbound, upperbound, lamda=1, sigma=0.02, kernelsize=5, dilation_level=7):
    super().__init__(neural_network, lr, lowerbound, upperbound, lamda, sigma, kernelsize)
    self.optimiser = torch.optim.Adam(self.neural_net.parameters(), lr=lr)
    # Weight [0, 1] zeroes the background class, so the cross-entropy is
    # computed over foreground pixels only.
    self.CEloss_criterion = CrossEntropyLoss2d(torch.Tensor([0, 1]).float()).to(device)
    self.dilation_level = dilation_level
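
With weights, NLLLoss's mean reduction divides by the sum of the per-pixel weights, so a [0, 1] weight vector makes class 0 contribute to neither numerator nor denominator. A small illustration using the wrapper sketched under Example #1 (shapes are hypothetical):

import torch

logits = torch.randn(1, 2, 4, 4)          # (N, C, H, W) logits
target = torch.randint(0, 2, (1, 4, 4))   # (N, H, W) class indices

fg_only = CrossEntropyLoss2d(torch.tensor([0.0, 1.0]))
# Equals the mean per-pixel NLL over the foreground (class 1) pixels only.
print(fg_only(logits, target))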
Example #5
    def train_epoch(self):
        self.model.train()

        n_class = max(get_label_classes())
        criterion = CrossEntropyLoss2d()
        train_loss = 0
        for batch_idx, (data, target,
                        _) in tqdm.tqdm(enumerate(self.train_loader),
                                        total=len(self.train_loader),
                                        desc='Train epoch=%d' % self.epoch,
                                        ncols=80,
                                        leave=False):

            iteration = batch_idx + self.epoch * len(self.train_loader)

            # Skip batches already consumed when resuming from a checkpoint.
            if self.iteration != 0 and (iteration - 1) != self.iteration:
                continue

            self.iteration = iteration

            # if self.iteration % self.interval_validate == 0:
            #     self.validate()

            assert self.model.training

            self.adjust_learning_rate(self.optim, iteration, args=self.args)

            if self.cuda:
                data, target = data.cuda(), target.cuda()

            self.optim.zero_grad()

            score = self.model(data, self.input_size)
            loss = criterion(score, target)
            loss /= len(data)

            if np.isnan(loss.item()):
                raise ValueError('loss is nan while training')

            loss.backward()

            self.optim.step()

            train_loss += loss.item()

            metrics = []

            lbl_pred = score.data.max(1)[1].cpu().numpy()
            lbl_true = target.data.cpu().numpy()

            for lt, lp in zip(lbl_true, lbl_pred):
                acc, acc_cls, mean_iu, fwavacc = \
                    label_accuracy_score([lt], [lp], n_class=n_class)
                metrics.append((acc, acc_cls, mean_iu, fwavacc))
            metrics = np.mean(metrics, axis=0)

            # Log averaged metrics every 100 iterations.
            if self.iteration % 100 == 0:
                metrics = {
                    'loss': train_loss / 100,
                    'acc': metrics[0],
                    'acc_cls': metrics[1],
                    'mean_iu': metrics[2],
                    'fwavacc': metrics[3]
                }
                self.train_loss_list.append(metrics)

                # print('train loss: %.4f (epoch: %d, step: %d)' % \
                #   (metrics[-1]['loss'],self.epoch, self.iteration%len(self.train_loader))

                # image = data[0].data.cpu()
                # image[0] = image[0] + 122.67891434
                # image[1] = image[1] + 116.66876762
                # image[2] = image[2] + 104.00698793
                # step = self.iteration % len(self.train_loader)
                # title = 'input: (epoch: %d, step: %d)' % (self.epoch,step)
                # vis.image(image, win=win1, env='fcn', opts=dict(title=title))
                # title = 'output (epoch: %d, step: %d)' % (self.epoch,step)
                # vis.image(index2rgb(lbl_pred[0]),
                #           win=win2, env='fcn', opts=dict(title=title))
                # title = 'target (epoch: %d, step: %d)' % (self.epoch,step)
                # vis.image(index2rgb(lbl_true[0]),
                #           win=win3, env='fcn', opts=dict(title=title))
                # epoch_loss = train_loss / 100
                # x = np.arange(1, len(epoch_loss) + 1, 1)
                # title = 'loss (epoch: %d, step: %d)' % (self.epoch,step)
                # vis.line(np.array(epoch_loss), x, env='fcn', win=win0,
                #          opts=dict(title=title))

                train_loss = 0
            if self.iteration >= self.max_iter:
                break
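
Examples #5 and #7 both score predictions with label_accuracy_score, which is assumed rather than shown here. A common confusion-matrix implementation of these four metrics (this sketch follows the widely used version from wkentaro/pytorch-fcn):

import numpy as np

def _fast_hist(label_true, label_pred, n_class):
    # Confusion matrix for one image pair, ignoring out-of-range labels.
    mask = (label_true >= 0) & (label_true < n_class)
    return np.bincount(
        n_class * label_true[mask].astype(int) + label_pred[mask],
        minlength=n_class ** 2,
    ).reshape(n_class, n_class)

def label_accuracy_score(label_trues, label_preds, n_class):
    """Returns (overall acc, mean class acc, mean IU, freq-weighted IU)."""
    hist = np.zeros((n_class, n_class))
    for lt, lp in zip(label_trues, label_preds):
        hist += _fast_hist(lt.flatten(), lp.flatten(), n_class)
    acc = np.diag(hist).sum() / hist.sum()
    acc_cls = np.nanmean(np.diag(hist) / hist.sum(axis=1))
    iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))
    mean_iu = np.nanmean(iu)
    freq = hist.sum(axis=1) / hist.sum()
    fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
    return acc, acc_cls, mean_iu, fwavacc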
Example #6
# SegNet, FCN8, FCN16, FCN32, PSPNet, UNet
from networks.SegNet import *
Net = SegNet

model = Net(NUM_CLASSES)
if cuda_enabled:
    model = model.cuda()

model.train()

loader = DataLoader(
    VOCTrain(DATASET_PATH, input_transform, target_transform), 
    num_workers=NUM_WORKERS, batch_size=BATCH_SIZE, shuffle=True
)

criterion = CrossEntropyLoss2d()

#optimizer = Adam(model.parameters(), lr=1e-3) # default Adam
optimizer = SGD(model.parameters(), lr=0.1, momentum=0.9) # default SGD
#optimizer = SGD(model.parameters(), lr=1e-3, momentum=.9) # original SGD
#optimizer = Adadelta(model.parameters()) # default Adadelta
#optimizer = Adagrad(model.parameters()) # default Adagrad
#optimizer = Adamax(model.parameters()) # default Adamax
#optimizer = ASGD(model.parameters()) # default ASGD
#optimizer = LBFGS(model.parameters()) # default LBFGS
#optimizer = RMSprop(model.parameters()) # default RMSprop
#optimizer = Rprop(model.parameters()) # default Rprop

iteration = 1
for epoch in range(1, NUM_EPOCHS+1):
    epoch_loss = []
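
The snippet breaks off just inside the epoch loop. A typical continuation, following the pattern of the other training loops on this page (the variable names and the assumption that target_transform yields (N, H, W) class indices are illustrative):

for epoch in range(1, NUM_EPOCHS + 1):
    epoch_loss = []
    for images, labels in loader:
        if cuda_enabled:
            images, labels = images.cuda(), labels.cuda()
        optimizer.zero_grad()
        outputs = model(images)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        epoch_loss.append(loss.item())
        iteration += 1
    print('epoch %d: mean loss %.4f' % (epoch, sum(epoch_loss) / len(epoch_loss)))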
Example #7
def validate(self):
    training = self.model.training
    self.model.eval()

    n_class = max(get_label_classes())
    criterion = CrossEntropyLoss2d()

    val_loss = 0
    label_trues, label_preds = [], []

    for batch_idx, (data, target) in tqdm.tqdm(
            enumerate(self.val_loader), total=len(self.val_loader),
            desc='Valid iteration=%d' % self.iteration, ncols=80,
            leave=False):

        if self.cuda:
            data, target = data.cuda(), target.cuda()
        # No gradients are needed for validation.
        with torch.no_grad():
            score = self.model(data)
            loss = criterion(score, target)

        if np.isnan(loss.item()):
            raise ValueError('loss is nan while validating')

        val_loss += loss.item() / len(data)

        imgs = data.cpu()
        lbl_pred = score.max(1)[1].cpu().numpy()
        lbl_true = target.cpu()

        for img, lt, lp in zip(imgs, lbl_true, lbl_pred):
            img, lt = self.val_loader.dataset.untransform(img, lt)
            label_trues.append(lt)
            label_preds.append(lp)

    metrics = label_accuracy_score(label_trues, label_preds, n_class)

    val_loss /= len(self.val_loader)

    mean_iu = metrics[2]
    is_best = mean_iu > self.best_mean_iu
    if is_best:
        self.best_mean_iu = mean_iu

    filename = '%s/epoch-%d.pth' % (self.checkpoint_dir, self.epoch)
    torch.save({
        'epoch': self.epoch,
        'iteration': self.iteration,
        'arch': self.model.__class__.__name__,
        'optim_state_dict': self.optim.state_dict(),
        'model_state_dict': self.model.state_dict(),
        'best_mean_iu': self.best_mean_iu,
    }, filename)

    if is_best:
        # Copy the checkpoint just written rather than re-joining its path.
        shutil.copy(filename, osp.join(self.out, 'model_best.pth.tar'))

    metrics = {
        'loss': val_loss,  # averaged validation loss, not the last batch's
        'acc': metrics[0],
        'acc_cls': metrics[1],
        'mean_iu': metrics[2],
        'fwavacc': metrics[3]
    }
    self.val_loss_list.append(metrics)

    if training:
        self.model.train()
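
Restoring a checkpoint written by validate() is symmetric; a sketch with an illustrative path (the keys match the dict saved above):

checkpoint = torch.load('checkpoints/epoch-10.pth')  # illustrative path
model.load_state_dict(checkpoint['model_state_dict'])
optim.load_state_dict(checkpoint['optim_state_dict'])
start_epoch = checkpoint['epoch'] + 1
best_mean_iu = checkpoint['best_mean_iu']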