Example #1
import torch.nn.functional as F

# LRSchedule.set_lr and progress_bar are helpers from the surrounding project.
def train_epoch(model,
                opt,
                lr_scheduler,
                epoch,
                dataloader,
                gpu_id=0,
                verbose=True):
    _ = model.train()
    batches_per_epoch = len(dataloader)
    train_loss, correct, total = 0, 0, 0
    for batch_idx, (data, targets) in enumerate(dataloader):
        # Variable() is deprecated; tensors moved to the GPU work directly.
        data, targets = data.cuda(gpu_id), targets.cuda(gpu_id)

        # Set LR
        LRSchedule.set_lr(opt,
                          lr_scheduler(epoch + batch_idx / batches_per_epoch))

        opt.zero_grad()
        outputs = model(data)
        loss = F.cross_entropy(outputs, targets)
        loss.backward()
        opt.step()

        train_loss += loss.item()
        predicted = outputs.argmax(dim=1)
        total += targets.size(0)
        correct += predicted.eq(targets).sum().item()

        if verbose:
            progress_bar(
                batch_idx, batches_per_epoch, 'Loss: %.3f | Acc: %.3f%%' %
                (train_loss / (batch_idx + 1), 100. * correct / total))

    return float(correct) / total
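Every example on this page funnels through LRSchedule.set_lr, which the snippets don't define. A minimal sketch of what it presumably does, assuming the standard PyTorch idiom of writing the rate into each parameter group (the real class in the source project may carry more state):

class LRSchedule:
    @staticmethod
    def set_lr(optimizer, lr):
        # Overwrite the learning rate of every parameter group in place.
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr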
Example #2
def train_step(self, data, targets, progress):
    # `progress` is the scheduler's position in training (e.g. a fractional epoch).
    self.lr = self.lr_scheduler(progress)
    LRSchedule.set_lr(self.opt, self.lr)

    self.opt.zero_grad()
    outputs = self(data)
    loss = F.cross_entropy(outputs, targets)
    loss.backward()
    self.opt.step()
    return outputs, loss.item()
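A hedged sketch of how this method might be driven, borrowing the fractional-epoch progress computation from Example #1 (the enclosing class and loop are not shown on this page, so the names here are assumptions):

# Hypothetical driver; assumes `model` is an instance of the class defining train_step.
for epoch in range(num_epochs):
    for batch_idx, (data, targets) in enumerate(dataloader):
        progress = epoch + batch_idx / len(dataloader)
        outputs, loss = model.train_step(data, targets, progress)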
Example #3
def set_progress(optimizer, lr_scheduler, progress):
    lr = lr_scheduler(progress)
    LRSchedule.set_lr(optimizer, lr)
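Example #3 treats lr_scheduler as a plain callable from a progress value to a learning rate. A minimal sketch of one such callable, assuming linear decay over a progress fraction in [0, 1]; the schedules actually used by the source project are not shown here:

def make_linear_schedule(lr_init=0.1, lr_final=0.0):
    # Returns a callable mapping progress in [0, 1] to a linearly
    # interpolated learning rate.
    def lr_scheduler(progress):
        progress = min(max(progress, 0.0), 1.0)
        return lr_init + (lr_final - lr_init) * progress
    return lr_scheduler

# Usage with Example #3:
# set_progress(optimizer, make_linear_schedule(0.1), progress)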
Example #4
        # Cosine warm-restart bookkeeping (snippet starts mid-function):
        # Tcur counts epochs since the last restart and `mult` stretches
        # the cycle length Ti after each restart, SGDR-style.
        mult = 2
        Tcur = 0

    for epoch in range(args.epochs):
        # early stopping
        if tolerance > args.tolerance:
            break
        train_loss = 0

        # Train
        _ = model.train()
        for ids, targets, epoch_progress in problem.iterate(
                mode='train', shuffle=True, batch_size=args.batch_size):
            if args.lr_schedule == 'cosine':
                lr = lr_scheduler(Tcur + epoch_progress, epochs=Ti)
                LRSchedule.set_lr(optimizer, lr)
                print('learning rate:{}'.format(lr))
            else:
                # set_progress(optimizer, lr_scheduler, (epoch + epoch_progress) / args.epochs)
                pass
            loss, preds = train_step(
                model=model,
                optimizer=optimizer,
                ids=ids,
                targets=targets,
                loss_fn=problem.loss_fn,
            )
            train_loss += loss.item()
            train_metric = problem.metric_fn(to_numpy(targets),
                                             to_numpy(preds))
Example #5
def set_progress(self, progress):
    self.lr = self.lr_scheduler(progress)
    LRSchedule.set_lr(self.optimizer, self.lr)
Example #6
def set_progress(self, progress):
    self.progress = progress
    self.epoch = np.floor(progress)
    if self.lr_scheduler is not None:
        self.lr = self.lr_scheduler(progress)
        LRSchedule.set_lr(self.opt, self.lr)
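The cosine branch in Example #4 calls the scheduler with the position inside the current restart cycle (Tcur + epoch_progress) and the cycle length Ti. A compatible sketch, assuming plain cosine annealing in the style of SGDR warm restarts; the function name and defaults are illustrative, not taken from the source project:

import numpy as np

def cosine_schedule(progress, epochs, lr_init=0.1, lr_min=0.0):
    # Anneal from lr_init at progress=0 down to lr_min at progress=epochs.
    frac = min(progress / epochs, 1.0)
    return lr_min + 0.5 * (lr_init - lr_min) * (1 + np.cos(np.pi * frac))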