Example #1
0
    def train(self, epoch):
        """Run one training epoch.

        Iterates over ``self.train_dataloader``, performs forward/backward/step
        per batch, optionally decays the LR for SGD, and logs running loss and
        top-1 accuracy to TensorBoard and stdout.

        Args:
            epoch: Current epoch index, used for LR scheduling and log output.
        """
        self.model.train()
        losses = AverageMeter()
        top1 = AverageMeter()
        pbar = tqdm(self.train_dataloader)
        for batch_idx, data in enumerate(pbar):
            self.iters += 1
            images, labels = data['images'], data['labels']
            if self.use_cuda:
                images, labels = images.cuda(), labels.cuda()
            outputs = self.model(images)
            loss = self.criterion(outputs, labels)
            self.optimizer.zero_grad()
            # BUG FIX: was `loss.backward(loss.data)`, which passes the loss
            # value as the upstream gradient and therefore scales every
            # parameter gradient by the loss magnitude. A scalar loss takes a
            # plain backward() call.
            loss.backward()
            self.optimizer.step()
            if self.args.optimizer_type == 'sgd':
                adjust_learning_rate(self.optimizer, self.args.base_lr, epoch,
                                     self.args.lr_decay_epoch)
            prec1 = accuracy(outputs.data, labels.data, topk=(1, ))
            # .item() replaces the deprecated `loss.view(-1); loss.data[0]`
            # idiom (PyTorch <= 0.3); it returns a plain Python float.
            loss_val = loss.item()
            losses.update(loss_val, images.size(0))
            top1.update(prec1[0].detach().item(), images.size(0))
            self.writer.add_scalar('train/loss', loss_val, self.iters)
            self.writer.add_scalar('train/acc', top1.val, self.iters)

            if batch_idx % self.args.log_interval == 0:
                print('epoch:{}, iter:{}/{}, loss:{}, acc:{}'.format(
                    epoch, batch_idx, self.iters, losses.avg,
                    round(top1.avg, 6)))
                # Restart the running averages after each logging window.
                losses.reset()
                top1.reset()
Example #2
0
        score_train = trainer.get_scores()
        precision = score_train['precision']
        mAP = score_train['mAP']


        batch_time.update(time.time()-start_time)
        start_time = time.time()
        tbar.set_description('Train loss: %.3f; precision: %.3f; mAP: %.3f; data time: %.3f; batch time: %.3f'% 
                (train_loss / (i_batch + 1), precision, mAP, data_time.avg, batch_time.avg))
    writer.add_scalar('loss', train_loss/len(tbar), epoch)
    writer.add_scalar('mAP/train', mAP, epoch)
    writer.add_scalar('precision/train', precision, epoch)
    writer.add_scalar('distance/train', score_train['distance'], epoch)

    trainer.reset_metrics()
    data_time.reset()
    batch_time.reset()

    if epoch % 1 == 0:
        with torch.no_grad():
            model.eval()
            print("evaluating...")

            if test: 
                tbar = tqdm(dataloader_test)
            else: 
                tbar = tqdm(dataloader_val)
            
            start_time = time.time()
            for i_batch, sample_batched in enumerate(tbar):
                data_time.update(time.time()-start_time)