Example #1
	def _train(self, epoch):
		train_loss = MetricTracker()

		for idx, batch in enumerate(self.train_loader):

			image = batch['image'].to(self.device)
			label = batch['label'].to(self.device)

			self.optimizer.zero_grad()

			out = self.model(image)
			loss = self.criterion(out, label)

			loss.backward()

			self.optimizer.step()
			self.lr_scheduler.step()
			
			train_loss.update(loss.item(), out.shape[0])
			print(f'#{idx} -- loss: {train_loss.avg:.4f}')
		print(f'Training loss: {train_loss.avg:.4f}')

		return {'loss': train_loss.avg}
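
None of these snippets include the MetricTracker class itself; the calls in this example (and in Examples #2, #5, and #6 below) only require an update(value, n) method and an avg attribute. A minimal sketch consistent with that usage follows; the field names and the class body are assumptions, not the original implementation.

# Minimal MetricTracker sketch inferred from how the examples call it
# (update(value, n) plus a running .avg); the real class may differ.
class MetricTracker:
    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0.0    # last value passed to update()
        self.sum = 0.0    # count-weighted running sum
        self.count = 0    # number of samples seen so far
        self.avg = 0.0    # running average

    def update(self, val, n=1):
        # val is typically a per-batch mean (e.g. loss.item()); n is the batch size
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count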
Example #2
def train(trainloader, model, optimizer, lossfunc, label_type, epoch,
          use_cuda):

    lossTracker = MetricTracker()

    # set model to train mode
    model.train()

    # main training loop
    for idx, data in enumerate(tqdm(trainloader, desc="training")):

        numSample = data["image"].size(0)

        # unpack sample
        bands = data["image"]
        if label_type == 'multi_label':
            labels = data["label"]
        else:
            labels = (torch.max(data["label"], 1)[1]).type(torch.long)

        # move data to gpu if model is on gpu
        if use_cuda:
            bands = bands.to(torch.device("cuda"))
            labels = labels.to(torch.device("cuda"))

        # reset gradients
        optimizer.zero_grad()

        # forward pass
        logits = model(bands)
        loss = lossfunc(logits, labels)

        # backward pass
        loss.backward()
        optimizer.step()

        # update running loss, weighted by the number of samples in the batch
        lossTracker.update(loss.item(), numSample)

    # train_writer.add_scalar("loss", lossTracker.avg, epoch)
    wandb.log({'loss': lossTracker.avg, 'epoch': epoch})

    print('Train loss: {:.6f}'.format(lossTracker.avg))
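
In Example #2, (torch.max(data["label"], 1)[1]).type(torch.long) turns one-hot (or per-class score) labels into the class-index tensor that single-label losses such as nn.CrossEntropyLoss expect; torch.argmax is the more direct spelling of the same operation. A small illustration with made-up labels:

import torch

label = torch.tensor([[0., 1., 0.],
                      [1., 0., 0.]])   # one-hot labels, shape (batch, num_classes)

idx_max = torch.max(label, 1)[1]       # tensor([1, 0]), as written in Example #2
idx_arg = torch.argmax(label, dim=1)   # same result, more explicit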
Example #3
    def _train(self, epoch):
        step = 0
        metric = MetricTracker()
        # ensure train mode in case a validation pass left the model in eval mode
        self.model.train()
        for idx, sample in enumerate(self.train_load):
            step += 1
            self.optimizer.zero_grad()
            img = sample['img'].to(self.device)
            # .to() is not in-place: assign the result, otherwise the labels stay on the CPU
            lab = sample['lab'].float().to(self.device)
            out = self.model(img)
            loss = self.criterion(out, lab)

            # backward
            loss.backward()
            self.optimizer.step()
            self.lr_scheduler.step()

            # update running loss average and accuracy
            metric.update_avg(loss)
            metric.update_acc(out, lab)

            print(f"train--step:{step}/epoch:{epoch+1}--",
                  f"train_loss: {metric.avg:.4f}",
                  f"acc:{metric.acc:.4f}",
                  f"lr: {self.lr_scheduler.get_lr()[0]: .2f}")
            # tensorboard
            self.writer.add_scalar('train_loss', metric.avg, step)
        print(f'---Metrics in {epoch+1}/{self.epochs}---',
              f'Training Loss : {metric.avg}',
              f'Acc : {metric.acc}')

        return {'loss': metric.avg, 'acc': metric.acc}
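
Example #3 above (and Example #4 below) uses a different MetricTracker interface: update_avg(loss), update_acc(out, lab), and avg/acc attributes. That class is not shown either; one possible sketch, assuming raw logits and binary or multi-label float targets (which is what sample['lab'].float() and criterion(out, lab) suggest), is given below. The field names, the sigmoid, and the 0.5 threshold are all assumptions.

import torch

class MetricTracker:
    # Assumed shape of the tracker used in Examples #3 and #4, not the original code.
    def __init__(self):
        self.loss_sum, self.steps = 0.0, 0
        self.correct, self.total = 0, 0
        self.avg, self.acc = 0.0, 0.0

    def update_avg(self, loss):
        # accepts a scalar tensor or a float
        self.loss_sum += float(loss)
        self.steps += 1
        self.avg = self.loss_sum / self.steps

    def update_acc(self, out, lab):
        # threshold sigmoid(logits) at 0.5 and count element-wise hits
        preds = (torch.sigmoid(out) > 0.5).float()
        self.correct += (preds == lab).sum().item()
        self.total += lab.numel()
        self.acc = self.correct / self.total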
Example #4
    def _valid(self, epoch):
        step = 0
        metric = MetricTracker()
        self.model.eval()
        for idx, sample in enumerate(self.valie_load):
            step += 1
            img = sample['img'].to(self.device)
            # .to() is not in-place: assign the result so the labels reach the device
            lab = sample['lab'].float().to(self.device)
            out = self.model(img)
            loss = self.criterion(out, lab)

            # update acc&avg
            metric.update_avg(loss)
            metric.update_acc(out, lab)

            if step % 500 == 0:
                print(f"valid--step:{step}/epoch:{epoch+1}--",
                      f"valid_loss:{metric.avg:.4f}",
                      f"acc:{metric.acc:.4f}")

            self.writer.add_scalar('valid_loss', metric.avg, step)
        print(f'----Valid---',
              f'Valid_loss:{metric.avg}',
              f'Acc:{metric.acc}')
        return {'valid_loss': metric.avg, 'acc': metric.acc}
Example #5
	def _valid(self, epoch):
		valid_loss = MetricTracker()

		#valid_acc  = MetricTracker()

		self.model.eval()

		# Iterate over data
		for idx, sample in enumerate(self.valid_loader):
			# get the inputs
			image = sample['image'].to(self.device)
			label = sample['label'].to(self.device)

			# forward
			out  = self.model(image)
			loss = self.criterion(out, label)

			valid_loss.update(loss.item(), out.shape[0])

		print(f'Valid Loss: {valid_loss.avg:.4f}')

		self.model.train()
		return {'loss': valid_loss.avg}
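
The validation loops in Examples #4 and #5 call model.eval() but never turn off autograd, so every forward pass still records activations for a backward pass that never runs. Wrapping the loop in torch.no_grad() removes that memory overhead without changing any of the reported numbers. A sketch of the pattern as a standalone function (the model, loader, criterion and device arguments are placeholders for whatever the trainer holds):

import torch

def validate(model, valid_loader, criterion, device):
    model.eval()
    loss_sum, n_samples = 0.0, 0
    with torch.no_grad():                       # no graph is built inside this block
        for sample in valid_loader:
            image = sample['image'].to(device)
            label = sample['label'].to(device)
            out = model(image)
            loss = criterion(out, label)
            loss_sum += loss.item() * out.shape[0]
            n_samples += out.shape[0]
    model.train()                               # restore train mode, as Example #5 does
    return {'loss': loss_sum / max(n_samples, 1)}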
Example #6
# NOTE: the opening of this snippet was cut off in the excerpt; the function header
# and the correct/total counters below are assumed so the accuracy helper is complete.
def evaluate(net, dataloader, device):
    correct, total = 0, 0
    with torch.no_grad():
        for (images, labels) in dataloader:
            images, labels = images.to(device), labels.to(device)
            outputs = net(images)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()

    return correct / total


net = ConvNet().to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

mt = MetricTracker()

for epoch in range(EPOCHS):
    net.train()

    running_loss = 0.0
    for i, (inputs, labels) in enumerate(trainloader):
        inputs, labels = inputs.to(device), labels.to(device)

        # zero the parameter gradients
        optimizer.zero_grad()

        # forward + backward + optimize
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()