def _train_epoch(self, epoch_id, epochs):
        """Run one training pass over ``self.train_loader``.

        Args:
            epoch_id (int): zero-based index of the current epoch
                (displayed as ``epoch_id + 1``).
            epochs (int): total number of epochs, for the progress bar.

        Returns:
            tuple: ``(average_loss, average_accuracy)`` over the epoch.
        """
        losses = tools.AverageMeter()
        accuracies = tools.AverageMeter()

        # Number of iterations = total training files count / batch_size
        batch_size = self.train_loader.batch_size
        it_count = len(self.train_loader)

        with tqdm(total=it_count,
                  desc="Epochs {}/{}".format(epoch_id + 1, epochs),
                  bar_format=
                  '{l_bar}{bar}| {n_fmt}/{total_fmt} [{remaining}{postfix}]'
                  ) as pbar:

            for ind, loader_dict in enumerate(self.train_loader):

                # forward pass (volatile=False: gradients are required)
                loss, acc = self._batch_train_validation(loader_dict,
                                                         volatile=False)

                # backward + optimize
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()

                # loss.data[0] raises IndexError on 0-dim tensors in
                # pytorch >= 0.5; .item() is the supported accessor.
                loss_value = loss.item()
                losses.update(loss_value, batch_size)
                accuracies.update(acc, batch_size)

                # Update pbar
                pbar.set_postfix(
                    OrderedDict(loss='{0:1.5f}'.format(loss_value),
                                acc='{0:1.5f}'.format(acc)))
                pbar.update(1)
        return losses.avg, accuracies.avg
# --- Ejemplo n.º 2 (scraped snippet separator; "0" was a vote count) ---
    def _validate_epoch(self, valid_loader, threshold):
        """Evaluate the network over one full pass of ``valid_loader``.

        Args:
            valid_loader: DataLoader yielding ``(images, targets)`` batches.
            threshold (float): probability cutoff for binarizing predictions.

        Returns:
            tuple: ``(avg_loss, avg_dice, images, targets, preds)`` where the
            last three are the final batch seen (``None`` if the loader is
            empty), kept for visualization by the caller.
        """
        losses = tools.AverageMeter()
        dice_coeffs = tools.AverageMeter()

        it_count = len(valid_loader)
        batch_size = valid_loader.batch_size

        images = None  # To save the last images batch
        targets = None  # To save the last target batch
        preds = None  # To save the last prediction batch
        with tqdm(total=it_count, desc="Validating", leave=False) as pbar:
            for ind, (images, targets) in enumerate(valid_loader):
                if self.use_cuda:
                    images = images.cuda()
                    targets = targets.cuda()

                # Pure inference mode: torch.no_grad() replaces the removed
                # Variable(volatile=True) API (pytorch >= 0.4).
                with torch.no_grad():
                    # forward
                    logits = self.net(images)
                    # F.sigmoid is deprecated in favor of torch.sigmoid
                    probs = torch.sigmoid(logits)
                    preds = (probs > threshold).float()

                    loss = self._criterion(logits, targets)
                    acc = losses_utils.dice_coeff(preds, targets)

                # loss.data[0] no longer works for pytorch >= 0.5
                losses.update(loss.item(), batch_size)
                dice_coeffs.update(acc.item(), batch_size)
                pbar.update(1)

        return losses.avg, dice_coeffs.avg, images, targets, preds
# --- Ejemplo n.º 3 (scraped snippet separator; "0" was a vote count) ---
    def _validate_epoch(self, valid_loader, threshold):
        """Run validation for one epoch.

        Args:
            valid_loader: DataLoader yielding ``(images, targets)`` batches.
            threshold (float): probability cutoff for binarizing predictions.

        Returns:
            tuple: ``(avg_loss, avg_dice, batch_results)`` where
            ``batch_results`` holds the first two samples of the last
            ``[images, targets, preds]`` batch, moved to CPU for display.
        """
        losses = tools.AverageMeter()
        dice_coeffs = tools.AverageMeter()

        it_count = len(valid_loader)
        batch_size = valid_loader.batch_size

        images = None  # To save the last images batch
        targets = None  # To save the last target batch
        preds = None  # To save the last prediction batch
        with tqdm(total=it_count, desc="Validating", leave=False) as pbar:
            for ind, (images, targets) in enumerate(valid_loader):
                if self.use_cuda:
                    images = images.to(self.device)
                    targets = targets.to(self.device)

                with torch.no_grad():
                    # forward
                    logits = self.net(images)
                    probs = torch.sigmoid(logits)
                    preds = (probs > threshold).float()

                    loss = self.criterion(logits, targets)
                    acc = losses_utils.dice_coeff(preds, targets)
                    # .data is deprecated; .item() reads the scalar directly
                    losses.update(loss.item(), batch_size)
                    dice_coeffs.update(acc.item(), batch_size)
                pbar.update(1)
        # Keep only two samples of the final batch, on CPU, for visualization.
        batch_results = [ x.cpu() if self.use_cuda else x for x in [images[:2], targets[:2], preds[:2]]]
        return losses.avg, dice_coeffs.avg, batch_results
# --- Ejemplo n.º 4 (scraped snippet separator; "0" was a vote count) ---
    def _train_epoch(self, train_loader, optimizer, threshold):
        """Train the network for one epoch.

        Args:
            train_loader: DataLoader yielding ``(inputs, target, mask)``.
            optimizer: the optimizer to step after each batch.
            threshold (float): probability cutoff for binarizing predictions.

        Returns:
            tuple: ``(average_loss, average_dice)`` over the epoch.
        """
        losses = tools.AverageMeter()
        dice_coeffs = tools.AverageMeter()

        # Total training files count / batch_size
        batch_size = train_loader.batch_size
        it_count = len(train_loader)
        with tqdm(total=it_count,
                  desc="Epochs {}/{}".format(self.epoch_counter + 1, self.max_epochs),
                  bar_format='{l_bar}{bar}| {n_fmt}/{total_fmt} [{remaining}{postfix}]'
                  ) as pbar:
            for ind, (inputs, target, mask) in enumerate(train_loader):

                if self.use_cuda:
                    inputs = inputs.cuda()
                    target = target.cuda()
                    mask = mask.cuda()
                # Variable is a no-op wrapper since pytorch 0.4; tensors
                # participate in autograd directly, so no wrapping is needed.

                # forward
                logits = self.net.forward(inputs)
                # F.sigmoid is deprecated in favor of torch.sigmoid
                probs = torch.sigmoid(logits)
                pred = (probs > threshold).float()

                # backward + optimize
                loss = self._criterion(logits, target, mask)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                # print statistics; mask restricts the metric to valid pixels
                acc = losses_utils.dice_coeff(pred*mask, target*mask)

                # loss.data[0] no longer works for pytorch >= 0.5, and
                # passing raw .data stores tensors instead of floats;
                # .item() is the supported scalar accessor.
                loss_value = loss.item()
                acc_value = acc.item()
                losses.update(loss_value, batch_size)
                dice_coeffs.update(acc_value, batch_size)

                # Update pbar
                pbar.set_postfix(OrderedDict(loss='{0:1.5f}'.format(loss_value),
                                             dice_coeff='{0:1.5f}'.format(acc_value)))
                pbar.update(1)
        return losses.avg, dice_coeffs.avg
    def _validate_epoch(self):
        """Evaluate the network over one pass of ``self.valid_loader``.

        Returns:
            tuple: ``(average_loss, average_accuracy)`` over the epoch.
        """
        losses = tools.AverageMeter()
        accuracies = tools.AverageMeter()

        it_count = len(self.valid_loader)
        # BUGFIX: batch size must come from the loader actually being
        # iterated (valid_loader), not train_loader — the two may differ,
        # which would skew the weighted averages.
        batch_size = self.valid_loader.batch_size
        with tqdm(total=it_count, desc="Validating", leave=False) as pbar:
            for ind, loader_dict in enumerate(self.valid_loader):
                # inference only (volatile=True: no gradients required)
                loss, acc = self._batch_train_validation(loader_dict,
                                                         volatile=True)

                # loss.data[0] no longer works for pytorch >= 0.5
                losses.update(loss.item(), batch_size)
                accuracies.update(acc, batch_size)
                pbar.update(1)

        return losses.avg, accuracies.avg
# --- Ejemplo n.º 6 (scraped snippet separator; "0" was a vote count) ---
    def _train_epoch(self, train_loader, threshold):
        """Train the network for one epoch.

        Args:
            train_loader: DataLoader yielding ``(inputs, target)`` batches.
            threshold (float): probability cutoff for binarizing predictions.

        Returns:
            tuple: ``(average_loss, average_dice)`` over all samples of the
            epoch.
        """
        losses = tools.AverageMeter()  # fresh meter for each epoch
        dice_coeffs = tools.AverageMeter()

        # Total training files count / batch_size
        batch_size = train_loader.batch_size
        it_count = len(train_loader)
        with tqdm(total=it_count,
                  desc=f"Epochs {self.epoch_counter + 1}/{self.max_epochs}",
                  bar_format='{l_bar}{bar}| {n_fmt}/{total_fmt} [{remaining}{postfix}]'
                  ) as pbar:
            for ind, (inputs, target) in enumerate(train_loader):

                # e.g. image = [8, 3, 572, 572], mask = [8, 388, 388]
                inputs, target = inputs.contiguous(), target.contiguous()
                if self.use_cuda:
                    inputs = inputs.to(self.device)
                    target = target.to(self.device)

                # forward  (logits e.g. [8, 388, 388])
                logits = self.net.forward(inputs)
                probs = torch.sigmoid(logits)
                pred = (probs > threshold).float()

                # backward + optimize
                loss = self.criterion(logits, target)
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
                # NOTE(review): scheduler stepped per batch, not per epoch —
                # intentional for warm-up/cyclic schedules; confirm.
                if self.scheduler:
                    self.scheduler.step()

                # print statistics
                acc = losses_utils.dice_coeff(pred, target)

                # .data is deprecated; .item() reads the scalar directly.
                # Each update feeds the batch-mean loss / accuracy.
                loss_value = loss.item()
                acc_value = acc.item()
                losses.update(loss_value, batch_size)
                dice_coeffs.update(acc_value, batch_size)

                # Show this batch's loss and accuracy on the progress bar
                pbar.set_postfix(OrderedDict(loss='{0:1.5f}'.format(loss_value),
                                             dice_coeff='{0:1.5f}'.format(acc_value)))
                pbar.update(1)  # advance the bar by one batch

        # Averages over every sample (here: pixel) seen this epoch
        return losses.avg, dice_coeffs.avg