def test(self, model, loader, thresholds, metrics): model.eval() n_batches = len(loader) metric_totals = {m.name: 0 for m in metrics} for inputs, targets, _, _ in loader: inputs = Variable(inputs.cuda(async=True), volatile=True) targets = Variable(targets.cuda(async=True), volatile=True) output = model(inputs) loss = self.tst_criterion(output, targets) loss_data = loss.data[0] labels = targets.data.cpu().numpy() probs = output.data.cpu().numpy() preds = pred_utils.get_predictions(probs, thresholds) for m in metrics: score = m.evaluate(loss_data, preds, probs, labels) metric_totals[m.name] += score for m in metrics: metric_totals[m.name] /= n_batches return metric_totals
def test(self, model, loader, thresholds, metrics): model.eval() loss = 0 probs = [] labels = [] metric_totals = {m.name: 0 for m in metrics} for inputs, targets, aux_inputs, _ in loader: if len(targets.size()) == 1: targets = targets.float().view(-1, 1) inputs = Variable(inputs.cuda(async=True), volatile=True) aux_inputs = Variable(aux_inputs.cuda(async=True), volatile=True) targets = Variable(targets.cuda(async=True), volatile=True) output = model(inputs, aux_inputs) loss += self.tst_criterion(output, targets).data[0] probs = np.vstack([probs, output.data.cpu().numpy()]) labels = np.vstack([labels, targets.data.cpu().numpy()]) loss /= len(loader) preds = pred_utils.get_predictions(probs, thresholds) for m in metrics: score = m.evaluate(loss, preds, probs, labels) metric_totals[m.name] = score return metric_totals
def get_pseudo_label_targets(fpaths, model, img_scale, n_labels, thresholds,
                             batch_size=64, n_workers=1):
    """Run ``model`` over the images in ``fpaths`` and return pseudo-labels.

    Args:
        fpaths: list of image file paths fed to ``FileDataset``.
        model: trained model used to produce probabilities.
        img_scale: image size passed to the basic eval transform.
        n_labels: unused; kept for backward interface compatibility.
        thresholds: per-class decision thresholds for binarizing probabilities.
        batch_size: inference batch size (was hard-coded to 64).
        n_workers: DataLoader worker processes (was hard-coded to 1).

    Returns:
        (preds, probs) tuple of thresholded predictions and raw probabilities.
    """
    dataset = FileDataset(fpaths, targets=None,
                          transform=data_aug.get_basic_transform(img_scale))
    # shuffle=False keeps predictions aligned with the order of ``fpaths``.
    dataloader = torch.utils.data.DataLoader(dataset, batch_size, shuffle=False,
                                             pin_memory=False,
                                             num_workers=n_workers)
    probs = pred_utils.get_probabilities(model, dataloader)
    preds = pred_utils.get_predictions(probs, thresholds)
    return preds, probs
def train(self, model, loader, thresholds, epoch, metrics): model.train() loss_data = 0 n_classes = loader.dataset.targets.shape[1] probs = np.empty((0, n_classes)) labels = np.empty((0, n_classes)) metric_totals = {m.name: 0 for m in metrics} cur_iter = int((epoch - 1) * len(loader)) + 1 for inputs, targets, _ in loader: if len(targets.size()) == 1: targets = targets.float().view(-1, 1) inputs = Variable(inputs.cuda(async=True)) targets = Variable(targets.cuda(async=True)) ## Forward Pass output = model(inputs) ## Clear Gradients model.zero_grad() # Loss loss = self.trn_criterion(output, targets) ## Backprop loss.backward() self.optimizer.step() ### Adjust Lr ### if self.lr_adjuster.iteration_type == 'mini_batch': self.lr_adjuster.adjust(self.optimizer, cur_iter) cur_iter += 1 loss_data += loss.data[0] probs = np.vstack([probs, output.data.cpu().numpy()]) labels = np.vstack([labels, targets.data.cpu().numpy()]) loss_data /= len(loader) preds = pred_utils.get_predictions(probs, thresholds) for m in metrics: score = m.evaluate(loss_data, preds, probs, labels) metric_totals[m.name] = score return metric_totals
def train(self, model, loader, thresholds, epoch, n_epochs, metrics): model.train() n_batches = len(loader) cur_iter = int((epoch - 1) * n_batches) + 1 metric_totals = {m.name: 0 for m in metrics} for inputs, targets, _, _ in loader: inputs = Variable(inputs.cuda(async=True)) targets = Variable(targets.cuda(async=True)) output = model(inputs) loss = self.trn_criterion(output, targets) loss_data = loss.data[0] labels = targets.data.cpu().numpy() probs = output.data.cpu().numpy() preds = pred_utils.get_predictions(probs, thresholds) for m in metrics: score = m.evaluate(loss_data, preds, probs, labels) metric_totals[m.name] += score ## Backprop (Calculate gradient) loss.backward() ## Update gradient if cur_iter % self.n_batches_per_step == 0: self.optimizer.step() model.zero_grad() if self.lr_adjuster.iteration_type == 'mini_batch': self.lr_adjuster.adjust(self.optimizer, cur_iter) cur_iter += 1 for m in metrics: metric_totals[m.name] /= n_batches return metric_totals
def train(self, model, loader, thresholds, epoch, metrics): model.train() n_batches = len(loader) cur_iter = int((epoch - 1) * n_batches) + 1 metric_totals = {m.name: 0 for m in metrics} for inputs, targets, aux_inputs, _ in loader: if len(targets.size()) == 1: targets = targets.float().view(-1, 1) inputs = Variable(inputs.cuda(async=True)) aux_inputs = Variable(aux_inputs.cuda(async=True)) targets = Variable(targets.cuda(async=True)) output = model(inputs, aux_inputs) model.zero_grad() loss = self.trn_criterion(output, targets) loss_data = loss.data[0] labels = targets.data.cpu().numpy() probs = output.data.cpu().numpy() preds = pred_utils.get_predictions(probs, thresholds) for m in metrics: score = m.evaluate(loss_data, preds, probs, labels) metric_totals[m.name] += score loss.backward() self.optimizer.step() if self.lr_adjuster.iteration_type == 'mini_batch': self.lr_adjuster.adjust(self.optimizer, cur_iter) cur_iter += 1 for m in metrics: metric_totals[m.name] /= n_batches return metric_totals