Example 1
def all_reduce_tensor(self, tensor, norm=True):
    # Average the tensor across all ranks when running distributed;
    # otherwise fall back to a plain mean on the single process.
    if self.distributed:
        return all_reduce_tensor(tensor,
                                 world_size=self.world_size,
                                 norm=norm)
    else:
        return torch.mean(tensor)
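
The method above wraps a module-level helper of the same name that the listing does not show. A minimal sketch of what such a helper commonly looks like in PyTorch, assuming an initialized torch.distributed process group; the signature is inferred from the call sites in these examples and is otherwise an assumption:

    import torch
    import torch.distributed as dist

    def all_reduce_tensor(tensor, op=dist.ReduceOp.SUM, world_size=1, norm=True):
        # Clone so the caller's tensor is left unmodified.
        reduced = tensor.clone()
        # Sum the value across every rank in the default process group.
        dist.all_reduce(reduced, op=op)
        if norm:
            # Turn the sum into an average, mirroring the torch.mean
            # fallback in the single-process branch above.
            reduced.div_(world_size)
        return reduced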
Example 2
            cur_masks = cur_masks.cuda(non_blocking=True)

            preds = model(ref_imgs, cur_imgs)
            # upsample the predictions to the ground-truth mask resolution
            preds = F.interpolate(preds,
                                  (cur_masks.size()[2], cur_masks.size()[3]),
                                  mode='bilinear',
                                  align_corners=False)
            loss = criterion(preds, cur_masks)
            #loss = lovasz_hinge(preds.squeeze(), cur_masks.squeeze())
            loss = bootstrapped_ce_loss(loss)
            loss += 1. * dice_loss(preds[:, 0].flatten(1),
                                   cur_masks.flatten(1))

            # reduce the whole loss over multi-gpu
            if engine.distributed:
                reduce_loss = all_reduce_tensor(loss,
                                                world_size=engine.world_size)
            else:
                # single process: the raw loss already is the reduced loss,
                # so the add_scalar call below never hits an undefined name
                reduce_loss = loss

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            current_idx = epoch * config.niters_per_epoch + idx
            lr = lr_policy.get_lr(current_idx)

            # the first two groups (typically the backbone) follow the base
            # lr; the remaining (head) groups run at 10x the base lr
            optimizer.param_groups[0]['lr'] = lr
            optimizer.param_groups[1]['lr'] = lr
            for i in range(2, len(optimizer.param_groups)):
                optimizer.param_groups[i]['lr'] = lr * 10

            writer.add_scalar("train/loss",
                              scalar_value=reduce_loss.item(),
                              global_step=current_idx)
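
bootstrapped_ce_loss and dice_loss are imported from elsewhere in the repository. A hedged sketch of the two, assuming criterion was built with reduction='none' so it returns a per-pixel loss map; the 0.25 keep ratio, the smoothing constant, and the sigmoid inside the Dice term are assumptions, not the repository's actual choices:

    import torch

    def bootstrapped_ce_loss(loss, ratio=0.25):
        # Online hard-example mining: keep only the largest k per-pixel
        # losses and average them, so easy pixels stop dominating.
        k = max(1, int(ratio * loss.numel()))
        topk, _ = torch.topk(loss.reshape(-1), k)
        return topk.mean()

    def dice_loss(logits, labels, smooth=1.0):
        # Soft Dice over (N, H*W) tensors, the shape produced by the
        # flatten(1) calls at the call site above.
        probs = logits.sigmoid()
        inter = (probs * labels).sum(dim=1)
        union = probs.sum(dim=1) + labels.sum(dim=1)
        return (1.0 - (2.0 * inter + smooth) / (union + smooth)).mean()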
Example 3
            engine.update_iteration(epoch, idx)

            minibatch = next(dataloader)  # built-in next() works on any Python 3 iterator
            imgs = minibatch['data']
            gts = minibatch['label']
            edge_gts = minibatch['aux_label']

            imgs = imgs.cuda(non_blocking=True)
            gts = gts.cuda(non_blocking=True)
            edge_gts = edge_gts.cuda(non_blocking=True)
            loss, edge_loss, onelevel_segloss, twolevel_segloss = model(
                imgs, gts, edge_gts)

            # reduce the whole loss over multi-gpu
            if engine.distributed:
                reduce_loss = all_reduce_tensor(loss,
                                                world_size=engine.world_size)
                reduce_edge_loss = all_reduce_tensor(
                    edge_loss, world_size=engine.world_size)
                onelevel_segloss = all_reduce_tensor(
                    onelevel_segloss, world_size=engine.world_size)
                twolevel_segloss = all_reduce_tensor(
                    twolevel_segloss, world_size=engine.world_size)
            else:
                # single process: the unreduced losses stand in directly
                reduce_loss, reduce_edge_loss = loss, edge_loss

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            current_idx = epoch * config.niters_per_epoch + idx
            lr = lr_policy.get_lr(current_idx)

            optimizer.param_groups[0]['lr'] = lr
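
Both training loops query lr_policy.get_lr(current_idx) once per iteration; the policy object itself is built outside the shown fragments. A minimal sketch of the polynomial ("poly") decay schedule this interface usually wraps in semantic-segmentation code, with the class name and the 0.9 power being assumptions:

    class PolyLR:
        def __init__(self, base_lr, power=0.9, max_iters=1):
            self.base_lr = base_lr
            self.power = power
            self.max_iters = max_iters

        def get_lr(self, cur_iter):
            # lr = base_lr * (1 - iter / max_iters) ** power, decaying
            # smoothly toward zero over the full training run.
            return self.base_lr * (1 - cur_iter / self.max_iters) ** self.power

Something like lr_policy = PolyLR(config.lr, max_iters=config.niters_per_epoch * total_epochs) would then slot into the loops above unchanged (total_epochs stands in for whatever name the config actually uses).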