Code example #1
 def visualize_image(self,
                     writer,
                     dataset,
                     image,
                     target,
                     output,
                     global_step,
                     num_image=3):
     grid_image = make_grid(image[:num_image].clone().cpu().data,
                            num_image,
                            normalize=True)
     writer.add_image('Image', grid_image, global_step)
     grid_image = make_grid(decode_seg_map_sequence(torch.max(
         output[:num_image], 1)[1].detach().cpu().numpy(),
                                                    dataset=dataset),
                            num_image,
                            normalize=False,
                            range=(0, 255))
     writer.add_image('Predicted label', grid_image, global_step)
     grid_image = make_grid(decode_seg_map_sequence(torch.squeeze(
         target[:num_image], 1).detach().cpu().numpy(),
                                                    dataset=dataset),
                            num_image,
                            normalize=False,
                            range=(0, 255))
     writer.add_image('Groundtruth label', grid_image, global_step)
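All of the visualize_image variants on this page share one pattern: build an image grid with torchvision's make_grid and log it with SummaryWriter.add_image. The self-contained sketch below isolates that pattern with random tensors; the shapes (batch of 3, 21 classes, 256x256) and the log directory are illustrative assumptions, and the colour decoding done by decode_seg_map_sequence is left out here.

import torch
from torch.utils.tensorboard import SummaryWriter
from torchvision.utils import make_grid

writer = SummaryWriter(log_dir='runs/seg_vis_demo')      # assumed log directory
images = torch.rand(3, 3, 256, 256)     # N x C x H x W input batch
logits = torch.rand(3, 21, 256, 256)    # N x num_classes x H x W network output

# Input images: normalize=True rescales the grid into [0, 1] for display.
grid = make_grid(images, 3, normalize=True)
writer.add_image('Image', grid, global_step=0)

# Predictions: argmax over the class dimension gives per-pixel label ids, just like
# torch.max(output[:3], 1)[1] in the snippets here; decode_seg_map_sequence would map
# these ids to dataset-specific colours before gridding.
pred_ids = torch.max(logits, 1)[1]                       # N x H x W integer label map
pred_vis = pred_ids.unsqueeze(1).float() / 21.0          # N x 1 x H x W, scaled to [0, 1]
writer.add_image('Predicted ids', make_grid(pred_vis, 3, normalize=False), global_step=0)
writer.close()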
Code example #2
    def visualize_image(self, writer, dataset, image, target, output,
                        global_step):
        rgb = (image[:3] * self.std + self.mean).clone().cpu().data
        y_ = decode_seg_map_sequence(torch.max(output[:3],
                                               1)[1].detach().cpu().numpy(),
                                     dataset=dataset)
        y = decode_seg_map_sequence(torch.squeeze(target[:3],
                                                  1).detach().cpu().numpy(),
                                    dataset=dataset)
        rgb_y_ = rgb * 0.2 + y_ * 0.8
        rgb_y = rgb * 0.2 + y * 0.8

        grid_image = make_grid(rgb, 3, normalize=False, range=(0, 255))
        writer.add_image('Image', grid_image, global_step)

        grid_image = make_grid(y_, 3, normalize=False, range=(0, 255))
        writer.add_image('Predicted label', grid_image, global_step)

        grid_image = make_grid(y, 3, normalize=False, range=(0, 255))
        writer.add_image('Groundtruth label', grid_image, global_step)

        grid_image = make_grid(rgb_y_, 3, normalize=False, range=(0, 255))
        writer.add_image('Predicted label + RGB', grid_image, global_step)

        grid_image = make_grid(rgb_y, 3, normalize=False, range=(0, 255))
        writer.add_image('Groundtruth label + RGB', grid_image, global_step)
Code example #3
 def save_batch_images(self, imgs, preds, targets, batch_index):
     (filepath, _) = os.path.split(self.args.resume)
     save_path = os.path.join(filepath, 'visualization')
     if not os.path.exists(save_path):
         os.makedirs(save_path)
     grid_image = make_grid(imgs.clone().detach().cpu(), 8, normalize=True)
     save_image(
         grid_image,
         os.path.join(save_path,
                      'batch_{:0>4}-img.jpg'.format(batch_index)))
     grid_image = make_grid(decode_seg_map_sequence(
         torch.max(preds, 1)[1].detach().cpu().numpy(),
         dataset=self.args.dataset),
                            8,
                            normalize=False,
                            range=(0, 255))
     save_image(
         grid_image,
         os.path.join(save_path,
                      'batch_{:0>4}-pred.png'.format(batch_index)))
     grid_image = make_grid(decode_seg_map_sequence(
         torch.squeeze(targets, 1).detach().cpu().numpy(),
         dataset=self.args.dataset),
                            8,
                            normalize=False,
                            range=(0, 255))
     save_image(
         grid_image,
         os.path.join(save_path,
                      'batch_{:0>4}-target.png'.format(batch_index)))
Code example #4
    def visualize_image_four(self,
                             writer,
                             dataset,
                             image,
                             target,
                             output,
                             global_step,
                             istrain=False):
        #print(image.shape)
        if istrain:
            prefix = '/train'
        else:
            prefix = '/val'
        grid_image = make_grid(image[:3, :3].clone().cpu().data,
                               3,
                               normalize=True)
        writer.add_image('Image' + prefix, grid_image, global_step)
        # predicted
        grid_image = make_grid(decode_seg_map_sequence(torch.max(
            output[:3], 1)[1].detach().cpu().numpy(),
                                                       dataset=dataset),
                               3,
                               normalize=False,
                               range=(0, 255))
        writer.add_image('Predicted label' + prefix, grid_image, global_step)
        #gt label
        #if dataset == 'drive':

        grid_image = make_grid(decode_seg_map_sequence(torch.squeeze(
            target[:3], 1).detach().cpu().numpy(),
                                                       dataset=dataset),
                               3,
                               normalize=False,
                               range=(0, 255))
        writer.add_image('Groundtruth label' + prefix, grid_image, global_step)
Code example #5
    def visualize_image(self, dataset, image, target, output, global_step, title='Sample'):
        bitmap = image.clone().cpu().data
        truth  = decode_seg_map_sequence(torch.squeeze(target, 1).detach().cpu().numpy(), dataset=dataset)
        pred   = decode_seg_map_sequence(torch.max(output, 0)[1].detach().cpu().numpy(), dataset=dataset)

        sample = make_grid([bitmap, truth, pred], 3, normalize=True, scale_each=True)

        self.add_image(title, sample, global_step)
Code example #6
 def visualize_image(self, writer, dataset, image, target, output, global_step):
     grid_image = make_grid(image[:3].clone().cpu().data, 3, normalize=True)
     writer.add_image('Image', grid_image, global_step)
     grid_image = make_grid(decode_seg_map_sequence(torch.max(output[:3], 1)[1].detach().cpu().numpy(),
                                                    dataset=dataset), 3, normalize=False, range=(0, 255))
     writer.add_image('Predicted label', grid_image, global_step)
     grid_image = make_grid(decode_seg_map_sequence(torch.squeeze(target[:3], 1).detach().cpu().numpy(),
                                                    dataset=dataset), 3, normalize=False, range=(0, 255))
     writer.add_image('Groundtruth label', grid_image, global_step)
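Every snippet here relies on a decode_seg_map_sequence helper. Below is a minimal sketch of what it typically does in these repositories (modelled on the pytorch-deeplab-xception utilities they derive from): map a batch of integer label masks (N x H x W, numpy) to an RGB float tensor (N x 3 x H x W) in [0, 1] that make_grid can consume. The 3-colour palette is an illustrative assumption, not any dataset's real colour map.

import numpy as np
import torch

_PALETTE = np.array([[0, 0, 0],        # class 0 -> black (background)
                     [128, 0, 0],      # class 1 -> dark red
                     [0, 128, 0]],     # class 2 -> dark green
                    dtype=np.float32)

def decode_segmap(label_mask):
    # One H x W integer mask -> H x W x 3 float RGB image in [0, 1].
    return _PALETTE[label_mask.astype(np.int64)] / 255.0

def decode_seg_map_sequence(label_masks, dataset='pascal'):
    # 'dataset' would normally select the palette; ignored in this sketch.
    rgb = np.stack([decode_segmap(m) for m in label_masks], axis=0)   # N x H x W x 3
    return torch.from_numpy(rgb.transpose(0, 3, 1, 2))                # N x 3 x H x W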
Code example #7
    def visualize_image(
        self,
        writer,
        dataset,
        image,
        target,
        output,
        global_step,
        centers=None,
        reg_x=None,
        reg_y=None,
    ):
        # reg_x = reg[:, 0:1, :, :]
        # reg_y = reg[:, 1:2, :, :]
        grid_image = make_grid(image[:3].clone().cpu().data, 3, normalize=True)
        writer.add_image("Image", grid_image, global_step)
        grid_image = make_grid(
            decode_seg_map_sequence(
                torch.max(output[:3], 1)[1].detach().cpu().numpy(),
                dataset=dataset,
            ),
            3,
            normalize=False,
            range=(0, 255),
        )
        writer.add_image("Predicted label", grid_image, global_step)
        grid_image = make_grid(
            decode_seg_map_sequence(
                torch.squeeze(target[:3], 1).detach().cpu().numpy(),
                dataset=dataset,
            ),
            3,
            normalize=False,
            range=(0, 255),
        )
        writer.add_image("Groundtruth label", grid_image, global_step)
        if centers is not None:
            grid_image = make_grid(
                centers[:3].clone().cpu().data,
                3,
                normalize=True,
            )
            writer.add_image("Centers image", grid_image, global_step)

            grid_image = make_grid(
                reg_x[:3].clone().cpu().data,
                3,
                normalize=True,
            )
            writer.add_image("reg_x image", grid_image, global_step)

            grid_image = make_grid(
                reg_y[:3].clone().cpu().data,
                3,
                normalize=True,
            )
            writer.add_image("reg_y image", grid_image, global_step)
Code example #8
File: summaries.py Project: jamycheung/ISSAFE
    def visualize_image(self,
                        writer,
                        dataset,
                        image,
                        target,
                        output,
                        global_step,
                        event=None):
        if event is None:
            grid_image = make_grid(image[:3].clone().cpu().data,
                                   3,
                                   normalize=True)
            writer.add_image('Image', grid_image, global_step)

            grid_image = make_grid(decode_seg_map_sequence(torch.max(
                output[:3], 1)[1].detach().cpu().numpy(),
                                                           dataset=dataset),
                                   3,
                                   normalize=False,
                                   range=(0, 255))
            writer.add_image('Predicted label', grid_image, global_step)
            grid_image = make_grid(decode_seg_map_sequence(torch.squeeze(
                target[:3], 1).detach().cpu().numpy(),
                                                           dataset=dataset),
                                   3,
                                   normalize=False,
                                   range=(0, 255))
            writer.add_image('Groundtruth label', grid_image, global_step)
        else:
            grid_image = make_grid(image[:3].clone().cpu().data,
                                   4,
                                   normalize=True)
            writer.add_image('Image', grid_image, global_step)

            grid_image = make_grid(event[:3].clone().cpu().data,
                                   4,
                                   normalize=False)  # normalize=False?
            writer.add_image('Event', grid_image, global_step)

            grid_image = make_grid(decode_seg_map_sequence(torch.max(
                output[:3], 1)[1].detach().cpu().numpy(),
                                                           dataset=dataset),
                                   4,
                                   normalize=False,
                                   range=(0, 255))
            writer.add_image('Predicted label', grid_image, global_step)
            grid_image = make_grid(decode_seg_map_sequence(torch.squeeze(
                target[:3], 1).detach().cpu().numpy(),
                                                           dataset=dataset),
                                   4,
                                   normalize=False,
                                   range=(0, 255))
            writer.add_image('Groundtruth label', grid_image, global_step)
Code example #9
 def visualize_image(self, writer, dataset, image, target, output, global_step):
     grid_image = make_grid(image[:3].clone().cpu().data, 3, normalize=True)
     writer.add_image('Image', grid_image, global_step)
     grid_image = make_grid(decode_seg_map_sequence(torch.max(output[:3], 1)[1].detach().cpu().numpy(),
                                                    dataset=dataset), 3, normalize=False, range=(0, 255))
     writer.add_image('Predicted label', grid_image, global_step)
     grid_image = make_grid(decode_seg_map_sequence(torch.squeeze(target[:3], 1).detach().cpu().numpy(),
                                                    dataset=dataset), 3, normalize=False, range=(0, 255))
     # torch.squeeze() removes dimensions of size 1 from the tensor
     # torch.Tensor.detach(): returns a new tensor detached from the current computation graph;
     #                        it shares storage with the original, but requires_grad is False.
     writer.add_image('Groundtruth label', grid_image, global_step)
Code example #10
File: test1_1.py Project: Lamaric/Detect_Car_Server
    def validation(self, epoch, img_dir, save_path):
        self.model.eval()
        self.evaluator.reset()
        image = Image.open(img_dir)
        composed_transforms = transforms.Compose([
                        tr.FixScaleCrop(crop_size=513),
                        tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
                        tr.ToTensor()])
        image = composed_transforms(image)
        image = image.unsqueeze(0)

        if self.args.cuda:
            image = image.cuda()
        with torch.no_grad():
            output = self.model(image)

        pred = output.data.cpu().numpy()
        pred = np.argmax(pred, axis=1)

        # grid_image = make_grid(image[:3].clone().cpu().data, 3, normalize=True)
        # grid_image = grid_image.numpy()
        # grid_image = np.moveaxis(grid_image,0,2)
        # matplotlib.image.imsave('D:/Excise/Deecamp/backend/untitled/detect/test1/_image.png', grid_image)

        grid_image = make_grid(decode_seg_map_sequence(torch.max(output[:3], 1)[1].detach().cpu().numpy()), 3, normalize=False, range=(0, 255))
        grid_image = grid_image.numpy()
        grid_image = np.moveaxis(grid_image,0,2)
        matplotlib.image.imsave(save_path, grid_image)
        print('------OK!------')
Code example #11
def save_test_img(inputs, outputs, ii):

    fig = plt.figure()
    ax0 = plt.subplot(121)
    ax1 = plt.subplot(122)

    # Input RGB img
    rgb_img = inputs[0]
    # inv_normalize = transforms.Normalize(
    #     mean=[-0.5 / 0.5, -0.5 / 0.5, -0.5 / 0.5],
    #     std=[1 / 0.5, 1 / 0.5, 1 / 0.5]
    # )
    # rgb_img = inv_normalize(rgb_img)
    rgb_img = rgb_img.detach().cpu().numpy()
    rgb_img = np.transpose(rgb_img, (1, 2, 0))

    # Inference Result
    predictions = torch.max(outputs[:1], 1)[1].detach().cpu().numpy()
    output_rgb = utils.decode_seg_map_sequence(predictions)
    output_rgb = output_rgb.numpy()
    output_rgb = np.transpose(output_rgb[0], (1, 2, 0))

    # Create plot
    ax0.imshow(rgb_img)
    ax0.set_title('Source RGB Image')  # subplot 211 title
    ax1.imshow(output_rgb)
    ax1.set_title('Inference result')

    fig.savefig('data/results/current_training_model/%04d-results.png' % (ii))
    plt.close('all')
Code example #12
    def training(self, epoch):
        train_loss = 0.0
        self.model.train()
        tbar = tqdm(self.train_loader)
        num_img_tr = len(self.train_loader)
        for i, sample in enumerate(tbar):
            image, target = sample['image'], sample['label']
            image, target = image.to(self.device), target.to(self.device)

            self.scheduler(self.optimizer, i, epoch, self.best_pred)
            self.optimizer.zero_grad()
            output = self.model(image)

            if i % 1000 == 0:
                save_image(self.denorm(image.data.to("cpu")),
                           "test_result/data_{}_test.jpg".format(i),
                           nrow=1,
                           normalize=False,
                           range=(0, 255))
                save_image(decode_seg_map_sequence(torch.max(
                    output[:3], 1)[1].detach().to("cpu").numpy(),
                                                   dataset=self.args.dataset),
                           "test_result/data_{}.jpg".format(i),
                           nrow=1,
                           normalize=False,
                           range=(0, 255))

            loss = self.criterion(output, target)
            loss.backward()
            self.optimizer.step()
            train_loss += loss.item()
            tbar.set_description('Train loss: %.3f' % (train_loss / (i + 1)))
            self.writer.add_scalar('train/total_loss_iter', loss.item(),
                                   i + num_img_tr * epoch)

            # Show 10 * 3 inference results each epoch
            if i % (num_img_tr // 10) == 0:
                global_step = i + num_img_tr * epoch
                self.summary.visualize_image(self.writer, self.args.dataset,
                                             image, target, output,
                                             global_step)

        torch.save(self.model.state_dict(),
                   "./models/{}_ade20k_xception_512.ckpt".format(epoch))
        self.writer.add_scalar('train/total_loss_epoch', train_loss, epoch)
        print('[Epoch: %d, numImages: %5d]' %
              (epoch, i * self.args.batch_size + image.data.shape[0]))
        print('Loss: %.3f' % train_loss)

        if self.args.no_val:
            # save checkpoint every epoch
            is_best = False
            self.saver.save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': self.model.module.state_dict(),
                    'optimizer': self.optimizer.state_dict(),
                    'best_pred': self.best_pred,
                }, is_best)
Code example #13
    def visualize_image(self, writer, dataset, image, target, output, b_mask,
                        b_enlarged_mask, global_step):
        # softmax = nn.Softmax(dim=1)
        # sigmoid = nn.Sigmoid()
        # import pdb; pdb.set_trace()
        # background = sigmoid(output[:,:1,:,:])
        # characters = softmax(output[:,1:,:,:])
        # output = torch.cat([background, characters], dim=1)

        # target_tb_ch0 = target[:,0,:,:]
        # target_tb_ch1 = torch.sum(target[:,1:,:,:], dim=1)
        # target_tb = torch.stack((target_tb_ch0,target_tb_ch1), dim=1)
        #def visualize_image(self, writer, dataset, image, target, output, global_step):
        #import pdb; pdb.set_trace()
        grid_image = make_grid(image[:3].clone().cpu().data, 3, normalize=True)
        #grid_image = make_grid(image.clone().cpu().data, 5, normalize=True)
        writer.add_image('Image', grid_image, global_step)
        grid_image = make_grid(decode_seg_map_sequence(torch.max(
            output[:3], 1)[1].detach().cpu().numpy(),
                                                       dataset=dataset),
                               3,
                               normalize=False,
                               range=(0, 255))
        #grid_image = make_grid(decode_seg_map_sequence(torch.max(output, 1)[1].detach().cpu().numpy(),
        #                                               dataset=dataset), 5, normalize=False, range=(0, 255))
        writer.add_image('Predicted label', grid_image, global_step)
        grid_image = make_grid(decode_seg_map_sequence(torch.max(
            target[:3], 1)[1].detach().cpu().numpy(),
                                                       dataset=dataset),
                               3,
                               normalize=False,
                               range=(0, 255))
        #grid_image = make_grid(decode_seg_map_sequence(torch.squeeze(target[:3], 1).detach().cpu().numpy(),
        #                                                dataset=dataset), 3, normalize=False, range=(0, 255))
        #grid_image = make_grid(decode_seg_map_sequence(torch.max(target, 1)[1].detach().cpu().numpy(),
        #                                               dataset=dataset), 5, normalize=False, range=(0, 255))
        writer.add_image('Groundtruth label', grid_image, global_step)
        #import pdb; pdb.set_trace()
        grid_image = make_grid(
            b_mask[:, 0, :, :][:3].detach().cpu().data.unsqueeze(1) * 255, 3)
        writer.add_image('b_maskage', grid_image, global_step)
        grid_image = make_grid(
            b_enlarged_mask[:, 0, :, :][:3].detach().cpu().data.unsqueeze(1) *
            255, 3)
        writer.add_image('enlarged_b_mask', grid_image, global_step)
Code example #14
    def visualize_pregt(self, writer, dataset, image, target, output, val_batch_i):
        assert image.size(0) == target.size(0) == output.size(0)
        batch_size = image.size(0)

        predicteds = decode_seg_map_sequence(torch.max(output[:], 1)[1].detach().cpu().numpy(),
                                                       dataset=dataset)

        gts = decode_seg_map_sequence(torch.squeeze(target[:], 1).detach().cpu().numpy(),
                                                       dataset=dataset)

        #Have same size
        assert predicteds.size() == gts.size()

        for i in range(batch_size):
            predicted = predicteds[i]
            gt = gts[i]
            combine = np.concatenate((gt, predicted), axis=2)
            writer.add_image('Results', combine, val_batch_i * batch_size + i + 1)
Code example #15
File: vis.py Project: suyanzhou626/seg_model
 def save_img(self, images, labels, predictions, names):
     save_dir = self.args.save_dir
     if not os.path.exists(save_dir):
         os.makedirs(save_dir)
     num_image = len(labels)
     labels = decode_seg_map_sequence(labels).cpu().numpy().transpose(
         0, 2, 3, 1)
     predictions = decode_seg_map_sequence(
         predictions).cpu().numpy().transpose(0, 2, 3, 1)
     for i in range(num_image):
         name = names[i]
         if not isinstance(name, str):
             name = str(name)
         save_name = os.path.join(save_dir, name + '.png')
         image = images[i, :, :, :]
         label_mask = labels[i, :, :, :]
         prediction = predictions[i, :, :, :]
         if image.shape != label_mask.shape:
             print('error in %s' % name)
             continue
         label_map = self.addImage(image.astype(dtype=np.uint8),
                                   label_mask.astype(dtype=np.uint8))
         pred_map = self.addImage(image.astype(dtype=np.uint8),
                                  prediction.astype(dtype=np.uint8))
         label = img.fromarray(label_map.astype(dtype=np.uint8), mode='RGB')
         pred = img.fromarray(pred_map.astype(dtype=np.uint8), mode='RGB')
         label_mask = img.fromarray(label_mask.astype(dtype=np.uint8),
                                    mode='RGB')
         pred_mask = img.fromarray(prediction.astype(dtype=np.uint8),
                                   mode='RGB')
         shape1 = label.size
         shape2 = pred.size
         assert (shape1 == shape2)
         width = 2 * shape1[0] + 60
         height = 2 * shape1[1] + 60
         toImage = img.new('RGB', (width, height))
         toImage.paste(pred, (0, 0))
         toImage.paste(label, (shape1[0] + 60, 0))
         toImage.paste(pred_mask, (0, shape1[1] + 60))
         toImage.paste(label_mask, (shape1[0] + 60, shape1[1] + 60))
         toImage.save(save_name)
Code example #16
    def save_pred(self, dataset, output, val_batch_i):
        output_dir = os.path.join(os.path.join(self.directory), 'output')
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)

        batch_size = output.size(0)
        predicteds = decode_seg_map_sequence(torch.max(output[:], 1)[1].detach().cpu().numpy(),
                                                       dataset=dataset)

        for i in range(batch_size):
            #validation set start from 301
            idx = 300 + val_batch_i * batch_size + i + 1
            file_save_path = os.path.join(output_dir, str(idx).zfill(4) + '.png')
            predicted = predicteds[i]
            save_image(predicted, file_save_path, normalize=False)
Code example #17
File: test_video.py Project: suyanzhou626/seg_model
 def visual(self,video_path):
     self.model.eval()
     print('\nvisualizing')
     vis_set = VideoDataset(self.args,video_path)
     fourcc = cv2.VideoWriter_fourcc(*'MJPG') #opencv3.0
     save_name = os.path.join(self.args.save_dir,video_path.split('/')[-1].split('.')[0])
     os.makedirs(self.args.save_dir,exist_ok=True)
     videoWriter = cv2.VideoWriter(save_name + '.avi', fourcc, float(vis_set.framerate), (vis_set.wid,vis_set.hei))
     vis_loader = DataLoader(vis_set, batch_size=self.args.batch_size, shuffle=False,drop_last=False)
     num_img_tr = len(vis_loader)
     print('=====>[frames: %5d]' % (num_img_tr * self.args.batch_size))
     for i, sample in enumerate(vis_loader):
         image, ori = sample['image'],sample['ori']
         image = image.cuda()
         with torch.no_grad():
             output = self.model(image)
             if isinstance(output,(tuple,list)):
                 output = output[0]
         output = torch.nn.functional.interpolate(output,size=ori.size()[1:3],mode='bilinear',align_corners=True)
         pred = output.data.cpu().numpy()
         pred = pred.transpose((0,2,3,1)).copy()
         if self.args.hole_ratio > 0:
             pred[0] = post_utils.removehole(pred[0],self.args.hole_ratio)
         if self.args.diff_threshold > 0:
             pred[0] = self.deflicker(pred[0])
         if self.args.blursize > 0:
             pred[0] = post_utils.blur(pred[0],self.args.blursize)
         ori = ori.cpu().numpy()
         pred = np.argmax(pred, axis=3)
         # pred = np.ones(pred.shape) - pred
         label = decode_seg_map_sequence(pred).cpu().numpy().transpose([0,2,3,1])
         label = label[:,:,:,::-1] # convert to BGR
         pred = np.stack([pred,pred,pred],axis=3)
         ori = ori.astype(dtype=np.uint8)
         label = label.astype(dtype=np.uint8)
         # ori *= pred.astype(dtype=np.uint8)
         # label[pred==0] = 0
         temp = self.addImage(ori,label)
         temp[pred == 0] = 0
         temp = temp.astype(np.uint8)
         # temp = temp[:,:,:,::-1]
         # cv2.imwrite(os.path.join(save_name,str(i)+'.jpg'),temp[0])
         videoWriter.write(temp[0])
     print('write %d frame' % (i+1))
     videoWriter.release()
Code example #18
def show_mixup(original_image, image, targets_a, targets_b, lam, output, dataset):
    # for j in range(args.batch_size):
    #     plt.imshow(image[j].permute(1,2,0))
    #     plt.show()
    # image = norm(image.permute(0, 2,3,1)).permute(0,3,1,2)

    mask = decode_seg_map_sequence(torch.max(output, 1)[1].detach().cpu().numpy(),
                                                       dataset=dataset)
    for j in range(args.batch_size):
        plt.imshow(image[j].permute(1,2,0))
        plt.show()
        plt.imshow(mask[j].permute(1,2,0))
        plt.show()
        # print(targets_a.shape)
        # print(targets_b[j]*(1 - lam).shape)
        plt.imshow(targets_a[j]*lam + targets_b[j]*(1 - lam), cmap='gray')
        plt.show()
    for j in range(args.batch_size):
        plt.imshow(original_image[j].permute(1,2,0))
        plt.show()
Code example #19
            logging.info(
                'iteration %d : loss : %f, loss_seg : %f, loss_seg_dice : %f, consistency_loss : %f, cons_dist: %f, loss_weight: %f'
                % (iter_num, loss.item(), loss_seg.item(),
                   loss_seg_dice.item(), consistency_loss.item(),
                   consistency_dist.item(), consistency_weight))
            if iter_num % 50 == 0:
                image = labeled_volume_batch[0, 0:1, :, :, 20:61:10].permute(
                    3, 0, 1, 2).repeat(1, 3, 1, 1)  # repeat 3x to mimic the RGB channels of an image
                grid_image = make_grid(image, 5, normalize=True)
                writer.add_image('train/Image', grid_image, iter_num)

                # image = outputs_soft[0, 3:4, :, :, 20:61:10].permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
                image = torch.max(outputs_soft[0, :, :, :, 20:61:10],
                                  0)[1].permute(2, 0, 1).data.cpu().numpy()
                image = utils.decode_seg_map_sequence(image)
                grid_image = make_grid(image, 5, normalize=False)
                writer.add_image('train/Predicted_label', grid_image, iter_num)

                image = label_batch[0, :, :, 20:61:10].permute(2, 0, 1)
                grid_image = make_grid(utils.decode_seg_map_sequence(
                    image.data.cpu().numpy()),
                                       5,
                                       normalize=False)
                writer.add_image('train/Groundtruth_label', grid_image,
                                 iter_num)

                image = uncertainty[0, 0:1, :, :,
                                    20:61:10].permute(3, 0, 1,
                                                      2).repeat(1, 3, 1, 1)
                grid_image = make_grid(image, 5, normalize=True)
Code example #20
            # Backward the averaged gradient
            loss /= p['nAveGrad']
            loss.backward()
            aveGrad += 1

            # Update the weights once in p['nAveGrad'] forward passes
            if aveGrad % p['nAveGrad'] == 0:
                writer.add_scalar('data/total_loss_iter', loss.item(), ii + num_img_tr * epoch)
                optimizer.step()
                optimizer.zero_grad()
                aveGrad = 0

            if ii % (num_img_tr // 20) == 0:
                grid_image = make_grid(inputs[:3].clone().cpu().data, 3, normalize=True)
                writer.add_image('image', grid_image)
                grid_image = make_grid(utils.decode_seg_map_sequence(torch.max(output[:3], 1)[1].detach().cpu().numpy()), 3, normalize=False,
                                       range=(0, 255))
                writer.add_image('Predicted label', grid_image)
                grid_image = make_grid(utils.decode_seg_map_sequence(torch.squeeze(gts[:3], 1).detach().cpu().numpy()), 3, normalize=False, range=(0, 255))
                writer.add_image('Groundtruth label', grid_image)

        # Save the model
        if (epoch % snapshot) == snapshot - 1:
            torch.save(net.state_dict(), os.path.join(save_dir, 'models', modelName + '_epoch-' + str(epoch) + '.pth'))
            print("Save model at {}\n".format(os.path.join(save_dir, 'models', modelName + '_epoch-' + str(epoch) + '.pth')))

        # One testing epoch
        if useTest and epoch % nTestInterval == (nTestInterval - 1):
            net.eval()
            for ii, sample_batched in enumerate(testloader):
                inputs, gts = sample_batched['image'], sample_batched['gt']
Code example #21
    predictions = torch.max(outputs, 1)[1]

    inputs = inputs.cpu()
    labels = labels.cpu().type(torch.FloatTensor)
    predictions = predictions.cpu().type(torch.FloatTensor)

    if args.label_images_path:
        _total_iou, per_class_iou, per_class_img_count = utils.get_iou(predictions, labels.squeeze(1), n_classes=args.num_of_classes)
        total_iou += _total_iou
        for i in range(len(per_class_iou)):
            miou_per_class[i] += per_class_iou[i]
            num_images_per_class[i] += per_class_img_count[i]

    # Save the model output, 3 imgs in a row: Input, Prediction, Label
    imgs_per_row = 3
    predictions_colormap = utils.decode_seg_map_sequence(predictions.squeeze(1).numpy()).type(torch.FloatTensor)
    labels_colormap = utils.decode_seg_map_sequence(labels.squeeze(1).numpy()).type(torch.FloatTensor)
    sample = torch.cat((inputs, predictions_colormap, labels_colormap), 0)
    img_grid = make_grid(sample, nrow=testBatchSize*imgs_per_row, padding=2)
    save_image(img_grid, os.path.join(results_store_dir, sample_filename[0] + '-results.png'))

    mask_out = predictions.squeeze(0).numpy() * 255
    imageio.imwrite(os.path.join(results_store_dir, 'masks', sample_filename[0] + '.png'), mask_out.astype(np.uint8))

    # Calculate mean IoU per class and overall
    print('  image num : %03d' % (ii * testBatchSize))
    if args.label_images_path:
        if ii % num_img_ts == num_img_ts - 1:
            miou = total_iou / (ii * testBatchSize + inputs.shape[0])
            for i in range(len(miou_per_class)):
                if num_images_per_class[i] == 0:
Code example #22
    def validation(self, epoch):
        self.model.eval()
        self.evaluator.reset()
        tbar = tqdm(self.val_loader, desc='\r')
        test_loss = 0.0
        for i, sample in enumerate(tbar):
            image, target = sample['image'], sample['label']
            image, target = image.to(self.device), target.to(self.device)
            with torch.no_grad():
                output = self.model(image)
                if i % 1000 == 0:
                    save_image(self.denorm(image.data.to("cpu")),
                               "test_result/data_{}_test.jpg".format(i),
                               nrow=1,
                               normalize=False,
                               range=(0, 255))
                    save_image(decode_seg_map_sequence(
                        torch.max(output[:3], 1)[1].detach().to("cpu").numpy(),
                        dataset=self.args.dataset),
                               "test_result/data_{}.jpg".format(i),
                               nrow=1,
                               normalize=False,
                               range=(0, 255))

            loss = self.criterion(output, target)
            test_loss += loss.item()
            tbar.set_description('Test loss: %.3f' % (test_loss / (i + 1)))
            pred = output.data.to("cpu").numpy()
            target = target.to("cpu").numpy()
            pred = np.argmax(pred, axis=1)
            # Add batch sample into evaluator
            self.evaluator.add_batch(target, pred)

        # Fast test during the training
        Acc = self.evaluator.Pixel_Accuracy()
        Acc_class = self.evaluator.Pixel_Accuracy_Class()
        mIoU = self.evaluator.Mean_Intersection_over_Union()
        FWIoU = self.evaluator.Frequency_Weighted_Intersection_over_Union()
        self.writer.add_scalar('val/total_loss_epoch', test_loss, epoch)
        self.writer.add_scalar('val/mIoU', mIoU, epoch)
        self.writer.add_scalar('val/Acc', Acc, epoch)
        self.writer.add_scalar('val/Acc_class', Acc_class, epoch)
        self.writer.add_scalar('val/fwIoU', FWIoU, epoch)
        print('Validation:')
        print('[Epoch: %d, numImages: %5d]' %
              (epoch, i * self.args.batch_size + image.data.shape[0]))
        print("Acc:{}, Acc_class:{}, mIoU:{}, fwIoU: {}".format(
            Acc, Acc_class, mIoU, FWIoU))
        print('Loss: %.3f' % test_loss)

        new_pred = mIoU
        if new_pred > self.best_pred:
            is_best = True
            self.best_pred = new_pred
            self.saver.save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': self.model.module.state_dict(),
                    'optimizer': self.optimizer.state_dict(),
                    'best_pred': self.best_pred,
                }, is_best)
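The validation loop above leans on an Evaluator that accumulates a confusion matrix and reports metrics such as mean IoU. A hedged sketch of that bookkeeping, assuming the usual confusion-matrix formulation (only MIoU is sketched; the 4-class setting is illustrative):

import numpy as np

class Evaluator:
    def __init__(self, num_class=4):
        self.num_class = num_class
        self.confusion_matrix = np.zeros((num_class, num_class), dtype=np.int64)

    def reset(self):
        self.confusion_matrix[:] = 0

    def add_batch(self, gt, pred):
        # gt, pred: integer arrays of identical shape; labels outside [0, num_class) are ignored.
        mask = (gt >= 0) & (gt < self.num_class)
        idx = self.num_class * gt[mask].astype(np.int64) + pred[mask].astype(np.int64)
        self.confusion_matrix += np.bincount(
            idx, minlength=self.num_class ** 2).reshape(self.num_class, self.num_class)

    def Mean_Intersection_over_Union(self):
        cm = self.confusion_matrix.astype(np.float64)
        iou = np.diag(cm) / (cm.sum(axis=1) + cm.sum(axis=0) - np.diag(cm))
        return np.nanmean(iou)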
Code example #23
    # Forward pass of the mini-batch
    inputs, labels = Variable(inputs, requires_grad=True), Variable(labels)
    if gpu_id >= 0:
        inputs, labels = inputs.cuda(), labels.cuda()

    with torch.no_grad():
        outputs = net.forward(inputs)

    predictions = torch.max(outputs, 1)[1]

    loss = criterion(outputs, labels, size_average=False, batch_average=True)
    testing_loss += loss.item()
    #from IPython import embed;embed();exit();
    for num in range(testBatch):
        img = utils.decode_seg_map_sequence(predictions.cpu().numpy())[num]
        print('save images num {} '.format(images_num))
        cv2.imwrite(
            os.path.join(save_dir, 'img_{:04d}_pre.jpg'.format(images_num)),
            img.numpy().transpose(1, 2, 0) * 256)
        img_ori = inputs[num].detach().cpu().numpy().transpose(
            1, 2, 0) * global_std + global_mean
        cv2.imwrite(
            os.path.join(save_dir, 'img_{:04d}_ori.jpg'.format(images_num)),
            img_ori * 256)
        img_gt = utils.decode_seg_map_sequence(
            labels.detach().cpu().numpy().reshape(
                testBatch, 512, 512))[num]  #*global_std+global_mean
        cv2.imwrite(
            os.path.join(save_dir, 'img_{:04d}_gt.jpg'.format(images_num)),
            img_gt.numpy().transpose(1, 2, 0) * 256)
Code example #24
            writer.add_scalar('loss/loss_seg', loss_seg, iter_num)
            writer.add_scalar('loss/loss_seg_dice', loss_seg_dice, iter_num)
            writer.add_scalar('train/consistency_loss', consistency_loss, iter_num)
            writer.add_scalar('train/consistency_weight', consistency_weight, iter_num)
            writer.add_scalar('train/consistency_dist', consistency_dist, iter_num)

            logging.info('iteration %d : loss : %f cons_dist: %f, loss_weight: %f' %
                         (iter_num, loss.item(), consistency_dist.item(), consistency_weight))
            if iter_num % 50 == 0:
                image = volume_batch[0, 0:1, :, :, 20:61:10].permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
                grid_image = make_grid(image, 5, normalize=True)
                writer.add_image('train/Image', grid_image, iter_num)

                # image = outputs_soft[0, 3:4, :, :, 20:61:10].permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
                image = torch.max(outputs_soft[0, :, :, :, 20:61:10], 0)[1].permute(2, 0, 1).data.cpu().numpy()
                image = utils.decode_seg_map_sequence(image)
                grid_image = make_grid(image, 5, normalize=False)
                writer.add_image('train/Predicted_label', grid_image, iter_num)

                image = label_batch[0, :, :, 20:61:10].permute(2, 0, 1)
                grid_image = make_grid(utils.decode_seg_map_sequence(image.data.cpu().numpy()), 5, normalize=False)
                writer.add_image('train/Groundtruth_label', grid_image, iter_num)

                image = uncertainty[0, 0:1, :, :, 20:61:10].permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
                grid_image = make_grid(image, 5, normalize=True)
                writer.add_image('train/uncertainty', grid_image, iter_num)

                mask2 = (uncertainty > threshold).float()
                image = mask2[0, 0:1, :, :, 20:61:10].permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
                grid_image = make_grid(image, 5, normalize=True)
                writer.add_image('train/mask', grid_image, iter_num)
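Code examples #19 and #24 visualize 3-D volumes by sampling every 10th axial slice (indices 20..60), moving the slice axis onto the batch axis, and repeating the single channel three times so make_grid treats each slice as an RGB image. The sketch below isolates that slicing trick; the volume shape (1 x 1 x 112 x 112 x 80) is an illustrative assumption.

import torch
from torchvision.utils import make_grid

volume = torch.rand(1, 1, 112, 112, 80)                  # B x C x H x W x D volume batch
slices = volume[0, 0:1, :, :, 20:61:10]                  # 1 x H x W x 5 selected slices
slices = slices.permute(3, 0, 1, 2).repeat(1, 3, 1, 1)   # 5 x 3 x H x W pseudo-RGB batch
grid = make_grid(slices, 5, normalize=True)              # one row of five slices
print(grid.shape)                                        # 3 x grid_H x grid_W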
Code example #25
            loss.backward()
            aveGrad += 1

            # Update the weights once in p['nAveGrad'] forward passes
            if aveGrad % p['nAveGrad'] == 0:
                writer.add_scalar('data/total_loss_iter', loss.item(), ii + num_img_tr * epoch)
                optimizer.step()
                optimizer.zero_grad()
                aveGrad = 0

            # Show 10 * 3 images results each epoch
            if ii % (num_img_tr // 10) == 0:
                grid_image = make_grid(inputs[:3].clone().cpu().data, 3, normalize=True)
                writer.add_image('Image', grid_image, global_step)
                grid_image = make_grid(
                    utils.decode_seg_map_sequence(torch.max(outputs[:3], 1)[1].detach().cpu().numpy(), 'cityscapes'), 3,
                    normalize=False,
                    range=(0, 255))
                writer.add_image('Predicted label', grid_image, global_step)
                grid_image = make_grid(
                    utils.decode_seg_map_sequence(torch.squeeze(labels[:3], 1).detach().cpu().numpy(), 'cityscapes'), 3,
                    normalize=False, range=(0, 255))
                writer.add_image('Groundtruth label', grid_image, global_step)

        # One testing epoch
        if epoch % nValInterval == (nValInterval - 1):
            total_miou = 0.0
            net.eval()
            for ii, sample_batched in enumerate(valloader):
                inputs, labels = sample_batched['image'], sample_batched['label']
Code example #26
def main(args):

    save_dir_root = os.path.join(os.path.dirname(os.path.abspath(__file__)))
    if args.resume_epoch != 0:
        runs = sorted(glob.glob(os.path.join(save_dir_root, 'run', 'run_*')))
        run_id = int(runs[-1].split('_')[-1]) if runs else 0
    else:
        runs = sorted(glob.glob(os.path.join(save_dir_root, 'run', 'run_*')))
        run_id = int(runs[-1].split('_')[-1]) + 1 if runs else 0

    if args.run_id >= 0:
        run_id = args.run_id

    save_dir = os.path.join(save_dir_root, 'run', 'run_' + str(run_id))
    log_dir = os.path.join(
        save_dir, 'models',
        datetime.now().strftime('%b%d_%H-%M-%S') + '_' + socket.gethostname())
    writer = SummaryWriter(log_dir=log_dir)

    if 'RAS' in args.model_name:
        net = rasnet()
    else:
        raise NotImplementedError

    if args.resume_epoch == 0:
        print('Training ' + args.model_name + ' from scratch...')
        net.apply(weights_init)
        net = load_vgg16conv_fromcaffe(net, args.load_path)
        net = interp_surgery(net)
    else:
        load_path = os.path.join(
            save_dir, 'models',
            args.model_name + '_epoch-' + str(args.resume_epoch - 1) + '.pth')
        if args.load_path != '': load_path = args.load_path
        print('Initializing weights from: {}...'.format(load_path))
        net.load_state_dict(
            torch.load(load_path, map_location=lambda storage, loc: storage))

    torch.cuda.set_device(device=0)
    net.cuda()

    optimizer = optim.SGD(get_parameters(net),
                          lr=args.lr,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)

    criterion = utils.BCE_2d

    # use vgg16-caffe transformation
    composed_transforms_tr = transforms.Compose([
        # trforms.RandomHorizontalFlip(),
        # trforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
        trforms.RGB2BGR(),
        trforms.MeanNormalize(mean=(104.00699, 116.66877, 122.67892)),
        trforms.ToTensor()
    ])

    composed_transforms_ts = transforms.Compose([
        # trforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
        trforms.RGB2BGR(),
        trforms.MeanNormalize(mean=(104.00699, 116.66877, 122.67892)),
        trforms.ToTensor()
    ])

    train_data = msrab5k.MSRAB5K(split='train',
                                 transform=composed_transforms_tr)
    val_data = msrab.MSRAB(split='val',
                           transform=composed_transforms_ts,
                           return_size=True)

    trainloader = DataLoader(train_data,
                             batch_size=args.batch_size,
                             shuffle=True,
                             num_workers=3)
    testloader = DataLoader(val_data,
                            batch_size=args.batch_size,
                            shuffle=False,
                            num_workers=1)

    num_batch_tr = len(trainloader)
    num_batch_ts = len(testloader)
    num_iter_tr = num_batch_tr // args.iter_size  # integer count so the modulo check below behaves
    num_tr_samples = len(train_data)
    num_ts_samples = len(val_data)
    resume_nbatches = args.resume_epoch * num_batch_tr
    resume_nsamples = args.resume_epoch * num_tr_samples

    print('batch number of train set : %d' % (num_batch_tr))
    print('sample number of train set : %d' % (num_tr_samples))
    print('batch number of test set : %d' % (num_batch_ts))
    print('sample number of test set : %d' % (num_ts_samples))
    print('resume training from Batch %d' % (resume_nbatches))
    print('resume training from Sample %d' % (resume_nsamples))

    cur_batch = resume_nbatches
    cur_sample = resume_nsamples
    cur_iter = int(cur_batch / args.iter_size)
    cur_lr = args.lr

    aveGrad = 0
    loss_sum_per_epoch = 0
    loss_sum_recent = 0

    start_t = time.time()
    print('Training Network')

    for epoch in range(args.resume_epoch, args.max_nepochs):

        net.train()
        loss_sum_per_epoch = 0
        loss_sum_recent = 0

        for ii, sample_batched in enumerate(trainloader):

            inputs, labels = sample_batched['image'], sample_batched['label']
            inputs, labels = Variable(inputs,
                                      requires_grad=True), Variable(labels)
            inputs, labels = inputs.cuda(), labels.cuda()

            #print 'inputs.size: ',inputs.size(),inputs.min(),inputs.max()
            #print 'labels.size: ',labels.size(),labels.min(),labels.max()

            outputs = net.forward(inputs)
            #print 'outputs.size: ',outputs.size(),outputs.min(),outputs.max()
            nrep = outputs.size(0) // labels.size(0)  # integer ratio; repeat() needs an int
            assert (labels.size(0) * nrep == outputs.size(0))
            loss = criterion(outputs,
                             labels.repeat(nrep, 1, 1, 1),
                             size_average=False,
                             batch_average=True)

            cur_loss = loss.item()

            loss_sum_per_epoch += cur_loss
            loss_sum_recent += cur_loss

            # Backward the averaged gradient
            # loss /= args.naver_grad
            loss.backward()

            cur_sample += inputs.data.shape[0]
            cur_batch += 1
            aveGrad += 1

            # Update the weights once in p['nAveGrad'] forward passes
            if aveGrad % args.iter_size == 0:
                optimizer.step()
                optimizer.zero_grad()
                aveGrad = 0
                cur_iter += 1

                if cur_iter % args.log_every_iters == 0:
                    loss_mean_recent = loss_sum_recent / args.log_every_iters / args.iter_size
                    print(
                        'epoch: %d iter: %d trainloss: %.2f timecost:%.2f secs'
                        % (epoch, cur_iter, loss_mean_recent,
                           time.time() - start_t))
                    writer.add_scalar('data/trainloss', loss_mean_recent,
                                      cur_iter)
                    loss_sum_recent = 0

                # Show 10 * 3 images results each epoch
                if cur_iter % (num_iter_tr // 10) == 0:
                    grid_image = make_grid(inputs[:3].clone().cpu().data,
                                           3,
                                           normalize=True)
                    writer.add_image('Image', grid_image, cur_iter)
                    # grid_image = make_grid(utils.decode_seg_map_sequence(torch.max(outputs[:3], 1)[1].detach().cpu().numpy()), 3, normalize=False, range=(0, 255))

                    tmp = torch.nn.Sigmoid()(outputs[:1])
                    grid_image = make_grid(utils.decode_seg_map_sequence(
                        tmp.narrow(1, 0, 1).detach().cpu().numpy()),
                                           1,
                                           normalize=False,
                                           range=(0, 255))

                    writer.add_image('Predicted label', grid_image, cur_iter)
                    grid_image = make_grid(utils.decode_seg_map_sequence(
                        torch.squeeze(labels[:3], 1).detach().cpu().numpy()),
                                           3,
                                           normalize=False,
                                           range=(0, 255))
                    writer.add_image('Groundtruth label', grid_image, cur_iter)

        loss_mean_per_epoch = loss_sum_per_epoch / num_batch_tr
        print('epoch: %d meanloss: %.2f' % (epoch, loss_mean_per_epoch))
        writer.add_scalar('data/epochloss', loss_mean_per_epoch, cur_iter)

        # The following is to do validation
        if args.use_test == 1:

            net.eval()

            prec_lists = []
            recall_lists = []
            sum_testloss = 0.0
            total_mae = 0.0
            cnt = 0

            rand_id = random.randint(100000, 199999)
            tmp_pred_dir = 'tmp_pred_' + str(rand_id)
            tmp_gt_dir = 'tmp_gt_' + str(rand_id)
            if os.path.isdir(tmp_pred_dir) == True:
                os.system('rm ' + tmp_pred_dir + '/*')
            else:
                os.makedirs(tmp_pred_dir)
            if os.path.isdir(tmp_gt_dir) == True:
                os.system('rm ' + tmp_gt_dir + '/*')
            else:
                os.makedirs(tmp_gt_dir)

            for ii, sample_batched in enumerate(testloader):
                inputs, labels = sample_batched['image'], sample_batched[
                    'label']
                sizes = sample_batched['size']

                # Forward pass of the mini-batch
                inputs, labels = Variable(inputs,
                                          requires_grad=True), Variable(labels)
                inputs, labels = inputs.cuda(), labels.cuda()

                with torch.no_grad():
                    outputs = net.forward(inputs)

                outputs = outputs[:1]
                loss = criterion(outputs,
                                 labels,
                                 size_average=False,
                                 batch_average=False)
                sum_testloss += loss.item()

                predictions = torch.nn.Sigmoid()(outputs)

                preds = predictions.data.cpu().numpy()
                gts = labels.data.cpu().numpy()

                for jj in range(preds.shape[0]):
                    pred = preds[jj]
                    pred = Image.fromarray(
                        np.squeeze(np.rint(pred * 255.0).astype(np.uint8)))
                    gt = gts[jj]
                    gt = Image.fromarray(
                        np.squeeze(np.rint(gt * 255.0).astype(np.uint8)))
                    imsize = sizes[jj]
                    imgh, imgw = imsize[0].item(), imsize[1].item()
                    pred = pred.resize((imgw, imgh))
                    gt = gt.resize((imgw, imgh))

                    save_name = str(cnt) + '.png'
                    pred.save(os.path.join(tmp_pred_dir, save_name))
                    gt.save(os.path.join(tmp_gt_dir, save_name))
                    cnt += 1
                    if cnt % 100 == 0:
                        print('Tested %d samples / %d' % (cnt, num_ts_samples))

            mean_testloss = sum_testloss / num_batch_ts
            print('Evaluating maxf...')
            os.system('nohup python eval_maxf.py -pred_path=' + tmp_pred_dir +
                      ' -gt_path=' + tmp_gt_dir + ' > tmp_' + str(rand_id) +
                      '.out')

            with open('tmp_' + str(rand_id) + '.out', 'r') as f:
                linelist = f.readlines()
            linelist = linelist[-4:]
            results = [x.split()[-1] for x in linelist]
            print(results)
            print(type(results[0]))
            maxf = float(results[0])
            prec = float(results[1])
            recall = float(results[2])
            mae = float(results[3])

            print('Validation:')
            print(
                'epoch: %d, numImages: %d testloss: %.2f mae: %.4f maxf: %.4f prec: %.4f recall: %.4f'
                % (epoch, cnt, mean_testloss, mae, maxf, prec, recall))
            writer.add_scalar('data/validloss', mean_testloss, cur_iter)
            writer.add_scalar('data/validmae', mae, cur_iter)
            writer.add_scalar('data/validfbeta', maxf, cur_iter)

            os.system('rm -rf ' + tmp_pred_dir)
            os.system('rm -rf ' + tmp_gt_dir)
            os.system('rm tmp_' + str(rand_id) + '.out')
        # The above finishes validation

        if epoch % args.save_every_epochs == args.save_every_epochs - 1:
            save_path = os.path.join(
                save_dir, 'models',
                args.model_name + '_epoch-' + str(epoch) + '.pth')
            torch.save(net.state_dict(), save_path)
            print("Save model at {}\n".format(save_path))

        if epoch % args.update_lr_every_epochs == args.update_lr_every_epochs - 1:
            cur_lr = cur_lr * args.gamma
            print('updated learning rate: ', cur_lr)
            optimizer = optim.SGD(get_parameters(net),
                                  lr=cur_lr,
                                  momentum=args.momentum,
                                  weight_decay=args.weight_decay)
            writer.add_scalar('data/learningrate', cur_lr, cur_iter)
            '''
Code example #27
    def training(self, epoch):
        train_loss = 0.0
        self.model.train()
        self.model.sbox_net.eval()
        tbar = tqdm(self.train_loader)
        num_img_tr = len(self.train_loader)
        for i, sample in enumerate(tbar):
            image, gt = sample['crop_image'], sample['crop_gt']
            if self.args.cuda:
                image, gt = image.cuda(), gt.cuda()
            self.scheduler(self.optimizer, i, epoch, self.best_pred)
            self.optimizer.zero_grad()
            sbox_pred, click_pred, sum_pred = self.model(image, crop_gt=gt)
            sum_pred = F.interpolate(sum_pred,
                                     size=gt.size()[-2:],
                                     align_corners=True,
                                     mode='bilinear')
            sbox_pred = F.interpolate(sbox_pred,
                                      size=gt.size()[-2:],
                                      align_corners=True,
                                      mode='bilinear')
            loss1 = self.criterion(sum_pred, gt) \
                # + self.criterion(sbox_pred, gt)

            loss1.backward()
            self.optimizer.step()
            total_loss = loss1.item()
            train_loss += total_loss
            tbar.set_description('Train loss: %.3f' % (train_loss / (i + 1)))
            self.writer.add_scalar('train/total_steps', total_loss,
                                   i + num_img_tr * epoch)

            # Show 10 * 3 inference results each epoch
            if i % (num_img_tr // 10) == 0:
                global_step = i + num_img_tr * epoch
                grid_image = make_grid(decode_seg_map_sequence(
                    torch.max(sbox_pred[:3], 1)[1].detach().cpu().numpy(),
                    dataset=self.args.dataset),
                                       3,
                                       normalize=False,
                                       range=(0, 255))
                self.summary.visualize_image(self.writer, self.args.dataset,
                                             image, sample['crop_gt'],
                                             sum_pred, global_step)
                self.writer.add_image('sbox_pred', grid_image, global_step)

        self.writer.add_scalar('train/total_epochs', train_loss, epoch)
        print('[Epoch: %d, numImages: %5d]' %
              (epoch, i * self.args.batch_size + image.data.shape[0]))
        print('Loss: %.3f' % train_loss)

        if self.args.no_val:
            # save checkpoint every epoch
            is_best = False
            self.saver.save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': self.model.state_dict(),
                    'optimizer': self.optimizer.state_dict(),
                    'best_pred': self.best_pred,
                }, is_best)
Code example #28
File: train.py Project: lhaof/DSS-Pytorch
def main(args):
	# os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
	torch.manual_seed(1234)
	save_dir_root = os.path.join(os.path.dirname(os.path.abspath(__file__)))
	if args.resume_epoch != 0:
		runs = sorted(glob.glob(os.path.join(save_dir_root, 'run', 'run_*')))
		run_id = int(runs[-1].split('_')[-1]) if runs else 0
	else:
		runs = sorted(glob.glob(os.path.join(save_dir_root, 'run', 'run_*')))
		run_id = int(runs[-1].split('_')[-1]) + 1 if runs else 0

	if args.run_id >= 0:
		run_id = args.run_id

	save_dir = os.path.join(save_dir_root, 'run', 'run_' + str(run_id))
	log_dir = os.path.join(save_dir, 'models', datetime.now().strftime('%b%d_%H-%M-%S') + '_' + socket.gethostname())
	writer = SummaryWriter(log_dir=log_dir)

	net = DSSNet()
	# load VGG16 encoder or pretrained DSS
	if args.load_pretrain is not None:
		pretrain_weights = torch.load(args.load_pretrain)
		pretrain_keys = list(pretrain_weights.keys())
		net_keys = list(net.state_dict().keys())
		for key in pretrain_keys:
			_key = key 
			if _key in net_keys:
				net.state_dict()[_key].copy_(pretrain_weights[key])
			else:
				print('missing key: ',_key)
	print('created and initialized a DSS model.')
	net.cuda()

	lr_ = args.lr
	optimizer = optim.SGD(get_params(net, args.lr),momentum=args.momentum,weight_decay=args.weight_decay)

	# optimizer = optim.Adam(get_params(net, 1e-6))

	criterion = dssloss()

	composed_transforms_tr = transforms.Compose([
		# trforms.FixedResize(size=(args.input_size, args.input_size)),
		trforms.Normalize_caffevgg(mean=(104.00698793,116.66876762,122.67891434), std=(1.0,1.0,1.0)),
		trforms.ToTensor()])
	
	composed_transforms_ts = transforms.Compose([
		# trforms.FixedResize(size=(args.input_size, args.input_size)),
		trforms.Normalize_caffevgg(mean=(104.00698793,116.66876762,122.67891434), std=(1.0,1.0,1.0)),
		trforms.ToTensor()])

	train_data = msrab.MSRAB(max_num_samples=-1, split="train", transform=composed_transforms_tr)
	val_data = msrab.MSRAB(max_num_samples=-1, split="val", transform=composed_transforms_ts)

	trainloader = DataLoader(train_data, batch_size=args.batch_size, shuffle=False, num_workers=0)
	testloader = DataLoader(val_data, batch_size=1, shuffle=False, num_workers=0)

	num_iter_tr = len(trainloader)
	num_iter_ts = len(testloader)
	nitrs = args.resume_epoch * num_iter_tr
	nsamples = args.resume_epoch * len(train_data) 
	print('nitrs: %d num_iter_tr: %d'%(nitrs, num_iter_tr))
	print('nsamples: %d tot_num_samples: %d'%(nsamples, len(train_data)))

	aveGrad = 0
	global_step = 0
	epoch_losses = []
	recent_losses = []
	start_t = time.time()
	print('Training Network')

	best_f, cur_f = 0.0, 0.0
	lr_ = args.lr
	for epoch in range(args.resume_epoch,args.nepochs):

		### do validation
		if args.use_test == 1:
			cnt = 0
			sum_testloss = 0.0

			avg_mae = 0.0
			avg_prec, avg_recall = 0.0, 0.0

			if args.use_eval == 1:
				net.eval()
			for ii, sample_batched in enumerate(testloader):
				inputs, labels = sample_batched['image'], sample_batched['label']

				# Forward pass of the mini-batch
				# Variable is deprecated and requires_grad is unnecessary inside torch.no_grad();
				# moving the tensors to the GPU is all that is needed here.
				inputs, labels = inputs.cuda(), labels.cuda()

				with torch.no_grad():
					outputs = net.forward(inputs)
					loss = criterion(outputs, labels)
				sum_testloss += loss.item()
				
				predictions = [torch.nn.Sigmoid()(outputs_i) for outputs_i in outputs]
				if len(predictions) >= 7: 
					predictions = (predictions[2]+predictions[3]+predictions[4]+predictions[6]) / 4.0
				else:
					predictions = predictions[0]
				predictions = (predictions-predictions.min()+1e-8) / (predictions.max()-predictions.min()+1e-8)

				avg_mae += eval_mae(predictions, labels).cpu().item()
				prec, recall = eval_pr(predictions, labels, 100)
				avg_prec, avg_recall = avg_prec + prec, avg_recall + recall

				cnt += predictions.size(0)
				
				if ii % num_iter_ts == num_iter_ts-1:
					mean_testloss = sum_testloss / num_iter_ts
					avg_mae = avg_mae / num_iter_ts
					avg_prec = avg_prec / num_iter_ts
					avg_recall = avg_recall / num_iter_ts
					f = (1+0.3) * avg_prec * avg_recall / (0.3 * avg_prec + avg_recall)
					f[f != f] = 0 # delete the nan
					maxf = f.max()

					print('Validation:')
					print('epoch: %d, numImages: %d testloss: %.2f mmae: %.4f maxf: %.4f' % (
						epoch, cnt, mean_testloss, avg_mae, maxf))
					writer.add_scalar('data/validloss', mean_testloss, nsamples)
					writer.add_scalar('data/validmae', avg_mae, nsamples)
					writer.add_scalar('data/validmaxf', maxf, nsamples)

					cur_f = maxf
					if cur_f > best_f:
						save_path = os.path.join(save_dir, 'models', args.model_name + '_best' + '.pth')
						torch.save(net.state_dict(), save_path)
						print("Save model at {}\n".format(save_path))
						best_f = cur_f


		### train one epoch
		net.train()
		epoch_losses = []
		for ii, sample_batched in enumerate(trainloader):
			
			inputs, labels = sample_batched['image'], sample_batched['label']
			inputs, labels = Variable(inputs, requires_grad=True), Variable(labels) 
			global_step += inputs.data.shape[0] 
			inputs, labels = inputs.cuda(), labels.cuda()

			outputs = net.forward(inputs)
			loss = criterion(outputs, labels)
			trainloss = loss.item()
			epoch_losses.append(trainloss)
			if len(recent_losses) < args.log_every:
				recent_losses.append(trainloss)
			else:
				recent_losses[nitrs % len(recent_losses)] = trainloss

			# Backward the averaged gradient
			loss /= args.naver_grad
			loss.backward()
			aveGrad += 1
			nitrs += 1
			nsamples += args.batch_size

			# Update the weights once in p['nAveGrad'] forward passes
			if aveGrad % args.naver_grad == 0:
				optimizer.step()
				optimizer.zero_grad()
				aveGrad = 0

			if nitrs % args.log_every == 0:
				meanloss = sum(recent_losses) / len(recent_losses)
				print('epoch: %d ii: %d trainloss: %.2f timecost:%.2f secs'%(
					epoch,ii,meanloss,time.time()-start_t))
				writer.add_scalar('data/trainloss',meanloss,nsamples)

			# Show 10 * 3 image results each epoch
			if (ii < 50 and ii % 10 == 0) or (ii % max(1, (num_iter_tr // 10)) == 0):
			# if ii % 10 == 0:
				tmp = inputs[:1].clone().cpu().data.numpy()
				tmp += np.array((104.00698793,116.66876762,122.67891434)).reshape(1, 3, 1, 1)
				tmp = np.ascontiguousarray(tmp[:, ::-1, :, :])
				tmp = torch.tensor(tmp).float()
				grid_image = make_grid(tmp, 3, normalize=True)
				writer.add_image('Image', grid_image, global_step)
				
				predictions = [nn.Sigmoid()(outputs_i)[:1] for outputs_i in outputs]
				final_prediction = (predictions[2]+predictions[3]+predictions[4]+predictions[6]) / 4.0
				predictions.append(final_prediction)
				predictions = torch.cat(predictions, dim=0)

				grid_image = make_grid(utils.decode_seg_map_sequence(predictions.narrow(1, 0, 1).detach().cpu().numpy()), 2, normalize=False, range=(0, 255))
				writer.add_image('Predicted label', grid_image, global_step)

				grid_image = make_grid(utils.decode_seg_map_sequence(torch.squeeze(labels[:1], 1).detach().cpu().numpy()), 3, normalize=False, range=(0, 255))
				writer.add_image('Groundtruth label', grid_image, global_step)


		meanloss = sum(epoch_losses) / len(epoch_losses)
		print('epoch: %d meanloss: %.2f'%(epoch,meanloss))
		writer.add_scalar('data/epochloss', meanloss, nsamples)


		### save model
		if epoch % args.save_every == args.save_every - 1:
			save_path = os.path.join(save_dir, 'models', args.model_name + '_epoch-' + str(epoch) + '.pth')
			torch.save(net.state_dict(), save_path)
			print("Save model at {}\n".format(save_path))


		### adjust lr
		if epoch % args.update_lr_every == args.update_lr_every - 1:
			lr_ = lr_ * 0.1
			print('current learning rate: ', lr_)
			# note: re-creating the optimizer here also resets the SGD momentum buffers
			optimizer = optim.SGD(get_params(net, lr_), momentum=args.momentum, weight_decay=args.weight_decay)
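The loop above scales each loss by `args.naver_grad` and only steps the optimizer once every `args.naver_grad` backward passes. A stripped-down sketch of that gradient-accumulation pattern, with illustrative names (`accum_steps`, `loader`) that are not part of the repository:

# Gradient accumulation: sum gradients over several mini-batches, then step once.
accum_steps = 4  # plays the role of args.naver_grad
optimizer.zero_grad()
for step, batch in enumerate(loader):
    outputs = net(batch['image'].cuda())
    loss = criterion(outputs, batch['label'].cuda()) / accum_steps  # scale so the summed gradient is an average
    loss.backward()  # gradients accumulate in .grad across iterations
    if (step + 1) % accum_steps == 0:
        optimizer.step()
        optimizer.zero_grad()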
Code example #29
0
            aveGrad += 1

            # Update the weights once in p['nAveGrad'] forward passes
            if aveGrad % p['nAveGrad'] == 0:
                writer.add_scalar('data/total_loss_iter', loss.item(),
                                  ii + num_img_tr * epoch)
                optimizer.step()
                optimizer.zero_grad()
                aveGrad = 0

            if ii % max(1, num_img_tr // 20) == 0:
                grid_image = make_grid(inputs[:3].clone().cpu().data,
                                       3,
                                       normalize=True)
                writer.add_image('Image', grid_image, global_step)
                grid_image = make_grid(utils.decode_seg_map_sequence(
                    torch.max(outputs[:3], 1)[1].detach().cpu().numpy()),
                                       3,
                                       normalize=False,
                                       range=(0, 255))
                writer.add_image('Predicted label', grid_image, global_step)
                grid_image = make_grid(utils.decode_seg_map_sequence(
                    torch.squeeze(labels[:3], 1).detach().cpu().numpy()),
                                       3,
                                       normalize=False,
                                       range=(0, 255))
                writer.add_image('Groundtruth label', grid_image, global_step)

        # Save the model
        if (epoch % snapshot) == snapshot - 1:
            torch.save(
                net.state_dict(),
Code example #30
0
File: summaries.py  Project: sunlibocs/DeepLabV3Plus
    def visualize_image(self, writer, dataset, image, target, output, global_step):
        grid_image = make_grid(image[:3].clone().cpu().data, 3, normalize=True)
        
        grid_image1 = grid_image  # kept for the optional local-save block below
        writer.add_image('Image', grid_image, global_step)


        grid_image = make_grid(decode_seg_map_sequence(torch.max(output[:3], 1)[1].detach().cpu().numpy(), dataset=dataset), 3, normalize=False, range=(0, 255))
        
        grid_image2 = grid_image  # kept for the optional local-save block below
        writer.add_image('Predicted label', grid_image, global_step)
       
        grid_image = make_grid(decode_seg_map_sequence(torch.squeeze(target[:3], 1).detach().cpu().numpy(), dataset=dataset), 3, normalize=False, range=(0, 255))
        
        grid_image3 = grid_image  # kept for the optional local-save block below
        writer.add_image('Groundtruth label', grid_image, global_step)


        if 1:
            ############################################################################
            # # Save the images to disk here, because the server cannot display visualizations  TODO
            # fake_image_list = []
            # # coarse_map = self.FCN8(fixed_x)
            # # refined_map= self.guidance_module(fixed_x,coarse_map)

            # lbl_pred = output.data.max(1)[1].cpu().numpy()
            # #lbl_pred_refined = refined_map.data.max(1)[1].cpu().numpy()
            # lbl_pred = self.data_loader.dataset.colorize_mask_batch(lbl_pred)
            # #lbl_pred_refined = self.data_loader.dataset.colorize_mask_batch(lbl_pred_refined)
            # lbl_true = self.data_loader.dataset.colorize_mask_batch(fixed_target.numpy())
            # # print(lbl_pred.size()) 
            # # print(lbl_pred_refined.size()) 
            # # print(lbl_true.size()) 
            # fake_image_list.append(lbl_pred)
            # #fake_image_list.append(lbl_pred_refined)
            # fake_image_list.append(lbl_true)
            # # fake_image_list.append(lbl_pred_refined.unsqueeze(1).expand(fixed_x.size()).float())
            # # fake_image_list.append(lbl_true)
            # fake_images = torch.cat(fake_image_list, dim=3)

            # save_image(grid_image1,
            #     os.path.join('/fast/users/a1746546/code/pytorch-deeplab-xception/run/pascal/deeplab-resnet/imagesRs', 
            #     '{}_SrcImg.png'.format(global_step)),nrow=1, padding=0)
            
            # save_image(grid_image2,
            #     os.path.join('/fast/users/a1746546/code/pytorch-deeplab-xception/run/pascal/deeplab-resnet/imagesRs', 
            #     '{}_SrcPred.png'.format(global_step)),nrow=1, padding=0)
            

            # save_image(grid_image3,
            #     os.path.join('/fast/users/a1746546/code/pytorch-deeplab-xception/run/pascal/deeplab-resnet/imagesRs', 
            #     '{}_SrcImgGt.png'.format(global_step)),nrow=1, padding=0)
            
            # print('Translated images and saved into {}..!'.format(self.sample_path))

            # del coarse_map, refined_map, lbl_pred, lbl_pred_refined, fake_image_list 
            
            #########################################################################    
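The commented-out block above only gestures at saving the grids locally; a compact version of that idea using torchvision's save_image could look like the following sketch (the output directory is a placeholder, not a path from the project):

import os
from torchvision.utils import save_image

out_dir = 'run/visualization'  # placeholder output directory
os.makedirs(out_dir, exist_ok=True)
save_image(grid_image1, os.path.join(out_dir, '{}_image.png'.format(global_step)))
save_image(grid_image2, os.path.join(out_dir, '{}_pred.png'.format(global_step)))
save_image(grid_image3, os.path.join(out_dir, '{}_gt.png'.format(global_step)))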
            
Code example #31
0
        # Update the weights once in p['nAveGrad'] forward passes
        if aveGrad % p['nAveGrad'] == 0:
            writer.add_scalar('data/total_loss_iter', loss.item(), global_step)
            optimizer.step()
            optimizer.zero_grad()
            aveGrad = 0

        # Show 10 * 3 image results each epoch
        if num_img_tr < 10:
            plot_per_iter = num_img_tr
        else:
            plot_per_iter = 10
        if ii % (num_img_tr // plot_per_iter) == 0:
            img_tensor = torch.squeeze((inputs[:3].clone().cpu().data), 0)

            output_tensor = torch.squeeze(utils.decode_seg_map_sequence(torch.max(outputs[:3], 1)[1].detach().cpu().numpy()).type(torch.FloatTensor), 0)

            label_tensor = torch.squeeze(utils.decode_seg_map_sequence(torch.squeeze(labels[:3], 1).detach().cpu().numpy()).type(torch.FloatTensor), 0)
            images = []
            for img, output, label in zip(img_tensor, output_tensor, label_tensor):
                images.append(img)
                images.append(output)
                images.append(label)

            grid_image = make_grid(images, 3, normalize=True, scale_each=True)
            writer.add_image('Train', grid_image, global_step)


    # Save the model
    # TODO : bring the model to cpu before saving
    if (epoch % snapshot) == snapshot - 1: