Code Example #1
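Test-image export for the super-resolution generator: each frame's prediction is computed from the current low-res input concatenated with the warped, flattened previous output, and the prediction (plus the residual, if the model returns one) is written to TensorBoard.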
def test_images(epoch):
    def write_image(img, filename):
        out_img = img.cpu().detach().numpy()
        out_img *= 255.0
        out_img = out_img.clip(0, 255)
        out_img = np.uint8(out_img)
        writer.add_image(filename, out_img, epoch)

    with torch.no_grad():
        num_minibatch = len(testing_full_data_loader)
        pg = ProgressBar(num_minibatch,
                         'Test %d Images' % num_minibatch,
                         length=50)
        model.eval()
        if criterion.has_discriminator:
            criterion.discr_eval()
        for i, batch in enumerate(testing_full_data_loader):
            pg.print_progress_bar(i)
            input, flow = batch[0].to(device), batch[1].to(device)  # flow drives the warping below
            B, _, Cin, H, W = input.shape
            Hhigh = H * opt.upscale_factor
            Whigh = W * opt.upscale_factor
            Cout = output_channels

            channel_mask = [0, 1, 2]  #RGB

            previous_output = None
            for j in range(dataset_data.num_frames):
                # prepare input
                if j == 0 or opt.disableTemporal:
                    previous_warped = initialImage(input[:, 0, :, :, :], Cout,
                                                   opt.initialImage, False,
                                                   opt.upscale_factor)
                else:
                    previous_warped = models.VideoTools.warp_upscale(
                        previous_output,
                        flow[:, j - 1, :, :, :],
                        opt.upscale_factor,
                        special_mask=True)
                previous_warped_flattened = models.VideoTools.flatten_high(
                    previous_warped, opt.upscale_factor)
                single_input = torch.cat(
                    (input[:, j, :, :, :], previous_warped_flattened), dim=1)
                # run generator and cost
                prediction, residual = model(single_input)
                # write prediction image
                write_image(prediction[0, channel_mask],
                            'image%03d/frame%03d_prediction' % (i, j))
                # write residual image
                if residual is not None:
                    write_image(residual[0, channel_mask],
                                'image%03d/frame%03d_residual' % (i, j))
                # save output for next frame
                previous_output = prediction
        pg.print_progress_bar(num_minibatch)

    print("Test images sent to Tensorboard for visualization")
Code Example #2
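Benchmark loop over several models: low-res, high-res, and flow samples are loaded from .npy files, and each model is fed the current low-res frame together with the warped, flattened previous output.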
                file_flow = os.path.join(folder, "flow_%05d.npy" % i)
                # load them
                sample_low = torch.from_numpy(np.load(file_low)).to(device)
                sample_high = torch.from_numpy(np.load(file_high)).to(device)
                sample_flow = torch.from_numpy(np.load(file_flow)).to(device)
                # iterate over all models
                for model_index in range(len(MODELS)):
                    statistics[model_index].reset()
                    # iterate over time
                    NF, C, H, W = sample_low.shape
                    for j in range(NF):
                        # SUPER-RES
                        # prepare input
                        if j == 0:
                            previous_warped = initialImage(
                                sample_low[0:1, :, :, :],
                                model_list[model_index].prev_input_channels,
                                'zero', False, UPSCALING)
                        else:
                            previous_warped = models.VideoTools.warp_upscale(
                                previous_output,
                                sample_flow[j - 1:j, :, :, :],
                                UPSCALING,
                                special_mask=True)
                        previous_warped_flattened = models.VideoTools.flatten_high(
                            previous_warped, UPSCALING)
                        # run model
                        pred_color, previous_output = model_list[model_index](
                            sample_low[j:j + 1, :, :, :],
                            previous_warped_flattened)

                        ##DEBUG: save the images
Code Example #3
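Variant of the benchmark loop above in which the warped previous frame is flattened and concatenated with the current low-res frame into a single input tensor.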
 file_flow = os.path.join(folder, "flow_%05d.npy" % i)
 # load them
 sample_low = torch.from_numpy(np.load(file_low)).to(device)
 sample_high = torch.from_numpy(np.load(file_high)).to(device)
 sample_flow = torch.from_numpy(np.load(file_flow)).to(device)
 # iterate over all models
 for model_index in range(len(MODELS)):
     statistics[model_index].reset()
     # iterate over time
     NF, C, H, W = sample_low.shape
     for j in range(NF):
         # SUPER-RES
         # prepare input
         if j == 0:
             previous_warped = initialImage(
                 sample_low[0:1, :, :, :], 6, 'zero', False,
                 UPSCALING)
         else:
             previous_warped = models.VideoTools.warp_upscale(
                 previous_output,
                 sample_flow[j - 1:j, :, :, :],
                 UPSCALING,
                 special_mask=True)
             #previous_warped = previous_output
         previous_warped_flattened = models.VideoTools.flatten_high(
             previous_warped, UPSCALING)
         single_input = torch.cat((sample_low[j:j + 1, :, :, :],
                                   previous_warped_flattened),
                                  dim=1)
         # run model
         prediction, _ = model_list[model_index](single_input)
Code Example #4
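Adversarial training: for every batch, the discriminator and the generator are each optimized for a fixed number of steps, accumulating the loss over all timesteps of the sequence before stepping.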
def trainAdv_v2(epoch):
    """
    Second version of adverserial training, 
    for each batch, train both discriminator and generator.
    Not full epoch for each seperately
    """
    print("===> Epoch %d Training" % epoch)
    discr_scheduler.step()
    writer.add_scalar('train/lr_discr', discr_scheduler.get_lr()[0], epoch)
    gen_scheduler.step()
    writer.add_scalar('train/lr_gen', gen_scheduler.get_lr()[0], epoch)

    disc_steps = opt.advDiscrInitialSteps if opt.advDiscrInitialSteps is not None and epoch == 1 else opt.advDiscrMaxSteps
    gen_steps = opt.advGenMaxSteps

    num_minibatch = len(training_data_loader)
    model.train()
    criterion.discr_train()

    total_discr_loss = 0
    total_gen_loss = 0
    total_gt_score = 0
    total_pred_score = 0

    pg = ProgressBar(num_minibatch, 'Train', length=50)
    for iteration, batch in enumerate(training_data_loader):
        pg.print_progress_bar(iteration)
        input, flow, target = batch[0].to(device), batch[1].to(
            device), batch[2].to(device)
        B, _, Cout, Hhigh, Whigh = target.shape
        _, _, Cin, H, W = input.shape

        # DISCRIMINATOR
        for _ in range(disc_steps):
            discr_optimizer.zero_grad()
            gen_optimizer.zero_grad()
            loss = 0
            #iterate over all timesteps
            for j in range(dataset_data.num_frames):
                # prepare input for the generator
                if j == 0 or opt.disableTemporal:
                    previous_warped = initialImage(input[:, 0, :, :, :], Cout,
                                                   opt.initialImage, False,
                                                   opt.upscale_factor)
                    # loss takes the ground truth current image as warped previous image,
                    # to not introduce a bias and big loss for the first image
                    previous_warped_loss = target[:, 0, :, :, :]
                    previous_input = F.interpolate(input[:, 0, :, :, :],
                                                   size=(Hhigh, Whigh),
                                                   mode=opt.upsample)
                else:
                    previous_warped = models.VideoTools.warp_upscale(
                        previous_output,
                        flow[:, j - 1, :, :, :],
                        opt.upscale_factor,
                        special_mask=True)
                    previous_warped_loss = previous_warped
                    previous_input = F.interpolate(input[:, j - 1, :, :, :],
                                                   size=(Hhigh, Whigh),
                                                   mode=opt.upsample)
                    previous_input = models.VideoTools.warp_upscale(
                        previous_input,
                        flow[:, j - 1, :, :, :],
                        opt.upscale_factor,
                        special_mask=True)
                previous_warped_flattened = models.VideoTools.flatten_high(
                    previous_warped, opt.upscale_factor)
                single_input = torch.cat(
                    (input[:, j, :, :, :], previous_warped_flattened), dim=1)
                #evaluate generator
                with torch.no_grad():
                    prediction, _ = model(single_input)
                #prepare input for the discriminator
                gt_prev_warped = models.VideoTools.warp_upscale(
                    target[:, j - 1, :, :, :],
                    flow[:, j - 1, :, :, :],
                    opt.upscale_factor,
                    special_mask=True)
                #evaluate discriminator
                input_high = F.interpolate(input[:, j, :, :, :],
                                           size=(Hhigh, Whigh),
                                           mode=opt.upsample)
                disc_loss, gt_score, pred_score = criterion.train_discriminator(
                    input_high, target[:, j, :, :, :], previous_input,
                    gt_prev_warped, prediction, previous_warped_loss)
                loss += disc_loss
                total_gt_score += float(gt_score)
                total_pred_score += float(pred_score)
                # save output
                previous_output = torch.cat(
                    [
                        torch.clamp(prediction[:, 0:1, :, :], -1, +1),  # mask
                        ScreenSpaceShading.normalize(prediction[:, 1:4, :, :],
                                                     dim=1),
                        torch.clamp(prediction[:, 4:5, :, :], 0, +1),  # depth
                        torch.clamp(prediction[:, 5:6, :, :], 0, +1)  # ao
                    ],
                    dim=1)
            loss.backward()
            discr_optimizer.step()
        total_discr_loss += loss.item()

        # GENERATOR
        for _ in range(gen_steps):
            discr_optimizer.zero_grad()
            gen_optimizer.zero_grad()
            loss = 0
            #iterate over all timesteps
            for j in range(dataset_data.num_frames):
                # prepare input for the generator
                if j == 0 or opt.disableTemporal:
                    previous_warped = initialImage(input[:, 0, :, :, :], Cout,
                                                   opt.initialImage, False,
                                                   opt.upscale_factor)
                    # loss takes the ground truth current image as warped previous image,
                    # to not introduce a bias and big loss for the first image
                    previous_warped_loss = target[:, 0, :, :, :]
                    previous_input = F.interpolate(input[:, 0, :, :, :],
                                                   size=(Hhigh, Whigh),
                                                   mode=opt.upsample)
                else:
                    previous_warped = models.VideoTools.warp_upscale(
                        previous_output,
                        flow[:, j - 1, :, :, :],
                        opt.upscale_factor,
                        special_mask=True)
                    previous_warped_loss = previous_warped
                    previous_input = F.interpolate(input[:, j - 1, :, :, :],
                                                   size=(Hhigh, Whigh),
                                                   mode=opt.upsample)
                    previous_input = models.VideoTools.warp_upscale(
                        previous_input,
                        flow[:, j - 1, :, :, :],
                        opt.upscale_factor,
                        special_mask=True)
                previous_warped_flattened = models.VideoTools.flatten_high(
                    previous_warped, opt.upscale_factor)
                single_input = torch.cat(
                    (input[:, j, :, :, :], previous_warped_flattened), dim=1)
                #evaluate generator
                prediction, _ = model(single_input)
                #evaluate loss
                input_high = F.interpolate(input[:, j, :, :, :],
                                           size=(Hhigh, Whigh),
                                           mode=opt.upsample)
                loss0, _ = criterion(target[:, j, :, :, :], prediction,
                                     input_high, previous_input,
                                     previous_warped_loss)
                loss += loss0
                # save output
                previous_output = torch.cat(
                    [
                        torch.clamp(prediction[:, 0:1, :, :], -1, +1),  # mask
                        ScreenSpaceShading.normalize(prediction[:, 1:4, :, :],
                                                     dim=1),
                        torch.clamp(prediction[:, 4:5, :, :], 0, +1),  # depth
                        torch.clamp(prediction[:, 5:6, :, :], 0, +1)  # ao
                    ],
                    dim=1)
            loss.backward()
            gen_optimizer.step()
        total_gen_loss += loss.item()
    pg.print_progress_bar(num_minibatch)

    total_discr_loss /= num_minibatch * dataset_data.num_frames
    total_gen_loss /= num_minibatch * dataset_data.num_frames
    total_gt_score /= num_minibatch * dataset_data.num_frames
    total_pred_score /= num_minibatch * dataset_data.num_frames

    writer.add_scalar('train/discr_loss', total_discr_loss, epoch)
    writer.add_scalar('train/gen_loss', total_gen_loss, epoch)
    writer.add_scalar('train/gt_score', total_gt_score, epoch)
    writer.add_scalar('train/pred_score', total_pred_score, epoch)
    print("===> Epoch {} Complete".format(epoch))
Code Example #5
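Test pass: runs the generator over all frames, evaluates the criterion, and logs averaged losses and PSNR to TensorBoard.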
def test(epoch):
    avg_losses = defaultdict(float)
    with torch.no_grad():
        num_minibatch = len(testing_data_loader)
        pg = ProgressBar(num_minibatch, 'Testing', length=50)
        model.eval()
        if criterion.has_discriminator:
            criterion.discr_eval()
        for iteration, batch in enumerate(testing_data_loader, 0):
            pg.print_progress_bar(iteration)
            input, flow, target = batch[0].to(device), batch[1].to(
                device), batch[2].to(device)
            B, _, Cout, Hhigh, Whigh = target.shape
            _, _, Cin, H, W = input.shape

            previous_output = None
            for j in range(dataset_data.num_frames):
                # prepare input
                if j == 0 or opt.disableTemporal:
                    previous_warped = initialImage(input[:, 0, :, :, :], Cout,
                                                   opt.initialImage, False,
                                                   opt.upscale_factor)
                    # loss takes the ground truth current image as warped previous image,
                    # to not introduce a bias and big loss for the first image
                    previous_warped_loss = target[:, 0, :, :, :]
                    previous_input = F.interpolate(input[:, 0, :, :, :],
                                                   size=(Hhigh, Whigh),
                                                   mode=opt.upsample)
                else:
                    previous_warped = models.VideoTools.warp_upscale(
                        previous_output,
                        flow[:, j - 1, :, :, :],
                        opt.upscale_factor,
                        special_mask=True)
                    previous_warped_loss = previous_warped
                    previous_input = F.interpolate(input[:, j - 1, :, :, :],
                                                   size=(Hhigh, Whigh),
                                                   mode=opt.upsample)
                    previous_input = models.VideoTools.warp_upscale(
                        previous_input,
                        flow[:, j - 1, :, :, :],
                        opt.upscale_factor,
                        special_mask=True)
                previous_warped_flattened = models.VideoTools.flatten_high(
                    previous_warped, opt.upscale_factor)
                single_input = torch.cat(
                    (input[:, j, :, :, :], previous_warped_flattened), dim=1)
                # run generator
                prediction, _ = model(single_input)
                # evaluate cost
                input_high = F.interpolate(input[:, j, :, :, :],
                                           size=(Hhigh, Whigh),
                                           mode=opt.upsample)
                loss0, loss_values = criterion(target[:, j, :, :, :],
                                               prediction, input_high,
                                               previous_warped_loss)
                avg_losses['total_loss'] += loss0.item()
                psnr = 10 * log10(1 / max(1e-10, loss_values['mse']))
                avg_losses['psnr'] += psnr
                for key, value in loss_values.items():
                    avg_losses[str(key)] += value

                # save output for next frame
                previous_output = prediction
        pg.print_progress_bar(num_minibatch)
    for key in avg_losses.keys():
        avg_losses[key] /= num_minibatch * dataset_data.num_frames
    print("===> Avg. PSNR: {:.4f} dB".format(avg_losses['psnr']))
    print("  losses:", avg_losses)
    for key, value in avg_losses.items():
        writer.add_scalar('test/%s' % key, value, epoch)
Code Example #6
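Test-image export for the importance-map variant: the network predicts a heat map that drives adaptive smoothing of the ground truth, and the heat map, normals, shaded image, mask, and ambient occlusion are written to TensorBoard.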
    def test_images(epoch):
        def write_image(img, filename):
            out_img = img.cpu().detach().numpy()
            out_img *= 255.0
            out_img = out_img.clip(0, 255)
            out_img = np.uint8(out_img)
            writer.add_image(filename, out_img, epoch)

        with torch.no_grad():
            num_minibatch = len(testing_full_data_loader)
            pg = ProgressBar(num_minibatch,
                             'Test %d Images' % num_minibatch,
                             length=50)
            model.eval()
            if criterion.has_discriminator:
                criterion.discr_eval()
            for i, batch in enumerate(testing_full_data_loader):
                pg.print_progress_bar(i)
                input, flow, target = batch[0].to(device), batch[1].to(
                    device), batch[2].to(device)
                B, _, Cin, H, W = input.shape
                Hhigh = H * upscale_factor
                Whigh = W * upscale_factor
                Cout = output_channels

                channel_mask = [1, 2, 3]  #normal

                previous_output = None
                for j in range(dataset_data.num_frames):
                    # prepare input
                    if j == 0 or opt.disableTemporal:
                        previous_warped = initialImage(input[:, 0, :, :, :],
                                                       Cout, opt.initialImage,
                                                       False, upscale_factor)
                    else:
                        previous_warped = models.VideoTools.warp_upscale(
                            previous_output,
                            flow[:, j - 1, :, :, :],
                            upscale_factor,
                            special_mask=True)
                    # TODO: enable temporal component again
                    #previous_warped_flattened = models.VideoTools.flatten_high(previous_warped, opt.upscale_factor)
                    #single_input = torch.cat((
                    #        input[:,j,:,:,:],
                    #        previous_warped_flattened),
                    #    dim=1)
                    single_input = input[:, j, :, :, :]
                    # run generator
                    heatMap = model(single_input)
                    heatMap = postprocess(heatMap)
                    prediction = importance.adaptiveSmoothing(
                        target[:, j, :, :, :].contiguous(),
                        1 / heatMap.unsqueeze(1),
                        opt.distanceToStandardDeviation)
                    # write heatmap
                    write_image(heatMap[0].unsqueeze(0),
                                'image%03d/frame%03d_heatmap' % (i, j))
                    ## write warped previous frame
                    #write_image(previous_warped[0, channel_mask], 'image%03d/frame%03d_warped' % (i, j))
                    # write predicted normals
                    prediction[:, 1:4, :, :] = ScreenSpaceShading.normalize(
                        prediction[:, 1:4, :, :], dim=1)
                    write_image(prediction[0, channel_mask],
                                'image%03d/frame%03d_prediction' % (i, j))
                    # write shaded image if network runs in deferredShading mode
                    shaded_image = shading(prediction)
                    write_image(shaded_image[0],
                                'image%03d/frame%03d_shaded' % (i, j))
                    # write mask
                    write_image(prediction[0, 0:1, :, :] * 0.5 + 0.5,
                                'image%03d/frame%03d_mask' % (i, j))
                    # write ambient occlusion
                    write_image(prediction[0, 5:6, :, :],
                                'image%03d/frame%03d_ao' % (i, j))
                    # save output for next frame
                    previous_output = prediction
            pg.print_progress_bar(num_minibatch)

        print("Test images sent to Tensorboard for visualization")
Code Example #7
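Supervised training loop: per-frame losses are accumulated over the whole sequence and backpropagated once per batch.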
def trainNormal(epoch):
    epoch_loss = 0
    num_minibatch = len(training_data_loader)
    pg = ProgressBar(num_minibatch, 'Training', length=50)
    model.train()
    for iteration, batch in enumerate(training_data_loader, 0):
        pg.print_progress_bar(iteration)
        input, flow, target = batch[0].to(device), batch[1].to(
            device), batch[2].to(device)
        B, _, Cout, Hhigh, Whigh = target.shape
        _, _, Cin, H, W = input.shape
        assert (Cout == output_channels)
        assert (Cin == input_channels)
        assert (H == dataset_data.crop_size)
        assert (W == dataset_data.crop_size)
        assert (Hhigh == dataset_data.crop_size * opt.upscale_factor)
        assert (Whigh == dataset_data.crop_size * opt.upscale_factor)

        optimizer.zero_grad()

        previous_output = None
        loss = 0
        for j in range(dataset_data.num_frames):
            # prepare input
            if j == 0 or opt.disableTemporal:
                previous_warped = initialImage(input[:, 0, :, :, :], Cout,
                                               opt.initialImage, False,
                                               opt.upscale_factor)
                # loss takes the ground truth current image as warped previous image,
                # to not introduce a bias and big loss for the first image
                previous_warped_loss = target[:, 0, :, :, :]
                previous_input = F.interpolate(input[:, 0, :, :, :],
                                               size=(Hhigh, Whigh),
                                               mode=opt.upsample)
            else:
                previous_warped = models.VideoTools.warp_upscale(
                    previous_output,
                    flow[:, j - 1, :, :, :],
                    opt.upscale_factor,
                    special_mask=True)
                previous_warped_loss = previous_warped
                previous_input = F.interpolate(input[:, j - 1, :, :, :],
                                               size=(Hhigh, Whigh),
                                               mode=opt.upsample)
                previous_input = models.VideoTools.warp_upscale(
                    previous_input,
                    flow[:, j - 1, :, :, :],
                    opt.upscale_factor,
                    special_mask=True)
            previous_warped_flattened = models.VideoTools.flatten_high(
                previous_warped, opt.upscale_factor)
            single_input = torch.cat(
                (input[:, j, :, :, :], previous_warped_flattened), dim=1)
            # run generator
            prediction, _ = model(single_input)
            # evaluate cost
            input_high = F.interpolate(input[:, j, :, :, :],
                                       size=(Hhigh, Whigh),
                                       mode=opt.upsample)
            loss0, _ = criterion(target[:, j, :, :, :], prediction, input_high,
                                 previous_warped_loss)
            del _
            loss += loss0
            epoch_loss += loss0.item()
            # save output
            previous_output = prediction

        loss.backward()
        optimizer.step()
    pg.print_progress_bar(num_minibatch)
    epoch_loss /= num_minibatch * dataset_data.num_frames
    print("===> Epoch {} Complete: Avg. Loss: {:.4f}".format(
        epoch, epoch_loss))
    writer.add_scalar('train/total_loss', epoch_loss, epoch)
    writer.add_scalar('train/lr', scheduler.get_lr()[0], epoch)
    scheduler.step()
Code Example #8
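Training loop for the importance-map variant; the temporal component is currently disabled (only the first frame is processed) and a regularizer pulls the mean of the cropped heat map towards 0.5.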
    def trainNormal(epoch):
        epoch_loss = 0
        num_minibatch = len(training_data_loader)
        pg = ProgressBar(num_minibatch, 'Training', length=50)
        model.train()
        for iteration, batch in enumerate(training_data_loader, 0):
            pg.print_progress_bar(iteration)
            input, flow, target = batch[0].to(device), batch[1].to(
                device), batch[2].to(device)
            B, _, Cout, Hhigh, Whigh = target.shape
            _, _, Cin, H, W = input.shape
            assert (Cout == output_channels)
            assert (Cin == input_channels)
            assert (H == dataset_data.crop_size)
            assert (W == dataset_data.crop_size)
            assert (Hhigh == dataset_data.crop_size * upscale_factor)
            assert (Whigh == dataset_data.crop_size * upscale_factor)

            optimizer.zero_grad()

            previous_output = None
            loss = 0
            for j in range(1):  #range(dataset_data.num_frames):
                # prepare input
                if j == 0 or opt.disableTemporal:
                    previous_warped = initialImage(input[:, 0, :, :, :], Cout,
                                                   opt.initialImage, False,
                                                   upscale_factor)
                    # loss takes the ground truth current image as warped previous image,
                    # to not introduce a bias and big loss for the first image
                    previous_warped_loss = target[:, 0, :, :, :]
                    previous_input = F.interpolate(input[:, 0, :, :, :],
                                                   size=(Hhigh, Whigh),
                                                   mode='bilinear')
                else:
                    previous_warped = models.VideoTools.warp_upscale(
                        previous_output,
                        flow[:, j - 1, :, :, :],
                        upscale_factor,
                        special_mask=True)
                    previous_warped_loss = previous_warped
                    previous_input = F.interpolate(input[:, j - 1, :, :, :],
                                                   size=(Hhigh, Whigh),
                                                   mode='bilinear')
                    previous_input = models.VideoTools.warp_upscale(
                        previous_input,
                        flow[:, j - 1, :, :, :],
                        upscale_factor,
                        special_mask=True)
                # TODO: enable temporal component again
                #previous_warped_flattened = models.VideoTools.flatten_high(previous_warped, opt.upscale_factor)
                #single_input = torch.cat((
                #        input[:,j,:,:,:],
                #        previous_warped_flattened),
                #    dim=1)
                single_input = input[:, j, :, :, :]
                # run generator
                heatMap = model(single_input)
                heatMapCrop = heatMap[:, opt.lossBorderPadding:-opt.lossBorderPadding,
                                      opt.lossBorderPadding:-opt.lossBorderPadding]
                heatMap = postprocess(heatMap)
                prediction = importance.adaptiveSmoothing(
                    target[:, j, :, :, :].contiguous(),
                    1 / heatMap.unsqueeze(1), opt.distanceToStandardDeviation)
                # evaluate cost
                input_high = F.interpolate(input[:, j, :, :, :],
                                           size=(Hhigh, Whigh),
                                           mode='bilinear')
                loss0, _ = criterion(target[:, j, :, :, :], prediction,
                                     input_high, previous_input,
                                     previous_warped_loss)
                del _
                loss0 += opt.lossHeatmapMean * (
                    (0.5 - torch.mean(heatMapCrop))**2)
                #print("Mean:",torch.mean(heatMapCrop).item())
                loss += loss0
                epoch_loss += loss0.item()
                # save output
                previous_output = prediction

            #loss.retain_grad()
            loss.backward()
            optimizer.step()
        pg.print_progress_bar(num_minibatch)
        epoch_loss /= num_minibatch * dataset_data.num_frames
        print("===> Epoch {} Complete: Avg. Loss: {:.4f}".format(
            epoch, epoch_loss))
        writer.add_scalar('train/total_loss', epoch_loss, epoch)
        writer.add_scalar('train/lr', scheduler.get_lr()[0], epoch)
        scheduler.step()
Code Example #9
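Test pass for the importance-map variant, additionally tracking the minimum, maximum, and mean of the cropped heat map.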
    def test(epoch):
        avg_losses = defaultdict(float)
        heatmap_min = 1e10
        heatmap_max = -1e10
        heatmap_avg = heatmap_count = 0
        with torch.no_grad():
            num_minibatch = len(testing_data_loader)
            pg = ProgressBar(num_minibatch, 'Testing', length=50)
            model.eval()
            if criterion.has_discriminator:
                criterion.discr_eval()
            for iteration, batch in enumerate(testing_data_loader, 0):
                pg.print_progress_bar(iteration)
                input, flow, target = batch[0].to(device), batch[1].to(
                    device), batch[2].to(device)
                B, _, Cout, Hhigh, Whigh = target.shape
                _, _, Cin, H, W = input.shape

                previous_output = None
                for j in range(dataset_data.num_frames):
                    # prepare input
                    if j == 0 or opt.disableTemporal:
                        previous_warped = initialImage(input[:, 0, :, :, :],
                                                       Cout, opt.initialImage,
                                                       False, upscale_factor)
                        # loss takes the ground truth current image as warped previous image,
                        # to not introduce a bias and big loss for the first image
                        previous_warped_loss = target[:, 0, :, :, :]
                        previous_input = F.interpolate(input[:, 0, :, :, :],
                                                       size=(Hhigh, Whigh),
                                                       mode='bilinear')
                    else:
                        previous_warped = models.VideoTools.warp_upscale(
                            previous_output,
                            flow[:, j - 1, :, :, :],
                            upscale_factor,
                            special_mask=True)
                        previous_warped_loss = previous_warped
                        previous_input = F.interpolate(input[:,
                                                             j - 1, :, :, :],
                                                       size=(Hhigh, Whigh),
                                                       mode='bilinear')
                        previous_input = models.VideoTools.warp_upscale(
                            previous_input,
                            flow[:, j - 1, :, :, :],
                            upscale_factor,
                            special_mask=True)
                    # TODO: enable temporal component again
                    #previous_warped_flattened = models.VideoTools.flatten_high(previous_warped, opt.upscale_factor)
                    #single_input = torch.cat((
                    #        input[:,j,:,:,:],
                    #        previous_warped_flattened),
                    #    dim=1)
                    single_input = input[:, j, :, :, :]
                    # run generator
                    heatMap = model(single_input)
                    heatMapCrop = heatMap[:, opt.lossBorderPadding:-opt.lossBorderPadding,
                                          opt.lossBorderPadding:-opt.lossBorderPadding]
                    heatmap_min = min(heatmap_min,
                                      torch.min(heatMapCrop).item())
                    heatmap_max = max(heatmap_max,
                                      torch.max(heatMapCrop).item())
                    heatmap_avg += torch.mean(heatMapCrop).item()
                    heatmap_count += 1
                    heatMap = postprocess(heatMap)
                    prediction = importance.adaptiveSmoothing(
                        target[:, j, :, :, :].contiguous(),
                        1 / heatMap.unsqueeze(1),
                        opt.distanceToStandardDeviation)
                    # evaluate cost
                    input_high = F.interpolate(input[:, j, :, :, :],
                                               size=(Hhigh, Whigh),
                                               mode='bilinear')
                    loss0, loss_values = criterion(target[:, j, :, :, :],
                                                   prediction, input_high,
                                                   previous_input,
                                                   previous_warped_loss)
                    avg_losses['total_loss'] += loss0.item()
                    psnr = 10 * log10(
                        1 / max(1e-10, loss_values[('mse', 'color')]))
                    avg_losses['psnr'] += psnr
                    for key, value in loss_values.items():
                        avg_losses[str(key)] += value

                    # save output for next frame
                    previous_output = torch.cat(
                        [
                            torch.clamp(prediction[:, 0:1, :, :], -1,
                                        +1),  # mask
                            ScreenSpaceShading.normalize(
                                prediction[:, 1:4, :, :], dim=1),
                            torch.clamp(prediction[:, 4:5, :, :], 0,
                                        +1),  # depth
                            torch.clamp(prediction[:, 5:6, :, :], 0, +1)  # ao
                        ],
                        dim=1)
            pg.print_progress_bar(num_minibatch)
        for key in avg_losses.keys():
            avg_losses[key] /= num_minibatch * dataset_data.num_frames
        print("===> Avg. PSNR: {:.4f} dB".format(avg_losses['psnr']))
        print("  losses:", avg_losses)
        for key, value in avg_losses.items():
            writer.add_scalar('test/%s' % key, value, epoch)
        print("  heatmap: min=%f, max=%f, avg=%f" %
              (heatmap_min, heatmap_max, heatmap_avg / heatmap_count))
        writer.add_scalar('test/heatmap_min', heatmap_min, epoch)
        writer.add_scalar('test/heatmap_max', heatmap_max, epoch)
        writer.add_scalar('test/heatmap_avg', heatmap_avg / heatmap_count,
                          epoch)
Code Example #10
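Test-image export that also logs the warped previous frame, shaded output, mask, and ambient occlusion; the normal channels are re-normalized before the prediction is reused as the next frame's input.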
def test_images(epoch):
    def write_image(img, filename):
        out_img = img.cpu().detach().numpy()
        out_img *= 255.0
        out_img = out_img.clip(0, 255)
        out_img = np.uint8(out_img)
        writer.add_image(filename, out_img, epoch)

    with torch.no_grad():
        num_minibatch = len(testing_full_data_loader)
        pg = ProgressBar(num_minibatch, 'Test %d Images'%num_minibatch, length=50)
        model.eval()
        if criterion.has_discriminator:
            criterion.discr_eval()
        for i,batch in enumerate(testing_full_data_loader):
            pg.print_progress_bar(i)
            input, flow = batch[0].to(device), batch[1].to(device)
            B, _, Cin, H, W = input.shape
            Hhigh = H * opt.upscale_factor
            Whigh = W * opt.upscale_factor
            Cout = output_channels

            channel_mask = [1, 2, 3] #normal

            previous_output = None
            for j in range(dataset_data.num_frames):
                # prepare input
                if j == 0 or opt.disableTemporal:
                    previous_warped = initialImage(input[:,0,:,:,:], Cout, 
                                               opt.initialImage, False, opt.upscale_factor)
                else:
                    previous_warped = models.VideoTools.warp_upscale(
                        previous_output, 
                        flow[:, j-1, :, :, :], 
                        opt.upscale_factor,
                        special_mask = True)
                previous_warped_flattened = models.VideoTools.flatten_high(previous_warped, opt.upscale_factor)
                single_input = torch.cat((
                        input[:,j,:,:,:],
                        previous_warped_flattened),
                    dim=1)
                # write warped previous frame
                write_image(previous_warped[0, channel_mask], 'image%03d/frame%03d_warped' % (i, j))
                # run generator and cost
                prediction, residual = model(single_input)
                # normalize normal
                prediction[:,1:4,:,:] = ScreenSpaceShading.normalize(prediction[:,1:4,:,:], dim=1)
                # write prediction image
                write_image(prediction[0, channel_mask], 'image%03d/frame%03d_prediction' % (i, j))
                # write residual image
                if residual is not None:
                    write_image(residual[0, channel_mask], 'image%03d/frame%03d_residual' % (i, j))
                # write shaded image if network runs in deferredShading mode
                shaded_image = shading(prediction)
                write_image(shaded_image[0], 'image%03d/frame%03d_shaded' % (i, j))
                # write mask
                write_image(prediction[0, 0:1, :, :]*0.5+0.5, 'image%03d/frame%03d_mask' % (i, j))
                # write ambient occlusion
                write_image(prediction[0, 5:6, :, :], 'image%03d/frame%03d_ao' % (i, j))
                # save output for next frame
                previous_output = torch.cat([
                    torch.clamp(prediction[:,0:1,:,:], -1, +1), # mask
                    prediction[:,1:4,:,:], #already normalized
                    torch.clamp(prediction[:,4:5,:,:], 0, +1), # depth
                    torch.clamp(prediction[:,5:6,:,:], 0, +1) # ao
                    ], dim=1)
        pg.print_progress_bar(num_minibatch)

    print("Test images sent to Tensorboard for visualization")
Code Example #11
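Inference entry point: the two flow channels are inpainted with OpenCV where the mask is empty, the previous result is warped and flattened, and the network is run on the concatenated input.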
    def inference(self, current_low, prev_high):
        """
        Performs the superresolution.
        current_low: low-resolution input from the renderer, 10 channels (RGB, mask, normal, depth, flow), GPU. Format: (B,C,H,W)
        prev_high: RGB-image of the previous inference result
        """
        with torch.no_grad():
            current_low_cpu = current_low.cpu().numpy()[0]
            # compute flow
            flow_inpaint = np.stack(
                (cv.inpaint(current_low_cpu[8, :, :],
                            np.uint8(current_low_cpu[3, :, :] == 0), 3,
                            cv.INPAINT_NS),
                 cv.inpaint(current_low_cpu[9, :, :],
                            np.uint8(current_low_cpu[3, :, :] == 0), 3,
                            cv.INPAINT_NS)),
                axis=0).astype(np.float32)
            flow = torch.unsqueeze(torch.from_numpy(flow_inpaint),
                                   dim=0).to(self.device)
            #input
            if self.unshaded:
                input = torch.cat((current_low[:, 3:4, :, :] * 2 - 1,
                                   current_low[:, 4:8, :, :]),
                                  dim=1)
                if prev_high is None:
                    previous_warped = initialImage(
                        input, 6, self.initial_image_mode, self.inverse_ao,
                        self.upscale_factor).to(self.device)
                else:
                    previous_warped = VideoTools.warp_upscale(
                        prev_high.to(self.device),
                        flow,
                        self.upscale_factor,
                        special_mask=True)
            else:
                if self.has_normal and self.has_depth:
                    input = torch.clamp(current_low[:, 0:8, :, :], 0, 1)
                elif self.has_normal:  #no depth
                    input = current_low[:, 0:7, :, :]
                elif self.has_depth:  #no normal
                    input = torch.cat(
                        (current_low[:, 0:4, :, :], current_low[:, 7:8, :, :]),
                        dim=1)
                else:  #only color+mask
                    input = current_low[:, 0:4, :, :]
                if prev_high is None:
                    #prev_high = np.zeros(
                    #    (3, input.shape[2]*self.upscale_factor, input.shape[3]*self.upscale_factor),
                    #    dtype=current_low.dtype)
                    # initialImage expects a flag argument before the upscale
                    # factor (cf. the call in the unshaded branch above)
                    prev_high = initialImage(input, 3, self.initial_image_mode,
                                             False, self.upscale_factor)
                previous_warped = VideoTools.warp_upscale(prev_high.to(
                    self.device),
                                                          flow,
                                                          self.upscale_factor,
                                                          special_mask=False)
            previous_warped_flattened = VideoTools.flatten_high(
                previous_warped, self.upscale_factor)
            # run the network
            single_input = torch.cat((input, previous_warped_flattened), dim=1)
            prediction, _ = self.model(single_input)

        return prediction
Code Example #12
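Test pass of the variant that operates at the input resolution (upscale factor 1): besides the losses, side-by-side comparison images of prediction and ground truth (and optionally the external flow) are written to TensorBoard.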
    def test(epoch, save_images):
        def write_image(img, filename):
            out_img = img.cpu().detach().numpy()
            out_img *= 255.0
            out_img = out_img.clip(0, 255)
            out_img = np.uint8(out_img)
            writer.add_image(filename, out_img, epoch)

        avg_losses = defaultdict(float)
        with torch.no_grad():
            num_minibatch = len(test_set) // opt.testBatchSize
            pg = ProgressBar(num_minibatch, 'Testing ', length=50)
            model.eval()
            for iteration, batch in enumerate(testing_data_loader, 0):
                pg.print_progress_bar(iteration)
                input, target = batch[0].to(device), batch[1].to(device)
                B, T, Cout, H, W = target.shape
                _, _, Cin, _, _ = input.shape
                #assert(Cout == output_channels)
                #assert(Cin == input_channels)

                input_flow = input[:,:,6:8,:,:]
                input, input_mask = preprocessInput(input)

                previous_output = None
                for j in range(T):
                    # prepare input
                    flow = input_flow[:,j-1,:,:,:]
                    if j == 0 or opt.disableTemporal:
                        previous_input = utils.initialImage(input[:,0,0:output_channels,:,:], output_channels, 
                                                       opt.initialImage, False, 1)
                        # loss takes the ground truth current image as warped previous image,
                        # to not introduce a bias and big loss for the first image
                        previous_warped_loss = target[:,0,:,:,:]
                    else:
                        previous_input = models.VideoTools.warp_upscale(
                            previous_output, 
                            flow, 
                            1,
                            special_mask = opt.warpSpecialMask)
                        previous_warped_loss = previous_input
                    single_input = torch.cat((
                            input[:,j,:,:,:],
                            previous_input),
                        dim=1)
                    if opt.externalFlow:
                        # remove flow from the input and output
                        single_input = torch.cat((
                            single_input[:,:6,:,:],
                            single_input[:,8:,:,:]),
                            dim=1)
                    # run generator
                    prediction, masks = model(single_input, input_mask[:,j,:,:,:])
                    # evaluate cost
                    loss0, loss_values = criterion(
                        target[:,j,:output_channels,:,:], 
                        prediction, 
                        previous_warped_loss,
                        no_temporal_loss = (j==0))
                    # accumulate average values
                    avg_losses['total_loss'] += loss0.item()
                    psnr = 10 * log10(1 / max(1e-10, loss_values[('mse','color')]))
                    avg_losses['psnr'] += psnr
                    for key, value in loss_values.items():
                        avg_losses[str(key)] += value
                    # save output
                    if opt.externalFlow:
                        previous_output = torch.cat([
                            torch.clamp(prediction[:,0:1,:,:], -1, +1), # mask
                            utils.ScreenSpaceShading.normalize(prediction[:,1:4,:,:], dim=1),
                            torch.clamp(prediction[:,4:5,:,:], 0, +1), # depth
                            torch.clamp(prediction[:,5:6,:,:], 0, +1) # ao
                            ], dim=1)
                    else:
                        previous_output = torch.cat([
                            torch.clamp(prediction[:,0:1,:,:], -1, +1), # mask
                            utils.ScreenSpaceShading.normalize(prediction[:,1:4,:,:], dim=1),
                            torch.clamp(prediction[:,4:5,:,:], 0, +1), # depth
                            torch.clamp(prediction[:,5:6,:,:], 0, +1), # ao
                            torch.clamp(prediction[:,6:8,:,:], -1, +1) # flow
                            ], dim=1)

                    # save images
                    imagesToSave = opt.numVisImages - iteration*opt.testBatchSize
                    if imagesToSave>0 and save_images:
                        # for each image in the batch
                        for b in range(min(B, imagesToSave)):
                            imgID = b + iteration * B
                            # mask
                            if j==0:
                                for layer,mask in enumerate(masks):
                                    write_image(mask[b,:,:,:], 'image%03d/debug/mask%d'%(imgID, layer))
                            if opt.disableTemporal:
                                # images, two in a row: current prediction, ground truth
                                maskPredGT = torch.cat([previous_output[b,0:1,:,:], target[b,j,0:1,:,:]], dim=2)*0.5+0.5
                                write_image(maskPredGT, 'image%03d/mask/frame%03d' % (imgID, j))
                                normalPredGT = torch.cat([previous_output[b,1:4,:,:], target[b,j,1:4,:,:]], dim=2)*0.5+0.5
                                write_image(normalPredGT, 'image%03d/normal/frame%03d' % (imgID, j))
                                depthPredGT = torch.cat([previous_output[b,4:5,:,:], target[b,j,4:5,:,:]], dim=2)
                                write_image(depthPredGT, 'image%03d/depth/frame%03d' % (imgID, j))
                                aoPredGT = torch.cat([previous_output[b,5:6,:,:], target[b,j,5:6,:,:]], dim=2)
                                write_image(aoPredGT, 'image%03d/ao/frame%03d' % (imgID, j))
                            else:
                                # images, three in a row: previous-warped, current prediction, ground truth
                                maskPredGT = torch.cat([previous_input[b,0:1,:,:], previous_output[b,0:1,:,:], target[b,j,0:1,:,:]], dim=2)*0.5+0.5
                                write_image(maskPredGT, 'image%03d/mask/frame%03d' % (imgID, j))
                                normalPredGT = torch.cat([previous_input[b,1:4,:,:], previous_output[b,1:4,:,:], target[b,j,1:4,:,:]], dim=2)*0.5+0.5
                                write_image(normalPredGT, 'image%03d/normal/frame%03d' % (imgID, j))
                                depthPredGT = torch.cat([previous_input[b,4:5,:,:], previous_output[b,4:5,:,:], target[b,j,4:5,:,:]], dim=2)
                                write_image(depthPredGT, 'image%03d/depth/frame%03d' % (imgID, j))
                                aoPredGT = torch.cat([previous_input[b,5:6,:,:], previous_output[b,5:6,:,:], target[b,j,5:6,:,:]], dim=2)
                                write_image(aoPredGT, 'image%03d/ao/frame%03d' % (imgID, j))
                            # flow
                            if opt.externalFlow:
                                flowPredGT = torch.cat([
                                    torch.cat([flow[b,:,:,:], torch.zeros_like(target[b,j,6:7,:,:])], dim=0), 
                                    torch.cat([target[b,j,6:8,:,:], torch.zeros_like(target[b,j,6:7,:,:]) ], dim=0)], dim=2)*20+0.5
                                write_image(flowPredGT, 'image%03d/flow/frame%03d' % (imgID, j))
                            else:
                                flowPredGT = torch.cat([
                                    torch.cat([previous_output[b,6:8,:,:], torch.zeros_like(previous_output[b,6:7,:,:])], dim=0), 
                                    torch.cat([target[b,j,6:8,:,:], torch.zeros_like(target[b,j,6:7,:,:]) ], dim=0)], dim=2)*20+0.5
                                write_image(flowPredGT, 'image%03d/flow/frame%03d' % (imgID, j))


            pg.print_progress_bar(num_minibatch)
        for key in avg_losses.keys():
            avg_losses[key] /= num_minibatch * T
        print("===> Avg. PSNR: {:.4f} dB".format(avg_losses['psnr']))
        print("  losses:",avg_losses)
        for key, value in avg_losses.items():
            writer.add_scalar('test/%s'%key, value, epoch)
        writer.flush()
Code Example #13
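Training counterpart of the test pass above: the previous prediction is warped with the flow taken from the input and fed back as the recurrent input.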
    def train(epoch):
        epoch_loss = 0
        num_minibatch = len(train_set) // opt.trainBatchSize
        pg = ProgressBar(num_minibatch, 'Training', length=50)
        model.train()
        for iteration, batch in enumerate(training_data_loader, 0):
            pg.print_progress_bar(iteration)
            input, target = batch[0].to(device), batch[1].to(device)
            B, T, Cout, H, W = target.shape
            _, _, Cin, _, _ = input.shape
            #assert(Cout == output_channels)
            #assert(Cin == input_channels)

            input_flow = input[:,:,6:8,:,:]
            input, input_mask = preprocessInput(input)
            optimizer.zero_grad()

            previous_output = None
            loss = 0
            for j in range(T):
                # prepare input
                flow = input_flow[:,j-1,:,:,:]
                if j == 0 or opt.disableTemporal:
                    previous_input = utils.initialImage(input[:,0,0:output_channels,:,:], output_channels, 
                                                   opt.initialImage, False, 1)
                    # loss takes the ground truth current image as warped previous image,
                    # to not introduce a bias and big loss for the first image
                    previous_warped_loss = target[:,0,:,:,:]
                else:
                    previous_input = models.VideoTools.warp_upscale(
                        previous_output, 
                        flow, 
                        1,
                        special_mask = opt.warpSpecialMask)
                    previous_warped_loss = previous_input
                single_input = torch.cat((
                        input[:,j,:,:,:],
                        previous_input),
                    dim=1)
                if opt.externalFlow:
                    # remove flow from the input and output
                    single_input = torch.cat((
                        single_input[:,:6,:,:],
                        single_input[:,8:,:,:]),
                        dim=1)
                # run generator
                prediction, _ = model(single_input, input_mask[:,j,:,:,:])
                # evaluate cost
                loss0,_ = criterion(
                    target[:,j,:output_channels,:,:], 
                    prediction, 
                    previous_warped_loss,
                    no_temporal_loss = (j==0))
                del _
                loss += loss0
                epoch_loss += loss0.item()
                # save output
                if opt.externalFlow:
                    previous_output = torch.cat([
                        torch.clamp(prediction[:,0:1,:,:], -1, +1), # mask
                        utils.ScreenSpaceShading.normalize(prediction[:,1:4,:,:], dim=1),
                        torch.clamp(prediction[:,4:5,:,:], 0, +1), # depth
                        torch.clamp(prediction[:,5:6,:,:], 0, +1) # ao
                        ], dim=1)
                else:
                    previous_output = torch.cat([
                        torch.clamp(prediction[:,0:1,:,:], -1, +1), # mask
                        utils.ScreenSpaceShading.normalize(prediction[:,1:4,:,:], dim=1),
                        torch.clamp(prediction[:,4:5,:,:], 0, +1), # depth
                        torch.clamp(prediction[:,5:6,:,:], 0, +1), # ao
                        torch.clamp(prediction[:,6:8,:,:], -1, +1) # flow
                        ], dim=1)

            loss.backward()
            optimizer.step()
        pg.print_progress_bar(num_minibatch)
        epoch_loss /= num_minibatch * T
        print("===> Epoch {} Complete: Avg. Loss: {:.4f}".format(epoch, epoch_loss))
        writer.add_scalar('train/total_loss', epoch_loss, epoch)
        writer.add_scalar('train/lr', scheduler.get_lr()[0], epoch)
        scheduler.step()
Code Example #14
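Evaluation snippet with a hard-coded 4x upscale factor, running each model frame by frame on the warped-and-flattened previous output.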
 pg = ProgressBar(num_minibatch,
                  'Test %d Images' % num_minibatch,
                  length=50)
 for i, batch in enumerate(data_loader):
     pg.print_progress_bar(i)
     input, flow, high = batch[0].to(device), batch[1].to(
         device), batch[2].to(device)
     B, _, Cin, H, W = input.shape
     Hhigh = H * 4
     Whigh = W * 4
     previous_output = None
     # loop over frames
     for j in range(dataset_data.num_frames):
         # prepare input
         if j == 0:
             previous_warped = initialImage(input[:, 0, :, :, :], 6,
                                            'zero', False, 4)
         else:
             previous_warped = models.VideoTools.warp_upscale(
                 previous_output,
                 flow[:, j - 1, :, :, :],
                 4,
                 special_mask=True)
         previous_warped_flattened = models.VideoTools.flatten_high(
             previous_warped, 4)
         single_input = torch.cat(
             (input[:, j, :, :, :], previous_warped_flattened), dim=1)
         # run model
         prediction, _ = modelList[model](single_input)
         # shade output
         prediction[:, 1:4, :, :] = ScreenSpaceShading.normalize(
             prediction[:, 1:4, :, :], dim=1)