Code Example #1
File: benchmark_p2net.py  Project: iamreallyi9/g_d
def mark_pru():
    net_e = load_encoder(after_f=True)
    net_e = nn.DataParallel(net_e).cuda()
    net_d = load_decoder(after_f=True)
    net_d = nn.DataParallel(net_d).cuda()

    data_loader = nyu_set.use_nyu_data(batch_s=1,
                                       max_len=400,
                                       isBenchmark=True)
    writer1 = SummaryWriter('/data/consistent_depth/gj_dir/benchmark_p2')

    with torch.no_grad():
        num = 0  # batches processed so far
        su = 0   # running sum of the a3 (max ratio < 1.25**3) accuracy
        for data, label in data_loader:
            num += 1
            data = data.double().cuda()  # no_grad() is active, so the deprecated Variable wrapper is unnecessary

            prediction_d = net_d(net_e(data))

            abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3 = compute_depth_errors(
                label, prediction_d)

            writer1.add_images('pre', prediction_d, global_step=num)

            writer1.add_scalar('rmse', rmse, global_step=num)
            writer1.add_scalar("abs_rel", abs_rel, global_step=num)
            writer1.add_scalar('sq_rel', sq_rel, global_step=num)
            writer1.add_scalar('rmse_log', rmse_log, global_step=num)
            writer1.add_scalar('a1', a1, global_step=num)
            writer1.add_scalar('a2', a2, global_step=num)
            writer1.add_scalar('a3', a3, global_step=num)

            writer1.add_images('label', label, global_step=num)
            su += a3.item()
            print(su / num)  # running mean of the a3 accuracy so far
            # scaled_disp, _ = disp_to_depth(disp, 0.1, 10)
            # Saving colormapped depth image
            # vmax = np.percentile(disp_resized_np, 95)
        writer1.close()
        print('-> Done!')
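
The compute_depth_errors helper is called in examples #1 and #5 but not shown. Below is a minimal sketch following the standard Monodepth2-style evaluation metrics; this project's actual implementation may mask or clamp values differently.

# Hedged sketch: Monodepth2-style depth metrics (abs_rel, sq_rel, rmse,
# rmse_log, and the a1/a2/a3 threshold accuracies).
import torch

def compute_depth_errors(gt, pred):
    thresh = torch.max(gt / pred, pred / gt)
    a1 = (thresh < 1.25).double().mean()
    a2 = (thresh < 1.25 ** 2).double().mean()
    a3 = (thresh < 1.25 ** 3).double().mean()

    rmse = torch.sqrt(((gt - pred) ** 2).mean())
    rmse_log = torch.sqrt(((torch.log(gt) - torch.log(pred)) ** 2).mean())

    abs_rel = ((gt - pred).abs() / gt).mean()
    sq_rel = (((gt - pred) ** 2) / gt).mean()

    return abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3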
Code Example #2
def benchmark_pruned():
    net = load_pru_mod(after_finetune=True).double()
    net = nn.DataParallel(net)
    net = net.cuda()

    data_loader = nyu_set.use_nyu_data(batch_s=1,
                                       max_len=100,
                                       isBenchmark=True)
    writer1 = SummaryWriter('./gj_dir/benchmark_pru_mod')

    criterion = nn.MSELoss(reduction='mean').cuda()
    net.eval()

    num = 0
    for data, label in data_loader:
        num += 1

        images = data.double().cuda()  # the loop yields (data, label); 'images' was undefined
        label = label.double().cuda()

        # Reshape ...CHW -> XCHW
        shape = images.shape
        prediction_d = net(images)[0]  # index 0 is depth, index 1 is confidence

        out_shape = shape[:-3] + prediction_d.shape[-2:]
        prediction_d = prediction_d.reshape(out_shape)
        prediction_d = torch.exp(prediction_d)  # the net predicts log-depth
        depth = prediction_d.squeeze(-3)

        inv_depth = 1.0 / depth

        # Keep everything as tensors: nn.MSELoss cannot take a NumPy array,
        # and torch.sqrt cannot take a plain Python float.
        error = torch.sqrt(criterion(inv_depth, label) / 2)
        writer1.add_scalar('loss', error, global_step=num)
        writer1.add_images('pre', prediction_d, global_step=num)
        writer1.add_images('label', label, global_step=num)
        writer1.add_images('process', inv_depth, global_step=num)
        print("ok")
Code Example #3
File: benchmark.py  Project: iamreallyi9/g_d
def benchmark_pruned():
    net = load_t_net(file=True)
    #net = load_pru_mod(after_finetune=True).double()
    net = nn.DataParallel(net)
    net = net.cuda()

    data_loader = nyu_set.use_nyu_data(batch_s=4,
                                       max_len=100,
                                       isBenchmark=True)
    writer1 = SummaryWriter('./gj_dir/benchmark_t_mod')

    Joint = JointLoss(opt=None).double().cuda()
    criterion = nn.MSELoss(reduction='mean').cuda()
    net.eval()

    num = 0
    for data, label in data_loader:
        num += 1
        target = label2target(label)

        images = data.double().cuda()  # the loop yields (data, label); 'images' was undefined

        prediction_d = net(images)[0]  # index 0 is depth, index 1 is confidence

        e_rmse = Joint.compute_rmse_error(prediction_d, target)
        e_rel = Joint.compute_l1_rel_error(prediction_d, target)
        loss = criterion(prediction_d, target["depth_gt"])
        writer1.add_images('pre', prediction_d, global_step=num)

        writer1.add_scalar('rmse', e_rmse, global_step=num)
        writer1.add_scalar("rel", e_rel, global_step=num)
        writer1.add_scalar('loss', loss, global_step=num)

        writer1.add_images('label', label, global_step=num)

        print("ok")
Code Example #4
def train_pru_mod(epoch=100, batch=4, lr=0.001):

    #net = load_t_net().double()
    net = load_pru_mod(after_finetune=True).double()
    net = nn.DataParallel(net)
    net = net.cuda()

    train_Data = nyu_set.use_nyu_data(batch_s=batch,
                                      max_len=160,
                                      isBenchmark=False)
    writer1 = SummaryWriter('./gj_dir/train_pru_mod')

    criterion = nn.MSELoss(reduction='mean').cuda()
    Joint = JointLoss(opt=None).double().cuda()
    s_loss = ts_loss.SSIM().cuda()
    optimizer = optim.Adam(net.parameters(), lr=lr)

    net.train()
    import time
    num_epochs = epoch
    for epoch in range(num_epochs):  # the 'epoch' argument is rebound as the loop index
        time_start = time.time()
        batch_size = batch

        for i, data in enumerate(train_Data):
            images, depths = data
            # images = autograd.Variable(inputs.cuda(), requires_grad=False)
            images = images.double().cuda()
            depths = depths.double().cuda()

            # labels = labels.to(device).double()

            optimizer.zero_grad()
            # debug_img = transforms.ToPILImage()(images[0,:,:,:].float().cpu())
            # debug_img.save("debug.jpg")

            output_net = net(images)[0].double()  # index 0 is depth, index 1 is confidence

            # loss1 = 1 - s_loss.forward(output_s_features, T_mid_feature[0])
            # loss2 = criterion(output_s_depth,output_t)
            loss1 = criterion(output_net, depths)  # pixelwise MSE against ground truth
            loss2 = Joint.LaplacianSmoothnessLoss(output_net, images)
            loss3 = Joint.compute_image_aware_2nd_smoothness_cost(
                output_net, images)
            #loss4 = Joint.compute_image_aware_1st_smoothness_cost(output_net,images)
            loss4 = 1 - s_loss.forward(output_net, depths)  # SSIM dissimilarity
            loss = loss1 * 10 + loss2 + loss3 + loss4  # MSE term weighted 10x

            loss.backward()
            optimizer.step()

            print('[%d, %5d] loss: %.4f  A:%.4f  B:%.4f C:%.4f D:%.4f' %
                  (epoch + 1, (i + 1) * batch_size, loss.item(), loss1.item(),
                   loss2.item(), loss3.item(), loss4.item()))

            writer1.add_scalar('loss',
                               loss.item(),
                               global_step=(epoch + 1) * batch_size + i)
            writer1.add_scalar('loss2',
                               loss2.item(),
                               global_step=(epoch + 1) * batch_size + i)
        #debug_img = transforms.ToPILImage()(output_net)
        writer1.add_images('pre', output_net, global_step=epoch)

        dep = torch.exp(output_net)  # log-depth -> depth

        dep = dep.detach().cpu().numpy()
        inv_dep = 1.0 / dep * 255  # inverse depth, scaled for visualization

        writer1.add_images('pro-dep', inv_dep, global_step=epoch)

        writer1.add_images('labels', depths, global_step=epoch)

        torch.save(net.module, "./gj_dir/after_nyu.pth.tar")
        time_end = time.time()
        print('Time cost:', time_end - time_start, "s")

    writer1.close()
    print('Finished Training')
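
Since the training loop saves the entire net.module object rather than a state_dict, reloading is a single torch.load call, provided the model's class definition is importable at load time. A minimal usage sketch:

# Hedged sketch: reloading the pickled module saved above. Saving a
# state_dict and rebuilding the model is the more robust convention.
import torch

net = torch.load("./gj_dir/after_nyu.pth.tar")
net = net.cuda().eval()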
Code Example #5
File: benchmark_p2net.py  Project: iamreallyi9/g_d
def inference(args):
    if torch.cuda.is_available() and not args.no_cuda:
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")

    encoder, decoder, thisH, thisW = prepare_model_for_test(args, device)

    data_loader = nyu_set.use_nyu_data(batch_s=1,
                                       max_len=400,
                                       isBenchmark=True)
    writer1 = SummaryWriter('/data/consistent_depth/gj_dir/benchmark_pp')

    with torch.no_grad():
        num = 0
        su = 0
        for data, label in data_loader:
            num += 1
            label = label.cpu()
            input_image = transforms.ToPILImage()(data[0])
            original_width, original_height = input_image.size
            input_image = input_image.resize((thisW, thisH), pil.LANCZOS)  # PIL resize takes (width, height)
            input_image = transforms.ToTensor()(input_image).unsqueeze(0)

            input_image = input_image.to(device)
            outputs = decoder(encoder(input_image))

            disp = outputs[("disp", 0)]
            disp_resized = torch.nn.functional.interpolate(
                disp, (original_height, original_width),
                mode="bilinear",
                align_corners=False)
            #print(torch.max(disp_resized),torch.min(disp_resized))
            #disp_resized, _ = disp_to_depth(disp_resized, 0.1, 10)
            #print(torch.max(disp_resized),torch.min(disp_resized))
            #disp_resized = torch.div(1.0,disp_resized)
            disp_resized = torch.div(disp_resized, torch.max(disp_resized))
            disp_resized_np = disp_resized.squeeze().cpu().numpy()
            abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3 = compute_depth_errors(
                label[0], disp_resized.cpu())


            writer1.add_images('pre', disp_resized, global_step=num)

            writer1.add_scalar('rmse', rmse, global_step=num)
            writer1.add_scalar("abs_rel", abs_rel, global_step=num)
            writer1.add_scalar('sq_rel', sq_rel, global_step=num)
            writer1.add_scalar('rmse_log', rmse_log, global_step=num)
            writer1.add_scalar('a1', a1, global_step=num)
            writer1.add_scalar('a2', a2, global_step=num)
            writer1.add_scalar('a3', a3, global_step=num)

            writer1.add_images('label', label, global_step=num)
            su += a3.item()
            print(su / num)
        #scaled_disp, _ = disp_to_depth(disp, 0.1, 10)
        # Saving colormapped depth image
        #vmax = np.percentile(disp_resized_np, 95)
    writer1.close()
    print('-> Done!')
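
The commented-out disp_to_depth calls in examples #1 and #5 refer to Monodepth2's conversion helper, which maps the network's sigmoid disparity output to depth in [min_depth, max_depth]. For reference, its published form:

# disp_to_depth as published in the Monodepth2 repository.
def disp_to_depth(disp, min_depth, max_depth):
    min_disp = 1 / max_depth
    max_disp = 1 / min_depth
    scaled_disp = min_disp + (max_disp - min_disp) * disp
    depth = 1 / scaled_disp
    return scaled_disp, depth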
Code Example #6
File: prune_plus.py  Project: iamreallyi9/g_d
def train_pru_mod(epoch=100, batch=4, lr=0.001):

    #net = load_t_net().double()
    net = load_pru_mod(after_finetune=True).double()
    net = nn.DataParallel(net)
    net = net.cuda()

    train_Data = nyu_set.use_nyu_data(batch_s=batch,
                                      max_len=1008,
                                      isBenchmark=False)
    writer1 = SummaryWriter('./gj_dir/p_n_o')
    #criterion = nn.SmoothL1Loss(reduction='mean').cuda()
    #criterion = nn.MSELoss(reduction='mean').cuda()
    Joint = ts_loss.JointLoss(opt=None).double().cuda()
    s_loss = ts_loss.SSIM().cuda()
    optimizer = optim.Adam(net.parameters(), lr=lr)

    net.train()
    import time
    num_epochs = epoch
    for epoch in range(num_epochs):  # the 'epoch' argument is rebound as the loop index
        time_start = time.time()
        batch_size = batch

        for i, data in enumerate(train_Data):
            images, depths = data
            # images = autograd.Variable(inputs.cuda(), requires_grad=False)
            images = images.double().cuda()
            target = label2target(depths)

            #depths = Variable(depths).double().cuda()

            # labels = labels.to(device).double()

            optimizer.zero_grad()
            # debug_img = transforms.ToPILImage()(images[0,:,:,:].float().cpu())
            # debug_img.save("debug.jpg")

            output_net = net(images)[0].double()  # index 0 is depth, index 1 is confidence
            output_net = torch.div(1.0, torch.exp(output_net))  # log-depth -> inverse depth

            # loss1 = 1 - s_loss.forward(output_s_features, T_mid_feature[0])

            loss, loss1, loss2, loss3 = Joint(images, torch.log(output_net),
                                              target)

            #loss4 =1- s_loss.forward(output_net,depths)

            loss.backward()
            optimizer.step()

            print('[%d, %5d] loss: %.4f  A:%.4f  B:%.4f C:%.4f D:%.4f' %
                  (epoch + 1, (i + 1) * batch_size, loss.item(), loss1, loss2,
                   loss3, torch.min(output_net).item()))  # D is the minimum predicted inverse depth

        writer1.add_scalar('loss', loss.item(), global_step=(epoch + 1))
        writer1.add_scalar('loss1', loss1, global_step=(epoch + 1))
        writer1.add_scalar('loss2', loss2, global_step=(epoch + 1))
        writer1.add_scalar('loss3', loss3, global_step=(epoch + 1))
        #debug_img = transforms.ToPILImage()(output_net)
        writer1.add_images('pre', output_net, global_step=epoch)

        writer1.add_images('labels', depths, global_step=epoch)

        torch.save(net.module, "./gj_dir/p_n_o_i.pth.tar")
        time_end = time.time()
        print('Time cost:', time_end - time_start, "s")

    writer1.close()
    print('Finished Training')
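
One simplification worth noting in example #6: the loop computes torch.log(output_net) after output_net = 1/exp(raw), but log(1/exp(x)) is just -x, so the exp/log round trip (and its potential overflow for large x) can be skipped entirely. A self-contained check of the identity:

# Hedged sketch: the identity behind the simplification.
import torch

x = torch.randn(4, dtype=torch.float64)
a = torch.log(torch.div(1.0, torch.exp(x)))  # what the loop computes
b = -x                                       # equivalent and cheaper
print(torch.allclose(a, b))                  # True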