Code Example #1
def train(args, loader, models, criterion, optimizers, log, epoch, recorder):
    # models[0]: first-stage network, kept frozen; models[1]: second-stage network being trained
    models[1].train()
    models[0].eval()
    optimizer, optimizer_c = optimizers  # only `optimizer` is stepped in this loop
    log.printWrite('---- Start Training Epoch %d: %d batches ----' %
                   (epoch, len(loader)))
    timer = time_utils.Timer(args.time_sync)
    for i, sample in enumerate(loader):
        data = model_utils.parseData(args, sample, timer, 'train')
        input = model_utils.getInput(args, data)
        with torch.no_grad():
            pred_c = models[0](input)  # first-stage prediction, no gradients needed
        input.append(pred_c)
        s2_est_obMp = True  # presumably: stage 2 also estimates an observation map at a sampled location
        if s2_est_obMp:
            start_loc, end_loc = 32, 96
            random_loc = torch.randint(start_loc, end_loc, [2, 1])
            input.append(random_loc)
            data['ob_map_real'] = model_utils.parseData_stage2(
                args, sample, random_loc, 'train')

        pred = models[1](input)
        timer.updateTime('Forward')
        input.pop()  # remove the appended random_loc from the input list
        optimizer.zero_grad()

        loss = criterion.forward(pred, data, random_loc, s2_est_obMp)
        timer.updateTime('Crit')
        criterion.backward()
        timer.updateTime('Backward')

        recorder.updateIter('train', loss.keys(), loss.values())

        optimizer.step()
        timer.updateTime('Solver')

        iters = i + 1
        if iters % args.train_disp == 0:
            opt = {
                'split': 'train',
                'epoch': epoch,
                'iters': iters,
                'batch': len(loader),
                'timer': timer,
                'recorder': recorder
            }
            log.printItersSummary(opt)

        if iters % args.train_save == 0:
            results, recorder, nrow = prepareSave(args, data, pred_c, pred,
                                                  random_loc, recorder, log)
            log.saveImgResults(results, 'train', epoch, iters, nrow=nrow)
            log.plotCurves(recorder,
                           'train',
                           epoch=epoch,
                           intv=args.train_disp)

        if args.max_train_iter > 0 and iters >= args.max_train_iter: break
    opt = {'split': 'train', 'epoch': epoch, 'recorder': recorder}
    log.printEpochSummary(opt)
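
The `s2_est_obMp` branch above extends the stage-2 input with a randomly drawn pixel location before the forward pass. A small, self-contained illustration of what that sampling produces; the 32..96 range is copied from the code, while reading it as the central region of a 128x128 grid is only an assumption:

import torch

start_loc, end_loc = 32, 96
random_loc = torch.randint(start_loc, end_loc, [2, 1])  # shape (2, 1): one x and one y coordinate
print(random_loc.squeeze(1).tolist())                   # e.g. [57, 83]; values differ on every call
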
Code Example #2
def testOnBm(args, split, loader, model, log, epoch, recorder):
    model.eval()
    log.printWrite('---- Start %s Epoch %d: %d batches ----' % (split, epoch, len(loader)))
    timer = time_utils.Timer(args.time_sync)

    disp_intv, save_intv, stop_iters = get_itervals(args, split)
    res = []
    with torch.no_grad():
        for i, sample in enumerate(loader):
            #print("\nIn for:", sample['mask'].shape)
            #print("sample :", sample.keys())
            input = model_utils.parseData(args, sample, timer, split)
            #input is a list
            pred = model(input); timer.updateTime('Forward')

            #recoder, iter_res, error = prepareRes(args, input, pred, recorder, log, split)
            #print("data['img'].shape:", data['img'].shape)
            #res.append(iter_res)
            iters = i + 1
            # if iters % disp_intv == 0:
            #     opt = {'split':split, 'epoch':epoch, 'iters':iters, 'batch':len(loader), 
            #             'timer':timer, 'recorder': recorder}
            #     log.printItersSummary(opt)

            if iters % save_intv == 0:
                results, nrow = prepareSave(args, input, pred, recorder, log)
                log.saveImgResults(results, split, epoch, iters, nrow=nrow, error='')
                #log.saveMatResults(pred['normal'], data['normal'], pred_c['dirs'], data['dirs'], split, epoch, iters, nrow=nrow, error='')
                #log.plotCurves(recorder, split, epoch=epoch, intv=disp_intv)

            if stop_iters > 0 and iters >= stop_iters: break
Code Example #3
File: test_stage1.py  Project: zombicore/SDPS-Net
def test(args, split, loader, model, log, epoch, recorder):
    model.eval()
    log.printWrite('---- Start %s Epoch %d: %d batches ----' % (split, epoch, len(loader)))
    timer = time_utils.Timer(args.time_sync)

    disp_intv, save_intv, stop_iters = get_itervals(args, split)
    res = []
    with torch.no_grad():
        for i, sample in enumerate(loader):
            data = model_utils.parseData(args, sample, timer, split)
            input = model_utils.getInput(args, data)

            pred = model(input)
            timer.updateTime('Forward')

            recorder, iter_res, error = prepareRes(args, data, pred, recorder, log, split)

            res.append(iter_res)
            iters = i + 1
            if iters % disp_intv == 0:
                opt = {'split':split, 'epoch':epoch, 'iters':iters, 'batch':len(loader), 
                        'timer':timer, 'recorder': recorder}
                log.printItersSummary(opt)

            if iters % save_intv == 0:
                results, nrow = prepareSave(args, data, pred)
                log.saveImgResults(results, split, epoch, iters, nrow=nrow, error=error)
                log.plotCurves(recorder, split, epoch=epoch, intv=disp_intv)

            if stop_iters > 0 and iters >= stop_iters: break
    res = np.vstack([np.array(res), np.array(res).mean(0)])
    save_name = '%s_res.txt' % (args.suffix)
    np.savetxt(os.path.join(args.log_dir, split, save_name), res, fmt='%.2f')
    opt = {'split': split, 'epoch': epoch, 'recorder': recorder}
    log.printEpochSummary(opt)
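
The `%s_res.txt` table written at the end holds one row of errors per iteration plus a final row with the column-wise mean. A tiny self-contained illustration of that `np.vstack` pattern, with made-up numbers:

import numpy as np

res = [[12.3, 4.5], [10.1, 3.9]]  # per-iteration error rows (made-up values)
table = np.vstack([np.array(res), np.array(res).mean(0)])  # appends the mean as the last row
np.savetxt('example_res.txt', table, fmt='%.2f')
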
Code Example #4
File: train_stage1.py  Project: zombicore/SDPS-Net
def train(args, loader, model, criterion, optimizer, log, epoch, recorder):
    model.train()
    log.printWrite('---- Start Training Epoch %d: %d batches ----' % (epoch, len(loader)))
    timer = time_utils.Timer(args.time_sync)

    for i, sample in enumerate(loader):
        data = model_utils.parseData(args, sample, timer, 'train')
        input = model_utils.getInput(args, data)

        pred = model(input)
        timer.updateTime('Forward')

        optimizer.zero_grad()
        loss = criterion.forward(pred, data)
        timer.updateTime('Crit')
        criterion.backward()
        timer.updateTime('Backward')

        recorder.updateIter('train', loss.keys(), loss.values())

        optimizer.step()
        timer.updateTime('Solver')

        iters = i + 1
        if iters % args.train_disp == 0:
            opt = {'split':'train', 'epoch':epoch, 'iters':iters, 'batch':len(loader), 
                    'timer':timer, 'recorder': recorder}
            log.printItersSummary(opt)

        if iters % args.train_save == 0:
            results, recorder, nrow = prepareSave(args, data, pred, recorder, log) 
            log.saveImgResults(results, 'train', epoch, iters, nrow=nrow)
            log.plotCurves(recorder, 'train', epoch=epoch, intv=args.train_disp)

        if args.max_train_iter > 0 and iters >= args.max_train_iter: break
    opt = {'split': 'train', 'epoch': epoch, 'recorder': recorder}
    log.printEpochSummary(opt)
Code Example #5
def train(args, loader, model, criterion, optimizer, log, epoch, recorder,
          tf_writer):
    model.train()
    print('---- Start Training Epoch %d: %d batches ----' %
          (epoch, len(loader)))
    timer = time_utils.Timer(args.time_sync)

    for i, sample in enumerate(loader):

        # hack to reduce training time
        if args.iterations != 0 and i >= args.iterations: break

        data = model_utils.parseData(args, sample, timer, 'train')
        input = model_utils.getInput(args, data)

        out_var = model(input)
        timer.updateTime('Forward')

        optimizer.zero_grad()
        loss = criterion.forward(out_var, data['tar'])
        timer.updateTime('Crit')
        criterion.backward()
        timer.updateTime('Backward')

        recorder.updateIter('train', loss.keys(), loss.values())

        optimizer.step()
        timer.updateTime('Solver')

        iters = i + 1
        if iters % args.train_disp == 0:
            opt = {
                'split': 'train',
                'epoch': epoch,
                'iters': iters,
                'batch': len(loader),
                'timer': timer,
                'recorder': recorder
            }
            log.printItersSummary(opt)

            for tag, value in loss.items():
                tfboard.tensorboard_scalar(tf_writer, tag, value, iters)

    opt = {'split': 'train', 'epoch': epoch, 'recorder': recorder}
    log.printEpochSummary(opt)
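
Example #5 forwards each loss term to TensorBoard through a `tfboard.tensorboard_scalar` helper whose implementation is not shown on this page. A minimal sketch of what such a wrapper could look like, assuming `tf_writer` is a `torch.utils.tensorboard.SummaryWriter`; this is a guess at the helper, not the project's actual code:

from torch.utils.tensorboard import SummaryWriter


def tensorboard_scalar(writer: SummaryWriter, tag: str, value: float, step: int) -> None:
    # Hypothetical implementation: log one scalar under `tag` at the given global step.
    writer.add_scalar(tag, value, global_step=step)
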
Code Example #6
def test(args, split, loader, model, log, epoch, recorder):
    model.eval()
    log.printWrite('---- Start %s Epoch %d: %d batches ----' % (split, epoch, len(loader)))
    timer = time_utils.Timer(args.time_sync)

    disp_intv, save_intv, stop_iters = get_itervals(args, split)
    res = []
    with torch.no_grad():
        for i, sample in enumerate(loader):
            #print("\nIn for:", sample['mask'].shape)
            #print("sample :", sample.keys())
            input = model_utils.parseData(args, sample, timer, split)
            #input is a list
            pred = model(input); timer.updateTime('Forward')

            recoder, iter_res, error = prepareRes(args, input, pred, recorder, log, split)
            #print("data['img'].shape:", data['img'].shape)
            res.append(iter_res)
            iters = i + 1
            if iters % disp_intv == 0:
                opt = {'split':split, 'epoch':epoch, 'iters':iters, 'batch':len(loader), 
                        'timer':timer, 'recorder': recorder}
                log.printItersSummary(opt)

            if iters % save_intv == 0:
                results, nrow = prepareSave(args, input, pred, recorder, log)
                log.saveImgResults(results, split, epoch, iters, nrow=nrow, error='')
                #log.saveMatResults(pred['normal'], data['normal'], pred_c['dirs'], data['dirs'], split, epoch, iters, nrow=nrow, error='')
                log.plotCurves(recorder, split, epoch=epoch, intv=disp_intv)

            if stop_iters > 0 and iters >= stop_iters: break
    res = np.vstack([np.array(res), np.array(res).mean(0)])
    save_name = '%s_res.txt' % (args.suffix)
    np.savetxt(os.path.join(args.log_dir, split, save_name), res, fmt='%.2f')
    if res.ndim > 1:
        for i in range(res.shape[1]):
            save_name = '%s_%d_res.txt' % (args.suffix, i)
            np.savetxt(os.path.join(args.log_dir, split, save_name), res[:,i], fmt='%.3f')

    opt = {'split': split, 'epoch': epoch, 'recorder': recorder}
    log.printEpochSummary(opt)
Code Example #7
File: test_utils.py  Project: rkripa/PS-FCN
def test(args, split, loader, model, log, epoch, recorder, tf_writer):
    model.eval()
    print('---- Start %s Epoch %d: %d batches ----' %
          (split, epoch, len(loader)))
    timer = time_utils.Timer(args.time_sync)

    disp_intv, save_intv = get_itervals(args, split)
    with torch.no_grad():
        for i, sample in enumerate(loader):
            data = model_utils.parseData(args, sample, timer, split)
            input = model_utils.getInput(args, data)

            out_var = model(input)
            timer.updateTime('Forward')
            acc = eval_utils.calNormalAcc(data['tar'].data, out_var.data,
                                          data['m'].data)
            recorder.updateIter(split, acc.keys(), acc.values())

            iters = i + 1
            if iters % disp_intv == 0:
                opt = {
                    'split': split,
                    'epoch': epoch,
                    'iters': iters,
                    'batch': len(loader),
                    'timer': timer,
                    'recorder': recorder
                }
                log.printItersSummary(opt)
                for tag, value in acc.items():
                    tfboard.tensorboard_scalar(tf_writer, tag, value, iters)

            if iters % save_intv == 0:
                pred = (out_var.data + 1) / 2  # map normals from [-1, 1] to [0, 1] for saving as an image
                masked_pred = pred * data['m'].data.expand_as(out_var.data)
                log.saveNormalResults(masked_pred, split, epoch, iters)

    opt = {'split': split, 'epoch': epoch, 'recorder': recorder}
    log.printEpochSummary(opt)
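
Example #7 measures accuracy with `eval_utils.calNormalAcc`, whose body is not included here. For reference, a minimal sketch of the usual masked mean-angular-error computation for normal maps; this is my own formulation, assuming (B, 3, H, W) unit normals and a (B, 1, H, W) mask, not the project's implementation:

import torch


def masked_angular_error(gt_n, pred_n, mask):
    # Cosine of the per-pixel angle between the two (assumed unit-length) normal maps.
    cos = (gt_n * pred_n).sum(dim=1, keepdim=True).clamp(-1.0, 1.0)
    err_deg = torch.rad2deg(torch.acos(cos))  # per-pixel angular error in degrees
    valid = mask > 0.5                        # evaluate only inside the object mask
    return err_deg[valid].mean()
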
Code Example #8
File: test_stage2.py  Project: q-zh/photometric
def test(args, split, loader, models, log, epoch, recorder):
    models[0].eval()
    models[1].eval()
    log.printWrite('---- Start %s Epoch %d: %d batches ----' %
                   (split, epoch, len(loader)))
    timer = time_utils.Timer(args.time_sync)

    disp_intv, save_intv, stop_iters = get_itervals(args, split)
    res = []
    with torch.no_grad():
        for i, sample in enumerate(loader):
            data = model_utils.parseData(args, sample, timer, split)
            input = model_utils.getInput(args, data)

            pred_c = models[0](input)
            timer.updateTime('Forward')
            input.append(pred_c)

            s2_est_obMp = True  # presumably: stage 2 also estimates an observation map at a sampled location
            if s2_est_obMp:
                start_loc, end_loc = 32, 96
                random_loc = torch.randint(start_loc, end_loc, [2, 1])
                input.append(random_loc)
                data['ob_map_real'] = model_utils.parseData_stage2(
                    args, sample, random_loc, 'train')  # note: the split is hard-coded to 'train' here

            pred = models[1](input)
            timer.updateTime('Forward')
            input.pop()
            recorder, iter_res, error = prepareRes(args, data, pred_c, pred,
                                                   random_loc, recorder, log,
                                                   split)

            res.append(iter_res)
            iters = i + 1
            if iters % disp_intv == 0:
                opt = {
                    'split': split,
                    'epoch': epoch,
                    'iters': iters,
                    'batch': len(loader),
                    'timer': timer,
                    'recorder': recorder
                }
                log.printItersSummary(opt)

            if iters % save_intv == 0:
                results, nrow = prepareSave(args, data, pred_c, pred,
                                            random_loc)
                log.saveImgResults(results,
                                   split,
                                   epoch,
                                   iters,
                                   nrow=nrow,
                                   error='')
                log.plotCurves(recorder, split, epoch=epoch, intv=disp_intv)

            if stop_iters > 0 and iters >= stop_iters: break
    res = np.vstack([np.array(res), np.array(res).mean(0)])
    save_name = '%s_res.txt' % (args.suffix)
    np.savetxt(os.path.join(args.log_dir, split, save_name), res, fmt='%.2f')
    if res.ndim > 1:
        for i in range(res.shape[1]):
            save_name = '%s_%d_res.txt' % (args.suffix, i)
            np.savetxt(os.path.join(args.log_dir, split, save_name),
                       res[:, i],
                       fmt='%.3f')

    opt = {'split': split, 'epoch': epoch, 'recorder': recorder}
    log.printEpochSummary(opt)
Code Example #9
def test(args, split, loader, models, log, epoch, recorder):
    models[0].eval()
    models[1].eval()
    log.printWrite('---- Start %s Epoch %d: %d batches ----' %
                   (split, epoch, len(loader)))
    timer = time_utils.Timer(args.time_sync)

    disp_intv, save_intv, stop_iters = get_itervals(args, split)
    res = []
    with torch.no_grad():
        for i, sample in enumerate(loader):
            data = model_utils.parseData(args, sample, timer, split)
            input = model_utils.getInput(args, data)

            pred_c = models[0](input)
            timer.updateTime('Forward')
            input.append(pred_c)

            normals_gt = data['n']
            # accumulation buffer for the stitched normal map; the observation-map
            # buffers below are only referenced by the commented-out code further down
            normals_fake = torch.zeros(1, 3, 128, 128)
            ob_map_sparse = torch.zeros(1, 1, 4096, 4096)
            ob_map_dense = torch.zeros(1, 1, 4096, 4096)
            ob_map_gt = torch.zeros(1, 1, 4096, 4096)
            if args.cuda:
                normals_fake = normals_fake.cuda()
                ob_map_sparse = ob_map_sparse.cuda()
                ob_map_dense = ob_map_dense.cuda()
                ob_map_gt = ob_map_gt.cuda()

            visited_times = torch.zeros(128, 128)
            stride = 6
            # mask_ind_set = set(mask_ind)
            print('Object: ' + sample['obj'][0])
            # print(data['n'].shape)
            # print(data['img'].shape)
            with torch.no_grad():  # redundant: already inside the outer no_grad block
                # slide a window over the 128x128 image; keep only the central
                # 12x12 crop of each predicted patch and accumulate it
                for x in range(0, 128 - 15, stride):
                    for y in range(0, 128 - 15, stride):
                        print((x, y))
                        random_loc = torch.tensor([x + 8, y + 8])  # patch centre
                        input.append(random_loc)
                        pred = models[1](input)
                        input.pop()
                        normals_fake[0, :, x + 2:x + 14,
                                     y + 2:y + 14] += pred['n'][0, :, 2:14,
                                                                2:14]
                        visited_times[x + 2:x + 14, y + 2:y + 14] += 1
            timer.updateTime('Forward')
            # with a 12-pixel crop and stride 6, most pixels are covered by 4 overlapping
            # windows, hence the fixed /4; visited_times holds the exact per-pixel count
            # but is not used here
            normals_fake = normals_fake / 4

            # stride = 16
            # with torch.no_grad():
            #     for x in range(0, 128 - 15, stride):
            #         for y in range(0, 128 - 15, stride):
            #             print ((x,y))
            #             random_loc = torch.tensor([x + 8, y + 8])
            #             input.append(random_loc)
            #             # pred = models[1](input);
            #             pred = models[1](input);
            #             # ob_map_gt[:,:,x * 32: (x+16)*32,y*32:(y+16)*32] = model_utils.parseData_stage2(args, sample, random_loc, split)
            #             input.pop();
            #             ob_map_sparse[:,:,x * 32: (x+16)*32,y*32:(y+16)*32] += pred['ob_map_sparse']
            #             ob_map_dense[:,:,x * 32: (x+16)*32,y*32:(y+16)*32] += pred['ob_map_dense']

            # width = 32;
            # dirs = data['dirs']
            # x= 0.5*(dirs[:,0]+1)*(width-1);
            # x=torch.round(x).clamp(0, width - 1).long();
            # y= 0.5*(dirs[:,1]+1)*(width-1);
            # y=torch.round(y).clamp(0, width - 1).long();
            # idx_x = torch.split(x, 1, 0)
            # idx_y = torch.split(y, 1, 0)
            # stride = 16
            # print (x)
            # print (y)
            # if not os.path.exists(os.path.join(args.log_dir, 'test') + '/' + str(args.test_set) + '/' + sample['obj'][0]):
            #     os.mkdir(os.path.join(args.log_dir, 'test') + '/' + str(args.test_set) + '/' + sample['obj'][0])
            # d = os.path.join(args.log_dir, 'test') + '/' + str(args.test_set) + '/{}/mapind2.txt'
            # f = open(d.format(sample['obj'][0]), 'w')
            # dirs_x = torch.split(pred_c['dirs_x'], 1, 0)
            # dirs_y = torch.split(pred_c['dirs_y'], 1, 0)
            # for i in range(len(dirs_x)):
            #     _, x_idx = dirs_x[i].data.max(1)
            #     _, y_idx = dirs_y[i].data.max(1)
            #     f.write(str(x_idx.item() + 32 * y_idx.item() + 1) + '\n')
            # f.close()
            # with torch.no_grad():
            #     for x in range(0, 128 - 15, stride):
            #         for y in range(0, 128 - 15, stride):
            #             print ((x,y))
            #             random_loc = torch.tensor([x + 8, y + 8])
            #             input.append(random_loc)
            #             # pred = models[1](input);
            #             pred = models[1](input, idx_x, idx_y);
            #             input.pop();
            #             normals_fake[:,:,x:x + 16, y:y + 16] += pred['n']
            # #             # visited_times[x + 2:x + 14, y + 2:y + 14] += 1
            # timer.updateTime('Forward')
            # normals_fake = normals_fake / 4

            delta = angular_deviation(normals_fake, normals_gt)
            # normalfilepath =  os.path.join(args.log_dir, 'test') + '/' + str(args.test_set) + '/Images/' + sample['obj'][0] + '.mat'
            # normal_output = np.zeros([128, 128, 3])
            # normal_output[:, :, 0] = normals_fake[0, 0, :, :].cpu().numpy()
            # normal_output[:, :, 1] = normals_fake[0, 1, :, :].cpu().numpy()
            # normal_output[:, :, 2] = normals_fake[0, 2, :, :].cpu().numpy()
            # normal_gt = np.zeros([128, 128, 3])
            # normal_gt[:, :, 0] = normals_gt[0, 0, :, :].cpu().numpy()
            # normal_gt[:, :, 1] = normals_gt[0, 1, :, :].cpu().numpy()
            # normal_gt[:, :, 2] = normals_gt[0, 2, :, :].cpu().numpy()

            # ob_map_sparse_output = np.zeros([4096,4096])
            # ob_map_dense_output = np.zeros([4096,4096])
            # ob_map_sparse_output = ob_map_sparse[0,0,:,:].cpu().numpy()
            # ob_map_dense_output = ob_map_dense[0,0,:,:].cpu().numpy()
            # io.savemat(normalfilepath, {'normal_fake': normal_output, 'normal_gt': normal_gt, 'ob_map_sparse':ob_map_sparse_output[2048:2048+512,2048:2048+512],'ob_map_dense':ob_map_dense_output[2048:2048+512,2048:2048+512]})

            Dall = []
            for j in range(len(delta)):
                Dall.append(delta[j].unsqueeze(0))
            D_final = torch.cat(Dall, dim=0)
            d = os.path.join(args.log_dir, 'test') + '/' + str(
                args.test_set) + '/{}.txt'
            f = open(d.format(sample['obj'][0]), 'w')
            f.write(str(torch.mean(D_final).item()) + '\n')  # .item() so the file holds a plain number, not a tensor repr
            f.close()
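
The `angular_deviation(normals_fake, normals_gt)` call above relies on a helper that the snippet does not define. Judging from how `delta` is stacked and averaged afterwards, it plausibly returns per-pixel angles; a hypothetical sketch under that assumption, not the repository's code:

import torch


def angular_deviation(pred_n, gt_n, eps=1e-8):
    # Hypothetical helper: per-pixel angle (degrees) between two (B, 3, H, W) normal maps.
    pred_u = pred_n / (pred_n.norm(dim=1, keepdim=True) + eps)  # normalize to unit length
    gt_u = gt_n / (gt_n.norm(dim=1, keepdim=True) + eps)
    cos = (pred_u * gt_u).sum(dim=1).clamp(-1.0, 1.0)           # shape (B, H, W)
    return torch.rad2deg(torch.acos(cos))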