Example #1
    def _build_net(self):
        if self.net_name == "psmnet" or self.net_name == "ganet":
            self.net = build_net(self.net_name)(self.maxdisp)
        else:
            self.net = build_net(self.net_name)(batchNorm=False, lastRelu=True, maxdisp=self.maxdisp)

        self.is_pretrain = False

        if self.ngpu > 1:
            self.net = torch.nn.DataParallel(self.net, device_ids=self.devices).cuda()
        else:
            self.net.cuda()

        if self.pretrain == '':
            logger.info('Initializing a new model...')
        else:
            if os.path.isfile(self.pretrain):
                model_data = torch.load(self.pretrain)
                logger.info('Loading pretrained model: %s', self.pretrain)
                if 'state_dict' in model_data:
                    self.net.load_state_dict(model_data['state_dict'])
                else:
                    self.net.load_state_dict(model_data)
                self.is_pretrain = True
            else:
                logger.warning('Cannot find the specified model %s, initializing a new model...', self.pretrain)
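
Note that the checkpoint produced by this class depends on `ngpu`: with `ngpu > 1` the weights are saved through a `torch.nn.DataParallel` wrapper, so every key gains a `module.` prefix, and loading such a checkpoint into a bare model (or vice versa) fails with missing-key errors. Example #11 below calls a helper named `load_model_trained_with_DP` for exactly this case; its implementation is not shown on this page, but a minimal sketch under that assumption could look like this:

import torch
from collections import OrderedDict

def load_model_trained_with_DP(net, state_dict):
    # Reconcile the 'module.' prefix that torch.nn.DataParallel adds to
    # parameter names with whatever the target model expects.
    is_wrapped = isinstance(net, torch.nn.DataParallel)
    fixed = OrderedDict()
    for k, v in state_dict.items():
        has_prefix = k.startswith('module.')
        if is_wrapped and not has_prefix:
            fixed['module.' + k] = v            # bare checkpoint -> wrapped model
        elif not is_wrapped and has_prefix:
            fixed[k[len('module.'):]] = v       # wrapped checkpoint -> bare model
        else:
            fixed[k] = v
    net.load_state_dict(fixed)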
Example #2
def build_net_gating(s_tf, g_tf, w_dim, reuse):
    input_tfs = [s_tf, g_tf]
    h = NetBuilder.build_net(Gating_Net_Name, input_tfs, reuse)
    w_tf = tf.layers.dense(inputs=h,
                           units=w_dim,
                           activation=tf.nn.sigmoid,
                           kernel_initializer=TFUtilExtend.xavier_initializer)
    return w_tf
Example #3
def _build_net_gating(self, net_name, num_primitives, init_output_scale):
    input_tfs = []
    norm_s_tf = self.s_norm.normalize_tf(self.s_tf)
    input_tfs += [norm_s_tf]
    if self.has_goal():
        norm_g_tf = self.g_norm.normalize_tf(self.g_tf)
        input_tfs += [norm_g_tf]
    h = NetBuilder.build_net(net_name, input_tfs)
    w_tf = tf.layers.dense(inputs=h, units=num_primitives, activation=tf.nn.sigmoid,
                           kernel_initializer=TFUtil.xavier_initializer)
    return w_tf
Example #4
def _build_net_gating(self, net_name, s_tf, g_tf, w_dim, reuse):
    input_tfs = [s_tf, g_tf]
    h = NetBuilder.build_net(net_name, input_tfs, reuse)
    with tf.variable_scope('output', reuse=reuse):
        w_tf = tf.layers.dense(
            inputs=h,
            units=w_dim,
            activation=tf.nn.sigmoid,
            kernel_initializer=TFUtilExtend.xavier_initializer,
            reuse=reuse)
    return w_tf
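
Examples #2 and #4 are near-identical; the difference is that #4 threads a `reuse` flag through both `NetBuilder.build_net` and the output layer, which is the usual TF1 idiom for sharing one set of gating weights between two graph branches (e.g. a training and an evaluation path). A hedged usage sketch, where `agent`, `net_name`, `s_train_tf`, `g_train_tf`, `s_eval_tf`, `g_eval_tf`, and `w_dim` are placeholder names, not taken from these examples:

import tensorflow as tf

with tf.variable_scope('gating'):
    # first call with reuse=False creates the variables
    w_train = agent._build_net_gating(net_name, s_train_tf, g_train_tf,
                                      w_dim, reuse=False)
with tf.variable_scope('gating'):
    # second call with reuse=True shares the same weights
    w_eval = agent._build_net_gating(net_name, s_eval_tf, g_eval_tf,
                                     w_dim, reuse=True)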
Example #5
    def _build_net(self):

        # build net according to the net name
        if self.net_name == "psmnet":
            self.net = build_net(self.net_name)(self.maxdisp)
        elif self.net_name in ["normnets"]:
            self.net = build_net(self.net_name)()
        else:
            self.net = build_net(self.net_name)(batchNorm=False,
                                                lastRelu=True,
                                                maxdisp=self.maxdisp)

        if self.net_name in ['dnfusionnet', 'dtonnet']:
            self.net.set_focal_length(self.focal_length[0],
                                      self.focal_length[1])

        self.is_pretrain = False

        if self.ngpu >= 1:
            self.net = torch.nn.DataParallel(self.net,
                                             device_ids=self.devices).cuda()
        else:
            self.net.cuda()

        if self.pretrain == '':
            logger.info('Initializing a new model...')
        else:
            if os.path.isfile(self.pretrain):
                model_data = torch.load(self.pretrain)
                logger.info('Loading pretrained model: %s', self.pretrain)
                if 'state_dict' in model_data:
                    self.net.load_state_dict(model_data['state_dict'])
                elif 'model' in model_data:
                    self.net.load_state_dict(model_data['model'])
                else:
                    self.net.load_state_dict(model_data)
                self.is_pretrain = True
            else:
                logger.warning(
                    'Cannot find the specified model %s, initializing a new model...',
                    self.pretrain)
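
A related portability note: `torch.load` restores tensors to the devices they were saved from, which can fail on a machine with fewer GPUs than the training machine. Loading to CPU first side-steps this; a one-line variant of the call above (the model is moved to GPU separately, as this class already does):

model_data = torch.load(self.pretrain, map_location='cpu')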
Example #6
def _build_net_discriminator(self, net_name, w_tf, g_dim, reuse):
    h = NetBuilder.build_net(net_name, w_tf, reuse)
    with tf.variable_scope('output_1', reuse=reuse):
        D_tf = tf.layers.dense(
            inputs=h,
            units=1,
            activation=None,
            kernel_initializer=TFUtilExtend.xavier_initializer,
            reuse=reuse)
    with tf.variable_scope('output_2', reuse=reuse):
        D_v = tf.layers.dense(
            inputs=h,
            units=g_dim,
            activation=None,
            kernel_initializer=TFUtilExtend.xavier_initializer,
            reuse=reuse)
    return D_tf, h, D_v
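
The discriminator head returns raw logits (`activation=None` on both dense layers), so the sigmoid is expected to live inside the loss. A minimal sketch of the usual pairing, assuming a standard GAN-style objective that these examples do not show:

import tensorflow as tf

# D_real_tf and D_fake_tf: logits from two _build_net_discriminator calls
real_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    labels=tf.ones_like(D_real_tf), logits=D_real_tf))
fake_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    labels=tf.zeros_like(D_fake_tf), logits=D_fake_tf))
disc_loss = real_loss + fake_loss  # sigmoid applied inside the loss for stability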
Example #7
def _build_net_primitives(self, net_name, num_primitives, init_output_scale):
    input_tfs = []
    norm_s_tf = self.s_norm.normalize_tf(self.s_tf)
    input_tfs += [norm_s_tf]
    h = NetBuilder.build_net(net_name, input_tfs)
    a_dim = self.get_action_size()
    batch_size = tf.shape(norm_s_tf)[0]
    primitives_mean_tf = []
    primitives_std_tf = []
    for i in range(0, num_primitives):
        h2 = tf.layers.dense(inputs=h, units=256, activation=tf.nn.relu,
                             name='primitive_%d_dense' % i,
                             kernel_initializer=TFUtil.xavier_initializer)
        norm_mean_tf = tf.layers.dense(
            inputs=h2, units=a_dim, activation=None,
            name='primitive_%d_dense_mean' % i,
            kernel_initializer=tf.random_uniform_initializer(
                minval=-init_output_scale, maxval=init_output_scale))
        norm_std_tf = self.exp_params_curr.noise * tf.ones([batch_size, a_dim])
        primitives_mean_tf.append(norm_mean_tf)
        primitives_std_tf.append(norm_std_tf)
    return primitives_mean_tf, primitives_std_tf
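
Examples #3 and #7 are the two halves of a mixture-of-primitives policy: the gating head emits `num_primitives` sigmoid weights and the primitives head emits one Gaussian (mean and std) per primitive. The composition step is not shown on this page; a simple weight-normalized blend of the primitive means is one plausible choice, sketched below purely as an assumption:

import tensorflow as tf

def compose_primitives(w_tf, primitives_mean_tf):
    # w_tf: [batch, num_primitives] sigmoid gating weights
    # primitives_mean_tf: list of num_primitives tensors of shape [batch, a_dim]
    w_norm = w_tf / (tf.reduce_sum(w_tf, axis=-1, keepdims=True) + 1e-8)
    means = tf.stack(primitives_mean_tf, axis=1)   # [batch, num_primitives, a_dim]
    return tf.reduce_sum(tf.expand_dims(w_norm, -1) * means, axis=1)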
Example #8
def detect(opt):
    model = opt.model
    result_path = opt.rp
    file_list = opt.filelist
    filepath = opt.filepath
    
    if not os.path.exists(result_path):
        os.makedirs(result_path)

    devices = [int(item) for item in opt.devices.split(',')]
    ngpu = len(devices)
    #net = DispNetC(ngpu, True)
    #net = DispNetCSRes(ngpu, False, True)
    #net = DispNetCSResWithMono(ngpu, False, True, input_channel=3)

    if opt.net == "psmnet" or opt.net == "ganet":
        net = build_net(opt.net)(maxdisp=192)
    elif opt.net == "dispnetc":
        net = build_net(opt.net)(batchNorm=False, lastRelu=True, resBlock=False)
    else:
        net = build_net(opt.net)(batchNorm=False, lastRelu=True)
 
    net = torch.nn.DataParallel(net, device_ids=devices).cuda()

    model_data = torch.load(model)
    print(model_data.keys())
    if 'state_dict' in model_data.keys():
        net.load_state_dict(model_data['state_dict'])
    else:
        net.load_state_dict(model_data)

    num_of_parameters = count_parameters(net)
    print('Model: %s, # of parameters: %d' % (opt.net, num_of_parameters))

    net.eval()

    batch_size = int(opt.batchSize)
    test_dataset = DispDataset(txt_file=file_list, root_dir=filepath, phase='detect')
    test_loader = DataLoader(test_dataset, batch_size=batch_size,
                             shuffle=False, num_workers=1,
                             pin_memory=True)

    s = time.time()
    #high_res_EPE = multiscaleloss(scales=1, downscale=1, weights=(1), loss='L1', sparse=False)

    avg_time = []
    display = 100
    warmup = 10
    for i, sample_batched in enumerate(test_loader):
        input = torch.cat((sample_batched['img_left'], sample_batched['img_right']), 1)
        # print('input Shape: {}'.format(input.size()))
        num_of_samples = input.size(0)
        target = sample_batched['gt_disp']

        #print('disp Shape: {}'.format(target.size()))
        #original_size = (1, target.size()[2], target.size()[3])

        target = target.cuda()
        input = input.cuda()
        # Variable(..., volatile=True) is deprecated; disable autograd
        # with torch.no_grad() around the forward pass instead.
        input_var = input
        target_var = target

        if i > warmup:
            ss = time.time()
        with torch.no_grad():
            if opt.net == "psmnet" or opt.net == "ganet":
                output = net(input_var)
            elif opt.net == "dispnetc":
                output = net(input_var)[0]
            else:
                output = net(input_var)[-1]
 
        if i > warmup:
            avg_time.append((time.time() - ss))
            if (i - warmup) % display == 0:
                print('Average inference time: %f' % np.mean(avg_time))
                mbytes = 1024.*1024
                print('GPU memory usage memory_allocated: %d MBytes, max_memory_allocated: %d MBytes, memory_cached: %d MBytes, max_memory_cached: %d MBytes, CPU memory usage: %d MBytes' %  \
                    (ct.memory_allocated()/mbytes, ct.max_memory_allocated()/mbytes, ct.memory_cached()/mbytes, ct.max_memory_cached()/mbytes, process.memory_info().rss/mbytes))
                avg_time = []

        # output = net(input_var)[1]
        output[output > 192] = 0
        output = scale_disp(output, (output.size()[0], 540, 960))
        for j in range(num_of_samples):
            # scale back depth
            np_depth = output[j][0].data.cpu().numpy()
            gt_depth = target_var[j, 0, :, :].data.cpu().numpy()
            #print(np.min(np_depth), np.max(np_depth))
            #cuda_depth = torch.from_numpy(np_depth).cuda()
            #cuda_depth = torch.autograd.Variable(cuda_depth, volatile=True)

            # flow2_EPE = high_res_EPE(output[j], target_var[j]) * 1.0
            #flow2_EPE = high_res_EPE(cuda_depth, target_var[j]) * 1.0
            #print('Shape: {}'.format(output[j].size()))
            print('Batch[{}]: {}, average disp: {}'.format(i, j, np.mean(np_depth)))
            #print('Batch[{}]: {}, Flow2_EPE: {}'.format(i, sample_batched['img_names'][0][j], flow2_EPE.data.cpu().numpy()))

            name_items = sample_batched['img_names'][0][j].split('/')
            #save_name = '_'.join(name_items).replace('.png', '.pfm')# for girl02 dataset
            #save_name = 'predict_{}_{}_{}.pfm'.format(name_items[-4], name_items[-3], name_items[-1].split('.')[0])
            #save_name = 'predict_{}_{}.pfm'.format(name_items[-1].split('.')[0], name_items[-1].split('.')[1])
            #save_name = 'predict_{}.pfm'.format(name_items[-1])
            #img = np.flip(np_depth[0], axis=0)

            save_name = '_'.join(name_items)# for girl02 dataset
            img = np_depth
            print('Name: {}'.format(save_name))
            print('')
            #save_pfm('{}/{}'.format(result_path, save_name), img)
            skimage.io.imsave(os.path.join(result_path, save_name),(img*256).astype('uint16'))
            
            save_name = '_'.join(name_items).replace(".png", "_gt.png")# for girl02 dataset
            img = gt_depth
            print('Name: {}'.format(save_name))
            print('')
            #save_pfm('{}/{}'.format(result_path, save_name), img)
            skimage.io.imsave(os.path.join(result_path, save_name),(img*256).astype('uint16'))


    print('Evaluation time used: {}'.format(time.time()-s))
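
The writer above follows the KITTI convention of storing disparity (or depth) as a 16-bit PNG with values multiplied by 256, so any consumer has to undo that scaling. A minimal reader sketch:

import numpy as np
import skimage.io

def load_disp_png(path):
    # inverse of skimage.io.imsave(path, (disp * 256).astype('uint16'))
    return skimage.io.imread(path).astype(np.float32) / 256.0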
Example #9
def detect(opt):

    net_name = opt.net
    model = opt.model
    result_path = opt.rp
    file_list = opt.filelist
    filepath = opt.filepath

    if not os.path.exists(result_path):
        os.makedirs(result_path)

    devices = [int(item) for item in opt.devices.split(',')]
    ngpu = len(devices)

    # build net according to the net name
    if net_name == "psmnet" or net_name == "ganet":
        net = build_net(net_name)(192)
    elif net_name in ["fadnet", "dispnetc"]:
        net = build_net(net_name)(batchNorm=False, lastRelu=True)
    else:
        # guard against `net` being unbound for an unsupported name
        raise ValueError('unsupported net name: %s' % net_name)

    net = torch.nn.DataParallel(net, device_ids=devices).cuda()

    model_data = torch.load(model)
    print(model_data.keys())
    if 'state_dict' in model_data.keys():
        net.load_state_dict(model_data['state_dict'])
    else:
        net.load_state_dict(model_data)

    num_of_parameters = count_parameters(net)
    print('Model: %s, # of parameters: %d' % (net_name, num_of_parameters))

    net.eval()

    batch_size = int(opt.batchSize)
    test_dataset = StereoDataset(txt_file=file_list,
                                 root_dir=filepath,
                                 phase='detect')
    test_loader = DataLoader(test_dataset, batch_size=batch_size,
                             shuffle=False, num_workers=1,
                             pin_memory=True)

    s = time.time()

    avg_time = []
    display = 50
    warmup = 10
    for i, sample_batched in enumerate(test_loader):
        #if i > 215:
        #    break

        input = torch.cat(
            (sample_batched['img_left'], sample_batched['img_right']), 1)

        # print('input Shape: {}'.format(input.size()))
        num_of_samples = input.size(0)

        #output, input_var = detect_batch(net, sample_batched, opt.net, (540, 960))

        input = input.cuda()
        # Variable(..., volatile=True) is deprecated; the torch.no_grad()
        # block below already disables autograd for the forward pass.
        input_var = input

        if i > warmup:
            ss = time.time()

        with torch.no_grad():
            if opt.net == "psmnet" or opt.net == "ganet":
                output = net(input_var)
                output = output.unsqueeze(1)
            elif opt.net == "dispnetc":
                output = net(input_var)[0]
            else:
                output = net(input_var)[-1]

        if i > warmup:
            avg_time.append((time.time() - ss))
            if (i - warmup) % display == 0:
                print('Average inference time: %f' % np.mean(avg_time))
                mbytes = 1024. * 1024
                print('GPU memory usage memory_allocated: %d MBytes, max_memory_allocated: %d MBytes, memory_cached: %d MBytes, max_memory_cached: %d MBytes, CPU memory usage: %d MBytes' %  \
                    (ct.memory_allocated()/mbytes, ct.max_memory_allocated()/mbytes, ct.memory_cached()/mbytes, ct.max_memory_cached()/mbytes, process.memory_info().rss/mbytes))
                avg_time = []

        output = scale_disp(output, (output.size()[0], 540, 960))
        disp = output[:, 0, :, :]

        for j in range(num_of_samples):

            name_items = sample_batched['img_names'][0][j].split('/')
            # write disparity to file
            output_disp = disp[j]
            np_disp = disp[j].data.cpu().numpy()

            print('Batch[{}]: {}, average disp: {}({}-{}).'.format(
                i, j, np.mean(np_disp), np.min(np_disp), np.max(np_disp)))
            save_name = '_'.join(name_items).replace(
                ".png", "_d.png")  # for girl02 dataset
            print('Name: {}'.format(save_name))

            skimage.io.imsave(os.path.join(result_path, save_name),
                              (np_disp * 256).astype('uint16'))

            #save_name = '_'.join(name_items).replace("png", "pfm")# for girl02 dataset
            #print('Name: {}'.format(save_name))
            #np_disp = np.flip(np_disp, axis=0)
            #save_pfm('{}/{}'.format(result_path, save_name), np_disp)

    print('Evaluation time used: {}'.format(time.time() - s))
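
`scale_disp` comes from the surrounding repository and is not shown here. The key property it must have is that resizing a disparity map also rescales the disparity *values* by the width ratio, since disparity is measured in pixels of the original image. A hedged sketch of such a helper (the real implementation may crop rather than interpolate):

import torch.nn.functional as F

def scale_disp(disp, out_size):
    # disp: [batch, 1, h, w] network output; out_size: (batch, out_h, out_w)
    _, _, h, w = disp.shape
    out_h, out_w = out_size[1], out_size[2]
    scaled = F.interpolate(disp, size=(out_h, out_w),
                           mode='bilinear', align_corners=False)
    return scaled * (out_w / float(w))  # disparity values scale with width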
Example #10
def detect(opt):

    net_name = opt.net
    model = opt.model
    result_path = opt.rp
    file_list = opt.filelist
    filepath = opt.filepath

    if not os.path.exists(result_path):
        os.makedirs(result_path)

    devices = [int(item) for item in opt.devices.split(',')]
    ngpu = len(devices)

    # build net according to the net name
    if net_name in ["dispnetcres", "dispnetc"]:
        net = build_net(net_name)(batchNorm=False, lastRelu=True)
    else:
        net = build_net(net_name)(batchNorm=False, lastRelu=True)
        net.set_focal_length(1050.0, 1050.0)
    net = torch.nn.DataParallel(net, device_ids=devices).cuda()
    #net.cuda()

    model_data = torch.load(model)
    print(model_data.keys())
    if 'state_dict' in model_data.keys():
        net.load_state_dict(model_data['state_dict'])
    else:
        net.load_state_dict(model_data)

    num_of_parameters = count_parameters(net)
    print('Model: %s, # of parameters: %d' % (net_name, num_of_parameters))

    net.eval()

    batch_size = int(opt.batchSize)
    #test_dataset = StereoDataset(txt_file=file_list, root_dir=filepath, phase='detect')
    test_dataset = SceneFlowDataset(txt_file=file_list,
                                    root_dir=filepath,
                                    phase='detect')
    test_loader = DataLoader(test_dataset, batch_size=batch_size,
                             shuffle=False, num_workers=1,
                             pin_memory=True)

    s = time.time()
    #high_res_EPE = multiscaleloss(scales=1, downscale=1, weights=(1), loss='L1', sparse=False)

    avg_time = []
    display = 100
    warmup = 10
    for i, sample_batched in enumerate(test_loader):
        input = torch.cat(
            (sample_batched['img_left'], sample_batched['img_right']), 1)
        if opt.disp_on:
            target_disp = sample_batched['gt_disp']
            target_disp = target_disp.cuda()
        if opt.norm_on:
            target_norm = sample_batched['gt_norm']
            target_norm = target_norm.cuda()

        # print('input Shape: {}'.format(input.size()))
        num_of_samples = input.size(0)

        #output, input_var = detect_batch(net, sample_batched, opt.net, (540, 960))

        input = input.cuda()
        # Variable(..., volatile=True) is deprecated; disable autograd
        # with torch.no_grad() around the forward pass instead.
        input_var = input

        if i > warmup:
            ss = time.time()
        with torch.no_grad():
            if opt.net == "psmnet" or opt.net == "ganet":
                output = net(input_var)
            elif opt.net == "dispnetc":
                output = net(input_var)[0]
            elif opt.net in ["dispnormnet", "dtonnet", "dnfusionnet"]:
                output = net(input_var)
                disp = output[0]
                normal = output[1]
                output = torch.cat((normal, disp), 1)
            else:
                output = net(input_var)[-1]

        if i > warmup:
            avg_time.append((time.time() - ss))
            if (i - warmup) % display == 0:
                print('Average inference time: %f' % np.mean(avg_time))
                mbytes = 1024. * 1024
                print('GPU memory usage memory_allocated: %d MBytes, max_memory_allocated: %d MBytes, memory_cached: %d MBytes, max_memory_cached: %d MBytes, CPU memory usage: %d MBytes' %  \
                    (ct.memory_allocated()/mbytes, ct.max_memory_allocated()/mbytes, ct.memory_cached()/mbytes, ct.max_memory_cached()/mbytes, process.memory_info().rss/mbytes))
                avg_time = []

        # output = net(input_var)[1]
        if opt.disp_on and not opt.norm_on:
            output = scale_disp(output, (output.size()[0], 540, 960))
            disp = output[:, 0, :, :]
        elif opt.disp_on and opt.norm_on:
            output = scale_norm(output, (output.size()[0], 4, 540, 960))
            disp = output[:, 3, :, :]
            normal = output[:, :3, :, :]
        print('disp shape:', disp.shape)

        for j in range(num_of_samples):

            name_items = sample_batched['img_names'][0][j].split('/')
            # write disparity to file
            if opt.disp_on:
                output_disp = disp[j]
                _target_disp = target_disp[j, 0]
                target_valid = _target_disp < 192
                print('target size', _target_disp.size())
                print('output size', output_disp.size())
                # size_average is deprecated; reduction='mean' is the equivalent
                epe = F.smooth_l1_loss(output_disp[target_valid],
                                       _target_disp[target_valid],
                                       reduction='mean')
                print('EPE: {}'.format(epe))

                np_disp = disp[j].data.cpu().numpy()

                print('Batch[{}]: {}, average disp: {}({}-{}).'.format(
                    i, j, np.mean(np_disp), np.min(np_disp), np.max(np_disp)))
                save_name = '_'.join(name_items).replace(".png", "_d.png")
                print('Name: {}'.format(save_name))

                skimage.io.imsave(os.path.join(result_path, save_name),
                                  (np_disp * 256).astype('uint16'))
            #save_name = '_'.join(name_items).replace(".png", "_d.pfm")
            #print('Name: {}'.format(save_name))
            #np_disp = np.flip(np_disp, axis=0)
            #save_pfm('{}/{}'.format(result_path, save_name), np_disp)

            if opt.norm_on:
                normal[j] = (normal[j] + 1.0) * 0.5
                #np_normal = normal[j].data.cpu().numpy().transpose([1, 2, 0])
                np_normal = normal[j].data.cpu().numpy()
                #save_name = '_'.join(name_items).replace('.png', '_n.png')
                save_name = '_'.join(name_items).replace('.png', '_n.exr')
                print('Name: {}'.format(save_name))
                #skimage.io.imsave(os.path.join(result_path, save_name),(normal*256).astype('uint16'))
                #save_pfm('{}/{}'.format(result_path, save_name), img)
                save_exr(np_normal, '{}/{}'.format(result_path, save_name))

            print('')

            #save_name = '_'.join(name_items).replace(".png", "_left.png")
            #img = input_var[0].detach().cpu().numpy()[:3,:,:]
            #img = np.transpose(img, (1, 2, 0))
            #print('Name: {}'.format(save_name))
            #print('')
            ##save_pfm('{}/{}'.format(result_path, save_name), img)
            #skimage.io.imsave(os.path.join(result_path, save_name),img)

    print('Evaluation time used: {}'.format(time.time() - s))
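
Here the normal branch is remapped from [-1, 1] into [0, 1] and written as EXR through the repo's `save_exr`, which is not shown. If a quick 8-bit preview is enough, the same remapped tensor can be written as an ordinary PNG; a small sketch (the [3, H, W] channel layout matches the code above):

import numpy as np
import skimage.io

def save_normal_png(np_normal, path):
    # np_normal: [3, H, W], already mapped from [-1, 1] into [0, 1]
    img = np.transpose(np_normal, (1, 2, 0))  # to [H, W, 3]
    skimage.io.imsave(path, (img * 255.0).clip(0.0, 255.0).astype(np.uint8))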
Example #11
def detect(opt):

    net_name = opt.net
    model = opt.model
    result_path = opt.rp
    file_list = opt.filelist
    filepath = opt.filepath

    if not os.path.exists(result_path):
        os.makedirs(result_path)

    devices = [int(item) for item in opt.devices.split(',')]
    ngpu = len(devices)

    # build net according to the net name
    if net_name == "psmnet" or net_name == "ganet":
        net = build_net(net_name)(192)
    elif net_name in ["fadnet", "dispnetc", "mobilefadnet", "slightfadnet"]:
        net = build_net(net_name)(batchNorm=False, lastRelu=True)
    #elif net_name in ["mobilefadnet", "slightfadnet"]:
    #    #B, max_disp, H, W = (wopt.batchSize, 40, 72, 120)
    #    shape = (opt.batchSize, 40, 72, 120) #TODO: Should consider how to dynamically use
    #    warp_size = (opt.batchSize, 3, 576, 960)
    #    net = build_net(net_name)(batchNorm=False, lastRelu=True, input_img_shape=shape, warp_size=warp_size)

    if ngpu > 1:
        net = torch.nn.DataParallel(net, device_ids=devices)

    model_data = torch.load(model)
    print(model_data.keys())
    if 'state_dict' in model_data.keys():
        #net.load_state_dict(model_data['state_dict'])
        load_model_trained_with_DP(net, model_data['state_dict'])
    else:
        net.load_state_dict(model_data)

    num_of_parameters = count_parameters(net)
    print('Model: %s, # of parameters: %d' % (net_name, num_of_parameters))

    batch_size = int(opt.batchSize)
    test_dataset = StereoDataset(txt_file=file_list,
                                 root_dir=filepath,
                                 phase='detect')
    test_loader = DataLoader(test_dataset, batch_size=batch_size,
                             shuffle=False, num_workers=1,
                             pin_memory=True)

    net.eval()
    #net.dispnetc.eval()
    #net.dispnetres.eval()
    net = net.cuda()

    #for i, sample_batched in enumerate(test_loader):
    #    input = torch.cat((sample_batched['img_left'], sample_batched['img_right']), 1)
    #    num_of_samples = input.size(0)
    #    input = input.cuda()
    #    x = input
    #    break

    net_trt = trt_transform(net)

    torch.save(net_trt.state_dict(), 'models/mobilefadnet_trt.pth')

    s = time.time()

    avg_time = []
    display = 50
    warmup = 2
    for i, sample_batched in enumerate(test_loader):
        #if i > 215:
        #    break
        stime = time.time()

        input = torch.cat(
            (sample_batched['img_left'], sample_batched['img_right']), 1)
        print('input Shape: {}'.format(input.size()))
        num_of_samples = input.size(0)
        input = input.cuda()
        break

    iterations = 14 + warmup
    #iterations = len(test_loader) - warmup
    #for i, sample_batched in enumerate(test_loader):
    for i in range(iterations):
        stime = time.time()

        input = torch.cat(
            (sample_batched['img_left'], sample_batched['img_right']), 1)
        print('input Shape: {}'.format(input.size()))
        num_of_samples = input.size(0)
        input = input.cuda()

        input_var = input  # autograd is already disabled below via torch.no_grad()
        iotime = time.time()
        print('[{}] IO time:{}'.format(i, iotime - stime))

        if i == warmup:
            ss = time.time()

        with torch.no_grad():
            if opt.net == "psmnet" or opt.net == "ganet":
                output = net_trt(input_var)
                output = output.unsqueeze(1)
            elif opt.net == "dispnetc":
                output = net_trt(input_var)[0]
            else:
                output = net_trt(input_var)[-1]
        itime = time.time()
        print('[{}] Inference time:{}'.format(i, itime - iotime))

        if i > warmup:
            avg_time.append((time.time() - ss))
            if (i - warmup) % display == 0:
                print('Average inference time: %f' % np.mean(avg_time))
                mbytes = 1024. * 1024
                print('GPU memory usage memory_allocated: %d MBytes, max_memory_allocated: %d MBytes, memory_cached: %d MBytes, max_memory_cached: %d MBytes, CPU memory usage: %d MBytes' %  \
                    (ct.memory_allocated()/mbytes, ct.max_memory_allocated()/mbytes, ct.memory_cached()/mbytes, ct.max_memory_cached()/mbytes, process.memory_info().rss/mbytes))
                avg_time = []

        print('[%d] output shape:' % i, output.size())
        #output = scale_disp(output, (output.size()[0], 540, 960))
        #disp = output[:, 0, :, :]
        ptime = time.time()
        print('[{}] Post-processing time:{}'.format(i, ptime - itime))

        #for j in range(num_of_samples):

        #    name_items = sample_batched['img_names'][0][j].split('/')
        #    # write disparity to file
        #    output_disp = disp[j]
        #    np_disp = disp[j].float().cpu().numpy()

        #    print('Batch[{}]: {}, average disp: {}({}-{}).'.format(i, j, np.mean(np_disp), np.min(np_disp), np.max(np_disp)))
        #    save_name = '_'.join(name_items).replace(".png", "_d.png")# for girl02 dataset
        #    print('Name: {}'.format(save_name))
        #    skimage.io.imsave(os.path.join(result_path, save_name),(np_disp*256).astype('uint16'))
        print('Current batch time used:: {}'.format(time.time() - stime))

        #save_name = '_'.join(name_items).replace("png", "pfm")# for girl02 dataset
        #print('Name: {}'.format(save_name))
        #np_disp = np.flip(np_disp, axis=0)
        #save_pfm('{}/{}'.format(result_path, save_name), np_disp)

    print('Evaluation time used: {}, avg iter: {}'.format(
        time.time() - ss, (time.time() - ss) / iterations))
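
`trt_transform` is also repo-local and not shown; judging by the saved `mobilefadnet_trt.pth`, it presumably wraps a TensorRT conversion such as NVIDIA's torch2trt. A sketch of an equivalent conversion under that assumption (6 input channels because the left and right images are concatenated; the 576x960 resolution is taken from the commented-out `warp_size` above):

import torch
from torch2trt import torch2trt  # assumes NVIDIA's torch2trt package

def trt_transform(net, batch_size=1, height=576, width=960):
    # build the TensorRT engine by tracing one representative forward pass
    example = torch.ones((batch_size, 6, height, width)).cuda()
    return torch2trt(net, [example], fp16_mode=True)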