Code example #1 (votes: 0)
File: train_hpe2.py — Project: sebaram/ir-hand
                    get_vr20data = True
                else:
                    get_noblurdata = True
            else:
                get_noblurdata = True

            # Load one (depth, IR) image pair from whichever training split
            # the flags above selected: blurred, VR20, or plain train set.
            # NOTE(review): get_blurdata is assigned before this excerpt —
            # confirm against the full file.
            if get_blurdata == True:
                img_depth, img_ir = datasetloader_uvr['train_blur'].load_data()
            elif get_vr20data == True:
                img_depth, img_ir = datasetloader_uvr['train_vr20'].load_data()
            elif get_noblurdata == True:
                img_depth, img_ir = datasetloader_uvr['train'].load_data()

            # Feed the pair to the fusion network and run one HPE2
            # optimization step (inline timings are the original author's).
            fusionnet.set_input(img_ir, img_depth)  #0.0005 sec
            fusionnet.optimize_parameters_hpe2()  #0.25

            # When 'hpd' is among the requested training targets, also run
            # 10 extra optimization steps on batches drawn from the ICVL
            # generator (presumably the BigHand/ICVL pose branch — confirm).
            if 'hpd' in args.train_net:
                for j in range(10):
                    img_icvl, hpose_icvl, _, _ = next(generator_train_icvl)
                    fusionnet.set_input_icvl(img_icvl, hpose_icvl)
                    fusionnet.optimize_parameters_bighand()

            # Compute the HPE2 loss terms and accumulate them locally for
            # progress reporting.
            fusionnet.calculateloss_hpe2()  #0.08 sec
            loss_ = fusionnet.getloss(loss_names)
            progress_train.append_local(loss_)

            #print progress
Code example #2 (votes: 0)
File: runDemo_dataset.py — Project: sebaram/ir-hand
        #--preprocess depth/ir
        # Load the precomputed training pair for this frame. The .npy file
        # stores depth and IR side-by-side along axis 1:
        # columns [0, trainImageSize) = depth,
        # columns [trainImageSize, 2*trainImageSize) = IR.
        train_imgs = np.load(load_filepath_preprocess + '%d.npy' % frame)
        depth_train = np.copy(train_imgs[:, 0:trainImageSize])
        ir_train = np.copy(train_imgs[:, trainImageSize:2 * trainImageSize])
        # Center-of-mass and 6-value crop window recorded for this frame.
        com = dataset_load['com'][frame]
        window = dataset_load['window'][frame]

        # Crop the original depth image using the stored window.
        # NOTE(review): depth_crop is not used within this excerpt —
        # presumably consumed further down the file; confirm.
        depth_crop = datasetloader_uvr.utils.crop(depth_orig, window[0],
                                                  window[1], window[2],
                                                  window[3], window[4],
                                                  window[5])

        # set input: place the single image pair into batch slot [0, 0].
        ir_batch[0, 0, :, :] = ir_train.copy()
        depth_batch[0, 0, :, :] = depth_train.copy()
        fusionnet.set_input(ir_batch, depth_batch)

        #forward: run the three network branches on the current input.
        #TIME=time.time()
        fusionnet.forward_('hpe1_orig')
        fusionnet.forward_('hig_hpe1')
        fusionnet.forward_('hpe2')
        #fusionnet.forward_('hpe1_blur')
        #print(time.time()-TIME)

        #reconstruct: unproject the 2D center of mass to 3D, then rebuild
        # joint coordinates (on CPU) from two of the branch outputs.
        com3d = utils.unproject2Dto3D(com)
        out1 = fusionnet.reconstruct_joints(pca, com3d, cube, 'hpe1_orig',
                                            'tocpu')
        out2 = fusionnet.reconstruct_joints(pca, com3d, cube, 'hig_hpe1',
                                            'tocpu')
Code example #3 (votes: 0)
                else:
                    get_icvldata=True
                '''
            elif traindataNum_vr20 == 0:
                # No VR20 data configured: alternate each iteration between
                # the UVR and ICVL datasets.
                if i % 2 == 0:
                    get_uvrdata = True
                else:
                    get_icvldata = True

            #input: fetch a batch from the selected dataset and hand it to
            # the fusion network (ICVL supplies image + pose labels; the
            # UVR/VR20 loaders supply depth + IR pairs).
            if get_icvldata == True:
                img_icvl, hpose_icvl, _, _ = next(generator_train_icvl)
                fusionnet.set_input_icvl(img_icvl, hpose_icvl)
            elif get_uvrdata == True:
                img_depth, img_ir = datasetloader_uvr['train'].load_data()
                fusionnet.set_input(img_ir, img_depth)
            else:
                img_depth, img_ir = datasetloader_uvr['train_vr20'].load_data()
                fusionnet.set_input(img_ir, img_depth)

            #optimize: each data source has its own optimization routine.
            if get_icvldata == True:
                fusionnet.optimize_parameters_hpe1_bighand()
            else:
                fusionnet.optimize_parameters_hpe1_depthIR()

            #forward and backward: compute the loss matching the data source.
            if get_icvldata == True:
                fusionnet.calculateloss_hpe1_bighand()
            else:
                fusionnet.calculateloss_hpe1_depthIR()