# Example #1
    def __init__(self):
        """Load pretrained hand-pose networks, wrap them in a FusionNet,
        and prepare camera/dataset utilities for single-frame evaluation.

        NOTE(review): heavy side effects — parses (empty) CLI args, loads
        model checkpoints and a PCA pickle from disk, and moves the fused
        network to the selected device. Relies on module-level names
        (`basedir`, `dpnet`, `FusionNet`, `datasetloader_UVR`) defined
        elsewhere in the file.
        """
        # Empty ArgumentParser: parse_args() yields a bare Namespace that
        # is populated by hand below.
        parser = argparse.ArgumentParser(
            description='PyTorch hand pose Training')
        args = parser.parse_args()
        '''user setting'''
        args.gpu_ids = '0'
        # Sub-networks to enable; only the 'hpe1_orig' and 'hpe2' branches
        # exist below ('hig'/'hpe1' are never constructed in this build).
        args.use_net = ['hpe1_orig', 'hpe2']  #hpe1_orig, hig, hpe1, hpe2
        forward_list = ['hpe1_orig', 'hpe2']  #hpe1_orig, hig_hpe1, hpe2

        # Windows-style relative checkpoint paths rooted at `basedir`
        # (defined outside this view).
        trained_modelFile_hpe1_orig = basedir + '..\\HPE\\output\\2020_2_26_11_29\\trainedModel_epoch99.pth.tar'
        trained_modelFile_hpe2 = basedir + '..\\HPE2\\output\\2020_4_10_18_46\\trainedModel_epoch99.pth.tar'
        '''common setting'''
        out_list = {}  # NOTE(review): never used in this method
        args.train_net = []
        args.cgan = True

        args.is_train = False
        args.gan_mode = 'vanilla'
        args.trainImageSize = 128
        trainImageSize = 128  # local mirror of args.trainImageSize
        args.skeleton_pca_dim = 52   # PCA-compressed skeleton dimension
        args.skeleton_orig_dim = 63  # presumably 21 joints x 3 coords — confirm
        args.discriminator_reconstruction = False
        args.test_batch = 1
        hpe_numBlocks = 5  # block count passed to dpnet — semantics defined in dpnet

        #device
        #device = torch.device(torch.device("cuda:%s"%args.gpu_ids if torch.cuda.is_available() else "cpu")
        # NOTE(review): hard-codes cuda:0 and ignores args.gpu_ids (the
        # commented-out line above shows the originally intended form).
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        args.device = device
        cudnn.benchmark = True

        #--HPE_orig net setting
        if 'hpe1_orig' in args.use_net:
            # NOTE(review): this locals() guard is always True here — no
            # earlier statement in this scope binds 'hpe1_orig'.
            if not 'hpe1_orig' in locals():
                hpe1_orig = dpnet(args.skeleton_pca_dim, hpe_numBlocks, device)
                # map_location keeps checkpoint tensors on CPU storage.
                checkpoint = torch.load(
                    trained_modelFile_hpe1_orig,
                    map_location=lambda storage, loc: storage)
                hpe1_orig.load_state_dict(checkpoint['state_dict'])

        #--HPE2 net setting
        if 'hpe2' in args.use_net:
            # NOTE(review): guard always True here, as above.
            if not 'hpe2' in locals():
                hpe2 = dpnet(args.skeleton_pca_dim, hpe_numBlocks, device)
                checkpoint = torch.load(
                    trained_modelFile_hpe2,
                    map_location=lambda storage, loc: storage)
                # The HPE2 checkpoint stores weights under a different key
                # than the 'orig' checkpoint above.
                hpe2.load_state_dict(checkpoint['state_dict_hpe2'])

        # PCA object relating the 52-dim pose space to the 63-dim joint
        # space — presumably fitted offline; verify the pickle's producer.
        with open(basedir + '..\\pca_52_by_957032_augmented_X2.pickle',
                  'rb') as f:
            self.pca = pickle.load(f)

        #--fusion net setting
        #fusionnet=FusionNet(hpe1_orig,pix2pix,hpe1,hpe2,None,args)
        # Assemble FusionNet's positional arguments; sub-nets that were
        # never created in this configuration (pix2pix, hpe1) become None.
        fl = []
        if 'hpe1_orig' in locals(): fl.append(hpe1_orig)
        else: fl.append(None)
        if 'pix2pix' in locals(): fl.append(pix2pix)
        else: fl.append(None)
        if 'hpe1' in locals(): fl.append(hpe1)
        else: fl.append(None)
        if 'hpe2' in locals(): fl.append(hpe2)
        else: fl.append(None)
        fusionnet = FusionNet(fl[0], fl[1], fl[2], fl[3], None, args)

        self.fusionnet = fusionnet.to(device)

        #--dataset of uvr (for utils)
        # Camera intrinsics (fx/fy/cx/cy, pixels) and crop settings for a
        # 640x480 depth camera — NOTE(review): presumably an SR300-class
        # sensor; confirm against the capture setup.
        camera_info = {}
        camera_info['fx'] = 475.065948
        camera_info['fy'] = 475.065857
        camera_info['cx'] = 315.944855
        camera_info['cy'] = 245.287079
        camera_info['camerawidth'] = 640
        camera_info['cameraheight'] = 480
        camera_info['cube'] = np.asarray([250, 250, 250])  # crop cube — units presumably mm; confirm
        camera_info['trainImageSize'] = 128

        self.trainImageSize = camera_info['trainImageSize']
        self.cube = camera_info['cube']
        camerawidth = camera_info['camerawidth']    # NOTE(review): unused
        cameraheight = camera_info['cameraheight']  # NOTE(review): unused

        jointnum = 21    # NOTE(review): unused in this method
        d_maximum = 500  # NOTE(review): unused in this method

        # Placeholder '..' arguments — the loader is used only for its
        # .utils helper here, not for actual dataset paths.
        self.datasetloader_uvr = datasetloader_UVR('..', 0, camera_info, '..',
                                                   '..')
        self.utils = self.datasetloader_uvr.utils

        #--start
        # Switch the fused network to eval mode and pre-allocate
        # single-frame (1, 1, H, W) float32 buffers for IR and depth input.
        self.fusionnet.set_mode('eval')
        self.ir_batch = np.zeros(
            (1, 1, args.trainImageSize, args.trainImageSize), dtype=np.float32)
        self.depth_batch = np.zeros(
            (1, 1, args.trainImageSize, args.trainImageSize), dtype=np.float32)
# Example #2
   
 # Fragment of a larger script: lazily build the HIG generator (pix2pix)
 # and the HPE net, then open the PCA pickle. `args`, `device`,
 # `trained_modelFile_hig`, `trained_modelFile_hpe`, `basedir`, and
 # `input_type` come from the enclosing (unseen) scope.
 if not 'pix2pix' in locals():
     pix2pix=Pix2pix(args)
     # NOTE(review): no map_location — tensors load onto the device the
     # checkpoint was saved from; this can fail on CPU-only machines.
     checkpoint=torch.load(trained_modelFile_hig)
     if args.gpu_ids=='all':
         # Checkpoint keys match netG directly in the multi-GPU case.
         pix2pix.netG.load_state_dict(checkpoint['state_dict_hig'])
     else:
         # get_state_dict presumably rewrites DataParallel 'module.'
         # key prefixes for single-GPU loading — verify its definition.
         checkpoint_dict=get_state_dict(checkpoint['state_dict_hig'])
         pix2pix.netG.load_state_dict(checkpoint_dict)
     pix2pix.netG.eval()


 ##--select model (HPE)--##
 if args.hpe_enabled==True:
     if not 'hpe' in locals():
         hpe=dpnet(args.skeleton_pca_dim,hpe_numBlocks,device)
         # NOTE(review): device is (re)derived AFTER dpnet was constructed
         # with the outer-scope `device` — confirm both agree.
         device = torch.device("cuda:%s"%args.gpu_ids if torch.cuda.is_available() else "cpu")
         hpe=hpe.to(device)

         cudnn.benchmark = True
         checkpoint=torch.load(trained_modelFile_hpe)
         hpe.load_state_dict(checkpoint['state_dict_hpe1'])
         hpe.eval()
         # Pre-allocated single-frame (1, 1, H, W) input buffer.
         img_hpe = np.zeros((1,1,trainImageSize,trainImageSize), dtype = np.float32)

     # PCA object relating the 52-dim pose space to joint coordinates.
     with open(basedir+'..\\pca_52_by_957032_augmented_X2.pickle','rb') as f:
         pca=pickle.load(f)

 ##--init realsense--##
 if input_type=='sr300':
     realsense=Realsense()
# Example #3
    # Fragment of a larger script: lazily build the pix2pix (HIG/HID) and
    # HPE networks. `args`, `device`, and the trained_modelFile_* paths
    # come from the enclosing (unseen) scope.
    if not 'pix2pix' in locals():
        pix2pix = Pix2pix(args)
        # map_location keeps checkpoint tensors on CPU storage.
        checkpoint = torch.load(trained_modelFile_hig,
                                map_location=lambda storage, loc: storage)

        if 'hig' in args.use_net:
            pix2pix.netG.load_state_dict(checkpoint['state_dict_hig'])

        if 'hid' in args.use_net:
            pix2pix.netD.load_state_dict(checkpoint['state_dict_hid'])

    #--HPE net setting
    # Choose the checkpoint by whether trained_modelFile_hpe1 was bound
    # upstream; the fallback 'orig' checkpoint uses a different state key.
    if 'trained_modelFile_hpe1' in locals():
        if not 'hpe1' in locals():
            hpe_numBlocks = 5
            hpe1 = dpnet(args.skeleton_pca_dim, hpe_numBlocks, device)
            checkpoint = torch.load(trained_modelFile_hpe1,
                                    map_location=lambda storage, loc: storage)
            hpe1.load_state_dict(checkpoint['state_dict_hpe1'])
    else:
        if not 'hpe1' in locals():
            hpe_numBlocks = 5
            hpe1 = dpnet(args.skeleton_pca_dim, hpe_numBlocks, device)
            checkpoint = torch.load(trained_modelFile_hpe1_orig,
                                    map_location=lambda storage, loc: storage)
            # The 'orig' checkpoint stores weights under plain 'state_dict'.
            hpe1.load_state_dict(checkpoint['state_dict'])

    #--HPE_orig net setting
    # NOTE(review): hpe1_orig is constructed but no checkpoint is loaded
    # within this view — loading may occur past the end of this fragment.
    if not 'hpe1_orig' in locals():
        hpe_numBlocks = 5
        hpe1_orig = dpnet(args.skeleton_pca_dim, hpe_numBlocks, device)
# Example #4
    # Fragment: smoke-test style setup — build a FusionNet from freshly
    # constructed sub-nets, attach a random reconstruction map, and feed
    # random IR/depth input. `args` (incl. gpu_ids, train_batch) comes from
    # the enclosing (unseen) scope.
    args.trainImageSize = 128
    args.skeleton_pca_dim = 52   # PCA-compressed skeleton dimension
    args.skeleton_orig_dim = 63  # full skeleton dimension — presumably 21 joints x 3; confirm

    args.cgan = True

    #device
    device = torch.device("cuda:%s" %
                          args.gpu_ids if torch.cuda.is_available() else "cpu")
    args.device = device

    #network

    pix2pix = Pix2pix(args)

    hpe1 = dpnet(52, 5, device)
    hpe2 = dpnet(52, 5, device)
    # NOTE(review): 4-argument FusionNet call here vs. the 6-argument form
    # used in Example #1 — the signatures disagree; confirm which revision
    # of FusionNet this fragment targets.
    net = FusionNet(pix2pix, hpe1, hpe2, args)

    # Random linear map standing in for the PCA reconstruction
    # (w: orig_dim x pca_dim, b: orig_dim).
    w = np.random.rand(args.skeleton_orig_dim, args.skeleton_pca_dim)
    b = np.random.rand(args.skeleton_orig_dim)
    net.set_reconstruction_net(w, b)

    net = net.to(device)

    #run
    # Random (train_batch, 1, 128, 128) IR and depth arrays as dummy input.
    ir = np.random.rand(args.train_batch, 1, 128, 128)
    depth = np.random.rand(args.train_batch, 1, 128, 128)

    net.set_input(ir, depth)