# Example #1 (score: 0)
 def loadRefineNetLazy(self, net):
     """Lazily build and load the refinement network from a model file.

     If *net* is a path string, construct a ScaleNet with the canned
     common-refinement configuration and load its weights from that file;
     any non-string argument is silently ignored.

     :param net: path to a pickled network file (string) or anything else
     :raises EnvironmentError: if *net* is a string but no such file exists
     """
     if not isinstance(net, basestring):
         return
     if not os.path.exists(net):
         raise EnvironmentError("File not found: {}".format(net))
     # Import here so the heavy network code is only pulled in when needed.
     from net.scalenet import ScaleNet, ScaleNetParams
     comrefNetParams = ScaleNetParams(type=5, nChan=1, wIn=128, hIn=128,
                                      batchSize=1, resizeFactor=2,
                                      numJoints=1, nDims=3)
     # Fixed RNG seed keeps the (pre-load) initialization reproducible.
     self.refineNet = ScaleNet(np.random.RandomState(23455),
                               cfgParams=comrefNetParams)
     self.refineNet.load(net)
# Example #2 (score: 0)
    # Finish the center-crop window (dsize and xstart are computed above,
    # outside this view).
    xend = xstart + dsize[0]
    # NOTE(review): ystart is derived from shape[3] (the width, per
    # imgSizeW below) but is applied to axis 2 (the height) in the slice
    # below -- only safe when the images are square. Confirm.
    ystart = int(train_data.shape[3]/2-dsize[1]/2)
    yend = ystart + dsize[1]
    # Center crop of the test set; slicing is [sample, channel, y, x].
    test_data4 = test_data[:, :, ystart:yend, xstart:xend]

    # Sanity printout of label/data value ranges (Python 2 print statements).
    print train_gt3D.max(), test_gt3D.max(), train_gt3D.min(), test_gt3D.min()
    print train_data.max(), test_data.max(), train_data.min(), test_data.min()

    # Data layout per these reads: (batch, channels, height, width).
    imgSizeW = train_data.shape[3]
    imgSizeH = train_data.shape[2]
    nChannels = train_data.shape[1]

    #############################################################################
    print("create network")
    batchSize = 64
    # Scale network predicting a single 3D joint (numJoints=1, nDims=3).
    poseNetParams = ScaleNetParams(type=1, nChan=nChannels, wIn=imgSizeW, hIn=imgSizeH, batchSize=batchSize,
                                   resizeFactor=2, numJoints=1, nDims=3)
    poseNet = ScaleNet(rng, cfgParams=poseNetParams)

    # Trainer hyper-parameters.
    poseNetTrainerParams = ScaleNetTrainerParams()
    poseNetTrainerParams.use_early_stopping = False
    poseNetTrainerParams.batch_size = batchSize
    poseNetTrainerParams.learning_rate = 0.0005
    poseNetTrainerParams.weightreg_factor = 0.0001  # weight-regularization strength
    poseNetTrainerParams.force_macrobatch_reload = True
    # Run pose augmentation in parallel; the HandDetector is seeded with the
    # first training image and the importer's focal lengths (di.fx/di.fy).
    poseNetTrainerParams.para_augment = True
    poseNetTrainerParams.augment_fun_params = {'fun': 'augment_poses', 'args': {'normZeroOne': False,
                                                                                'di': di,
                                                                                'aug_modes': aug_modes,
                                                                                'hd': HandDetector(train_data[0, 0].copy(), abs(di.fx), abs(di.fy), importer=di)}}

    print("setup trainer")
                                  nChan=1,
                                  wIn=128,
                                  hIn=128,
                                  batchSize=1,
                                  numJoints=14,
                                  nDims=3)
 # Pose regression network (params built above this view); fixed RNG seed
 # keeps the pre-load initialization reproducible.
 poseNet = PoseRegNet(numpy.random.RandomState(23455),
                      cfgParams=poseNetParams)
 # Load pretrained NYU weights.
 poseNet.load("./NYU_network_prior.pkl")
 # Alternative ICVL hand-center network, kept for reference:
 # comrefNetParams = ScaleNetParams(type=1, nChan=1, wIn=128, hIn=128, batchSize=1, resizeFactor=2, numJoints=1, nDims=3)
 # comrefNet = ScaleNet(numpy.random.RandomState(23455), cfgParams=comrefNetParams)
 # comrefNet.load("./net_ICVL_COM.pkl")
 # "comref" net: single 3D joint output -- presumably refines the hand
 # center (COM) before pose estimation; confirm against the pipeline.
 comrefNetParams = ScaleNetParams(type=1,
                                  nChan=1,
                                  wIn=128,
                                  hIn=128,
                                  batchSize=1,
                                  resizeFactor=2,
                                  numJoints=1,
                                  nDims=3)
 comrefNet = ScaleNet(numpy.random.RandomState(23455),
                      cfgParams=comrefNetParams)
 comrefNet.load("./net_NYU_COM.pkl")
 # 'fx'/'fy' look like camera focal lengths and 'cube' the hand crop
 # volume -- NOTE(review): presumably NYU/Kinect values; confirm.
 config = {'fx': 588., 'fy': 587., 'cube': (300, 300, 300)}
 # config = {'fx': 241.42, 'fy': 241.42, 'cube': (250, 250, 250)}
 # config = {'fx': 224.5, 'fy': 230.5, 'cube': (300, 300, 300)}  # Creative Gesture Camera
 # di = ICVLImporter("./capture/")
 # di.fx = 224.5
 # di.fy = 230.5
 # di.ux = 160.
 # di.uy = 120.
 # Assemble the realtime pipeline from pose net, camera config, importer
 # (di, defined above this view) and the hand-center refinement net.
 rtp = RealtimeHandposePipeline(poseNet, config, di, comrefNet)
    # Load the 'test_1' sequence (di comes from code above this view).
    Seq2 = di.loadSequence('test_1')
    testSeqs = [Seq2]

    # load trained network
    # ResNet pose network: 14 joints, 3 dims each; weights are referenced
    # via loadFile rather than loaded eagerly here.
    poseNetParams = ResNetParams(type=1,
                                 nChan=1,
                                 wIn=128,
                                 hIn=128,
                                 batchSize=1,
                                 numJoints=14,
                                 nDims=3)
    poseNetParams.loadFile = "./eval/NYU_network_prior.pkl"
    # Hand-center refinement network config (single 3D joint).
    comrefNetParams = ScaleNetParams(type=1,
                                     nChan=1,
                                     wIn=128,
                                     hIn=128,
                                     batchSize=1,
                                     resizeFactor=2,
                                     numJoints=1,
                                     nDims=3)
    comrefNetParams.loadFile = "./eval/net_NYU_COM_AUGMENT.pkl"
    # 'fx'/'fy' appear to be camera focal lengths, 'cube' the crop volume
    # -- NOTE(review): confirm camera/units.
    config = {'fx': 588., 'fy': 587., 'cube': (300, 300, 300)}
    # config = {'fx': 241.42, 'fy': 241.42, 'cube': (250, 250, 250)}
    # config = {'fx': 224.5, 'fy': 230.5, 'cube': (300, 300, 300)}  # Creative Gesture Camera
    rtp = RealtimeHandposePipeline(poseNetParams,
                                   config,
                                   di,
                                   verbose=False,
                                   comrefNet=comrefNetParams)

    # use filenames
    filenames = []
    # NOTE(review): everything from here down rebuilds di, the params and
    # the pipeline for the MSRA15 dataset, shadowing the NYU setup above --
    # looks like two example variants pasted together. Verify which is wanted.
    di = MSRA15Importer('/content/drive/My Drive/KNOWLEDGE ENGINEERING/KE Semester 4/Core Course/CA2 (Matthew)/cvpr15_MSRAHandGestureDB/')
    Seq2 = di.loadSequence('P0')
    testSeqs = [Seq2]

    # di = ICVLImporter('../data/ICVL/')
    # Seq2 = di.loadSequence('test_seq_1')
    # testSeqs = [Seq2]

    #di = NYUImporter('../data/NYU/')
    #Seq2 = di.loadSequence('test_1')
    #testSeqs = [Seq2]

    # load trained network
    poseNetParams = ResNetParams(type=1, nChan=1, wIn=128, hIn=128, batchSize=1, numJoints=14, nDims=3)
    poseNetParams.loadFile = "/content/deep-prior-pp/src/eval/MSRA_network_prior_0.pkl"
    comrefNetParams = ScaleNetParams(type=1, nChan=1, wIn=128, hIn=128, batchSize=1, resizeFactor=2, numJoints=1, nDims=3)
    comrefNetParams.loadFile = "/content/deep-prior-pp/src/eval/net_MSRA15_COM_AUGMENT.pkl"
    config = {'fx': 588., 'fy': 587., 'cube': (300, 300, 300)}
    # config = {'fx': 241.42, 'fy': 241.42, 'cube': (250, 250, 250)}
    # config = {'fx': 224.5, 'fy': 230.5, 'cube': (300, 300, 300)}  # Creative Gesture Camera
    rtp = RealtimeHandposePipeline(poseNetParams, config, di, verbose=False, comrefNet=comrefNetParams)

    # use filenames
    filenames = []
    # Drive the pipeline from the recorded depth files of the test sequence.
    for i in testSeqs[0].data:
        filenames.append(i.fileName)
    dev = FileDevice(filenames, di)

    # use depth camera
    # dev = CreativeCameraDevice(mirror=True)
    rtp.processVideoThreaded(dev)
# Example #6 (score: 0)
# Right edge of the center-crop window (xstart/dsize defined above this view).
xend = xstart + dsize[0]
# NOTE(review): ystart is computed from shape[3] but applied to axis 2 in
# the slice, while xstart (from shape[2] below) slices axis 3 -- the axes
# look swapped; harmless only for square images. Confirm.
ystart = int(train_data.shape[3] / 2 - dsize[1] / 2)
yend = ystart + dsize[1]
# Center crop of the training set; slicing is [sample, channel, y, x].
train_data2 = train_data[:, :, ystart:yend, xstart:xend]

# Quarter-size center crop.
dsize = (int(train_data.shape[2] // 4), int(train_data.shape[3] // 4))
xstart = int(train_data.shape[2] / 2 - dsize[0] / 2)
xend = xstart + dsize[0]
ystart = int(train_data.shape[3] / 2 - dsize[1] / 2)
yend = ystart + dsize[1]
train_data4 = train_data[:, :, ystart:yend, xstart:xend]

# Hand-center ("comref") network config: single 3D joint, 96x96 input;
# pretrained weights referenced via loadFile.
comrefNetParams = ScaleNetParams(type=1,
                                 nChan=1,
                                 wIn=96,
                                 hIn=96,
                                 batchSize=1,
                                 resizeFactor=2,
                                 numJoints=1,
                                 nDims=3)
comrefNetParams.loadFile = "../../ptm/net_MSRA15_COM_AUGMENT.pkl"
# NOTE(review): poseNet is constructed from comrefNetParams (the COM
# config), not a pose config -- confirm this is intentional.
poseNet = ScaleNet(numpy.random.RandomState(23455), cfgParams=comrefNetParams)
# Promote the inputs to float64.
train_data = numpy.ndarray.astype(train_data, dtype='float64')
train_data2 = numpy.ndarray.astype(train_data2, dtype='float64')
train_data4 = numpy.ndarray.astype(train_data4, dtype='float64')
#Seq_all list of sequence data
# Collect the original 3D ground truth of the crop joint for every frame
# of every sequence, each entry reshaped to (1, 3). (Python 2 xrange.)
gt3D = []
for i in xrange(len(Seq_all)):
    gt3D_temp = [
        j.gt3Dorig[di.crop_joint_idx].reshape(1, 3) for j in Seq_all[i].data
    ]
    gt3D.extend(gt3D_temp)