Example #1
    #    continue;        
    if file_count < 800:
        print("\t Processing file " + file)
        # Create the object to access the sample
        smp = GestureSample(os.path.join(data, file))
        # ###############################################
        # USE Ground Truth information to learn the model
        # ###############################################
        # Get the list of actions for this frame
        gesturesList = smp.getGestures()
        frame_num = smp.getNumFrames()
        Feature_Array = np.zeros(
            shape=(frame_num, (njoints * (njoints - 1) // 2 + njoints ** 2) * 3),
            dtype=np.float32)
#        Target = np.zeros( shape=(frame_num, target_category), dtype=np.uint8)
        
        #feature generate
        Skeleton_matrix, valid_skel = Extract_feature_UNnormalized(smp,used_joints, 1, frame_num)
        Feature_Array = Extract_feature_Realtime(Skeleton_matrix, njoints)
        Feature_all.append(Feature_Array)
        
        #target generate
        
        labels = np.zeros(frame_num, np.uint8)
        for row in gesturesList:
            # row = (gestureID, startFrame, endFrame); frames are 1-indexed
            labels[int(row[1]) - 1:int(row[2]) - 1] = int(row[0])
        Target_all.append(labels)
        del smp

# save the skeleton file:
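For reference: with the 11 joints used throughout these examples, the per-frame
feature width produced by Extract_feature_Realtime works out to 528, the same
value passed as n_ins=528 to the DBN in Example #2. A quick check of the shape
formula used above:

njoints = 11
feature_dim = (njoints * (njoints - 1) // 2 + njoints ** 2) * 3
assert feature_dim == (55 + 121) * 3 == 528
print(feature_dim)  # 528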


Example #2

import cPickle as pickle
    #    continue;
    time_tic = time.time()
    if file_count >= 650:
        print("\t Processing file " + file)
        # Create the object to access the sample
        smp = GestureSample(os.path.join(data_path, file))
        # ###############################################
        # USE Ground Truth information to learn the model
        # ###############################################
        # Get the list of actions for this frame
        gesturesList = smp.getGestures()
        ###########################################################
        # we check whether it's left dominant or right dominant
        # if right dominant, we correct them to left dominant
        ##########################################################
        Skeleton_matrix, valid_skel = Extract_feature_UNnormalized(
            smp, used_joints, 1, smp.getNumFrames())

        Feature = Extract_feature_Realtime(Skeleton_matrix, njoints)

        Feature_normalized = normalize(Feature, Mean1, Std1)

        ### Feed into DBN
        shared_x = theano.shared(numpy.asarray(Feature_normalized,
                                               dtype=theano.config.floatX),
                                 borrow=True)
        numpy_rng = numpy.random.RandomState(123)

        ### model 1
        ##########################
        dbn = GRBM_DBN(numpy_rng=numpy_rng,
                       n_ins=528,
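The normalize(Feature, Mean1, Std1) helper is not shown in these snippets; given
that SK_normalization.pkl stores one mean and one standard deviation per feature
dimension (see Example #5), it is presumably a per-dimension z-score. A minimal
sketch under that assumption:

import numpy as np

def normalize(features, mean, std):
    # z-score each column; mean and std are assumed to be 1-D arrays of
    # length features.shape[1], as loaded from SK_normalization.pkl
    return (features - mean) / std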
Example #3
    #    continue;        
    if file_count < 650:
        print("\t Processing file " + file)
        # Create the object to access the sample
        smp = GestureSample(os.path.join(data, file))
        # ###############################################
        # USE Ground Truth information to learn the model
        # ###############################################
        # Get the list of actions for this frame
        gesturesList = smp.getGestures()

        # Iterate for each action in this sample
        for gesture in gesturesList:
            # Get the gesture ID, and start and end frames for the gesture
            gestureID, startFrame, endFrame = gesture
            Skeleton_matrix, valid_skel = Extract_feature_UNnormalized(smp, used_joints, startFrame, endFrame)
            # to see we actually detect a skeleton:
            if not valid_skel:
                print "No detected Skeleton: ", gestureID
            else:                            
                ### extract the features according to the CVPR2014 paper
                Feature = Extract_feature_Realtime(Skeleton_matrix, njoints)
                Target = numpy.zeros(shape=(Feature.shape[0], STATE_NO*20 + 1))
                fr_no = Feature.shape[0]
                for i in range(STATE_NO):  # HMM states: forced alignment
                    begin_fr = numpy.round(fr_no * i / STATE_NO) + 1
                    end_fr = numpy.round(fr_no * (i + 1) / STATE_NO)
                    #print "begin: %d, end: %d" % (begin_fr - 1, end_fr)
                    seg_length = end_fr - begin_fr + 1
                    targets = numpy.zeros(shape=(STATE_NO*20 + 1, 1))
                    targets[i + STATE_NO*(gestureID - 1)] = 1
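The truncated loop above performs a forced alignment: each gesture's frames are
split evenly across STATE_NO hidden states, so frame-level state targets can be
built without running an actual HMM decoder. A self-contained sketch of the same
idea (0-indexed, with STATE_NO=5 and the 20-class vocabulary that the
STATE_NO*20+1 target width implies):

import numpy as np

STATE_NO = 5    # states per gesture class
N_CLASSES = 20  # gesture vocabulary size implied by STATE_NO*20+1

def force_align(fr_no, gestureID):
    # one-hot state targets; the last column is reserved for the neutral class
    targets = np.zeros((fr_no, STATE_NO * N_CLASSES + 1))
    for i in range(STATE_NO):
        begin_fr = int(np.round(fr_no * i / float(STATE_NO)))
        end_fr = int(np.round(fr_no * (i + 1) / float(STATE_NO)))
        targets[begin_fr:end_fr, i + STATE_NO * (gestureID - 1)] = 1
    return targets

# e.g. force_align(10, 3): frames 0-1 hit state column 10, frames 2-3 column 11,
# and so on through column 14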
Example #4
        gesturesList = smp.getGestures()
        # Iterate for each action in this sample
        # Then we also take the 5 frames before and after the ground-truth data:
        seg_length = 5
        for gesture in gesturesList:
                # Get the gesture ID, and start and end frames for the gesture
                gestureID, startFrame, endFrame = gesture
                # This part is to extract action data

                Skeleton_matrix = numpy.zeros(shape=(5, len(used_joints)*3))
                HipCentre_matrix = numpy.zeros(shape=(5, 3))
                frame_num = 0 
                
                ## extract first 5 frames
                if startFrame-seg_length > 0:
                    Skeleton_matrix, valid_skel = Extract_feature_UNnormalized(smp, used_joints, startFrame-seg_length+1, startFrame)              
                    if not valid_skel:
                        print "No detected Skeleton: ", gestureID
                    else:
                        Feature = Extract_feature_Realtime(Skeleton_matrix, njoints)
                        begin_frame = count
                        end_frame = count + seg_length - 1
                        Feature_all[begin_frame:end_frame, :] = Feature  # features for the 5 frames before the gesture
                        Targets[begin_frame:end_frame, -1] = 1  # last column = 1, i.e. the neutral class
                        count = count + seg_length - 1

                ## extract last 5 frames
                if endFrame+seg_length < smp.getNumFrames():
                    Skeleton_matrix, valid_skel = Extract_feature_UNnormalized(smp, used_joints, endFrame, endFrame + seg_length - 1)
                    if not valid_skel:
                        print "No detected Skeleton: ", gestureID
Example #5

import os
import cPickle as pickle
import numpy as np

# GestureSample ships with the ChaLearn LAP sample code; the remaining helpers
# (Extract_feature_UNnormalized, Extract_feature_Realtime, normalize) are
# repo-local, so the exact import lines here are assumptions:
from ChalearnLAPSample import GestureSample


def generate_eigenjoint(feature_name='sk_eigenjoint_nor_528',
                        labels_name='labels_raw'):
    # Data folder (Training data)
    print("Extracting the training files")
    data = "E:\\program\\Chalearn\\rawdata\\train\\"
    target_dir = 'E:\\program\\Chalearn\\Chalearn_LSTM\\target\\'
    # Get the list of training samples
    samples = os.listdir(data)
    output_dir = 'E:\\program\\Chalearn\\Chalearn_LSTM\\feature\\' + feature_name

    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    used_joints = [
        'ElbowLeft', 'WristLeft', 'ShoulderLeft', 'HandLeft', 'ElbowRight',
        'WristRight', 'ShoulderRight', 'HandRight', 'Head', 'Spine',
        'HipCenter'
    ]
    njoints = len(used_joints)

    f = open('SK_normalization.pkl', 'rb')
    normal_params = pickle.load(f)
    f.close()
    Mean = normal_params['Mean1']
    Std = normal_params['Std1']

    count = 0
    #    target_category = 21
    Target_all = []
    #Feature_all =  numpy.zeros(shape=(400000, (njoints*(njoints-1)/2 + njoints**2)*3),dtype=numpy.float32)
    for file_count, file in enumerate(samples):
        # samples 0417 and 0675 are skipped (presumably bad recordings)
        if int(file[-8:-4]) != 417 and int(file[-8:-4]) != 675:
            print("\t Processing file " + file)
            # Create the object to access the sample
            smp = GestureSample(os.path.join(data, file))
            # ###############################################
            # USE Ground Truth information to learn the model
            # ###############################################
            # Get the list of actions for this frame
            gesturesList = smp.getGestures()
            frame_num = smp.getNumFrames()
            Feature_Array = np.zeros(
                shape=(frame_num,
                       (njoints * (njoints - 1) // 2 + njoints ** 2) * 3),
                dtype=np.float32)
            #        Target = np.zeros( shape=(frame_num, target_category), dtype=np.uint8)

            #feature generate
            Skeleton_matrix, valid_skel = Extract_feature_UNnormalized(
                smp, used_joints, 1, frame_num)
            Feature_Array = Extract_feature_Realtime(Skeleton_matrix, njoints)

            Feature_Array = normalize(Feature_Array, Mean, Std)
            # pad with a copy of the last row so the row count matches frame_num
            # (the difference features appear to yield one row fewer than frames)
            add_ = Feature_Array[-1].reshape((1, Feature_Array.shape[1]))
            Feature_Array = np.concatenate((Feature_Array, add_), axis=0)

            #save sample sk features
            output_name = '%04d.npy' % count

            count += 1
            np.save(os.path.join(output_dir, output_name), Feature_Array)

            #target generate

            labels = np.zeros(frame_num, np.uint8)
            for row in gesturesList:
                labels[int(row[1]) - 1:int(row[2]) - 1] = int(row[0])
            Target_all.append(labels)
            del smp

    np.save(target_dir + '%s.npy' % labels_name, Target_all)
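A typical call writes one .npy feature file per processed sample plus a single
labels file. Loading the artefacts back for training might look like this
(paths as hard-coded above; the per-sample shape is an expectation, not
verified here):

generate_eigenjoint()

feat = np.load('E:\\program\\Chalearn\\Chalearn_LSTM\\feature\\sk_eigenjoint_nor_528\\0000.npy')
labels_all = np.load('E:\\program\\Chalearn\\Chalearn_LSTM\\target\\labels_raw.npy')
print(feat.shape)       # roughly (frame_num, 528) for the first processed sample
print(len(labels_all))  # one label vector per processed sample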