Code example #1
def preprocess(samples, set_label="training"):
    for file_count, file in enumerate(sort(samples)):
        if set_label == "training":
            condition = (file_count < 650)
            if pc == "wudi":
                dest = r"D:\Chalearn2014\Data_processed\train"  # dir to  destination processed data
            elif pc == "wudi_linux":
                dest = "/idiap/temp/dwu/chalearn2014_data/Train_processed"
            elif pc == "lio":
                dest = r"/media/lio/Elements/chalearn/preproc/train"
        else:
            condition = (file_count >= 650)
            if pc == "wudi":
                dest = r"D:\Chalearn2014\Data_processed\valid"  # dir to  destination processed data
            elif pc == "wudi_linux":
                dest = "/idiap/temp/dwu/chalearn2014_data/Valid_processed"
            elif pc == "lio":
                dest = r"/media/lio/Elements/chalearn/preproc/valid"  # dir to  destination processed data

        #set == "training" ? (condition = (file_count<650)) : (condition = (file_count>=650))
        if condition:  #wudi only used first 650 for validation !!! Lio be careful!
            print("\t Processing file " + file)
            start_time = time.time()
            # Create the object to access the sample
            sample = GestureSample(os.path.join(data, file))
            # ###############################################
            # USE Ground Truth information to learn the model
            # ###############################################
            # Get the list of gestures for this sample
            gestures = sample.getGestures()
            # Iterate for each action in this sample
            for gesture in gestures:
                skelet, depth, gray, user, c = sample.get_data_wudi(gesture, vid_res, NEUTRUAL_SEG_LENGTH)
                if c:
                    print 'corrupt'
                    continue

                # preprocess
                # skelet_feature: frames * num_features; computed per gesture because we need neutral frames
                skelet_feature, Targets, c = proc_skelet_wudi(sample, used_joints, gesture, STATE_NO,
                                                              NEUTRUAL_SEG_LENGTH)
                if c:
                    print 'corrupt'
                    continue
                user_o = user.copy()
                user = proc_user(user)
                skelet, c = proc_skelet(skelet)
                # depth: 2(h&b) * frames * 5 (stacked frames) * vid_shape_hand[0] *vid_shape_hand[1]
                user_new, depth, c = proc_depth_wudi(depth, user, user_o, skelet, NEUTRUAL_SEG_LENGTH)
                if c:
                    print 'corrupt'
                    continue
                # gray:  2(h&b) * frames * 5 (stacked frames) * vid_shape_hand[0] *vid_shape_hand[1]
                gray, c = proc_gray_wudi(gray, user, skelet, NEUTRUAL_SEG_LENGTH)
                if c:
                    print 'corrupt'
                    continue

                if show_depth: play_vid_wudi(depth, Targets, wait=1000 / 10, norm=False)
                if show_gray: play_vid_wudi(gray, Targets, wait=1000 / 10, norm=False)
                if show_user: play_vid_wudi(user_new, Targets, wait=1000 / 10, norm=False)
                # user_new = user_new.astype("bool")
                traj2D, traj3D, ori, pheight, hand, center = skelet
                skelet = traj3D, ori, pheight

                assert user.dtype == gray.dtype == depth.dtype == traj3D.dtype == ori.dtype == "uint8"
                assert gray.shape == depth.shape
                if not gray.shape[1] == skelet_feature.shape[0] == Targets.shape[0]:
                    print "too early or too late movement, skip one"
                    continue

                # we don't need the user info anyway
                video = empty((2,) + gray.shape, dtype="uint8")
                video[0], video[1] = gray, depth
                store_preproc_wudi(video, skelet_feature, Targets.argmax(axis=1), skelet, dest)


            end_time = time.time()

            print "Processing one batch requires: %d seconds\n" % (end_time - start_time)
            # already inside `if condition`, so only the last file of this split reaches here
            if file_count == (len(samples) - 1):
                dump_last_data(video, skelet_feature, Targets.argmax(axis=1), skelet, dest)
                print 'Process', p_i, 'finished'

            # dump the last batch of the training split as well
            if file_count == 650 - 1:
                dump_last_data(video, skelet_feature, Targets.argmax(axis=1), skelet, dest)
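
All three variants lean on module-level state: `pc`, `data`, `vid_res`, the `show_*` flags, `NEUTRUAL_SEG_LENGTH`, `STATE_NO`, `used_joints`, and (for the "finished" message) a worker index `p_i`, plus `import os`, `import time` and `from numpy import sort, empty`. A minimal, hypothetical driver for this variant might look as follows; every concrete value below is an assumption for illustration, not taken from the original repo:

import os
import time
from numpy import sort, empty  # preprocess() uses numpy's sort() and empty()

pc = "lio"                                   # selects one of the path blocks above (assumed)
data = "/media/lio/Elements/chalearn/raw"    # raw ChaLearn 2014 samples (assumed path)
p_i = 0                                      # worker index printed at the end (assumed)
# vid_res, show_* flags, NEUTRUAL_SEG_LENGTH, STATE_NO, used_joints: set elsewhere

samples = os.listdir(data)                   # e.g. ["Sample0001.zip", ...]
preprocess(samples, set_label="training")    # sorted files 0..649
preprocess(samples, set_label="validation")  # sorted files 650 and up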
Code example #2
def preprocess(samples, set_label="training"):
    for file_count, file in enumerate(sort(samples)):
        if set_label == "training":
            condition = (file_count < 650)
            if pc == "wudi":
                dest = r"D:\Chalearn2014\Data_processed\train"  # dir to  destination processed data
            elif pc == "wudi_linux":
                dest = "/idiap/temp/dwu/chalearn2014_data/Train_processed"
            elif pc == "lio":
                dest = r"/media/lio/Elements/chalearn/preproc/train"
        else:
            condition = (file_count >= 650)
            if pc == "wudi":
                dest = r"D:\Chalearn2014\Data_processed\valid"  # dir to  destination processed data
            elif pc == "wudi_linux":
                dest = "/idiap/temp/dwu/chalearn2014_data/Valid_processed"
            elif pc == "lio":
                dest = r"/media/lio/Elements/chalearn/preproc/valid"  # dir to  destination processed data

        #set == "training" ? (condition = (file_count<650)) : (condition = (file_count>=650))
        if condition:  #wudi only used first 650 for validation !!! Lio be careful!
            print("\t Processing file " + file)
            start_time = time.time()
            # Create the object to access the sample
            sample = GestureSample(os.path.join(data, file))
            # ###############################################
            # USE Ground Truth information to learn the model
            # ###############################################
            # Get the list of gestures for this sample
            gestures = sample.getGestures()
            # Iterate for each action in this sample
            for gesture in gestures:
                skelet, depth, gray, user, c = sample.get_data_wudi(
                    gesture, vid_res, NEUTRUAL_SEG_LENGTH)
                if c:
                    print 'corrupt'
                    continue

                # preprocess
                # skelet_feature: frames * num_features; computed per gesture because we need neutral frames
                skelet_feature, Targets, c = proc_skelet_wudi(
                    sample, used_joints, gesture, STATE_NO,
                    NEUTRUAL_SEG_LENGTH)
                if c:
                    print 'corrupt'
                    continue
                user_o = user.copy()
                user = proc_user(user)
                skelet, c = proc_skelet(skelet)
                # depth: 2(h&b) * frames * 5 (stacked frames) * vid_shape_hand[0] *vid_shape_hand[1]
                user_new, depth, c = proc_depth_wudi(depth, user, user_o,
                                                     skelet,
                                                     NEUTRUAL_SEG_LENGTH)
                if c:
                    print 'corrupt'
                    continue
                # gray:  2(h&b) * frames * 5 (stacked frames) * vid_shape_hand[0] *vid_shape_hand[1]
                gray, c = proc_gray_wudi(gray, user, skelet,
                                         NEUTRUAL_SEG_LENGTH)
                if c:
                    print 'corrupt'
                    continue

                if show_depth:
                    play_vid_wudi(depth, Targets, wait=1000 / 10, norm=False)
                if show_gray:
                    play_vid_wudi(gray, Targets, wait=1000 / 10, norm=False)
                if show_user:
                    play_vid_wudi(user_new,
                                  Targets,
                                  wait=1000 / 10,
                                  norm=False)
                # user_new = user_new.astype("bool")
                traj2D, traj3D, ori, pheight, hand, center = skelet
                skelet = traj3D, ori, pheight

                assert user.dtype == gray.dtype == depth.dtype == traj3D.dtype == ori.dtype == "uint8"
                assert gray.shape == depth.shape
                if not gray.shape[1] == skelet_feature.shape[0] == Targets.shape[0]:
                    print "too early or too late movement, skip one"
                    continue

                # we don't need the user info anyway
                video = empty((2, ) + gray.shape, dtype="uint8")
                video[0], video[1] = gray, depth
                store_preproc_wudi(video, skelet_feature,
                                   Targets.argmax(axis=1), skelet, dest)

            end_time = time.time()

            print "Processing one batch requires: %d seconds\n" % (end_time -
                                                                   start_time)
            # already inside `if condition`, so only the last file of this split reaches here
            if file_count == (len(samples) - 1):
                dump_last_data(video, skelet_feature, Targets.argmax(axis=1),
                               skelet, dest)
                print 'Process', p_i, 'finished'

            # dump the last batch of the training split as well
            if file_count == 650 - 1:
                dump_last_data(video, skelet_feature, Targets.argmax(axis=1),
                               skelet, dest)
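
Beyond the formatting, the end of the gesture loop encodes a small data contract: `gray` and `depth` must share one shape, the two modalities are stacked along a new leading axis before storage, and the one-hot `Targets` matrix is collapsed to integer labels with `argmax`. A self-contained numpy illustration of just that contract, with made-up shapes:

import numpy as np

# Made-up shapes for illustration: 2 crops (hands & body), 7 frames,
# 5 stacked frames per position, 64x64 pixels.
frames, stack, h, w = 7, 5, 64, 64
gray = np.zeros((2, frames, stack, h, w), dtype="uint8")
depth = np.zeros_like(gray)

video = np.empty((2,) + gray.shape, dtype="uint8")  # new modality axis in front
video[0], video[1] = gray, depth                    # video.shape == (2, 2, 7, 5, 64, 64)

Targets = np.eye(5, dtype="uint8")[np.random.randint(0, 5, frames)]  # one-hot, frames x 5
labels = Targets.argmax(axis=1)                     # integer class per frame
assert gray.shape[1] == labels.shape[0]             # the same frame-count check as above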
Code example #3
def preprocess(samples):
    for file_count, file in enumerate(sort(samples)):
        print "This is the %d th file : " % file_count
        ## 400 training samples in total: the first 360 for training, the last 40 for validation
        if file_count < 360:
            if pc == "linux_fancy":
                dest = "/home/fancywu/Desktop/GestureRecognition/Train_files/Preprocessed_files/train"
            elif pc == "win_fancy":
                dest = ""
            print "Processing training file ", file

        else:  # file_count >= 360
            if pc == "linux_fancy":
                dest = "/home/fancywu/Desktop/GestureRecognition/Train_files/Preprocessed_files/valid"
            elif pc == "win_fancy":
                dest = ""
            print "Processing validating file ", file

        start_time = time()
        ##Create the object to access the sample
        sample = GestureSample(os.path.join(data, file))
        #        print(os.path.join(data, file))

        ##USE Ground Truth information to learn the model
        ##Get the list of gestures for this sample
        gestures = sample.getGestures()
        print "len gestures: ", len(gestures)
        # preprocess each gesture
        for gesture in gestures:
            skelet, depth, gray, user, c = sample.get_data_wudi(
                gesture, vid_res, NEUTRUAL_SEG_LENGTH)
            if c:
                print '1: corrupt'
                continue

            skelet_feature, Targets, c = proc_skelet_wudi(
                sample, used_joints, gesture, HIDDEN_STATE,
                NEUTRUAL_SEG_LENGTH)
            if c:
                print '2: corrupt'
                continue

            user_o = user.copy()
            user = proc_user(user)
            skelet, c = proc_skelet(skelet)
            if c:
                print '3: corrupt'
                continue

            user_new, depth, c = proc_depth_wudi(depth, user, user_o, skelet,
                                                 NEUTRUAL_SEG_LENGTH)
            if c:
                print '4: corrupt'
                continue

            gray, c = proc_gray_wudi(gray, user, skelet, NEUTRUAL_SEG_LENGTH)
            if c:
                print '5: corrupt'
                continue

            if show_depth:
                play_vid_wudi(depth, Targets, wait=1000 / 10, norm=False)
            if show_gray:
                play_vid_wudi(gray, Targets, wait=1000 / 10, norm=False)
            if show_user:
                play_vid_wudi(user_new, Targets, wait=1000 / 10, norm=False)

            traj2D, traj3D, ori, pheight, hand, center = skelet
            skelet = traj3D, ori, pheight

            assert user.dtype == gray.dtype == depth.dtype == traj3D.dtype == ori.dtype == "uint8"
            assert gray.shape == depth.shape

            if not gray.shape[1] == skelet_feature.shape[0] == Targets.shape[0]:
                print "too early or too late movement, skip one"
                continue

            ## we only use the gray and depth videos for training; the user mask is not needed
            video = empty((2, ) + gray.shape, dtype="uint8")
            video[0], video[1] = gray, depth
            store_preproc_video_skelet_data(video, skelet_feature,
                                            Targets.argmax(axis=1), skelet,
                                            dest)
            print "finished"

        print "Processing one batch requires : %d seconds\n" % (time() -
                                                                start_time)
        if (file_count == len(samples) - 1) or (file_count == 360 - 1):
            store_preproc_video_skelet_data(video,
                                            skelet_feature,
                                            Targets.argmax(axis=1),
                                            skelet,
                                            dest,
                                            last_data=True)

        print "Processing %d sample requies: %3.3f mins" % (
            file_count + 1, (time() - prog_start_time) / 60.)
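
This variant calls `time()` bare (so `from time import time`) and reads `prog_start_time`, `pc`, `data`, `used_joints`, `HIDDEN_STATE`, `NEUTRUAL_SEG_LENGTH`, `vid_res`, and the `show_*` flags from module scope, none of which appear in the snippet. A plausible module header; every concrete value here is an assumption for illustration only:

import os
from time import time
from numpy import sort, empty

pc = "linux_fancy"
data = "/home/fancywu/Desktop/GestureRecognition/Train_files/raw"  # assumed path
show_depth = show_gray = show_user = False  # set True to preview the processed clips
NEUTRUAL_SEG_LENGTH = 8    # assumed value
HIDDEN_STATE = 5           # assumed value
vid_res = (640, 480)       # assumed Kinect resolution
used_joints = []           # joint-name list elided here

prog_start_time = time()
preprocess(os.listdir(data))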