Example #1
    def __init__(self,
                 classifier=LinearSVC(random_state=0),
                 fl_filters=None,
                 results_file=None,
                 no_clusters=100,
                 no_samples=800):
        self.no_clusters = no_clusters
        self.no_samples = no_samples
        self.OsName = None
        self.scaler_type = ScalerType.StandardScaler
        self.scaler_type_cluster = ScalerType.StandardScaler
        self.test_name = None
        self.base_path = None
        self.base_path2 = None
        self.base_path3 = None
        self.label_path = None
        self.train_path = None
        self.test_path = None
        self.datasets = None
        self.folders = None
        self.parameters = None
        self.aggregateVideoFeatures = True
        self.classifier_helper = ClassifierHelpers(classifier=classifier, no_clusters=no_clusters)
        self.file_helper = FileHelpers(fl_filters)
        self.mail_helper = MailHelpers()
        self.descr_files = None
        self.trainVideoCount = 0
        self.train_labels = np.array([])
        self.groups = np.array([])
        self.name_dict = {}
        self.number_dict = {}
        self.count_class = 0
        self.descriptor_list = []
        self.results_file = results_file
Example #2
    def create(self, path: Path, source_id: int):

        if path.suffix != '.zip':
            logging.critical('Path provided does not point to a zip file')
            return None

        asset = self.filter_by(filename=str(path.name)).first()  # check if asset already exists in db

        if asset is None:
            sku = FileHelpers.get_sku(path)
            product_name = FileHelpers.parse_product_name(path)
            path_raw = str(path)
            filename = path.name
            size_raw = FileHelpers.get_file_size(path)
            installed_raw = False  # todo check if asset is already installed
            imported_raw = False

            asset = Asset(source_id=source_id,
                          sku=sku,
                          product_name=product_name,
                          path_raw=path_raw,
                          filename=filename,
                          size_raw=size_raw,
                          installed_raw=installed_raw,
                          imported_raw=imported_raw)

            session: Session = sessionmaker(bind=self.engine, expire_on_commit=False)()
            SQLHelpers.commit(session, asset)

        return asset
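
A minimal usage sketch for create (the db.assets wiring is hypothetical, inferred from the tree example below, which calls self.db.assets.create(sub_path, source_id)):

from pathlib import Path

# hypothetical call; source_id identifies the containing source record
asset = db.assets.create(Path("E:/Library/zips/SomeProduct.zip"), source_id=1)
if asset is not None:
    print(asset.product_name, asset.sku)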
Example #3
    def _update_source_details(self):
        source_zip_count = self.tree.zip_count
        source_folder_size = FileHelpers.format_bytes(self.tree.size)

        zips_text = 'Zips: ' + str(source_zip_count)
        size_text = 'Size: ' + str(source_folder_size)

        self.count_label.SetLabel(zips_text)
        self.size_label.SetLabel(size_text)
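
The label strings could equally be built with f-strings; a behaviorally identical sketch (SetLabel is the wxPython API these examples appear to use):

self.count_label.SetLabel(f'Zips: {source_zip_count}')
self.size_label.SetLabel(f'Size: {source_folder_size}')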
Example #4
    def populate(self, current_node, current_data, source_id: int):
        node_list = []
        session: Session = sessionmaker(bind=self.db.engine)()

        path = Path(current_data['path'])
        sub_list = [x for x in path.iterdir()]

        for sub_path in sub_list:

            if sub_path.is_dir():
                folder: Folder = self.db.folders.create(sub_path, source_id)
                next_data = {
                    'id': folder.id,
                    'type': 'folder',
                    'path': folder.path
                }

                next_node = self.AppendItem(current_node,
                                            sub_path.name,
                                            data=next_data,
                                            image=0)
                node_list.append(next_node)

                temp_list = self.populate(next_node, next_data, source_id)
                node_list += temp_list

            elif sub_path.suffix == '.zip':
                self.zip_count += 1
                self.size += FileHelpers.get_file_size(sub_path)

                asset: Asset = self.db.assets.create(sub_path, source_id)
                next_data = {'id': asset.id, 'type': 'asset', 'path': sub_path}

                next_node = self.AppendItem(current_node,
                                            asset.product_name,
                                            data=next_data,
                                            image=1)
                node_list.append(next_node)

        self.SortChildren(current_node)
        return node_list
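
A hedged sketch of kicking off the recursion from the root (tree and the root data dict are assumptions; GetRootItem comes from the wx.TreeCtrl API this class appears to extend):

root_data = {'id': 0, 'type': 'source', 'path': "E:/Library"}  # hypothetical
root_node = tree.GetRootItem()
nodes = tree.populate(root_node, root_data, source_id=1)
print(len(nodes), 'nodes;', tree.zip_count, 'zips;', tree.size, 'bytes')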
Example #5
    def install_asset(self, asset: Asset, library_path: Path):
        logging.debug('Current library_path: ' + str(library_path))
        logging.info('Installing: ' + asset.product_name)

        meta = None      # stem of a Runtime/Support .dsx, if one is found below
        img_path = None  # never assigned in this snippet; initialized so the
                         # db update at the end does not raise NameError

        with ZipFile(asset.path) as file:

            for info in file.infolist():
                # skip directories and DAZ metadata files
                if (info.is_dir()
                        or 'Manifest.dsx' in info.filename
                        or 'Supplement.dsx' in info.filename):
                    continue

                source = file.open(info.filename)
                local_path = Path(FileHelpers.clean_path(info.filename))
                absolute_path: Path = library_path / local_path

                root = Path(str(local_path)[:23])
                match = Path('Runtime/Support/')

                if root == match and absolute_path.suffix == '.dsx':
                    meta = str(absolute_path.stem)

                try:
                    if not absolute_path.parent.exists():
                        absolute_path.parent.mkdir(parents=True)
                    with source, open(str(absolute_path), 'wb') as out:
                        copyfileobj(source, out)
                    self.db.file_paths.create(asset.id, str(local_path))
                except OSError as e:
                    logging.error(
                        'Error occurred during zip member extraction')
                    logging.error(e)

        self.db.assets.update(asset.id,
                              installed_raw=True,
                              meta=meta,
                              img_path_raw=img_path)

        logging.info('Finished Install')
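
The infolist() loop deliberately skips Manifest.dsx and Supplement.dsx, which are DAZ Install Manager metadata rather than library content. A minimal invocation sketch (the installer object holding the db reference is hypothetical):

from pathlib import Path

installer.install_asset(asset, Path("E:/Daz3D/Library"))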
Example #6
# imports reconstructed from usage below; `fh` is assumed to alias FileHelpers
import numpy as np
from sklearn.utils import shuffle
from Helpers import DataPreprocessor, ModelWrappers
from Helpers import FileHelpers as fh
animation_data_path = "E:/Master/Sorted Mocap/WALKING/walking.npz"
STACKCOUNT = 15
TARGET_FPS = 20
training_prep = DataPreprocessor.ParalellMLPProcessor(STACKCOUNT,
                                                      1.0 / TARGET_FPS, 5)

training_prep.load_np(animation_data_path)

loss_test_wrapper = ModelWrappers.ae_perceptual_loss(training_prep)
loss_test_wrapper.train(0, 600, 0.01)

source_path = "../predictions_print_tests_2"

list_file_name = "animation_list.txt"
animations = fh.get_lines(source_path + "/" + list_file_name)

normal_path = "../test_out/normal"
shuffled_path = "../test_out/shuffled"
rotated_path = "../test_out/rotated"
shuffled_and_rotated_path = "../test_out/rotated&shuffled"

for animation in animations:
    np_file = np.load(animation)

    generated_data = np_file['generated_data']
    reference_data = np_file['reference_data']

    shuffled_reference, shuffled_generated = shuffle(reference_data,
                                                     generated_data,
                                                     random_state=42)
Example #7
# Reconstructed from a truncated extraction: the imports and the
# concatenate_animations wrapper are inferred from the call sites below.
from moviepy.editor import VideoFileClip, ColorClip, concatenate_videoclips
from Helpers import FileHelpers


def concatenate_animations(file_names, image_folder, target):
    # image_folder is unused here; the parameter is kept to match the calls below
    clips = []
    for idx, file_name in enumerate(file_names):
        clip = VideoFileClip(file_name)
        if idx == 0:
            clip1 = clip  # first clip sets the separator size (assumed)
        else:
            # prepend a 1-second white separator to each subsequent clip
            colorclip = ColorClip(size=(clip1.w, clip1.h), color=[1, 1, 1], duration=1)
            clip = concatenate_videoclips([colorclip, clip])
        clips.append(clip)

    final_clip = concatenate_videoclips(clips)
    final_clip.write_videofile(target, codec='libx264')

file_path = "E:\\Systemordner\\Dokumente\\Pycharm\\Master\\sparse_body_pose_prediction\\moglow_dropout\\unity_motion_export\\UNTITYEXPORT\\"
image_folder = ""
numbers = [45, 720, 734,  # bwd
           338, 1148, 2112,  # circle
           650, 763, 2308,  # diagonal
           976, 1514, 2016,  # fwd
           12, 13, 772  # sideways
           ]
methods = ["RNN2", "REF", "GLOW", "IK"]

for number in numbers:
    file_names = [file_path + "WALKING_" + method + "_" + str(number) + "_trained_on_WALKING.mp4" for method in methods]
    out_file_path = "shuffled_videos\\"
    FileHelpers.create_folder(out_file_path)
    concatenate_animations([file_names[0],file_names[1],file_names[2],file_names[3]], image_folder, out_file_path + str(number) + "_A.mp4")
    concatenate_animations([file_names[3],file_names[2],file_names[1],file_names[0]], image_folder, out_file_path + str(number) + "_B.mp4")
    concatenate_animations([file_names[2],file_names[3],file_names[0],file_names[1]], image_folder, out_file_path + str(number) + "_C.mp4")
    concatenate_animations([file_names[1],file_names[0],file_names[3],file_names[2]], image_folder, out_file_path + str(number) + "_D.mp4")
Example #8
def test_folder_func(folder_name, FF_BATCH_SIZE):
    STACKCOUNT = 15
    TARGET_FPS = 20
    take_last = lambda x: og_folder + "/" + x.split('/')[-1].split('.')[0] + "_trained_on_" + folder_name.split("/")[-2]

    # eval_files = [  "E:/Master/Converted Mocap/Eyes_Japan_Dataset/hamada/greeting-01-hello-hamada_poses.npz",
    #                 "E:/Master/Converted Mocap/Eyes_Japan_Dataset/kudo/jump-12-boxer step-kudo_poses.npz",
    #                 "E:/Master/Converted Mocap/KIT/576/MarcusS_AdrianM11_poses.npz",
    #                 "E:/Master/Converted Mocap/KIT/513/balance_on_beam06_poses.npz",
    #                 "E:/Master/Converted Mocap/Eyes_Japan_Dataset/hamada/gesture_etc-14-apologize-hamada_poses.npz",
    #                 "E:/Master/Converted Mocap/Eyes_Japan_Dataset/kanno/walk-01-normal-kanno_poses.npz",
    #                 "E:/Master/Converted Mocap/Eyes_Japan_Dataset/takiguchi/pose-10-recall blackmagic-takiguchi_poses.npz",
    #                 "E:/Master/Converted Mocap/TotalCapture/s1/freestyle2_poses.npz",
    #                 "E:/Master/Converted Mocap/Eyes_Japan_Dataset/hamada/accident-02-dodge fast-hamada_poses.npz",
    #                 "E:/Master/Converted Mocap/BMLhandball/S07_Expert/Trial_upper_left_right_003_poses.npz"]

    # eval_files_old = [ "E:/Master/Sorted Mocap/WALKING/WALKING_2265.npz",
    #                "E:/Master/Sorted Mocap/BASKETBALL/BASKETBALL_10.npz",
    #                "E:/Master/Sorted Mocap/BOXING/BOXING_64.npz",
    #                "E:/Master/Sorted Mocap/THROWING/THROWING_58.npz",
    #                "E:/Master/Sorted Mocap/INTERACTION/INTERACTION_1534.npz"
    #                ]
    PATH_PREFIX = "E:/Master/Sorted Mocap/WALKING/WALKING_"
    numbers = [
        45,
        720,
        734,  #bwd
        338,
        1148,
        2112,  #circle
        650,
        763,
        2308,  #diagonal
        976,
        1514,
        2016,  #fwd
        12,
        13,
        772  #sideways
    ]
    # numbers = []

    eval_files = [PATH_PREFIX + str(elem) + ".npz" for elem in numbers]
    # eval_files = [ "E:/Master/Sorted Mocap/WALKING/WALKING_42.npz",
    #                "E:/Master/Sorted Mocap/WALKING/WALKING_360.npz",
    #                "E:/Master/Sorted Mocap/WALKING/WALKING_420.npz",
    #                "E:/Master/Sorted Mocap/WALKING/WALKING_1337.npz",
    #                "E:/Master/Sorted Mocap/WALKING/WALKING_2265.npz",
    #                ]

    training_prep = DataPreprocessor.ParalellMLPProcessor(
        STACKCOUNT, 1.0 / TARGET_FPS, 5, use_weighted_sampling=True)

    training_prep.append_folder(folder_name,
                                eval_files,
                                mirror=True,
                                reverse=True)
    training_prep.finalize()
    # training_prep.save(folder_name + "walking_augmented_5.npz")
    # training_prep.load_np(folder_name + "walking_augmented_3.npz")
    # from Helpers import StatsPrinter
    # StatsPrinter.print_dirs(training_prep)

    # training_prep.load_np(folder_name + "walking_augmented_2.npz")
    # training_prep.load_np(folder_name + "walking_2.npz")

    # training_prep.load_np(folder_name + "combined.npz")
    # training_prep.append_subfolders("E:/Master/Converted Mocap/BMLhandball", eval_files)
    # training_prep.append_subfolders("E:/Master/Converted Mocap/BMLmovi", eval_files)
    # training_prep.append_subfolders("E:/Master/Converted Mocap/DFaust_67", eval_files)
    # training_prep.append_subfolders("E:/Master/Converted Mocap/EKUT", eval_files)
    # training_prep.append_subfolders("E:/Master/Converted Mocap/Eyes_Japan_Dataset", eval_files)
    # training_prep.append_subfolders("E:/Master/Converted Mocap/HumanEva", eval_files)
    # training_prep.append_subfolders("E:/Master/Converted Mocap/Kit", eval_files)
    # training_prep.append_subfolders("E:/Master/Converted Mocap/MPI_HDM05", eval_files)
    # training_prep.append_subfolders("E:/Master/Converted Moca/MPI_Limits", eval_files)
    # training_prep.append_subfolders("E:/Master/Converted Mocap/MPI_mosh", eval_files)
    # training_prep.append_subfolders("E:/Master/Converted Mocap/SFU", eval_files)
    # training_prep.append_subfolders("E:/Master/Converted Mocap/TotalCapture", eval_files)

    # training_prep.append_file("E:/Master/Sorted Mocap/WALKING/WALKING_2265.npz")
    # training_prep.finalize()
    # for idx in range(1,eval_files.__len__()):
    #     training_prep.append_file(eval_files[idx])
    # training_prep.append_file(eval_files[1])

    # loss_test_wrapper = ModelWrappers.ae_perceptual_loss(training_prep)
    # loss_test_wrapper.train(400, 600, 0.01)
    # gan_wrapper = None

    timings_file = "timings.txt"
    FileHelpers.clear_file(timings_file)

    # gan_wrapper = ModelWrappers.gan_wrapper(training_prep)
    # gan_wrapper.train(1, FF_BATCH_SIZE, 0.0001)

    # GLOW
    # glow_wrapper = ModelWrappers.glow_wrapper(training_prep)
    #
    # last_time = time.perf_counter()
    #
    # glow_wrapper.train(60, 180)
    #
    # comp_time = time.perf_counter()
    # total_time = comp_time - last_time
    # time_per_epoch = total_time / 60
    # FileHelpers.append_line(timings_file, "GLOW Training, time per epoch:" + str(time_per_epoch) + "\t timing:" + str(total_time))
    #
    # for file in eval_files:
    #     eval_prep = DataPreprocessor.ParalellMLPProcessor(STACKCOUNT, 1.0 / TARGET_FPS, 5)
    #     eval_prep.append_file(file)
    #     eval_prep.finalize()
    #
    #     glow_wrapper.predict(eval_prep)
    #
    #     total_time = glow_wrapper.last_inference_time
    #     time_per_frame = total_time / glow_wrapper.final_outputs.shape[0]
    #     FileHelpers.append_line(timings_file, "GLOW, file: "+ str(file) + "\t time per frame: " + str(time_per_frame)+ "\t timing:" +str(total_time) + "\t length:" + str(glow_wrapper.final_outputs.shape[0]))
    #
    #     glow_wrapper.save_prediction(take_last(file) + "_GLOW", gan_wrapper)
    #
    # torch.cuda.empty_cache()

    # FF

    # ff_wrapper = ModelWrappers.ff_wrapper(training_prep)
    #
    # last_time = time.perf_counter()
    #
    # ff_wrapper.train(120, FF_BATCH_SIZE, 0.0001)
    #
    # comp_time = time.perf_counter()
    # total_time = comp_time - last_time
    # time_per_epoch = total_time / 120
    # FileHelpers.append_line(timings_file, "FF Training, time per epoch:" + str(time_per_epoch) + "\t timing:" + str(total_time))
    #
    # for file in eval_files:
    #     eval_prep = DataPreprocessor.ParalellMLPProcessor(STACKCOUNT, 1.0 / TARGET_FPS, 5)
    #     eval_prep.append_file(file)
    #     eval_prep.finalize()
    #
    #     ff_wrapper.predict(eval_prep)
    #
    #     total_time = ff_wrapper.last_inference_time
    #     time_per_frame = total_time / ff_wrapper.final_outputs.shape[0]
    #     FileHelpers.append_line(timings_file, "FF, file: "+ str(file) + "\t time per frame: " + str(time_per_frame)+ "\t timing:" +str(total_time) + "\t length:" + str(ff_wrapper.final_outputs.shape[0]))
    #
    #     ff_wrapper.save_prediction(take_last(file) + "_FF", None)
    #
    # torch.cuda.empty_cache()
    #
    #rnn2
    rnn_wrapper = ModelWrappers.rnn_wrapper_2(training_prep)

    last_time = time.perf_counter()

    # rnn_wrapper.train(80, FF_BATCH_SIZE, 0.0001)
    rnn_wrapper.load_model('rnn_test_save')

    comp_time = time.perf_counter()
    total_time = comp_time - last_time
    time_per_epoch = total_time / 120
    FileHelpers.append_line(
        timings_file, "RNN Training, time per epoch:" + str(time_per_epoch) +
        "\t timing:" + str(total_time))

    for file in eval_files:
        eval_prep = DataPreprocessor.ParalellMLPProcessor(
            STACKCOUNT, 1.0 / TARGET_FPS, 5)
        eval_prep.append_file(file)
        eval_prep.finalize()

        rnn_wrapper.predict(eval_prep)

        total_time = rnn_wrapper.last_inference_time
        time_per_frame = total_time / rnn_wrapper.final_outputs.shape[0]
        FileHelpers.append_line(
            timings_file, "RNN, file: " + str(file) + "\t time per frame: " +
            str(time_per_frame) + "\t timing:" + str(total_time) +
            "\t length:" + str(rnn_wrapper.final_outputs.shape[0]))

        rnn_wrapper.save_prediction(take_last(file) + "_RNN2", None)
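
A hedged invocation sketch; take_last splits folder_name on '/' and takes index [-2], so the folder argument is expected to end with a trailing slash:

test_folder_func("E:/Master/Sorted Mocap/WALKING/", FF_BATCH_SIZE=512)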
Example #9
from Helpers import DataPreprocessor
from Helpers import ModelWrappers
from Helpers import Models
import torch
import sys
import time

from Helpers import FileHelpers

og_folder = "TEMP_REMOVE"

FileHelpers.create_folder(og_folder)
FileHelpers.create_folder(og_folder + "/unity_motion_export")
FileHelpers.create_folder(og_folder + "/stats")
FileHelpers.create_folder(og_folder + "/videos_out")


def test_folder_func(folder_name, FF_BATCH_SIZE):
    STACKCOUNT = 15
    TARGET_FPS = 20
    take_last = lambda x: og_folder + "/" + x.split('/')[-1].split('.')[0] + "_trained_on_" + folder_name.split("/")[-2]

    # eval_files = [  "E:/Master/Converted Mocap/Eyes_Japan_Dataset/hamada/greeting-01-hello-hamada_poses.npz",
    #                 "E:/Master/Converted Mocap/Eyes_Japan_Dataset/kudo/jump-12-boxer step-kudo_poses.npz",
    #                 "E:/Master/Converted Mocap/KIT/576/MarcusS_AdrianM11_poses.npz",
    #                 "E:/Master/Converted Mocap/KIT/513/balance_on_beam06_poses.npz",
    #                 "E:/Master/Converted Mocap/Eyes_Japan_Dataset/hamada/gesture_etc-14-apologize-hamada_poses.npz",
    #                 "E:/Master/Converted Mocap/Eyes_Japan_Dataset/kanno/walk-01-normal-kanno_poses.npz",
    #                 "E:/Master/Converted Mocap/Eyes_Japan_Dataset/takiguchi/pose-10-recall blackmagic-takiguchi_poses.npz",
    #                 "E:/Master/Converted Mocap/TotalCapture/s1/freestyle2_poses.npz",
Example #10
class Classifier:
    def __init__(self,
                 classifier=LinearSVC(random_state=0),
                 fl_filters=None,
                 results_file=None,
                 no_clusters=100,
                 no_samples=800):
        self.no_clusters = no_clusters
        self.no_samples = no_samples
        self.OsName = None
        self.scaler_type = ScalerType.StandardScaler
        self.scaler_type_cluster = ScalerType.StandardScaler
        self.test_name = None
        self.base_path = None
        self.base_path2 = None
        self.base_path3 = None
        self.label_path = None
        self.train_path = None
        self.test_path = None
        self.datasets = None
        self.folders = None
        self.parameters = None
        self.aggregateVideoFeatures = True
        self.classifier_helper = ClassifierHelpers(classifier=classifier, no_clusters=no_clusters)
        self.file_helper = FileHelpers(fl_filters)
        self.mail_helper = MailHelpers()
        self.descr_files = None
        self.trainVideoCount = 0
        self.train_labels = np.array([])
        self.groups = np.array([])
        self.name_dict = {}
        self.number_dict = {}
        self.count_class = 0
        self.descriptor_list = []
        self.results_file = results_file

    def trainModelFV_LOOCV_Fusion(self, extension='*.*'):
        """
        This method contains the entire module
        required for training the Bag of Poses model
        Use of helper functions will be extensive.
        """
        self.name_dict, self.number_dict, self.count_class = self.file_helper.getLabelsFromFile(self.label_path)

        # read file. prepare file lists.
        self.files1, self.trainFilesCount1 = self.file_helper.getFilesFromDirectory(self.base_path,
                                                                                    self.datasets,
                                                                                    extension)

        self.files2, self.trainFilesCount2 = self.file_helper.getFilesFromDirectory(self.base_path2,
                                                                                    self.datasets,
                                                                                    extension)

        self.parameters += 'Classifier Parameters\n'
        self.parameters += '%s' % self.classifier_helper.clf

        features_nd1 = np.asarray(self.files1)
        features_nd2 = np.asarray(self.files2)

        features_nd1.sort(axis=0)
        features_nd2.sort(axis=0)

        loo = LeaveOneOut()
        predictions = []
        pre = []
        lab = []
        hits = 0
        c = 0
        for train, test in loo.split(features_nd1):
            feature_test_file1 = str(features_nd1[test][0][0])
            feature_test_file2 = str(features_nd2[test][0][0])

            class_name_test = feature_test_file1.split(os.sep)[-2]
            c += 1

            currenInvDate = datetime.datetime.now().strftime("%d/%m/%Y %H:%M:%S")
            print('Step: %i/%i - %s\n%s\n%s' % (c, features_nd1.shape[0], currenInvDate,
                                                feature_test_file1, feature_test_file2))
            if c == 1 or c % 25 == 0:
                self.mail_helper.sendMail("Progress: %s - %s" % (self.test_name, self.OsName),
                                          "Samples processed: %i" % c)

            self.descriptor_list1 = []
            self.descriptor_list2 = []
            self.train_labels = []
            for feature in features_nd1[train]:
                feature = feature[0]
                label_number = self.number_dict[feature.split(os.sep)[-2]]
                self.train_labels = np.append(self.train_labels, label_number)
                des1 = self.file_helper.formatFeatures(feature)
                self.descriptor_list1.append(des1)

            for feature in features_nd2[train]:
                feature = feature[0]
                des2 = self.file_helper.formatFeatures(feature)
                self.descriptor_list2.append(des2)

            # format data as nd array
            ft1 = self.classifier_helper.formatND(self.descriptor_list1)
            ft2 = self.classifier_helper.formatND(self.descriptor_list2)

            gmm1 = GMM(n_components=self.no_clusters, covariance_type='diag', verbose=0)
            gmm1.fit(ft1)

            gmm2 = GMM(n_components=self.no_clusters, covariance_type='diag', verbose=0)
            gmm2.fit(ft2)

            fv_dim1 = self.no_clusters + 2 * self.no_clusters * ft1.shape[1]
            fv_dim2 = self.no_clusters + 2 * self.no_clusters * ft2.shape[1]
            print(fv_dim1, fv_dim2)
            n_videos = train.shape[0]
            features1 = np.array([np.zeros(fv_dim1) for i in range(n_videos)])
            features2 = np.array([np.zeros(fv_dim2) for i in range(n_videos)])
            count1 = 0
            count2 = 0
            for i in range(n_videos):
                len_video1 = len(self.descriptor_list1[i])
                fv1 = fisher_vector(ft1[count1:count1 + len_video1], gmm1)
                features1[i] = fv1
                count1 += len_video1

                len_video2 = len(self.descriptor_list2[i])
                fv2 = fisher_vector(ft2[count2:count2 + len_video2], gmm2)
                features2[i] = fv2
                count2 += len_video2

            print(features1.shape)
            print('Data normalization. 1')
            scaler1 = StandardScaler()
            # train normalization
            features1 = scaler1.fit_transform(features1)
            features1 = power_normalize(features1, 0.5)
            features1 = L2_normalize(features1)

            print(features2.shape)
            print('Data normalization. 2')
            scaler2 = StandardScaler()
            # train normalization
            features2 = scaler2.fit_transform(features2)
            features2 = power_normalize(features2, 0.5)
            features2 = L2_normalize(features2)

            # real label
            lab.extend([self.number_dict[feature_test_file1.split(os.sep)[-2]]])

            # test features 1
            feature_test1 = self.file_helper.formatFeatures(feature_test_file1)
            test_fv1 = fisher_vector(feature_test1, gmm1)
            # train normalization
            test_fv1 = test_fv1.reshape(1, -1)
            test_fv1 = scaler1.transform(test_fv1)
            test_fv1 = power_normalize(test_fv1, 0.5)
            test_fv1 = L2_normalize(test_fv1)

            # test features 2
            feature_test2 = self.file_helper.formatFeatures(feature_test_file2)
            test_fv2 = fisher_vector(feature_test2, gmm2)
            # train normalization
            test_fv2 = test_fv2.reshape(1, -1)
            test_fv2 = scaler2.transform(test_fv2)
            test_fv2 = power_normalize(test_fv2, 0.5)
            test_fv2 = L2_normalize(test_fv2)

            ## concatenate two fv test
            feature_test = np.concatenate((test_fv1, test_fv2), axis=1).reshape(1, -1)

            ## concatenate two fv train
            feature_train = np.concatenate((features1, features2), axis=1)

            # train classifiers
            self.classifier_helper.clf.fit(feature_train, self.train_labels)
            cl = int(self.classifier_helper.clf.predict(feature_test)[0])
            class_name_predict = self.name_dict[str(cl)]
            if class_name_test == class_name_predict:
                hits += 1

            msg_progress = 'Hits: %i/%i  -  Accuracy: %.4f\n\n' % (hits, c, hits / c)
            print(msg_progress)
            if c % 25 == 0:
                self.mail_helper.sendMail("Progress: %s - %s" % (self.test_name, self.OsName), msg_progress)

            # predicted label
            pre.extend([cl])
            predictions.append({
                'image1': feature_test_file1,
                'image2': feature_test_file2,
                'class': cl,
                'object_name': self.name_dict[str(cl)]
            })

        self.saveResults(predictions, pre, lab, features_nd1.shape[0])

    def trainModelFV_LOOCV_Classifiers(self, extension='*.txt'):
        """
        This method contains the entire module
        required for training the Bag of Poses model
        Use of helper functions will be extensive.
        """

        # More than one classifier can be inserted here for comparison
        names = ["Linear SVM"]
        classifiers = [SVC(kernel='linear')]

        self.name_dict, self.number_dict, self.count_class = self.file_helper.getLabelsFromFile(self.label_path)

        # read file. prepare file lists.
        self.files, self.trainFilesCount = self.file_helper.getFilesFromDirectory(self.base_path,
                                                                                  self.datasets,
                                                                                  extension)

        self.parameters += 'Classifier Parameters\n'
        self.parameters += '%s' % self.classifier_helper.clf

        features_nd = np.asarray(self.files)
        loo = LeaveOneOut()
        predictions = {}
        p = {}
        l = []
        hits = {}
        for name in names:
            predictions[name] = []
            p[name] = []
            hits[name] = 0

        c = 0
        for train, test in loo.split(features_nd):
            feature_test_file = str(features_nd[test][0][0])
            class_name_test = feature_test_file.split(os.sep)[-2]
            c += 1
            currenInvDate = datetime.datetime.now().strftime("%d/%m/%Y %H:%M:%S")
            print('Step: %i/%i - %s - %s' % (c, features_nd.shape[0], currenInvDate, feature_test_file))

            # Send email with partial progress
            if c == 1 or c % 25 == 0:
                self.mail_helper.sendMail("Progress: %s - %s" % (self.test_name, self.OsName),
                                          "Samples processed: %i" % c)

            self.descriptor_list = []
            self.train_labels = []
            for feature in features_nd[train]:
                feature = feature[0]
                label_number = self.number_dict[feature.split(os.sep)[-2]]
                self.train_labels = np.append(self.train_labels, label_number)
                des = self.file_helper.formatFeatures(feature)
                self.descriptor_list.append(des)

            # format data as nd array
            self.classifier_helper.formatND(self.descriptor_list)

            # Build Gaussian Mixture Model (GMM) to develop poses vocabulary
            gmm = GMM(n_components=self.no_clusters, covariance_type='diag')
            gmm.fit(self.classifier_helper.descriptor_vstack)

            # Compute the Fisher Vector dimension: K*(2D+1)
            fv_dim = self.no_clusters + 2 * self.no_clusters * self.classifier_helper.descriptor_vstack.shape[1]
            print(fv_dim)
            n_videos = train.shape[0]
            features = np.array([np.zeros(fv_dim) for i in range(n_videos)])
            count = 0
            for i in range(n_videos):
                len_video = len(self.descriptor_list[i])
                fv = fisher_vector(self.classifier_helper.descriptor_vstack[count:count + len_video], gmm)
                features[i] = fv
                count += len_video

            print(features.shape)
            print('Data normalization.')
            scaler = StandardScaler()
            print('DN here')
            # train normalization
            features = scaler.fit_transform(features)
            features = power_normalize(features, 0.5)
            features = L2_normalize(features)

            # real label
            l.extend([self.number_dict[feature_test_file.split(os.sep)[-2]]])

            # test features
            feature_test = self.file_helper.formatFeatures(feature_test_file)
            test_fv = fisher_vector(feature_test, gmm)
            # test normalization
            test_fv = test_fv.reshape(1, -1)
            test_fv = scaler.transform(test_fv)
            test_fv = power_normalize(test_fv, 0.5)
            test_fv = L2_normalize(test_fv)

            # train classifiers
            for name, clf in zip(names, classifiers):
                print(name)
                clf.fit(features, self.train_labels)
                cl = int(clf.predict(test_fv)[0])
                class_name_predict = self.name_dict[str(cl)]
                if class_name_test == class_name_predict:
                    hits[name] += 1

                # predicted label
                p[name].extend([cl])
                predictions[name].append({
                    'image': feature_test_file,
                    'class': cl,
                    'object_name': self.name_dict[str(cl)]
                })
            msg_progress = ''
            for name in names:
                msg_progress += 'Classifier: %s - Hits:%i/%i - Accuracy: %.4f\n' % (
                    name.ljust(20), hits[name], c, hits[name] / c)

            print(msg_progress)
            print('\n\n')
            if c == 1 or c % 25 == 0:
                self.mail_helper.sendMail("Progress: %s - %s" % (self.test_name, self.OsName), msg_progress)

        for name in names:
            print(name)
            self.saveResults(predictions[name], p[name], l, features_nd.shape[0], classifier_name=name)

    def saveResults(self, predictions_video, p_video, l_video, videosCount, predictions_clip=None,
                    p_clip=None, l_clip=None, clipsCount=0, classifier_name=None):
        class_names = []
        for key in self.name_dict: class_names.extend([self.name_dict[key]])
        class_names = sorted(class_names)
        np.set_printoptions(precision=2)
        files = []

        currenInvDate = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
        if classifier_name:
            baseResultPath = 'results%s%s-%s-%s%s' % (os.sep, currenInvDate, self.test_name, classifier_name, os.sep)
        else:
            baseResultPath = 'results%s%s-%s%s' % (os.sep, currenInvDate, self.test_name, os.sep)
        if not os.path.exists(os.path.dirname(baseResultPath)):
            try:
                os.makedirs(os.path.dirname(baseResultPath))
            except OSError as exc:  # Guard against race condition
                if exc.errno != errno.EEXIST:
                    raise

        # Saving results clip file
        if predictions_clip is not None:
            resultFileName = baseResultPath + 'results_clip.txt'
            keys = predictions_clip[0].keys()
            with open(resultFileName, 'w', newline='') as f:  # Just use 'w' mode in 3.x
                w = csv.DictWriter(f, keys)
                w.writeheader()
                for data in predictions_clip:
                    w.writerow(data)

        if p_clip is not None:
            # Saving confusion matrix file clip
            cnf_matrix_clip = confusion_matrix(l_clip, p_clip)
            cnfFileNameClip = baseResultPath + 'cnf_matrix_clip.txt'
            np.savetxt(cnfFileNameClip, cnf_matrix_clip, delimiter=",", fmt='%1.3f')

            # Computing Clip Accuracy
            ac1_clip = accuracy_score(l_clip, p_clip)
            ac2_clip = accuracy_score(l_clip, p_clip, normalize=False)
            print("Accuracy Clip = " + str(ac1_clip))
            print("Hits Clip = " + str(ac2_clip) + "/" + str(clipsCount))

            # Plot non-normalized confusion matrix clip
            plt.figure()
            f = baseResultPath + 'ConfusionNonNormalizedClip.png'
            files.append(f)
            self.classifier_helper.plot_confusion_matrix(cnf_matrix_clip, classes=class_names,
                                                         title='Confusion matrix, without normalization', show=False,
                                                         fileName=f)

            # Plot normalized confusion matrix clip
            plt.figure()
            f = baseResultPath + 'ConfusionNormalizedClip.png'
            files.append(f)
            self.classifier_helper.plot_confusion_matrix(cnf_matrix_clip, classes=class_names, normalize=True,
                                                         title='Normalized confusion matrix', show=False, fileName=f)

        # Saving results video file
        if predictions_video is not None:
            resultFileName = baseResultPath + 'results_video.txt'
            keys = predictions_video[0].keys()
            with open(resultFileName, 'w', newline='') as f:  # Just use 'w' mode in 3.x
                w = csv.DictWriter(f, keys)
                w.writeheader()
                for data in predictions_video:
                    w.writerow(data)

        if p_video is not None:
            # Saving confusion matrix file video
            cnf_matrix_video = confusion_matrix(l_video, p_video)
            cnfFileNameVideo = baseResultPath + 'cnf_matrix_video.txt'
            np.savetxt(cnfFileNameVideo, cnf_matrix_video, delimiter=",", fmt='%1.3f')

            # Computing Video Accuracy
            ac1_video = accuracy_score(l_video, p_video)
            ac2_video = accuracy_score(l_video, p_video, normalize=False)
            print("Accuracy Video = " + str(ac1_video))
            print("Hits Video = " + str(ac2_video) + "/" + str(videosCount))

            # Plot non-normalized confusion matrix video
            plt.figure()
            f = baseResultPath + 'ConfusionNonNormalizedVideo.png'
            files.append(f)
            self.classifier_helper.plot_confusion_matrix(cnf_matrix_video, classes=class_names,
                                                         title='Confusion matrix, without normalization', show=False,
                                                         fileName=f)

            # Plot normalized confusion matrix video
            plt.figure()
            f = baseResultPath + 'ConfusionNormalizedVideo.png'
            files.append(f)
            self.classifier_helper.plot_confusion_matrix(cnf_matrix_video, classes=class_names, normalize=True,
                                                         title='Normalized confusion matrix', show=False, fileName=f)

        # plt.show()

        # Sendmail
        current_date = datetime.datetime.now().strftime("%d/%m/%Y - %H:%M:%S")
        msg = "Test Done at: {}\n\n".format(current_date)
        if p_clip is not None:
            msg += "Accuracy Clip: {}\n\nHits: {}/{}\n\n".format(str(ac1_clip), str(ac2_clip), str(clipsCount))
        if p_video is not None:
            msg += "Accuracy Video: {}\n\nHits: {}/{}\n\n".format(str(ac1_video), str(ac2_video), str(videosCount))

        if classifier_name:
            subject = "Test Done: %s-%s - %s" % (self.test_name, classifier_name, self.OsName)
        else:
            subject = "Test Done: %s - %s" % (self.test_name, self.OsName)
        self.mail_helper.sendMail(subject, msg, files)

        if self.results_file:
            resultFile = self.results_file
            file = open(resultFile, 'a')
            if classifier_name:
                msg_result_file = '%s-%s;%.4f;%.4f\n' % (
                    self.test_name, classifier_name, ac1_clip if p_clip is not None else 0, ac1_video)
            else:
                msg_result_file = '%s;%.4f;%.4f\n' % (self.test_name, ac1_clip if p_clip is not None else 0, ac1_video)
            file.write(msg_result_file)
            file.close()

        # Save results
        resultFile = baseResultPath + 'Results.txt'
        file = open(resultFile, 'w')
        file.write(msg)
        file.close()

        # Save parameters
        resultFile = baseResultPath + 'Parameters.txt'
        file = open(resultFile, 'w')
        file.write(self.parameters)
        file.close()

    def build_FV_Features(self, extension='*.*'):
        """
        This method contains the entire module
        to compute features for all dataset data
        for Visualization plot propose.
        """
        self.name_dict, self.number_dict, self.count_class = self.file_helper.getLabelsFromFile(self.label_path)

        # read file. prepare file lists.
        self.files1, self.filesCount1 = self.file_helper.getFilesFromDirectory(self.base_path,
                                                                               self.datasets,
                                                                               extension)
        self.files2, self.filesCount2 = self.file_helper.getFilesFromDirectory(self.base_path2,
                                                                               self.datasets,
                                                                               extension)

        features_nd1 = np.asarray(self.files1)
        features_nd2 = np.asarray(self.files2)
        features_nd1.sort(axis=0)
        features_nd2.sort(axis=0)

        labels_train = []
        self.descriptor_list1 = []
        self.descriptor_list2 = []

        for feature in features_nd1:
            feature = feature[0]
            label_number = self.number_dict[feature.split(os.sep)[-2]]
            label_name = self.name_dict[str(label_number)]
            labels_train = np.append(labels_train, label_name)
            des1 = self.file_helper.formatFeatures(feature)
            self.descriptor_list1.append(des1)

        for feature in features_nd2:
            feature = feature[0]
            des2 = self.file_helper.formatFeatures(feature)
            self.descriptor_list2.append(des2)

        # format data as nd array
        ft1 = self.classifier_helper.formatND(self.descriptor_list1)
        ft2 = self.classifier_helper.formatND(self.descriptor_list2)

        gmm1 = GMM(n_components=self.no_clusters, covariance_type='diag', verbose=0)
        gmm1.fit(ft1)

        gmm2 = GMM(n_components=self.no_clusters, covariance_type='diag', verbose=0)
        gmm2.fit(ft2)

        fv_dim1 = self.no_clusters + 2 * self.no_clusters * ft1.shape[1]
        fv_dim2 = self.no_clusters + 2 * self.no_clusters * ft2.shape[1]
        print(fv_dim1, fv_dim2)
        n_videos = features_nd1.shape[0]
        features1 = np.array([np.zeros(fv_dim1) for i in range(n_videos)])
        features2 = np.array([np.zeros(fv_dim2) for i in range(n_videos)])
        count1 = 0
        count2 = 0
        for i in range(n_videos):
            len_video1 = len(self.descriptor_list1[i])
            fv1 = fisher_vector(ft1[count1:count1 + len_video1], gmm1)
            features1[i] = fv1
            count1 += len_video1

            len_video2 = len(self.descriptor_list2[i])
            fv2 = fisher_vector(ft2[count2:count2 + len_video2], gmm2)
            features2[i] = fv2
            count2 += len_video2

        print(features1.shape)
        print('Data normalization. 1')
        scaler1 = StandardScaler()
        # train normalization
        features1 = scaler1.fit_transform(features1)
        features1 = power_normalize(features1, 0.5)
        features1 = L2_normalize(features1)

        print(features2.shape)
        print('Data normalization. 2')
        scaler2 = StandardScaler()
        # train normalization
        features2 = scaler2.fit_transform(features2)
        features2 = power_normalize(features2, 0.5)
        features2 = L2_normalize(features2)

        # concatenate two fv train
        features_train = np.concatenate((features1, features2), axis=1)

        return features_train, labels_train
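
A minimal usage sketch. Note that __init__ leaves parameters as None while the training methods do self.parameters += ..., so a caller has to set it to a string first (all paths below are hypothetical):

clf = Classifier(no_clusters=100)
clf.test_name = "fusion_test"
clf.label_path = "labels.txt"
clf.base_path = "features/stream1"
clf.base_path2 = "features/stream2"
clf.datasets = ["dataset1"]
clf.parameters = ''  # must be a str before training, or += raises TypeError
clf.trainModelFV_LOOCV_Fusion()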
Example #11
from matplotlib.ticker import FuncFormatter
import matplotlib.pyplot as plt
import numpy as np

from Helpers import FileHelpers, ModelWrappers
from Helpers import DataPreprocessor
from Helpers import StatsPrinter
target_folder = "stats/"
training_prep_folder = "E:/Master/Sorted Mocap/WALKING/"
from sklearn.utils import shuffle

FileHelpers.create_folder(target_folder)
numbers = [
    45,
    720,
    734,  # bwd
    338,
    1148,
    2112,  # circle
    650,
    763,
    2308,  # diagonal
    976,
    1514,
    2016,  # fwd
    12,
    13,
    772  # sideways
]
motiondict = {
    45: "bwd",
Example #12
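The fragment below starts mid-script; a minimal assumed setup that would make it runnable (labels, values, x, fig, ax and rects1 are reconstructions, not from the source):

import matplotlib.pyplot as plt
import numpy as np

labels = ["bwd", "circle", "diagonal", "fwd", "sideways"]  # hypothetical
values = [3, 3, 3, 3, 3]                                   # hypothetical counts
x = np.arange(len(labels))

fig, ax = plt.subplots()
rects1 = ax.bar(x, values, width=0.6, label="count")
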
ax.set_xticks(x)
ax.set_xticklabels(labels)
ax.legend()


def autolabel(rects):
    """Attach a text label above each bar in *rects*, displaying its height."""
    for rect in rects:
        height = rect.get_height()
        ax.annotate(
            '{}'.format(height),
            xy=(rect.get_x() + rect.get_width() / 2, height),  # center over the bar
            xytext=(0, 3),  # 3 points vertical offset
            textcoords="offset points",
            ha='center',
            va='bottom')


from Helpers import FileHelpers
FileHelpers.clear_file("file_count.txt")
values = ','.join([str(elem) for elem in values])
labels = ','.join([str(elem) for elem in labels])

FileHelpers.append_line("file_count.txt", values)
FileHelpers.append_line("file_count.txt", labels)
autolabel(rects1)

fig.tight_layout()

plt.show()
Example #13
    def size(self, places=2):
        return FileHelpers.format_bytes(self.size_raw, places)
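
Usage is then simply (assuming this method lives on the Asset model from Example #2; the output format of format_bytes is an assumption):

print(asset.size())   # e.g. '1.25 MB'
print(asset.size(0))  # e.g. '1 MB'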