def main():

    # load data from ref_data
    u_data, v_data, fg_imgs, original_imgs, abnormal_fg_imgs = load_data()

    # use normalization method for feature correction
    weight = Weight_matrix().get_weight_matrix()

    thisFeatureExtractor = Feature_extractor(original_imgs, fg_imgs,
                                             abnormal_fg_imgs, u_data, v_data,
                                             weight)

    # training frames (80-140)
    train_data, train_labels = thisFeatureExtractor.get_features_and_labels(80, 140)

    ########################## To see the train data features distribution, uncomment next line##################################
    #uvPlot(train_data[:,0],train_data[:,1],train_labels,False)
    #############################################################################################################################
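    # A rough matplotlib equivalent of the uvPlot call above (an assumption about what it
    # shows: a scatter of the two u/v features coloured by label), left commented out like
    # the original:
    #import matplotlib.pyplot as plt
    #plt.scatter(train_data[:, 0], train_data[:, 1], c=train_labels, s=5)
    #plt.xlabel('u'); plt.ylabel('v'); plt.show()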

    classifiers = Classifiers(train_data, train_labels)

    # testing frames (140-199)
    test_data, test_labels = thisFeatureExtractor.get_features_and_labels(140, 199)

    for name, model in classifiers.models.items():  # get each classifier
        for ind, original_img in enumerate(original_imgs[:-1]):  # get each frame

            # get the position of each person in this frame
            pos, thisImg, _, _ = thisFeatureExtractor.getPosition(fg_imgs, ind)

            # get the features for each person in the frame
            features, _ = thisFeatureExtractor.get_features_and_labels(ind, ind + 1, False)

            labels = model.predict(features)  # predict a label for each person

            plot(pos, labels, thisImg, name)  # show the frame with the predicted labels

        # metrics for each classifier based on the test data
        classifiers.prediction_metrics(test_data, test_labels, name)
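
# --- Hedged sketch, not this project's actual code ---
# The Classifiers object used above is assumed to expose a `models` dict of fitted estimators
# and a `prediction_metrics` method. One minimal way to build such a wrapper with scikit-learn
# (model choice and metrics are placeholders; it is named SketchClassifiers here to avoid
# suggesting it is the real class) could be:
from sklearn.metrics import classification_report
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC


class SketchClassifiers:
    """Assumed shape of the wrapper: fit a few models and expose them by name."""

    def __init__(self, train_data, train_labels):
        self.models = {
            'knn': KNeighborsClassifier(n_neighbors=5).fit(train_data, train_labels),
            'svm': SVC().fit(train_data, train_labels),
        }

    def prediction_metrics(self, test_data, test_labels, name):
        # Print per-class precision/recall for the named model on held-out data.
        predictions = self.models[name].predict(test_data)
        print(name)
        print(classification_report(test_labels, predictions))
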
    def main(self):

        pre = Preprocessing.Preprocessing(self.features_to_consider)

        if self.normalise:
            pre.scale()  # TODO different scaling options

        if self.classification_model == 'logisticRegression':
            logReg_predict = Classifiers.Classifiers(pre.features_dataframe,
                                                     self.features_to_consider)

            if self.splitByDate:
                logReg_predict.splitDataset(self.trainingDateStart,
                                            self.trainingDateEnd,
                                            self.testDateStart,
                                            self.testDateEnd)

            logReg_predict.LogRegTrainModel()
            logReg_predict.LogRegTestModel()

        else:
            print("other model")
def main():
    directory = '/xxxx/features'
    feature_names = [name for name in os.listdir(directory) if name == '200']
    feature_names.sort(reverse=True)
    for feature_name in feature_names:
        # K=dictKvalue[feature_name]
        K = 40
        data_dir = os.path.join(directory, feature_name)
        dataset = load_dataset('sigactcuboid', data_dir)
        classifiers = Classifiers(dataset.get_data(), dataset.feature_name,
                                  dataset.model_dir, K)

        for name in classifiers.models:  # get each classifier
            model_directory = dataset.model_dir
            modname = os.path.join(model_directory,
                                   name + '_train_' + feature_name + '.pkl')
            model_data = load_model(modname)  # reload the trained model from disk
            labels, scores, test_time = model_data.predict()

            timefile = os.path.join(model_directory,
                                    name + '_test_' + feature_name + '.json')
            times2json(test_time, timefile)  # save the prediction timing to JSON
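
# --- Hedged sketch, not this project's actual helpers ---
# The load_model / times2json calls above are assumed to be thin wrappers around pickle and
# json; something in this spirit (exact fields and file layout are guesses) could be:
import json
import pickle


def load_model(path):
    # Reload a previously pickled, trained model object from disk.
    with open(path, 'rb') as f:
        return pickle.load(f)


def times2json(test_time, path):
    # Persist the measured prediction time (a plain number or dict of numbers) as JSON.
    with open(path, 'w') as f:
        json.dump({'test_time': test_time}, f, indent=2)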