def main():
    config_dir = "config_files"
    config = StudyConfig(config_dir)

    # Build the object with variable definitions based on the specified configuration
    # file. It includes the data description and the run-parameter definitions
    # (independent of whether deep learning is used).
    parameters = config.populate_study_parameters("CTS_UbiComp2020_1sample.toml")
    print(parameters)
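    # The TOML file above defines the study; the parameter fields accessed throughout
    # these examples suggest entries along these lines (values are illustrative
    # assumptions, not the project's actual schema):
    #
    #   study_name = "CTS_UbiComp2020"
    #   sampling_freq = 1000
    #   nfft = 512
    #   samples_per_chunk = 100
    #   neural_net = true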

    data = DataConstructor(parameters)
    test_data = data.test_subj_dataset

    # category labels are zero-padded button names: "button000" ... "button035"
    all_categories = [f"button{i:03d}" for i in range(36)]

    model_subdir = join(parameters.study_name, "trained_models")
    saved_model_dir = utils.create_dir(join(utils.get_root_path("saved_objects"), model_subdir))

    model_name = "LSTM-batch-128-CTS_UbiComp2020_DescType.RawData_SeqLen.ExtendEdge_lstm_stat_2000e-fold-2-10.pt"
    model_path = join(saved_model_dir, model_name)

    predicted_val = sample_val(test_data, model_path)

    print(f"Predicted Category is {predicted_val}.")
def main():
    config_dir = "config_files"
    config = StudyConfig(config_dir)

    # create a template of a configuration file with all the fields initialized to None
    config.create_config_file_template()

    # Build the object with variable definitions based on the specified configuration
    # file. It includes the data description and the run-parameter definitions
    # (independent of whether deep learning is used).
    parameters_firm = config.populate_study_parameters(
        "CTS_one_subj_firm.toml")
    data_firm = DataConstructor(parameters_firm)
    subject_dict_firm = data_firm.get_subject_dataset()
    new_subject_dict_firm = relabel_whole_dataset(subject_dict_firm, "Firm")

    parameters_soft = config.populate_study_parameters(
        "CTS_one_subj_soft.toml")
    data_soft = DataConstructor(parameters_soft)
    subject_dict_soft = data_soft.get_subject_dataset()
    new_subject_dict_soft = relabel_whole_dataset(subject_dict_soft, "Soft")

    parameters_variable = config.populate_study_parameters(
        "CTS_one_subj_variable.toml")
    data_variable = DataConstructor(parameters_variable)
    subject_dict_variable = data_variable.get_subject_dataset()
    new_subject_dict_variable = relabel_whole_dataset(subject_dict_variable,
                                                      "Variable")

    # the three configurations must agree on the FFT size and sampling frequency
    assert parameters_firm.nfft == parameters_soft.nfft == parameters_variable.nfft
    assert parameters_firm.sampling_freq == parameters_soft.sampling_freq == parameters_variable.sampling_freq

    # since the parameter sets agree on the relevant fields, any of them can stand in for all three
    parameters = parameters_firm
    all_subj_dict = {
        **new_subject_dict_firm,
        **new_subject_dict_soft,
        **new_subject_dict_variable
    }
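    # note: `{**a, **b, **c}` merges left to right, so duplicate subject keys from a
    # later dict would silently overwrite earlier ones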

    hash_matcher = HashMatcher(all_subj_dict, parameters)
Example #3
    # NOTE: the opening of this method is assumed; the original snippet is truncated above.
    def sum_signed_gradients(self, gradients):
        all_gradients = []
        for gradient in gradients:
            pos, neg = 0, 0
            for point in gradient:
                if point < 0:
                    neg = neg + point
                if point > 0:
                    pos = pos + point
            # one [positive sum, negative sum] pair per gradient
            all_gradients.append([pos, neg])

        # flatten into a single 1D feature vector
        gradient_sums = np.array(all_gradients).flatten()
        return gradient_sums


if __name__ == "__main__":
    print("Running feature_constructor module...")
    print("Is cuda available?", torch.cuda.is_available())

    config_dir = "config_files"
    config = StudyConfig(config_dir)

    # create a template of a configuration file with all the fields initialized to None
    config.create_config_file_template()
    parameters = config.populate_study_parameters("CTS_5taps_per_button.toml")

    # generating the data from files
    data = DataConstructor(parameters)
    subject_dict = data.get_subject_dataset()

    # define a category balancer (implementing the abstract CategoryBalancer)
    category_balancer = WithinSubjectOversampler()
    dataset_processor = StatDatasetProcessor(parameters, balancer=category_balancer)

    feature_constructor = BoTWFeatureConstructor(dataset_processor, parameters, feature_axis=2)
    dataset_desc_name = "CTS_firm_chunk_" + str(parameters.samples_per_chunk) + "_interval_" + str(
    @property
    def mult_attr(self) -> int:
        return self.__mult_attr

    def _produce_specific_features(self, subject_dataset: types.subj_dataset) -> Optional[types.subj_dataset]:
        # feature_dataset = self.descriptor_computer.dataset_descriptors
        feature_dataset = self.descriptor_computer.produce_dataset_descriptors(subject_dataset)
        return feature_dataset


if __name__ == "__main__":
    print("Running msbsd_feature_constructor module...")

    config_dir = "config_files"
    config = StudyConfig(config_dir)

    # create a template of a configuration file with all the fields initialized to None
    config.create_config_file_template()
    parameters = config.populate_study_parameters("CTS_Keyboard_simple.toml")

    # generating the data from files
    data = DataConstructor(parameters)
    subject_dict = data.get_subject_dataset()

    category_balancer = WithinSubjectOversampler()
    dataset_processor = StatDatasetProcessor(parameters, balancer=category_balancer)

    descriptor_computer = DescriptorComputer(DescType.MSD, subject_dict, parameters, normalize=True, extra_name="")
    feature_constructor = KeypointFeatureConstructor(parameters, descriptor_computer)
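    # presumable next step, mirroring the other examples; assumes KeypointFeatureConstructor
    # exposes the same produce_feature_dataset API as the other constructors:
    # feature_dataset = feature_constructor.produce_feature_dataset(subject_dict)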
Example #5
def main():
    parser = argparse.ArgumentParser(description='BioHCI arguments')
    parser.add_argument('--disable-cuda',
                        action='store_true',
                        help='Disable CUDA')
    parser.add_argument('--visualization',
                        action='store_true',
                        help='Generate plots to visualize the raw data')
    parser.add_argument('--verbose',
                        '-v',
                        action='store_true',
                        help='Display more details during the run')
    args = parser.parse_args()

    # check whether CUDA is available and enabled
    args.cuda = not args.disable_cuda and torch.cuda.is_available()
    print("Is cuda available?", torch.cuda.is_available())
    print("Is the option to use cuda set?", args.cuda)

    # for reproducible results
    seed = 0
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
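    # for fully reproducible GPU runs, cuDNN's nondeterministic kernels can also be
    # disabled (at some performance cost):
    # torch.backends.cudnn.deterministic = True
    # torch.backends.cudnn.benchmark = False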

    # printing style of numpy arrays
    np.set_printoptions(precision=3, suppress=True)

    # """
    config_dir = "config_files"
    config = StudyConfig(config_dir)

    # create a template of a configuration file with all the fields initialized to None
    config.create_config_file_template()

    # Build the object with variable definitions based on the specified configuration
    # file. It includes the data description and the run-parameter definitions
    # (independent of whether deep learning is used).
    # parameters = config.populate_study_parameters("CTS_UbiComp2020.toml")
    parameters = config.populate_study_parameters("CTS_4Electrodes.toml")
    print(parameters)

    # generating the data from files
    data = DataConstructor(parameters)
    cv_subject_dict = data.cv_subj_dataset
    # for subj_name, subj in cv_subject_dict.items():
    #     print(f"Subject name: {subj_name}")

    # unique_categories = list(set(subj.categories))
    # for unique_cat in unique_categories:
    #     for i, cat in enumerate(subj.categories):
    #         data_to_plot = []
    #         if unique_cat == cat:
    #             data = subj.data[i]
    #             data_to_plot.append(data)

    # x = np.arange(0, data.shape[0])
    # for i in range (0, data.shape[1]):
    #     feature = data[:, i]
    #     plt.plot(x, feature, label=str(i))
    # plt.legend()
    # plt.show()

    # print("")

    test_subject_dict = data.test_subj_dataset
    category_balancer = WithinSubjectOversampler()

    # define a data splitter object (to be used for setting aside a testing set, as well as for the train/validation split)
    # data_splitter = AcrossSubjectSplitter(cv_subject_dict)
    data_splitter = WithinSubjectSplitter(cv_subject_dict)
    cv_descriptor_computer = DescriptorComputer(DescType.RawData,
                                                cv_subject_dict,
                                                parameters,
                                                seq_len=SeqLen.ExtendEdge,
                                                extra_name="_100_samples_")
    feature_constructor = KeypointFeatureConstructor(parameters,
                                                     cv_descriptor_computer)

    # estimate the number of resulting features from the dataset shape; used below
    # as the input size of the network definition
    input_size = estimate_num_features(cv_subject_dict, feature_constructor)
    # input_size = input_size * 250
    dataset_categories = data.get_all_dataset_categories()

    assert parameters.neural_net is True
    button_learning_def = NeuralNetworkDefinition(
        input_size=input_size,
        output_size=len(dataset_categories),
        use_cuda=args.cuda)
    # learning analyser
    analyser = NNAnalyser(data_splitter, feature_constructor,
                          category_balancer, parameters, button_learning_def,
                          dataset_categories,
                          cv_descriptor_computer.dataset_desc_name)
    analyser.perform_cross_validation(cv_subject_dict)
    analyser.evaluate_all_models(test_subject_dict)
    analyser.close_logger()
    # """

    # cm_obj_name = "LSTM-batch-128-CTS_UbiComp2020_DescType.RawData_SeqLen.ExtendEdge_lstm_stat_2000e_test_confusion_matrix.pt"
    # cm_obj_path = "/home/dq38/remote_pycharm/BioHCI-Project/BioHCI/saved_objects/CTS_UbiComp2020/confusion_matrices/" + cm_obj_name
    # save_fig_path = "/home/dq38/remote_pycharm/BioHCI-Project/Results/CTS_UbiComp2020/learning_logs/LSTM-batch-128-CTS_UbiComp2020_DescType.RawData_SeqLen.ExtendEdge_lstm_stat_2000e_test_confusion_matrix.pdf"
    # generate_confusion_matrix_fig_from_obj_name(cm_obj_path, save_fig_path)

    print("\nEnd of main program.")
    # NOTE: the enclosing method is assumed; the original snippet is truncated above.
    def combine_label_figures(self, label_list, path_files, save_dir):
        for label in label_list:
            img_sub_list = []
            pattern = "_" + str(label) + ".png"
            # collect every image whose filename ends with this label's pattern
            for _, files in path_files.items():
                for f in files:
                    fname = os.path.basename(f)
                    if fname.endswith(pattern):
                        img_sub_list.append(f)
            fig_name = os.path.join(save_dir,
                                    "p1_all_cond_" + str(label) + ".png")
            self.create_figure(img_sub_list, fig_name, 1, 3, save_dir)


if __name__ == "__main__":
    config_dir = "config_files"
    config = StudyConfig(config_dir)

    # create a template of a configuration file with all the fields initialized to None
    config.create_config_file_template()

    # Build the object with variable definitions based on the specified configuration
    # file. It includes the data description and the run-parameter definitions
    # (independent of whether deep learning is used).
    # parameters = config.populate_study_parameters("CTS_one_subj_variable.toml")
    parameters = config.populate_study_parameters("CTS_4Electrodes.toml")

    data = DataConstructor(parameters)
    cv_subject_dict = data.cv_subj_dataset
    # build a visualizer object for the class to plot the dataset in different forms
    # we use the subject dataset as a source (a dictionary subj_name -> subj data split in categories)
    saveplot_dir_path = "Results/" + parameters.study_name + "/dataset plots"
    raw_data_vis = RawDataVisualizer(cv_subject_dict, parameters,
                                     saveplot_dir_path)  # remaining arguments are assumed; the original snippet is truncated here
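
Example #7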
    # NOTE: the method name and signature are assumed; the original snippet is truncated above.
    def generate_heatmap_fig_from_name(self, heatmap_name: str) -> None:
        """Converts a pickled heatmap object into a figure.

        Args:
            heatmap_name: the name of the pickled heatmap object to convert into a figure

        """
        assert heatmap_name.endswith(".pkl")
        path = join(self.dataset_eval_dir, heatmap_name)
        if os.path.exists(path):
            with open(path, "rb") as openfile:
                heatmap = pickle.load(openfile)
                self.generate_heatmap_fig_from_obj(heatmap)


if __name__ == "__main__":
    np.set_printoptions(threshold=10000, linewidth=100000, precision=1)
    config_dir = "config_files"
    config = StudyConfig(config_dir)

    # create a template of a configuration file with all the fields initialized to None
    config.create_config_file_template()
    # parameters = config.populate_study_parameters("CTS_5taps_per_button.toml")
    parameters = config.populate_study_parameters("CTS_EICS2020.toml")

    # generating the data from files
    data = DataConstructor(parameters)
    subject_dataset = data.get_subject_dataset()

    # get all the categories of the dataset
    all_dataset_categories = data.get_all_dataset_categories()

    # determine the shape of the array
    heatmap_shape = (len(set(all_dataset_categories)), len(set(all_dataset_categories)))
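    # an empty matrix with one row and column per category could then be allocated,
    # e.g. heatmap = np.zeros(heatmap_shape)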
Example #8
    # NOTE: the method name and signature are assumed; the original snippet is truncated above.
    def _log_channel_diff(self, mean_array: np.ndarray, feature_axis: int) -> np.ndarray:
        assert mean_array.shape[-1] == 2
        # log(a) - log(b) == log(a / b): the log-ratio of the two channels
        diff = np.log(mean_array[:, :, 0]) - np.log(mean_array[:, :, 1])

        diff = np.expand_dims(diff, axis=feature_axis)
        return diff

    @property
    def mult_attr(self) -> int:
        return self.__mult_attr


if __name__ == "__main__":
    print("Running feature_constructor module...")

    config_dir = "config_files"
    config = StudyConfig(config_dir)

    # create a template of a configuration file with all the fields initialized to None
    config.create_config_file_template()
    parameters = config.populate_study_parameters("EEG_Workload.toml")

    # generating the data from files
    data = DataConstructor(parameters)
    subject_dict = data.get_subject_dataset()

    category_balancer = WithinSubjectOversampler()
    dataset_processor = StatDatasetProcessor(parameters, balancer=category_balancer)

    feature_constructor = StatFeatureConstructor(parameters, dataset_processor)

    feature_dataset = feature_constructor.produce_feature_dataset(subject_dict)
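
    # quick inspection of the result; assumes the feature dataset maps subject names to
    # subject objects with `data` and `categories` attributes, as used in earlier examples
    for subj_name, subj in feature_dataset.items():
        print(f"{subj_name}: {len(subj.data)} instances, {len(set(subj.categories))} categories")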