Example #1
    def test_run(self):
        method = LogisticRegression()
        dataset = RepertoireDataset()
        dataset.encoded_data = EncodedData(examples=np.array([[1, 2, 3],
                                                              [2, 3, 4],
                                                              [1, 2, 3],
                                                              [2, 3, 4],
                                                              [1, 2, 3],
                                                              [2, 3, 4]]),
                                           labels={
                                               "l1": [1, 0, 1, 0, 1, 0],
                                               "l2": [0, 1, 0, 1, 0, 1]
                                           },
                                           feature_names=["f1", "f2", "f3"])

        path = EnvironmentSettings.root_path + "test/tmp/mlmethodtrainer/"

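        # train via MLMethodTrainer with 2-fold cross-validation for model selection;
        # training predictions and ML details are written to the paths given below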
        method = MLMethodTrainer.run(
            MLMethodTrainerParams(
                result_path=path,
                dataset=dataset,
                label="l1",
                method=method,
                model_selection_n_folds=2,
                model_selection_cv=True,
                cores_for_training=1,
                train_predictions_path=f"{path}predictions.csv",
                ml_details_path=f"{path}details.yaml",
                optimization_metric="balanced_accuracy"))

        method.predict(EncodedData(np.array([1, 2, 3]).reshape(1, -1)), "l1")
        self.assertTrue(os.path.isfile(f"{path}predictions.csv"))
        self.assertTrue(os.path.isfile(f"{path}details.yaml"))

        shutil.rmtree(path)
Example #2
    def test_predict(self):
        x = np.array([[1, 0, 0], [0, 1, 1], [1, 1, 1], [0, 1, 1]])
        y = {"test1": [1, 0, 2, 0], "test2": [1, 0, 2, 0]}

        lr = LogisticRegression()
        lr.fit(EncodedData(x, y), "test2")

        test_x = np.array([[0, 1, 0], [1, 0, 0]])
        y = lr.predict(EncodedData(test_x), "test2")

        self.assertTrue(len(y["test2"]) == 2)
        self.assertTrue(y["test2"][1] in [0, 1, 2])
Example #3
    def test_predict(self):
        x = np.array([[1, 0, 0], [0, 1, 1], [1, 1, 1], [0, 1, 1]])
        y = {"test1": [1, 0, 2, 0], "test2": [1, 0, 2, 0]}

        knn = KNN(parameters={"n_neighbors": 2})
        knn.fit(EncodedData(sparse.csr_matrix(x), labels=y), "test2")

        test_x = np.array([[0, 1, 0], [1, 0, 0]])
        y = knn.predict(EncodedData(sparse.csr_matrix(test_x)), "test2")

        self.assertTrue(len(y["test2"]) == 2)
        self.assertTrue(y["test2"][1] in [0, 1, 2])
Example #4
    def test_predict(self):
        x = np.array([[1, 0, 0], [0, 1, 1], [1, 1, 1], [0, 1, 1]])
        y = {"test": np.array([1, 0, 2, 0])}

        svm = SVM()
        svm.fit(EncodedData(x, y), "test")

        test_x = np.array([[0, 1, 0], [1, 0, 0]])
        y = svm.predict(EncodedData(test_x), 'test')["test"]

        self.assertTrue(len(y) == 2)
        self.assertTrue(y[0] in [0, 1, 2])
        self.assertTrue(y[1] in [0, 1, 2])
Example #5
    def test_predict(self):
        x = np.array([[1, 0, 0], [0, 1, 1], [1, 1, 1], [0, 1, 1]])
        y = {"default": np.array([1, 0, 2, 0])}

        rfc = RandomForestClassifier()
        rfc.fit(EncodedData(x, y), 'default')

        test_x = np.array([[0, 1, 0], [1, 0, 0]])
        y = rfc.predict(EncodedData(test_x), 'default')["default"]

        self.assertTrue(len(y) == 2)
        self.assertTrue(y[0] in [0, 1, 2])
        self.assertTrue(y[1] in [0, 1, 2])
Example #6
    def _encode_data(self, dataset, params: EncoderParams) -> EncodedData:
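        # three cached steps: encode per-example k-mer frequencies (STEP_ENCODED),
        # vectorize them into a feature matrix (STEP_VECTORIZED), then normalize (STEP_NORMALIZED)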
        encoded_example_list, example_ids, encoded_labels, feature_annotation_names = CacheHandler.memo_by_params(
            self._prepare_caching_params(dataset, params,
                                         KmerFrequencyEncoder.STEP_ENCODED),
            lambda: self._encode_examples(dataset, params))

        vectorized_examples, feature_names = CacheHandler.memo_by_params(
            self._prepare_caching_params(dataset, params,
                                         KmerFrequencyEncoder.STEP_VECTORIZED),
            lambda: self._vectorize_encoded(examples=encoded_example_list,
                                            params=params))

        normalized_examples = CacheHandler.memo_by_params(
            self._prepare_caching_params(dataset, params,
                                         KmerFrequencyEncoder.STEP_NORMALIZED),
            lambda: FeatureScaler.normalize(vectorized_examples,
                                            self.normalization_type))

        if self.scale_to_unit_variance:
            examples = self.scale_normalized(params, dataset,
                                             normalized_examples)
        else:
            examples = normalized_examples

        feature_annotations = self._get_feature_annotations(
            feature_names, feature_annotation_names)

        encoded_data = EncodedData(examples=examples,
                                   labels=encoded_labels,
                                   feature_names=feature_names,
                                   example_ids=example_ids,
                                   feature_annotations=feature_annotations,
                                   encoding=KmerFrequencyEncoder.__name__)

        return encoded_data
Example #7
    def test_fit_by_cross_validation(self):
        x = EncodedData(sparse.csr_matrix(
            np.array([[1, 0, 0], [0, 1, 1], [1, 1, 1], [0, 1, 1], [1, 0, 0], [0, 1, 1], [1, 1, 1], [0, 1, 1]])),
            labels={"t1": [1, 0, 2, 0, 1, 0, 2, 0], "t2": [1, 0, 2, 0, 1, 0, 2, 0]})

        rfc = RandomForestClassifier()
        rfc.fit_by_cross_validation(x, number_of_splits=2, label_name="t2")
Example #8
    def annotate_repertoires(dataset: RepertoireDataset, criteria: dict, name: str = "annotation"):
        """
        Takes an encoded dataset and adds a new label to its encoded data, with boolean values
        indicating whether each repertoire matched the specified criteria.
        """
        dataset = copy.deepcopy(dataset)

        data = pd.DataFrame(dataset.encoded_data.labels)

        matcher = CriteriaMatcher()
        results = matcher.match(criteria=criteria, data=data)

        labels = dataset.encoded_data.labels
        labels[name] = np.array(results)

        encoded = EncodedData(
            examples=dataset.encoded_data.examples,
            labels=labels,
            example_ids=dataset.encoded_data.example_ids,
            feature_names=dataset.encoded_data.feature_names,
            feature_annotations=dataset.encoded_data.feature_annotations
        )

        result = RepertoireDataset(
            params=dataset.params,
            encoded_data=encoded,
            repertoires=dataset.repertoires,
            identifier=dataset.identifier,
            metadata_file=dataset.metadata_file
        )

        return result
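
A short usage sketch. The criteria layout below (OperationType/DataType keys) is an assumption about CriteriaMatcher's schema, so treat it as illustrative only:

    # hypothetical criteria: flag repertoires whose "l1" label value exceeds 0
    # (the exact dict schema is defined by CriteriaMatcher; the keys here are assumed)
    criteria = {"type": OperationType.GREATER_THAN,
                "value": {"type": DataType.COLUMN, "name": "l1"},
                "threshold": 0}
    annotated = annotate_repertoires(dataset, criteria, name="l1_positive")
    # annotated.encoded_data.labels["l1_positive"] holds one boolean per repertoire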
Example #9
    def make_encoded_data(self, path):
        metadata_filepath = f"{path}/metadata.tsv"

        rep_ids = [f"REP{i}" for i in range(10)]
        status_label = [chr((i % 2) + 65) for i in range(10)]  # alternating "A"/"B" strings

        metadata = pd.DataFrame({"ID": rep_ids, "status": status_label})
        metadata.to_csv(sep="\t", index=False, path_or_buf=metadata_filepath)

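        # write one TSV per repertoire: 100 random amino acid sequences with random template counts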
        for rep_id in rep_ids:
            repertoire_seqs = [self.get_random_sequence() for i in range(100)]

            repertoire_data = pd.DataFrame({
                "amino_acid": repertoire_seqs,
                "templates": [rn.choice(range(1, 1000)) for i in range(100)]
            })

            repertoire_data.to_csv(sep="\t",
                                   index=False,
                                   path_or_buf=f"{path}/{rep_id}.tsv")

        return EncodedData(examples=None,
                           labels={"status": status_label},
                           example_ids=rep_ids,
                           encoding=DeepRCEncoder.__name__,
                           info={
                               "metadata_filepath": metadata_filepath,
                               "max_sequence_length": 30
                           })
Example #10
    def _encode_data(self, dataset: SequenceDataset, params: EncoderParams):
        sequence_objs = [obj for obj in dataset.get_data(params.pool_size)]

        sequences = [obj.get_sequence() for obj in sequence_objs]
        example_ids = dataset.get_example_ids()
        max_seq_len = max([len(seq) for seq in sequences])
        labels = self._get_labels(sequence_objs,
                                  params) if params.encode_labels else None

        examples = self._encode_sequence_list(
            sequences,
            pad_n_sequences=len(sequence_objs),
            pad_sequence_len=max_seq_len)

        feature_names = self._get_feature_names(max_seq_len)

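        # optionally flatten from 3D (examples x positions x one-hot channels) to 2D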
        if self.flatten:
            examples = examples.reshape(
                (len(sequence_objs),
                 max_seq_len * len(self.onehot_dimensions)))
            feature_names = [
                item for sublist in feature_names for item in sublist
            ]

        encoded_data = EncodedData(examples=examples,
                                   labels=labels,
                                   example_ids=example_ids,
                                   feature_names=feature_names,
                                   encoding=OneHotEncoder.__name__)

        return encoded_data
Example #11
    def encode(self, dataset, params: EncoderParams):

        examples, keys, labels = self._encode_examples(dataset, params)
        examples, kmer_keys = self._vectorize_examples(examples, params, keys)

        # normalize to zero mean and unit variance only features coming from Atchley factors
        tmp_examples = examples[:, :, :-1] if not self.normalize_all_features else examples
        flattened_vectorized_examples = tmp_examples.reshape(examples.shape[0] * examples.shape[1], -1)
        if self.scaler_path is None:
            self.scaler_path = params.result_path + "atchley_factor_scaler.pickle"
        scaled_examples = FeatureScaler.standard_scale(self.scaler_path, flattened_vectorized_examples)
        if hasattr(scaled_examples, "todense"):
            scaled_examples = scaled_examples.todense()

        if self.normalize_all_features:
            examples = np.array(scaled_examples).reshape(examples.shape[0], len(kmer_keys), -1)
        else:
            examples[:, :, :-1] = np.array(scaled_examples).reshape(examples.shape[0], len(kmer_keys), -1)

        # swap axes to get examples x atchley_factors x kmers dimensions
        examples = np.swapaxes(examples, 1, 2)

        feature_names = [f"atchley_factor_{j}_aa_{i}" for i in range(1, self.k+1) for j in range(1, Util.ATCHLEY_FACTOR_COUNT+1)] + ["abundance"]
        encoded_data = EncodedData(examples=examples, example_ids=dataset.get_example_ids(), feature_names=feature_names, labels=labels,
                                   encoding=AtchleyKmerEncoder.__name__, info={"kmer_keys": kmer_keys})

        encoded_dataset = dataset.clone()
        encoded_dataset.encoded_data = encoded_data

        return encoded_dataset
Example #12
def load_encoded_data(labels_path: str, encoding_details_path: str, design_matrix_path: str) -> EncodedData:
    """
    Utility function for adding ML methods: if data was encoded using immuneML through the YAML
    specification and exported with DesignMatrixExporter, this function imports the data and returns
    it in the format in which it would be available had the ML method been called from within immuneML.

    Args:

        labels_path (str): path to labels file as exported by the DesignMatrixExporter
        encoding_details_path (str): path to the details file, where example_ids, feature_names and the encoding name will be imported from
        design_matrix_path (str): path to csv or npy file where the design matrix is stored

    Returns:

        EncodedData object as it would be provided to an ML method within immuneML

    """
    # read the data from these files
    examples = pd.read_csv(design_matrix_path).values if design_matrix_path.endswith(".csv") \
        else np.load(design_matrix_path, allow_pickle=True)
    labels = pd.read_csv(labels_path).to_dict('list')

    with open(encoding_details_path, "r") as file:
        encoding_details = yaml.safe_load(file)

    # create an EncodedData object which can be used as an input argument for the fit or predict functions
    encoded_data = EncodedData(examples=examples, labels=labels,
                               example_ids=encoding_details['example_ids'],
                               feature_names=encoding_details['feature_names'],
                               encoding=encoding_details['encoding'])

    return encoded_data
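
A usage sketch under assumed file names; substitute the paths that DesignMatrixExporter actually produced:

    # hypothetical paths to the exported files
    encoded = load_encoded_data(labels_path="result/labels.csv",
                                encoding_details_path="result/encoding_details.yaml",
                                design_matrix_path="result/design_matrix.csv")
    # the returned EncodedData can be passed directly to an ML method,
    # e.g. method.fit(encoded, "my_label")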
Example #13
    def _create_dummy_encoded_data(self, path):
        n_subjects = 8
        n_features = 300
        n_timepoints = 2
        n_examples = n_subjects * n_timepoints
        diseased_subjects = range(0, 4)

        subjects = [subject for subject in range(n_subjects) for timepoint in range(n_timepoints)]
        timepoints = [timepoint for subject in range(n_subjects) for timepoint in range(n_timepoints)]
        disease_statuses = [subject in diseased_subjects for subject in subjects]

        kmers = [''.join(random.choices(string.ascii_uppercase, k=3)) for i in range(n_features)]

        encoded_data = {
            'examples': sparse.csr_matrix(
                np.random.normal(50, 10, n_examples * n_features).reshape((n_examples, n_features))),
            'example_ids': [i for i in range(n_examples)],
            'labels': {
                "subject_id": np.array([f"subject {i}" for i in subjects]),
                "disease_status": np.array([f"disease: {i}" for i in disease_statuses]),
                "timepoint": np.array([f"timepoint {i}" for i in timepoints])
            },
            'feature_names': kmers,
            'feature_annotations': pd.DataFrame({
                "sequence": kmers
            }),
            'encoding': "random"
        }

        dataset = RepertoireDataset(encoded_data=EncodedData(**encoded_data))

        return dataset
Example #14
    def annotate_features(dataset: RepertoireDataset, criteria: dict, name: str = "annotation"):
        """
        Takes an encoded dataset and adds a new column to its feature_annotations, with boolean
        values indicating whether each feature matched the specified criteria.
        """
        dataset = copy.deepcopy(dataset)

        feature_annotations = dataset.encoded_data.feature_annotations

        matcher = CriteriaMatcher()
        results = matcher.match(criteria=criteria, data=feature_annotations)

        feature_annotations[name] = results

        encoded = EncodedData(
            examples=dataset.encoded_data.examples,
            labels=dataset.encoded_data.labels,
            example_ids=dataset.encoded_data.example_ids,
            feature_names=dataset.encoded_data.feature_names,
            feature_annotations=feature_annotations
        )

        result = RepertoireDataset(
            params=dataset.params,
            encoded_data=encoded,
            repertoires=dataset.get_data(),
            identifier=dataset.identifier,
            metadata_file=dataset.metadata_file
        )

        return result
Example #15
    def test_generate(self):
        path = EnvironmentSettings.tmp_test_path + "relevant_sequence_exporter/"
        PathBuilder.build(path)

        df = pd.DataFrame({
            "v_genes": ["TRBV1-1", "TRBV1-1"],
            'j_genes': ["TRBJ1-1", "TRBJ1-2"],
            "sequence_aas": ['ACCF', "EEFG"]
        })
        df.to_csv(path + 'sequences.csv', index=False)

        dataset = RandomDatasetGenerator.generate_repertoire_dataset(
            2, {2: 1}, {4: 1}, {}, path + "data/")
        dataset.encoded_data = EncodedData(
            examples=None,
            info={'relevant_sequence_path': path + 'sequences.csv'},
            encoding="SequenceAbundanceEncoder")

        report_result = RelevantSequenceExporter(dataset, path + "result/",
                                                 'somename').generate_report()

        self.assertEqual(1, len(report_result.output_tables))
        self.assertTrue(os.path.isfile(report_result.output_tables[0].path))

        self.assertTrue(
            all(col in ["v_call", "j_call", "cdr3_aa"] for col in pd.read_csv(
                report_result.output_tables[0].path).columns))

        shutil.rmtree(path)
Example #16
    def filter_features(dataset: RepertoireDataset, criteria: dict):
        """
        Takes an encoded dataset and filters features based on a given set of criteria. Only features meeting
        these criteria will be retained in the new dataset object.
        """
        dataset = copy.deepcopy(dataset)

        feature_annotations = dataset.encoded_data.feature_annotations

        matcher = CriteriaMatcher()
        results = matcher.match(criteria=criteria, data=feature_annotations)
        indices = np.where(np.array(results))[0]

        feature_annotations = feature_annotations.iloc[indices, :]
        examples = dataset.encoded_data.examples[:, indices]
        repertoires = dataset.repertoires
        feature_names = [dataset.encoded_data.feature_names[i] for i in indices]

        encoded = EncodedData(
            examples=examples,
            labels=dataset.encoded_data.labels,
            example_ids=dataset.encoded_data.example_ids,
            feature_names=feature_names,
            feature_annotations=feature_annotations
        )

        result = RepertoireDataset(
            params=dataset.params,
            encoded_data=encoded,
            repertoires=repertoires,
            identifier=dataset.identifier,
            metadata_file=dataset.metadata_file
        )

        return result
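
As with annotate_repertoires above, a hedged sketch; the IN-style criteria form is an assumption about CriteriaMatcher's schema:

    # hypothetical criteria: keep features whose "sequence" annotation is in a fixed set
    criteria = {"type": OperationType.IN,
                "value": {"type": DataType.COLUMN, "name": "sequence"},
                "allowed_values": ["AAA", "CCC"]}
    filtered = filter_features(dataset, criteria)
    # examples, feature_names and feature_annotations are all subset consistently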
Example #17
    def _prepare_data(self):
        x = np.array([[0., 1., 2., 3.],
                      [1., 0., 1., 2.],
                      [2., 1., 0., 1.],
                      [3., 2., 1., 0.]])
        y = {"test": np.array([0, 0, 1, 1])}

        return x, y, EncodedData(examples=x, labels=y)
Example #18
    def train_classifier(self):
        classifier = ProbabilisticBinaryClassifier(100, 0.1)

        X = np.array([[3, 4], [1, 7], [5, 7], [3, 8]])
        y = {"cmv": [True, False, True, False]}

        classifier.fit(EncodedData(X, y), "cmv")

        return classifier
Example #19
    def test_fit_by_cross_validation(self):
        x = EncodedData(np.array([[1, 0, 0], [0, 1, 1], [1, 1, 1], [0, 1, 1],
                                  [1, 0, 0], [0, 1, 1], [1, 1, 1], [0, 1, 1]]),
                        labels={
                            "test1": [1, 0, 2, 0, 1, 0, 2, 0],
                            "test2": [1, 0, 2, 0, 1, 0, 2, 0]
                        })

        knn = KNN(parameters={"n_neighbors": 2})
        knn.fit_by_cross_validation(x, number_of_splits=2, label_name="test1")
Example #20
    def test_fit(self):

        classifier = self.train_classifier()

        predictions = classifier.predict(
            EncodedData(np.array([[6, 7], [1, 6]])), "cmv")
        proba_predictions = classifier.predict_proba(
            EncodedData(np.array([[6, 7], [1, 6]])), "cmv")

        labels = classifier.get_classes_for_label("cmv")

        self.assertEqual([True, False], predictions["cmv"])
        self.assertTrue(
            proba_predictions["cmv"][0, 1] > proba_predictions["cmv"][0, 0])
        self.assertTrue(
            proba_predictions["cmv"][1, 0] > proba_predictions["cmv"][1, 1])
        self.assertTrue((proba_predictions["cmv"] <= 1.0).all()
                        and (proba_predictions["cmv"] >= 0.0).all())
        self.assertTrue(isinstance(labels, np.ndarray))
Example #21
    def test_fit_by_cross_validation(self):
        x = EncodedData(
            np.array([[1, 0, 0], [0, 1, 1], [1, 1, 1], [0, 1, 1], [1, 0, 0],
                      [0, 1, 1], [1, 1, 1], [0, 1, 1]]), {
                          "t1": [1, 0, 2, 0, 1, 0, 2, 0],
                          "t2": [1, 0, 2, 0, 1, 0, 2, 0]
                      })

        svm = SVM()
        svm.fit_by_cross_validation(x, number_of_splits=2, label_name="t1")
Example #22
    def test_fit_by_cross_validation(self):
        x = EncodedData(
            np.array([[1, 0, 0], [0, 1, 1], [1, 1, 1], [0, 1, 1], [1, 0, 0],
                      [0, 1, 1], [1, 1, 1], [0, 1, 1]]), {
                          "test1": [1, 0, 2, 0, 1, 0, 2, 0],
                          "test2": [1, 0, 2, 0, 1, 0, 2, 0]
                      })

        lr = LogisticRegression()
        lr.fit_by_cross_validation(x, number_of_splits=2, label_name="test2")
Example #23
    def _encode_data(self, dataset: ReceptorDataset, params: EncoderParams):
        receptor_objs = [receptor for receptor in dataset.get_data()]
        sequences = [[getattr(obj, chain).get_sequence() for chain in obj.get_chains()]
                     for obj in receptor_objs]
        first_chain_seqs, second_chain_seqs = zip(*sequences)

        max_seq_len = max(max([len(seq) for seq in first_chain_seqs]),
                          max([len(seq) for seq in second_chain_seqs]))

        example_ids = dataset.get_example_ids()
        labels = self._get_labels(receptor_objs,
                                  params) if params.encode_labels else None

        examples_first_chain = self._encode_sequence_list(
            first_chain_seqs,
            pad_n_sequences=len(receptor_objs),
            pad_sequence_len=max_seq_len)
        examples_second_chain = self._encode_sequence_list(
            second_chain_seqs,
            pad_n_sequences=len(receptor_objs),
            pad_sequence_len=max_seq_len)

        examples = np.stack((examples_first_chain, examples_second_chain),
                            axis=1)

        feature_names = self._get_feature_names(max_seq_len,
                                                receptor_objs[0].get_chains())

        if self.flatten:
            examples = examples.reshape(
                (len(receptor_objs),
                 2 * max_seq_len * len(self.onehot_dimensions)))
            feature_names = [
                item for sublist in feature_names for subsublist in sublist
                for item in subsublist
            ]

        encoded_data = EncodedData(
            examples=examples,
            labels=labels,
            example_ids=example_ids,
            feature_names=feature_names,
            encoding=OneHotEncoder.__name__,
            info={
                # chain names are recorded only if all receptors share the same chain pair
                "chain_names": receptor_objs[0].get_chains()
                if all(receptor_obj.get_chains() == receptor_objs[0].get_chains()
                       for receptor_obj in receptor_objs) else None
            })

        return encoded_data
Example #24
    def _make_encoded_data(self, encoded_data, indices):
        # select the requested examples as float tensors after reordering the last two axes;
        # labels are mapped to binary floats (1.0 where the label equals class_mapping[1])
        examples = np.swapaxes(encoded_data.examples, 2, 3)
        return EncodedData(
            examples=torch.from_numpy(examples[indices]).float(),
            labels={
                label: torch.from_numpy(
                    np.array([encoded_data.labels[label][i] for i in indices])
                    == self.class_mapping[1]).float()
                for label in encoded_data.labels.keys()
            },
            example_ids=[encoded_data.example_ids[i] for i in indices],
            feature_names=encoded_data.feature_names,
            feature_annotations=encoded_data.feature_annotations,
            encoding=encoded_data.encoding)
Example #25
    def _encode_data(self, dataset: RepertoireDataset, params: EncoderParams):
        labels = params.label_config.get_labels_by_name()

        assert len(labels) == 1, \
            "SequenceAbundanceEncoder: this encoding works only for single label."

        examples = self._calculate_sequence_abundance(dataset, self.comparison_data, labels[0], params)

        encoded_data = EncodedData(examples, dataset.get_metadata([labels[0]]) if params.encode_labels else None, dataset.get_repertoire_ids(),
                                   [SequenceAbundanceEncoder.RELEVANT_SEQUENCE_ABUNDANCE, SequenceAbundanceEncoder.TOTAL_SEQUENCE_ABUNDANCE],
                                   encoding=SequenceAbundanceEncoder.__name__, info={'relevant_sequence_path': self.relevant_sequence_csv_path})

        encoded_dataset = RepertoireDataset(params=dataset.params, encoded_data=encoded_data, repertoires=dataset.repertoires)

        return encoded_dataset
Example #26
    def encode(self, dataset, params: EncoderParams) -> RepertoireDataset:

        train_repertoire_ids = EncoderHelper.prepare_training_ids(
            dataset, params)
        distance_matrix = self.build_distance_matrix(dataset, params,
                                                     train_repertoire_ids)
        labels = self.build_labels(dataset,
                                   params) if params.encode_labels else None

        encoded_dataset = dataset.clone()
        encoded_dataset.encoded_data = EncodedData(
            examples=distance_matrix,
            labels=labels,
            example_ids=distance_matrix.index.values,
            encoding=DistanceEncoder.__name__)

        return encoded_dataset
Example #27
    def _encode_sequence_count(self, dataset: RepertoireDataset, comparison_data: ComparisonData, label: str, params: EncoderParams) -> EncodedData:
        sequence_p_values_indices, indices_path, relevant_sequences_path = SequenceFilterHelper.get_relevant_sequences(
            dataset, params, comparison_data, label, self.p_value_threshold,
            self.comparison_attributes, self.relevant_indices_path)
        if self.relevant_indices_path is None:
            self.relevant_indices_path = indices_path
        if self.relevant_sequence_csv_path is None:
            self.relevant_sequence_csv_path = relevant_sequences_path

        count_matrix = self._build_count_matrix(comparison_data, dataset.get_repertoire_ids(), sequence_p_values_indices)
        feature_names = comparison_data.get_item_names()[sequence_p_values_indices]

        encoded_data = EncodedData(count_matrix, dataset.get_metadata([label]) if params.encode_labels else None,
                                   dataset.get_repertoire_ids(),
                                   feature_names,
                                   encoding=SequenceCountEncoder.__name__, info={'relevant_sequence_path': self.relevant_sequence_csv_path})

        return encoded_data
Example #28
    def _encode_new_dataset(self, dataset, params: EncoderParams):
        encoded_dataset = RepertoireDataset(repertoires=dataset.repertoires, params=dataset.params,
                                            metadata_file=dataset.metadata_file)
        encoded_repertoires, labels = self._encode_repertoires(dataset, params)

        feature_annotations = self._get_feature_info()

        encoded_dataset.add_encoded_data(EncodedData(
            examples=encoded_repertoires,
            labels=labels,
            feature_names=list(feature_annotations["sequence_id"]),
            feature_annotations=feature_annotations,
            example_ids=[repertoire.identifier for repertoire in dataset.get_data()],
            encoding=MatchedSequencesEncoder.__name__
        ))

        return encoded_dataset
Example #29
    def test_store(self):
        x = np.array([[1, 0, 0], [0, 1, 1], [1, 1, 1], [0, 1, 1]])
        y = {"default": np.array([1, 0, 2, 0])}

        knn = KNN()
        knn.fit(EncodedData(sparse.csr_matrix(x), y), "default")

        path = EnvironmentSettings.root_path + "test/tmp/knn/"

        knn.store(path, ["f1", "f2", "f3"])
        self.assertTrue(os.path.isfile(path + "knn.pickle"))

        with open(path + "knn.pickle", "rb") as file:
            knn2 = pickle.load(file)

        self.assertTrue(isinstance(knn2["default"], KNeighborsClassifier))

        shutil.rmtree(path)
Example #30
    def test_store(self):
        x = np.array([[1, 0, 0], [0, 1, 1], [1, 1, 1], [0, 1, 1]])
        y = {"default": np.array([1, 0, 2, 0])}

        lr = LogisticRegression()
        lr.fit(EncodedData(x, y), 'default')

        path = EnvironmentSettings.root_path + "test/tmp/lr/"

        lr.store(path, ["f1", "f2", "f3"])
        self.assertTrue(os.path.isfile(path + "logistic_regression.pickle"))

        with open(path + "logistic_regression.pickle", "rb") as file:
            lr2 = pickle.load(file)

        self.assertTrue(isinstance(lr2["default"], SklearnLogisticRegression))

        shutil.rmtree(path)