Example #1
    def test_fit(self):

        classifier = self.train_classifier()

        predictions = classifier.predict(EncodedData(np.array([[6, 7], [1, 6]])), Label("cmv"))
        proba_predictions = classifier.predict_proba(EncodedData(np.array([[6, 7], [1, 6]])), Label("cmv"))

        self.assertEqual([True, False], predictions["cmv"])
        self.assertTrue(proba_predictions["cmv"][0, 1] > proba_predictions["cmv"][0, 0])
        self.assertTrue(proba_predictions["cmv"][1, 0] > proba_predictions["cmv"][1, 1])
        self.assertTrue((proba_predictions["cmv"] <= 1.0).all() and (proba_predictions["cmv"] >= 0.0).all())
Example #2
    def test_predict(self):
        x = np.array([[1, 0, 0], [0, 1, 1], [1, 1, 1], [0, 1, 1]])
        y = {"test1": [1, 0, 2, 0], "test2": [1, 0, 2, 0]}

        lr = LogisticRegression()
        lr.fit(EncodedData(x, y), Label("test2"))

        test_x = np.array([[0, 1, 0], [1, 0, 0]])
        y = lr.predict(EncodedData(test_x), Label("test2"))

        self.assertTrue(len(y["test2"]) == 2)
        self.assertTrue(y["test2"][1] in [0, 1, 2])
Example #3
    def test_predict(self):
        x = np.array([[1, 0, 0], [0, 1, 1], [1, 1, 1], [0, 1, 1]])
        y = {"test1": [1, 0, 2, 0], "test2": [1, 0, 2, 0]}

        knn = KNN(parameters={"n_neighbors": 2})
        knn.fit(EncodedData(sparse.csr_matrix(x), labels=y), "test2")

        test_x = np.array([[0, 1, 0], [1, 0, 0]])
        y = knn.predict(EncodedData(sparse.csr_matrix(test_x)), "test2")

        self.assertTrue(len(y["test2"]) == 2)
        self.assertTrue(y["test2"][1] in [0, 1, 2])
Example #4
    def test_predict(self):
        x = np.array([[1, 0, 0], [0, 1, 1], [1, 1, 1], [0, 1, 1]])
        y = {"default": np.array([1, 0, 2, 0])}

        rfc = RandomForestClassifier()
        rfc.fit(EncodedData(x, y), Label("default"))

        test_x = np.array([[0, 1, 0], [1, 0, 0]])
        y = rfc.predict(EncodedData(test_x), Label("default"))["default"]

        self.assertTrue(len(y) == 2)
        self.assertTrue(y[0] in [0, 1, 2])
        self.assertTrue(y[1] in [0, 1, 2])
Example #5
    def test_predict(self):
        x = np.array([[1, 0, 0], [0, 1, 1], [1, 1, 1], [0, 1, 1]])
        y = {"test": np.array([1, 0, 2, 0])}

        svm = SVM()
        svm.fit(EncodedData(x, y), "test")

        test_x = np.array([[0, 1, 0], [1, 0, 0]])
        y = svm.predict(EncodedData(test_x), "test")["test"]

        self.assertTrue(len(y) == 2)
        self.assertTrue(y[0] in [0, 1, 2])
        self.assertTrue(y[1] in [0, 1, 2])
Example #6
    def _encode_new_dataset(self, dataset, params: EncoderParams):
        encoded_dataset = RepertoireDataset(
            repertoires=dataset.repertoires,
            labels=dataset.labels,
            metadata_file=dataset.metadata_file)

        feature_annotations = self._get_feature_info()
        encoded_repertoires, labels, example_ids = self._encode_repertoires(
            dataset, params)

        encoded_dataset.add_encoded_data(
            EncodedData(
                # examples contains a np.ndarray with counts
                examples=encoded_repertoires,
                # example_ids contains a list of repertoire identifiers
                example_ids=example_ids,
                # feature_names contains a list of reference receptor identifiers
                feature_names=[
                    "{receptor_id}.{chain}".format(
                        receptor_id=row["receptor_id"], chain=row["chain"])
                    for index, row in feature_annotations.iterrows()
                ],
                # feature_annotations contains a pandas DataFrame with sequence and VDJ gene usage per reference receptor
                feature_annotations=feature_annotations,
                labels=labels,
                encoding=MatchedReceptorsEncoder.__name__))

        return encoded_dataset
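
For intuition, the feature_names built above pair a reference receptor identifier with a chain name per feature. A minimal standalone sketch of that naming scheme, using a made-up two-row annotation table (the receptor ids and chains are illustrative, not from immuneML):

# Minimal sketch (not from the immuneML source): illustrates the
# "{receptor_id}.{chain}" naming used above with made-up values.
import pandas as pd

feature_annotations = pd.DataFrame({"receptor_id": ["R1", "R1"],
                                    "chain": ["alpha", "beta"]})
feature_names = ["{receptor_id}.{chain}".format(receptor_id=row["receptor_id"], chain=row["chain"])
                 for index, row in feature_annotations.iterrows()]
print(feature_names)  # ['R1.alpha', 'R1.beta']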
Example #7
    def _encode_data(self, dataset: RepertoireDataset, params: EncoderParams):
        labels = params.label_config.get_labels_by_name()

        assert len(labels) == 1, \
            "SequenceAbundanceEncoder: this encoding works only for a single label."

        examples = self._calculate_sequence_abundance(dataset,
                                                      self.comparison_data,
                                                      labels[0], params)

        encoded_data = EncodedData(
            examples=examples,
            labels=dataset.get_metadata([labels[0]]) if params.encode_labels else None,
            example_ids=dataset.get_repertoire_ids(),
            feature_names=[SequenceAbundanceEncoder.RELEVANT_SEQUENCE_ABUNDANCE,
                           SequenceAbundanceEncoder.TOTAL_SEQUENCE_ABUNDANCE],
            encoding=SequenceAbundanceEncoder.__name__,
            info={'relevant_sequence_path': self.relevant_sequence_csv_path})

        encoded_dataset = RepertoireDataset(labels=dataset.labels,
                                            encoded_data=encoded_data,
                                            repertoires=dataset.repertoires)

        return encoded_dataset
Example #8
    def _encode_data(self, dataset, params: EncoderParams):
        self._set_max_dims(dataset)

        arguments = [(repertoire, params) for repertoire in dataset.repertoires]

        with Pool(params.pool_size) as pool:
            chunksize = math.floor(dataset.get_example_count() / params.pool_size) + 1
            repertoires = pool.starmap(self._get_encoded_repertoire, arguments, chunksize=chunksize)

        encoded_repertoires, repertoire_names, labels = zip(*repertoires)

        examples = np.stack(encoded_repertoires, axis=0)

        labels = {k: [dic[k] for dic in labels] for k in labels[0]}

        feature_names = self._get_feature_names(self.max_seq_len, self.max_rep_len)

        if self.flatten:
            examples = examples.reshape(dataset.get_example_count(), self.max_rep_len * self.max_seq_len * len(self.onehot_dimensions))
            feature_names = [item for sublist in feature_names for subsublist in sublist for item in subsublist]

        encoded_data = EncodedData(examples=examples,
                                   example_ids=repertoire_names,
                                   labels=labels,
                                   feature_names=feature_names,
                                   encoding=OneHotEncoder.__name__)

        return encoded_data
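
The flatten branch above collapses a three-level nested list of feature names into a flat list. A minimal standalone sketch of the same comprehension idiom, with made-up names:

# Minimal sketch (not from the immuneML source): the same triple comprehension
# used above, applied to a made-up [sequence][position][onehot_dimension] nesting.
feature_names = [[["s0p0.A", "s0p0.C"], ["s0p1.A", "s0p1.C"]],
                 [["s1p0.A", "s1p0.C"], ["s1p1.A", "s1p1.C"]]]
flat = [item for sublist in feature_names for subsublist in sublist for item in subsublist]
print(flat)  # ['s0p0.A', 's0p0.C', 's0p1.A', 's0p1.C', 's1p0.A', 's1p0.C', 's1p1.A', 's1p1.C']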
Example #9
    def _encode_data(self, dataset, params: EncoderParams) -> EncodedData:
        encoded_example_list, example_ids, encoded_labels, feature_annotation_names = CacheHandler.memo_by_params(
            self._prepare_caching_params(dataset, params,
                                         KmerFrequencyEncoder.STEP_ENCODED),
            lambda: self._encode_examples(dataset, params))

        self._initialize_vectorizer(params)
        vectorized_examples = self._vectorize_encoded(
            examples=encoded_example_list, params=params)
        feature_names = self.vectorizer.feature_names_
        normalized_examples = FeatureScaler.normalize(vectorized_examples,
                                                      self.normalization_type)

        if self.scale_to_unit_variance:
            examples = self.scale_normalized(params, dataset,
                                             normalized_examples)
        else:
            examples = normalized_examples

        feature_annotations = self._get_feature_annotations(
            feature_names, feature_annotation_names)

        encoded_data = EncodedData(examples=examples,
                                   labels=encoded_labels,
                                   feature_names=feature_names,
                                   example_ids=example_ids,
                                   feature_annotations=feature_annotations,
                                   encoding=KmerFrequencyEncoder.__name__)

        return encoded_data
Example #10
    def test_fit(self):
        x = np.array([[1, 0, 0], [0, 1, 1], [1, 1, 1], [0, 1, 1]])
        y = {"test": np.array([1, 0, 2, 0])}

        knn = KNN()
        knn.fit(EncodedData(examples=sparse.csr_matrix(x), labels=y),
                Label("test"))
Example #11
    def _encode_data(self, dataset: ReceptorDataset, params: EncoderParams):
        receptor_objs = [receptor for receptor in dataset.get_data()]
        sequences = [[
            getattr(obj, chain).get_sequence(self.sequence_type)
            for chain in obj.get_chains()
        ] for obj in receptor_objs]
        first_chain_seqs, second_chain_seqs = zip(*sequences)

        if any(seq is None for seq in first_chain_seqs) or any(
                seq is None for seq in second_chain_seqs):
            raise ValueError(
                f"{OneHotEncoder.__name__}: receptor dataset {dataset.name} (id: {dataset.identifier}) contains empty sequences for the "
                f"specified sequence type {self.sequence_type.name.lower()}. Please check that the dataset is imported correctly."
            )

        max_seq_len = max(max([len(seq) for seq in first_chain_seqs]),
                          max([len(seq) for seq in second_chain_seqs]))

        example_ids = dataset.get_example_ids()
        labels = self._get_labels(receptor_objs,
                                  params) if params.encode_labels else None

        examples_first_chain = self._encode_sequence_list(
            first_chain_seqs,
            pad_n_sequences=len(receptor_objs),
            pad_sequence_len=max_seq_len)
        examples_second_chain = self._encode_sequence_list(
            second_chain_seqs,
            pad_n_sequences=len(receptor_objs),
            pad_sequence_len=max_seq_len)

        examples = np.stack((examples_first_chain, examples_second_chain),
                            axis=1)

        feature_names = self._get_feature_names(max_seq_len,
                                                receptor_objs[0].get_chains())

        if self.flatten:
            examples = examples.reshape(
                (len(receptor_objs),
                 2 * max_seq_len * len(self.onehot_dimensions)))
            feature_names = [
                item for sublist in feature_names for subsublist in sublist
                for item in subsublist
            ]

        encoded_data = EncodedData(
            examples=examples,
            labels=labels,
            example_ids=example_ids,
            feature_names=feature_names,
            encoding=OneHotEncoder.__name__,
            info={
                "chain_names":
                receptor_objs[0].get_chains() if all(
                    receptor_obj.get_chains() == receptor_objs[0].get_chains()
                    for receptor_obj in receptor_objs) else None
            })

        return encoded_data
Example #12
    def test_fit_by_cross_validation(self):
        x = EncodedData(sparse.csr_matrix(
            np.array([[1, 0, 0], [0, 1, 1], [1, 1, 1], [0, 1, 1], [1, 0, 0], [0, 1, 1], [1, 1, 1], [0, 1, 1]])),
            labels={"t1": [1, 0, 2, 0, 1, 0, 2, 0], "t2": [1, 0, 2, 0, 1, 0, 2, 0]})

        rfc = RandomForestClassifier()
        rfc.fit_by_cross_validation(x, number_of_splits=2, label=Label("t2"))
Example #13
    def _encode_data(self, dataset: SequenceDataset, params: EncoderParams):
        sequence_objs = [obj for obj in dataset.get_data(params.pool_size)]

        sequences = [obj.get_sequence() for obj in sequence_objs]
        example_ids = dataset.get_example_ids()
        max_seq_len = max([len(seq) for seq in sequences])
        labels = self._get_labels(sequence_objs,
                                  params) if params.encode_labels else None

        examples = self._encode_sequence_list(
            sequences,
            pad_n_sequences=len(sequence_objs),
            pad_sequence_len=max_seq_len)

        feature_names = self._get_feature_names(max_seq_len)

        if self.flatten:
            examples = examples.reshape(
                (len(sequence_objs),
                 max_seq_len * len(self.onehot_dimensions)))
            feature_names = [
                item for sublist in feature_names for item in sublist
            ]

        encoded_data = EncodedData(examples=examples,
                                   labels=labels,
                                   example_ids=example_ids,
                                   feature_names=feature_names,
                                   encoding=OneHotEncoder.__name__)

        return encoded_data
Example #14
    def test_exporter(self):
        dataset = RepertoireDataset(encoded_data=EncodedData(examples=csr_matrix(np.arange(12).reshape(3, 4)),
                                                             labels={"l1": [1, 0, 1], "l2": [0, 0, 1]},
                                                             example_ids=[0, 1, 2],
                                                             feature_names=["f1", "f2", "f3", "f4"],
                                                             encoding="test_encoding"))

        path = EnvironmentSettings.tmp_test_path / "designmatrixexporterreport/"

        report = DesignMatrixExporter(dataset=dataset, result_path=path,
                                      name="design_matrix", file_format='csv')
        report.generate_report()
        self.assertTrue(os.path.isfile(path / "design_matrix.csv"))
        report.file_format = 'csv.zip'
        report._export_matrix()
        self.assertTrue(os.path.isfile(path / "design_matrix.csv.zip"))

        report.file_format = 'npy'
        report._export_matrix()
        self.assertTrue(os.path.isfile(path / "design_matrix.npy"))
        report.file_format = 'npy.zip'
        report._export_matrix()
        self.assertTrue(os.path.isfile(path / "design_matrix.npy.zip"))

        report.file_format = 'hdf5'
        report._export_matrix()
        self.assertTrue(os.path.isfile(path / "design_matrix.hdf5"))
        report.file_format = 'hdf5.zip'
        report._export_matrix()
        self.assertTrue(os.path.isfile(path / "design_matrix.hdf5.zip"))
        shutil.rmtree(path)

        with self.assertRaises(AssertionError):
            DesignMatrixExporter.build_object(**{'file_format': "random"})
Example #15
    def test_generate(self):
        dataset = RepertoireDataset(encoded_data=EncodedData(examples=csr_matrix(np.arange(12).reshape(3, 4)),
                                                             labels={"l1": [1, 0, 1], "l2": [0, 0, 1]},
                                                             example_ids=[0, 1, 2],
                                                             feature_names=["f1", "f2", "f3", "f4"],
                                                             encoding="test_encoding"))

        path = EnvironmentSettings.tmp_test_path / "designmatrixexporterreport/"

        report = DesignMatrixExporter(dataset, path, name='report', file_format='csv')
        report.generate_report()
        self.assertTrue(os.path.isfile(path / "design_matrix.csv"))

        self.assertTrue(os.path.isfile(path / "labels.csv"))
        self.assertTrue(os.path.isfile(path / "encoding_details.yaml"))

        matrix = pd.read_csv(path / "design_matrix.csv", sep=",").values
        self.assertTrue(np.array_equal(matrix, np.arange(12).reshape(3, 4)))

        labels = pd.read_csv(path / "labels.csv", sep=",").values
        self.assertTrue(np.array_equal(labels, np.array([[1, 0], [0, 0], [1, 1]])))

        with open(path / "encoding_details.yaml", "r") as file:
            loaded = yaml.safe_load(file)

        self.assertTrue("feature_names" in loaded)
        self.assertTrue("encoding" in loaded)
        self.assertTrue("example_ids" in loaded)

        self.assertTrue(np.array_equal(loaded["example_ids"], np.array([0, 1, 2])))
        self.assertTrue(np.array_equal(loaded["feature_names"], np.array(["f1", "f2", "f3", "f4"])))
        self.assertEqual("test_encoding", loaded["encoding"])

        shutil.rmtree(path)
Example #16
    def _create_dummy_encoded_data(self, path):
        n_subjects = 50
        n_features = 30

        kmers = [''.join(random.choices(string.ascii_uppercase, k=3)) for i in range(n_features)]

        encoded_data = {
            'examples': sparse.csr_matrix(
                np.random.normal(50, 10, n_subjects * n_features).reshape((n_subjects, n_features))),
            'example_ids': [''.join(random.choices(string.ascii_uppercase, k=4)) for i in range(n_subjects)],
            'labels': {},
            'feature_names': kmers,
            'feature_annotations': pd.DataFrame({
                "sequence": kmers
            }),
            'encoding': "random"
        }

        metadata_filepath = path / "metadata.csv"

        metadata = pd.DataFrame({"patient": np.array([i % 2 == 0 for i in range(n_subjects)])})
        metadata.to_csv(metadata_filepath, index=False)

        dataset = RepertoireDataset(encoded_data=EncodedData(**encoded_data), metadata_file=metadata_filepath)

        return dataset
Example #17
    def test_generate(self):
        path = EnvironmentSettings.tmp_test_path / "relevant_sequence_exporter/"
        PathBuilder.build(path)

        df = pd.DataFrame({
            "v_genes": ["TRBV1-1", "TRBV1-1"],
            'j_genes': ["TRBJ1-1", "TRBJ1-2"],
            "sequence_aas": ['ACCF', "EEFG"]
        })
        df.to_csv(path / 'sequences.csv', index=False)

        dataset = RandomDatasetGenerator.generate_repertoire_dataset(
            2, {2: 1}, {4: 1}, {}, path / "data")
        dataset.encoded_data = EncodedData(
            examples=None,
            info={'relevant_sequence_path': path / 'sequences.csv'},
            encoding="SequenceAbundanceEncoder")

        report_result = RelevantSequenceExporter(dataset, path / "result",
                                                 'somename').generate_report()

        self.assertEqual(1, len(report_result.output_tables))
        self.assertTrue(os.path.isfile(report_result.output_tables[0].path))

        self.assertTrue(
            all(col in ["v_call", "j_call", "cdr3_aa"] for col in pd.read_csv(
                report_result.output_tables[0].path).columns))

        shutil.rmtree(path)
Example #18
    def make_encoded_data(self, path: Path):
        metadata_filepath = path / "metadata.tsv"

        rep_ids = [f"REP{i}" for i in range(10)]
        status_label = [chr((i % 2) + 65) for i in range(10)]  # list of alternating strings "A", "B"

        metadata = pd.DataFrame({"ID": rep_ids, "status": status_label})
        metadata.to_csv(metadata_filepath, sep="\t", index=False)

        for rep_id in rep_ids:
            repertoire_seqs = [self.get_random_sequence() for i in range(100)]

            repertoire_data = pd.DataFrame({
                "amino_acid": repertoire_seqs,
                "templates": [rn.choice(range(1, 1000)) for i in range(100)]
            })

            repertoire_data.to_csv(path / f"{rep_id}.tsv", sep="\t", index=False)

        return EncodedData(examples=None,
                           labels={"status": status_label},
                           example_ids=rep_ids,
                           encoding=DeepRCEncoder.__name__,
                           info={
                               "metadata_filepath": metadata_filepath,
                               "max_sequence_length": 30
                           })
Example #19
def load_encoded_data(labels_path: str, encoding_details_path: str, design_matrix_path: str) -> EncodedData:
    """
    Utility function for adding ML methods: if one encodes data using immuneML through the YAML specification and exports
    the encoded data using DesignMatrixExporter, this function can be used to import the data and return it in the same
    format in which it would be available if the ML method were called from within immuneML.

    Args:

        labels_path (str): path to labels file as exported by the DesignMatrixExporter
        encoding_details_path (str): path to the details file, where example_ids, feature_names and the encoding name will be imported from
        design_matrix_path (str): path to csv or npy file where the design matrix is stored

    Returns:

        EncodedData object as it would be provided to an ML method within immuneML

    """
    # read the data from these files
    # TODO: support H5py
    examples = pd.read_csv(design_matrix_path).values if design_matrix_path.endswith(".csv") else np.load(design_matrix_path, allow_pickle=True)
    labels = pd.read_csv(labels_path).to_dict('list')

    with open(encoding_details_path, "r") as file:
        encoding_details = yaml.safe_load(file)

    # create an EncodedData object which can be used as an input argument for the fit or predict functions
    encoded_data = EncodedData(examples=examples, labels=labels,
                               example_ids=encoding_details['example_ids'],
                               feature_names=encoding_details['feature_names'],
                               encoding=encoding_details['encoding'])

    return encoded_data
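
A minimal usage sketch for load_encoded_data: the export/ directory and the "l1" label name are assumptions, while the file names follow the DesignMatrixExporter outputs shown in the tests above, and the fit call follows the LogisticRegression examples elsewhere in this listing:

# Usage sketch, not from the immuneML source: paths and label name are assumed.
encoded_data = load_encoded_data(labels_path="export/labels.csv",
                                 encoding_details_path="export/encoding_details.yaml",
                                 design_matrix_path="export/design_matrix.csv")

lr = LogisticRegression()
lr.fit(encoded_data, Label("l1"))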
Example #20
    def _prepare_data(self):
        x = np.array([[0., 1., 2., 3.],
                      [1., 0., 1., 2.],
                      [2., 1., 0., 1.],
                      [3., 2., 1., 0.]])
        y = {"test": np.array([0, 0, 1, 1])}

        return x, y, EncodedData(examples=x, labels=y)
Example #21
    def _create_dummy_lr_model(self):
        dummy_lr = LogisticRegression()
        encoded_tr = EncodedData(np.random.rand(100, 20),
                                 {"l1": [i % 2 for i in range(0, 100)]})

        dummy_lr.fit_by_cross_validation(encoded_tr, number_of_splits=2,
                                         label=Label("l1", values=[0, 1]))
        return dummy_lr, encoded_tr
Example #22
    def _make_encoded_data(self, encoded_data, indices):
        examples = np.swapaxes(encoded_data.examples, 2, 3)
        return EncodedData(examples=torch.from_numpy(examples[indices]).float(),
                           labels={
                               label: torch.from_numpy(np.array([encoded_data.labels[label][i] for i in indices]) == self.class_mapping[1]).float()
                               for label in encoded_data.labels.keys()},
                           example_ids=[encoded_data.example_ids[i] for i in indices],
                           feature_names=encoded_data.feature_names,
                           feature_annotations=encoded_data.feature_annotations,
                           encoding=encoded_data.encoding)
Example #23
    def test_run(self):
        path = EnvironmentSettings.root_path / "test/tmp/mlmethodassessment/"
        PathBuilder.build(path)
        dataset = RepertoireDataset(repertoires=RepertoireBuilder.build(
            [["AA"], ["CC"], ["AA"], ["CC"], ["AA"], ["CC"], ["AA"], ["CC"],
             ["AA"], ["CC"], ["AA"], ["CC"]], path)[0])
        dataset.encoded_data = EncodedData(
            examples=np.array([[1, 1], [1, 1], [3, 3], [1, 1], [1, 1], [3, 3],
                               [1, 1], [1, 1], [3, 3], [1, 1], [1, 1], [3, 3]]),
            labels={
                "l1": [1, 1, 3, 1, 1, 3, 1, 1, 3, 1, 1, 3],
                "l2": [1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3]
            })

        label_config = LabelConfiguration()
        label_config.add_label("l1", [1, 3])

        label = Label(name='l1', values=[1, 3])

        method1 = LogisticRegression()
        method1.fit(dataset.encoded_data, label=label)

        res = MLMethodAssessment.run(
            MLMethodAssessmentParams(
                dataset=dataset,
                method=method1,
                metrics={Metric.ACCURACY, Metric.BALANCED_ACCURACY, Metric.F1_MACRO},
                optimization_metric=Metric.LOG_LOSS,
                predictions_path=path / "predictions.csv",
                label=label,
                ml_score_path=path / "ml_score.csv",
                split_index=1,
                path=path))

        self.assertTrue(isinstance(res, dict))
        self.assertTrue(res[Metric.LOG_LOSS.name.lower()] <= 0.1)

        self.assertTrue(os.path.isfile(path / "ml_score.csv"))

        df = pd.read_csv(path / "ml_score.csv")
        self.assertTrue(df.shape[0] == 1)

        df = pd.read_csv(path / "predictions.csv")
        self.assertEqual(12, df.shape[0])

        shutil.rmtree(path)
Example #24
    def train_classifier(self):
        classifier = ProbabilisticBinaryClassifier(100, 0.1)

        X = np.array([[3, 4], [1, 7], [5, 7], [3, 8]])
        y = {"cmv": [True, False, True, False]}

        classifier.fit(EncodedData(X, y), "cmv")

        return classifier
Example #25
    def test_fit_by_cross_validation(self):
        x = EncodedData(np.array([[1, 0, 0], [0, 1, 1], [1, 1, 1], [0, 1, 1],
                                  [1, 0, 0], [0, 1, 1], [1, 1, 1], [0, 1, 1]]),
                        labels={
                            "test1": [1, 0, 2, 0, 1, 0, 2, 0],
                            "test2": [1, 0, 2, 0, 1, 0, 2, 0]
                        })

        knn = KNN(parameters={"n_neighbors": 2})
        knn.fit_by_cross_validation(x, number_of_splits=2, label_name="test1")
Example #26
    def test_fit_by_cross_validation(self):
        x = EncodedData(
            np.array([[1, 0, 0], [0, 1, 1], [1, 1, 1], [0, 1, 1], [1, 0, 0],
                      [0, 1, 1], [1, 1, 1], [0, 1, 1]]), {
                          "t1": [1, 0, 2, 0, 1, 0, 2, 0],
                          "t2": [1, 0, 2, 0, 1, 0, 2, 0]
                      })

        svm = SVC(parameter_grid={"penalty": ["l1"], "dual": [False]})
        svm.fit_by_cross_validation(x, number_of_splits=2, label_name="t1")
Example #27
    def test_fit_by_cross_validation(self):
        x = EncodedData(
            np.array([[1, 0, 0], [0, 1, 1], [1, 1, 1], [0, 1, 1], [1, 0, 0],
                      [0, 1, 1], [1, 1, 1], [0, 1, 1]]), {
                          "test1": [1, 0, 2, 0, 1, 0, 2, 0],
                          "test2": [1, 0, 2, 0, 1, 0, 2, 0]
                      })

        lr = LogisticRegression()
        lr.fit_by_cross_validation(x, number_of_splits=2, label=Label("test2"))
Example #28
    def test_fit_by_cross_validation(self):
        x = EncodedData(
            np.array([[1, 0, 0], [0, 1, 1], [1, 1, 1], [0, 1, 1], [1, 0, 0],
                      [0, 1, 1], [1, 1, 1], [0, 1, 1]]), {
                          "t1": [1, 0, 2, 0, 1, 0, 2, 0],
                          "t2": [1, 0, 2, 0, 1, 0, 2, 0]
                      })

        svm = SVM()
        svm.fit_by_cross_validation(x, number_of_splits=2, label_name="t1")
Example #29
    def test_fit(self):

        classifier = self.train_classifier()

        predictions = classifier.predict(
            EncodedData(np.array([[6, 7], [1, 6]])), "cmv")
        proba_predictions = classifier.predict_proba(
            EncodedData(np.array([[6, 7], [1, 6]])), "cmv")

        labels = classifier.get_classes_for_label("cmv")

        self.assertEqual([True, False], predictions["cmv"])
        self.assertTrue(
            proba_predictions["cmv"][0, 1] > proba_predictions["cmv"][0, 0])
        self.assertTrue(
            proba_predictions["cmv"][1, 0] > proba_predictions["cmv"][1, 1])
        self.assertTrue((proba_predictions["cmv"] <= 1.0).all()
                        and (proba_predictions["cmv"] >= 0.0).all())
        self.assertTrue(isinstance(labels, np.ndarray))
Example #30
    def encode(self, dataset, params: EncoderParams) -> RepertoireDataset:

        train_repertoire_ids = EncoderHelper.prepare_training_ids(dataset, params)
        distance_matrix = self.build_distance_matrix(dataset, params, train_repertoire_ids)
        labels = self.build_labels(dataset, params) if params.encode_labels else None

        encoded_dataset = dataset.clone()
        encoded_dataset.encoded_data = EncodedData(examples=distance_matrix, labels=labels, example_ids=distance_matrix.index.values,
                                                   encoding=DistanceEncoder.__name__)

        return encoded_dataset