from array import array

import numpy as np

from skmultiflow.data import SEAGenerator
from skmultiflow.lazy import KNNClassifier


def test_knn():
    stream = SEAGenerator(random_state=1)

    learner = KNNClassifier(n_neighbors=8, max_window_size=2000, leaf_size=40)
    cnt = 0
    max_samples = 5000
    predictions = array('i')
    correct_predictions = 0
    wait_samples = 100
    X_batch = []
    y_batch = []

    while cnt < max_samples:
        X, y = stream.next_sample()
        X_batch.append(X[0])
        y_batch.append(y[0])
        # Test every n samples
        if (cnt % wait_samples == 0) and (cnt != 0):
            predictions.append(learner.predict(X)[0])
            if y[0] == predictions[-1]:
                correct_predictions += 1
        learner.partial_fit(X, y)
        cnt += 1

    expected_predictions = array('i', [1, 1, 1, 0, 1, 1, 0, 0, 0, 1,
                                       1, 0, 1, 1, 1, 1, 1, 1, 1, 1,
                                       1, 1, 1, 1, 1, 1, 1, 1, 0, 0,
                                       0, 0, 1, 1, 0, 0, 0, 0, 1, 1,
                                       1, 1, 0, 1, 0, 0, 1, 0, 1])
    assert np.alltrue(predictions == expected_predictions)

    expected_correct_predictions = 49
    assert correct_predictions == expected_correct_predictions

    expected_info = "KNNClassifier(leaf_size=40, max_window_size=2000, " \
                    "metric='euclidean', n_neighbors=8)"
    info = " ".join([line.strip() for line in learner.get_info().split()])
    assert info == expected_info

    learner.reset()
    info = " ".join([line.strip() for line in learner.get_info().split()])
    assert info == expected_info

    X_batch = np.array(X_batch)
    y_batch = np.array(y_batch)
    learner.fit(X_batch[:4500], y_batch[:4500], classes=[0, 1])
    predictions = learner.predict(X_batch[4501:4550])

    expected_predictions = array('i', [1, 1, 1, 1, 1, 1, 1, 1, 0, 1,
                                       1, 1, 1, 1, 0, 1, 1, 1, 1, 0,
                                       1, 0, 1, 1, 1, 1, 0, 0, 1, 0,
                                       0, 1, 1, 1, 0, 0, 1, 0, 0, 1,
                                       1, 1, 1, 1, 1, 1, 0, 1, 0])
    assert np.alltrue(predictions == expected_predictions)

    correct_predictions = sum(predictions == y_batch[4501:4550])
    expected_correct_predictions = 49
    assert correct_predictions == expected_correct_predictions

    assert type(learner.predict(X)) == np.ndarray
    assert type(learner.predict_proba(X)) == np.ndarray
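
# The manual test-then-train loop above is the prequential evaluation pattern that
# skmultiflow also ships as a ready-made helper. A minimal sketch of the same run with
# EvaluatePrequential follows (re-using the SEAGenerator and KNNClassifier imports
# above); the parameter values mirror the test and are illustrative only, and the
# wrapper function name is ours.
from skmultiflow.evaluation import EvaluatePrequential


def run_prequential_sketch():
    stream = SEAGenerator(random_state=1)
    learner = KNNClassifier(n_neighbors=8, max_window_size=2000, leaf_size=40)
    evaluator = EvaluatePrequential(max_samples=5000,   # same sample budget as the test
                                    n_wait=100,         # report every 100 samples
                                    pretrain_size=100,  # fill the window before evaluating
                                    metrics=['accuracy'])
    evaluator.evaluate(stream=stream, model=learner, model_names=['KNN'])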
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import SGDClassifier
from skmultiflow.lazy import KNNClassifier
from skmultiflow.meta import ClassifierChain  # supports partial_fit, unlike sklearn's chain


class LabelPredict:
    """Incrementally trainable tweet labeler.

    A classifier chain over SGD classifiers handles the topical (multi-label)
    labels; two KNN classifiers handle sentiment and relevance.
    """

    def __init__(self, texts: list):
        self.tokenizer = TfidfVectorizer()
        self.tokenizer.fit(texts)
        # Sentiment labels are encoded as class indices ...
        self.labels_sent = {"POSITIVE": 0, "NEUTRAL": 1, "NEGATIVE": 2}
        # ... and decoded back into label dicts.
        self.reverse_sent = {0: {"POSITIVE": True, "NEUTRAL": False, "NEGATIVE": False},
                             1: {"POSITIVE": False, "NEUTRAL": True, "NEGATIVE": False},
                             2: {"POSITIVE": False, "NEUTRAL": False, "NEGATIVE": True}}
        self.labels_relevance = ["Irrelevant"]
        self.labels = []
        # 'log' is the logistic loss (renamed 'log_loss' in scikit-learn >= 1.1).
        self.lcc = ClassifierChain(SGDClassifier(max_iter=100, loss='log', random_state=1))
        self.clrel = KNNClassifier()
        self.clsent = KNNClassifier()

    def _labels2array(self, labeldict: dict):
        # Binary indicator vector over the known topical labels.
        target = []
        for label in self.labels:
            if label in labeldict and labeldict[label] == True:
                target.append(1)
            else:
                target.append(0)
        return np.array(target)

    def retrain(self, labeled_tweets: list):
        # Rebuild the topical label set from the batch; sentiment and relevance
        # labels are tracked separately.
        labels = set()
        for tweet in labeled_tweets:
            if "labels" in tweet and len(tweet["labels"]) > 0:
                labels.update([l for l in tweet["labels"]
                               if not (l in self.labels_sent or l in self.labels_relevance)])
        self.labels = list(labels)
        assert "Irrelevant" not in self.labels, "Relevance label leaked into the topical label set"

        # Retrain all three classifiers from scratch.
        self.lcc = ClassifierChain(SGDClassifier(max_iter=100, loss='log', random_state=1))
        self.clrel = KNNClassifier()
        self.clsent = KNNClassifier()

        X, y, ys, yr = [], [], [], []
        for tweet in labeled_tweets:
            if "labels" in tweet and len(tweet["labels"]) > 0:
                X.append(tweet["tweet"])
                y.append(self._labels2array(tweet["labels"]))
                sls = [l for l, v in tweet["labels"].items() if l in self.labels_sent and v]
                if len(sls) == 1:
                    ys.append(self.labels_sent[sls[0]])
                else:
                    ys.append(self.labels_sent["NEUTRAL"])
                if self.labels_relevance[0] in tweet["labels"] and tweet["labels"][self.labels_relevance[0]]:
                    yr.append(1)
                else:
                    yr.append(0)

        X = np.array(self.tokenizer.transform(X).todense())
        y = np.array(y)
        ys = np.array(ys)
        yr = np.array(yr)

        self.clsent.fit(X, ys)
        print("Trained Sentiment Classifier")
        self.clrel.fit(X, yr)
        print("Trained Relevance Classifier")

        # The classifier chain only sees examples carrying at least one topical label.
        X2, y2 = [], []
        for Xe, ye in zip(X, y):
            if ye.sum() > 0:
                X2.append(Xe)
                y2.append(ye)
        X = np.array(X2)
        y = np.array(y2)
        self.lcc.fit(X, y)
        print("Trained Categorical Classifier")

    def predict(self, text: str):
        # Returns a dict mapping every known label (topical, sentiment, relevance) to a bool.
        X = np.array(self.tokenizer.transform([text]).todense()).reshape((1, -1))
        predicted = self.lcc.predict(X)
        labels_add = {label: bool(value) for label, value in zip(self.labels, predicted.flatten())}
        sent_pred = self.clsent.predict(X)
        labels_add.update(self.reverse_sent[sent_pred.flatten()[0]])
        assert "POSITIVE" in labels_add, "Classification is ambiguous"
        if self.clrel.predict(X) == np.array([1]):
            labels_add[self.labels_relevance[0]] = True
        else:
            labels_add[self.labels_relevance[0]] = False
        return labels_add

    def train_item(self, tweet):
        # Single-sample incremental update. Returns False when the tweet carries a
        # label the model does not know yet, which requires a full retrain().
        text = tweet["tweet"]
        labeldict = tweet["labels"]
        for l in labeldict:
            if l not in self.labels and l not in self.labels_relevance and l not in self.labels_sent:
                print("RETRAIN!")
                return False

        y = self._labels2array(labeldict).reshape((1, -1))
        X = np.array(self.tokenizer.transform([text]).todense()).reshape((1, -1))
        sls = [l for l, v in labeldict.items() if l in self.labels_sent and v]
        if len(sls) == 1:
            ys = self.labels_sent[sls[0]]
        else:
            ys = self.labels_sent["NEUTRAL"]
        ys = np.array([ys])
        if self.labels_relevance[0] in labeldict and labeldict[self.labels_relevance[0]]:
            yr = np.array([1])
        else:
            yr = np.array([0])

        if y.sum() > 0:
            self.lcc.partial_fit(X, y)
        if yr.sum() > 0:
            self.clrel.partial_fit(X, yr)
        # ys always holds a valid class index (0 encodes POSITIVE), so the sentiment
        # classifier is updated unconditionally.
        self.clsent.partial_fit(X, ys)
        return True
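
# Usage sketch for LabelPredict, assuming a hypothetical labeled-tweet format inferred
# from the class itself: each tweet is a dict with a "tweet" text field and a "labels"
# dict mapping label names (topical labels plus POSITIVE/NEUTRAL/NEGATIVE and
# "Irrelevant") to booleans. The example tweets and topical label names are made up.
if __name__ == "__main__":
    labeled_tweets = [
        {"tweet": "Support sorted out my issue within minutes, thanks!",
         "labels": {"Support": True, "POSITIVE": True, "Irrelevant": False}},
        {"tweet": "The app keeps crashing on login",
         "labels": {"Bug": True, "NEGATIVE": True, "Irrelevant": False}},
        {"tweet": "Unrelated meme of the day",
         "labels": {"Irrelevant": True, "NEUTRAL": True}},
        # ... in practice, far more labeled tweets than the KNN default of 5 neighbors.
    ]

    model = LabelPredict([t["tweet"] for t in labeled_tweets])  # fit the TF-IDF vocabulary
    model.retrain(labeled_tweets)                               # batch (re)training

    print(model.predict("Login is broken again"))               # {label: bool, ...}

    # Incremental update; returns False (after printing "RETRAIN!") if the tweet
    # carries a label the model has not seen yet.
    model.train_item({"tweet": "Thanks for the quick fix!",
                      "labels": {"Support": True, "POSITIVE": True, "Irrelevant": False}})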