Example #1
0
def createDataSet(trainData, trainDatabase, useOtherLabel=True):
    """Build the labelled training set from raw records.

    For each record: splits the abstract into sentences, saves the
    purpose/tasks/conclusions/results sentences under their labels, and
    (optionally) saves one randomly chosen leftover abstract sentence
    under the OTHER label.

    :param trainData: iterable of record dicts (must contain
        ABSTRACT_DOCUMENT; may contain PURPOSE_DOCUMENT, TASKS_DOCUMENT,
        CONCLUSIONS_DOCUMENT, RESULTS_DOCUMENT).
    :param trainDatabase: storage object with a ``save(dict)`` method.
    :param useOtherLabel: when True, also emit one OTHER-labelled sentence
        per record, drawn from abstract sentences not already labelled.
    """
    for record in trainData:
        abstractSentences = SENTENCE_SPLITTER.tokenize(
            record[ABSTRACT_DOCUMENT])
        abstractSentenceIndicesToDelete = list()
        if PURPOSE_DOCUMENT in record:
            # The record holds a single purpose entry; wrap it in a list
            # so the loop shape mirrors populateRecords.
            purposes = [record[PURPOSE_DOCUMENT]]
            for purpose in purposes:
                sentenceId = purpose[0]
                abstractSentenceIndicesToDelete.append(sentenceId)
                sentence = purpose[1]
                sentenceSplitted = removeCommonWordsAndTokenize(sentence)
                trainDatabase.save({
                    "token": [Label.PURPOSE.value],
                    "sentence": sentenceSplitted
                })

        populateRecords(abstractSentenceIndicesToDelete,
                        record,
                        trainDatabase,
                        recordType=TASKS_DOCUMENT,
                        label=Label.TASKS)
        populateRecords(abstractSentenceIndicesToDelete,
                        record,
                        trainDatabase,
                        recordType=CONCLUSIONS_DOCUMENT,
                        label=Label.CONCLUSIONS)
        populateRecords(abstractSentenceIndicesToDelete,
                        record,
                        trainDatabase,
                        recordType=RESULTS_DOCUMENT,
                        label=Label.RESULTS)

        if useOtherLabel:
            # Delete from the highest index down so the remaining
            # indices stay valid as the list shrinks.
            abstractSentenceIndicesToDelete.sort(reverse=True)
            for indice in abstractSentenceIndicesToDelete:
                del abstractSentences[indice]
            # Explicit empty-check instead of the previous bare `except:`,
            # which silently swallowed every error (not just the empty-list
            # IndexError from choice()).
            if abstractSentences:
                abstractSentence = choice(abstractSentences)
                print(abstractSentence)
                trainDatabase.save({
                    "token": [Label.OTHER.value],
                    "sentence":
                    removeCommonWordsAndTokenize(abstractSentence)
                })
            else:
                print("Empty list")
Example #2
0
    def manual_test(
        self,
        sentence="Mērķis Izpētīt dažādas metodes teksta temata klasifikācijai, implementēt tās tekstiem latviešu valodā un salīdzināt tās"
    ):
        """Print diagnostic model output for a single sentence.

        Tokenizes the sentence, infers its document vector, then prints
        the logistic-regression prediction, the known class labels, the
        per-class probabilities (rounded to 3 decimals), and the most
        similar document vectors.

        :param sentence: raw sentence to classify (defaults to a sample
            Latvian abstract sentence).
        :return: None — output goes to stdout only.
        """

        tokens = removeCommonWordsAndTokenize(sentence)
        print(tokens)

        sentenceVector = self.model.infer_vector(tokens)

        prediction = self.logreg.predict([sentenceVector])
        print("y_pred")
        print(prediction)

        print("labels")
        print(self.logreg.classes_)

        probabilities = self.logreg.predict_proba([sentenceVector])
        print(numpy.round(probabilities, 3))

        print(self.model.docvecs.most_similar(positive=[sentenceVector]))
        closest = self.model.docvecs.most_similar(positive=[sentenceVector],
                                                  topn=1)
        print(closest[0][0])
Example #3
0
    def test_english_sentence_splitter(self):
        """Tokenizer drops stop words, lowercases mid-sentence words, and
        replaces numeric tokens with the SKAITLIS placeholder."""
        sentence = "9 šis teikums saSTāv no burtiem a b c 33333333 "
        words = removeCommonWordsAndTokenize(sentence)

        correctWords = ['SKAITLIS', 'šis', 'teikums', 'sastāv', 'burtiem', 'SKAITLIS']
        # assertEqual instead of assertTrue(words == ...): on failure it
        # reports the actual list diff instead of just "False is not true".
        self.assertEqual(words, correctWords)
Example #4
0
def populateRecords(abstractSentenceIndicesToDelete, record, trainDatabase,
                    recordType, label):
    """Save every sentence of ``record[recordType]`` under ``label``.

    Each entry is a ``(sentence_index, sentence_text)`` pair. The sentence
    index is appended to ``abstractSentenceIndicesToDelete`` (mutated
    in place for the caller); the tokenized sentence text is saved to
    ``trainDatabase`` with the label's value as its token.

    Does nothing when ``recordType`` is absent from ``record``.
    """
    if recordType not in record:
        return
    for entry in record[recordType]:
        sentenceIndex = entry[0]
        rawSentence = entry[1]
        abstractSentenceIndicesToDelete.append(sentenceIndex)
        trainDatabase.save({
            "token": [label.value],
            "sentence": removeCommonWordsAndTokenize(rawSentence)
        })
def predict(x):
    """Return per-class probabilities for sentence ``x``, rounded to 3 dp.

    Tokenizes the sentence, builds its vector via the module-level
    ``model``, and squeezes the probability row to a 1-D array.

    :param x: raw sentence string.
    :return: 1-D numpy array of class probabilities.
    """
    print(x)
    testSentenceVector = model.calculateSentenceVector(
        removeCommonWordsAndTokenize(x))
    probabilities = model.logreg.predict_proba([testSentenceVector])

    print(probabilities)
    # numpy.round already returns an ndarray, so the previous
    # np.asarray(numpy.round(...)) wrapper (mixing the np/numpy aliases)
    # was redundant; the duplicate print(x) is also dropped.
    return np.squeeze(np.round(probabilities, 3))
Example #6
0
def populatePredictionMatrix(abstractSentences):
    """Build a (num_sentences, num_classes) probability matrix.

    Each row holds the module-level ``model``'s class probabilities for
    one sentence. Sentences that tokenize to only the numeric placeholder
    ``["SKAITLIS"]``, or whose vector cannot be computed (ValueError),
    get an all-zero row instead.

    :param abstractSentences: iterable of raw sentence strings.
    :return: 2-D numpy array of shape (len(abstractSentences), n_classes).
    """
    # Hoisted loop invariant: class count doesn't change per sentence.
    numClasses = len(model.logreg.classes_)
    zeroRow = np.zeros(numClasses)
    data = list()
    for sentence in abstractSentences:
        sentenceWords = removeCommonWordsAndTokenize(sentence)
        print(sentenceWords)
        if sentenceWords == ["SKAITLIS"]:
            data.append(zeroRow)
            continue
        try:
            testSentenceVector = model.calculateSentenceVector(sentenceWords)
        except ValueError:
            data.append(zeroRow)
            continue
        proba = model.logreg.predict_proba([testSentenceVector])
        data.append(proba[0])
    return np.array(data)
Example #7
0
 def predictClass(self, sentence):
     """Return the predicted class label for a raw sentence string."""
     tokens = removeCommonWordsAndTokenize(sentence)
     sentenceVector = self.calculateSentenceVector(tokens)
     # predict() returns an array of one label for the single input row.
     return self.logreg.predict([sentenceVector])[0]