Esempio n. 1
0
 def __init__(self, k=1):
     """Build the phrase detector, tokenizer, and word2vec model.

     k: caller-supplied parameter stored as-is (semantics depend on the
        enclosing class — not visible here).
     """
     # tunable parameters (p is fixed; k is caller-configurable)
     self.k = k
     self.p = 0.8
     # detect multi-word phrases in the raw sentence stream
     self.phrase_detector = PmiPhraseDetector(RawSentenceStream())
     # word2vec model trained on phrase-merged sentences
     self.model = W2Vmodel(PhraseSentenceStream(self.phrase_detector))
     # tokenizer for incoming text
     self.tokenizer = RawTokenizer()
Esempio n. 2
0
 def __init__(self, alpha=6.0):
     """Build the phrase detector, tokenizer, word2vec and tf-idf models.

     alpha: caller-supplied parameter stored as-is (semantics depend on
            the enclosing class — not visible here).
     """
     # parameters (alpha is caller-tunable; k and p are fixed)
     self.alpha = alpha
     self.k = 3
     self.p = 0.80
     # detect multi-word phrases in the raw sentence stream
     self.phrase_detector = PmiPhraseDetector(RawSentenceStream())
     # tokenizer for incoming text
     self.tokenizer = RawTokenizer()
     # word2vec over phrase-merged sentences, plus a tf-idf model
     self.w2v = W2Vmodel(PhraseSentenceStream(self.phrase_detector))
     self.tfidf = TFIDFmodel()
Esempio n. 3
0
 def __init__(self, size=50, modelfile=None):
     """Build a doc2vec model over case reports plus its document index.

     size: embedding dimensionality forwarded to D2Vmodel.
     modelfile: optional pre-built model file forwarded to D2Vmodel.
     """
     # phrase detection over raw sentences (fz documents excluded here)
     self.phrase_detector = PmiPhraseDetector(
         RawSentenceStream(fz_docs=False))
     # two training passes; the stream reshuffles once between them
     n_epochs = 2
     sentence_stream = PhraseSentenceStream(self.phrase_detector,
                                            extract_func=extract_docid,
                                            fz_docs=True,
                                            reshuffles=n_epochs - 1)
     self.model = D2Vmodel(sentence_stream,
                           name="DOCID",
                           dataset_name="CASEREPORT",
                           epochs=n_epochs,
                           dimension=size,
                           modelfile=modelfile)
     # index for looking up case-report documents
     self.doc_index = DocIndex(CaseReportLibrary(), "CASEREPORT")
import logging
from irmodels.W2Vmodel import W2Vmodel
from textanalysis.texts import PhraseSentenceStream, RawSentenceStream
from textanalysis.phrasedetection import PmiPhraseDetector
import numpy as np
from random import sample
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA

# configure INFO-level logging so model construction progress is visible
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
                    level=logging.INFO)

# phrase detection over the raw sentence stream
phrase_detector = PmiPhraseDetector(RawSentenceStream())
# word2vec model built from phrase-merged sentences
m = W2Vmodel(PhraseSentenceStream(phrase_detector))

# expected line format: "<count>,<disease name>"; the chunk after the
# final newline is skipped, matching the file's trailing-newline convention
with open("testdiseases.txt", 'r') as infile:
    rows = [entry.split(",") for entry in infile.read().split("\n")[:-1]]
diseases = {row[1]: int(row[0]) for row in rows}

# one symptom per line, stored as a membership map
with open("testsymptoms.txt", 'r') as infile:
    symptoms = {entry: 1 for entry in infile.read().split("\n")[:-1]}

# disease data
keywords = diseases.keys()
Esempio n. 5
0
__author__ = 'matias'

from irmodels.D2Vmodel import D2Vmodel
from textanalysis.texts import PhraseSentenceStream, RawSentenceStream, ExtractDiseases
from textanalysis.phrasedetection import PmiPhraseDetector
from scipy.spatial.distance import cosine
from heapq import heappush, heappop
import numpy as np
import logging

# configure INFO-level logging so model construction progress is visible
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
                    level=logging.INFO)
# phrase detection over raw sentences (fz documents excluded)
phrase_detector = PmiPhraseDetector(RawSentenceStream(fz_docs=False))

extract_disease = ExtractDiseases()

# doc2vec model keyed by extracted disease labels; three training passes,
# with the stream reshuffled between passes (reshuffles = epochs - 1)
epochs = 3
m = D2Vmodel(PhraseSentenceStream(phrase_detector,
                                  extract_func=extract_disease,
                                  fz_docs=False,
                                  reshuffles=epochs - 1),
             name="DISEASE",
             dataset_name="CASEREPORT",
             epochs=epochs)

# NOTE(review): variable says "lupus" but the lookup key is "man" — confirm
# which label was intended; the key itself is left untouched here.
vec_lupus = m.inner_model["man"]
# Fixed: was a Python 2 `print` statement (syntax error on Python 3).
# A single-argument print() call behaves identically on Python 2 and 3.
print(np.all(np.isnan(vec_lupus)))
Esempio n. 6
0
from irmodels.D2Vmodel import D2Vmodel, DocIndex
from textanalysis.texts import PhraseSentenceStream, RawSentenceStream, extract_docid, extract_mesh_terms
from textanalysis.phrasedetection import PmiPhraseDetector
import logging
from scipy.spatial.distance import cosine
from textanalysis.texts import FZArticleLibrary, CaseReportLibrary
from heapq import heappush, heappop
import numpy as np

# configure INFO-level logging so model construction progress is visible
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
                    level=logging.INFO)

# phrase detector persisted under a PMI-level-specific filename
pmi_level = 90
phrase_file = "PHRASE_%s_2_CASEREPORT_RAW" % (pmi_level, )
phrase_detector = PmiPhraseDetector(RawSentenceStream(fz_docs=False),
                                    filename=phrase_file)

# doc2vec model over case reports, keyed by document id; two training
# passes with one reshuffle of the stream in between
epochs = 2
stream = PhraseSentenceStream(phrase_detector,
                              extract_func=extract_docid,
                              fz_docs=False,
                              reshuffles=epochs - 1)
m = D2Vmodel(stream,
             name="DOCID",
             dataset_name="CASEREPORT",
             epochs=epochs,
             dimension=40)

# document index over the case-report library
doc_index = DocIndex(CaseReportLibrary(), "CASEREPORT")
"""