def load_parameters(self):
        self.amount_prediction_method = self.load_data_from_file(data_type=self.SAVE_TYPE_MODEL,
                                                                 file_name='amount_method')
        self.trend_prediction_method = self.load_data_from_file(data_type=self.SAVE_TYPE_MODEL,
                                                                file_name='trend_method')
        self.data_features = self.load_data_from_file(data_type=self.SAVE_TYPE_MODEL, file_name='features')
        self.stock_symbol = self.load_data_from_file(data_type=self.SAVE_TYPE_MODEL, file_name='symbol')
        self.data_parser = self.load_data_from_file(data_type=self.SAVE_TYPE_MODEL, file_name='data_parser')
        amount_model_path = os.path.join(os.path.abspath(self.model_path), 'amount_model')
        trend_model_path = os.path.join(os.path.abspath(self.model_path), 'trend_model')

        if self.amount_prediction_method == self.RANDOM_FOREST:
            amount_model = RandomForestModel.load(sc=self.sc, path=amount_model_path)
        elif self.amount_prediction_method == self.LINEAR_REGRESSION:
            amount_model = LinearRegressionModel.load(sc=self.sc, path=amount_model_path)
        else:
            amount_model = self.load_data_from_file(data_type=self.SAVE_TYPE_MODEL, file_name='amount_model')

        if self.trend_prediction_method == self.RANDOM_FOREST:
            trend_model = RandomForestModel.load(sc=self.sc, path=trend_model_path)
        elif self.trend_prediction_method == self.LOGISTIC_REGRESSION:
            trend_model = LogisticRegressionModel.load(sc=self.sc, path=trend_model_path)
        elif self.trend_prediction_method == self.NAIVE_BAYES:
            trend_model = NaiveBayesModel.load(sc=self.sc, path=trend_model_path)
        elif self.trend_prediction_method == self.SVM:
            trend_model = SVMModel.load(sc=self.sc, path=trend_model_path)
        else:
            trend_model = self.load_data_from_file(data_type=self.SAVE_TYPE_MODEL, file_name='trend_model')

        return trend_model, amount_model
Example no. 2

def parseLine(line):
    parts = line.split(',')
    label = float(parts[0])
    features = Vectors.dense([float(x) for x in parts[1].split(' ')])
    return LabeledPoint(label, features)
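
# e.g. a line "0,1.0 0.0 3.0" becomes LabeledPoint(0.0, [1.0, 0.0, 3.0])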
# $example off$

if __name__ == "__main__":

    sc = SparkContext(appName="Jay")

    # $example on$
    data = sc.textFile('data/mllib/naive_bayes_data.txt').map(parseLine)

    # Split data approximately into training (60%) and test (40%)
    training, test = data.randomSplit([0.6, 0.4], seed=0)

    # Train a naive Bayes model.
    model = NaiveBayes.train(training, 1.0)

    # Make prediction and test accuracy.
    predictionAndLabel = test.map(lambda p: (model.predict(p.features), p.label))
    accuracy = 1.0 * predictionAndLabel.filter(lambda pl: pl[0] == pl[1]).count() / test.count()

    # Save and load model
    model.save(sc, "jay/myNaiveBayesModel")
    sameModel = NaiveBayesModel.load(sc, "jay/myNaiveBayesModel")
    # $example off$
Example no. 3
    for word in lowercased:
        punct_removed = ''.join([letter for letter in word if not letter in PUNCTUATION])
        no_punctuation.append(punct_removed)
    no_stopwords = [w for w in no_punctuation if not w in STOPWORDS]
    stemmed = [STEMMER.stem(w) for w in no_stopwords]
    return [w for w in stemmed if w]


def parseLine(line):
    parts = line.split(',')
    label = float(parts[0])
    features = Vectors.dense([float(x) for x in parts[1].split(' ')])
    return LabeledPoint(label, features)

data = sc.textFile(r'C:\Users\SigurdLap\PycharmProjects\sparkTwitter\naiveBayes.txt').map(parseLine)

# Split data approximately into training (60%) and test (40%)
training, test = data.randomSplit([0.6, 0.4], seed=0)
# Try a split into 5 parts, cross validation

# Train a naive Bayes model.
model = NaiveBayes.train(training, 1.0)

# Make prediction and test accuracy.
predictionAndLabel = test.map(lambda p: (model.predict(p.features), p.label))
accuracy = 1.0 * predictionAndLabel.filter(lambda pl: pl[0] == pl[1]).count() / test.count()

# Save and load model
model.save(sc, "target/tmp/myNaiveBayesModel")
sameModel = NaiveBayesModel.load(sc, "target/tmp/myNaiveBayesModel")
    no_stopwords = [w for w in no_punctuation if not w in STOPWORDS]
    stemmed = [STEMMER.stem(w) for w in no_stopwords]
    result = [w for w in stemmed if w]
    if not result:
        return [""]
    return result

folderpath='hdfs://ec2-54-213-170-202.us-west-2.compute.amazonaws.com:9000/user/root/crawled_data'

sc = SparkContext()
data_raw = sc.wholeTextFiles(folderpath)
data_cleaned = data_raw.map(lambda ft: (ft[0], tokenize(ft[1])))
htf = HashingTF(50000)
data_hashed = data_cleaned.map(lambda ft: (ft[0], htf.transform(ft[1])))
data_hashed.persist()
sameModel = NaiveBayesModel.load(sc, 'hdfs://ec2-54-213-170-202.us-west-2.compute.amazonaws.com:9000/user/root/bbcmodel')
predictedLabel = data_hashed.map(lambda ft: (ft[0].split("/")[-1][:-4], sameModel.predict(ft[1])))
preds = predictedLabel.collect()

conn = psycopg2.connect(database="NewsSource", user="******", password="******", host="newdb.cnceaogjppz8.us-west-2.rds.amazonaws.com", port="5432")
cur = conn.cursor()
update = 'update articlestable set classifiedcategory=%s where id=%s'

newscategory = {hash('entertainment'): 'entertainment', hash('sports'): 'sports',
                hash('politics'): 'politics', hash('technology'): 'technology',
                hash('business'): 'business'}

for pred in preds:

    if hash('entertainment') == pred[1]:
        category = 'entertainment'
    elif hash('sports') == pred[1]:
        category = 'sports'
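
# The remaining branches are cut off above; a minimal sketch of an equivalent loop
# using the newscategory dict defined earlier, followed by the UPDATE that the
# 'update' statement and cursor suggest (hypothetical continuation, not the original code):
for pred in preds:
    category = newscategory.get(pred[1], 'unknown')  # map hashed label back to its name
    cur.execute(update, (category, pred[0]))         # classifiedcategory=%s, id=%s
conn.commit()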
Example no. 5
# $example off$

if __name__ == "__main__":

    sc = SparkContext(appName="PythonNaiveBayesExample")

    # $example on$
    data = sc.textFile("data/mllib/sample_naive_bayes_data.txt").map(parseLine)

    # Split data approximately into training (60%) and test (40%)
    training, test = data.randomSplit([0.6, 0.4], seed=0)

    # Train a naive Bayes model.
    model = NaiveBayes.train(training, 1.0)

    # Make prediction and test accuracy.
    predictionAndLabel = test.map(lambda p: (model.predict(p.features), p.label))
    accuracy = 1.0 * predictionAndLabel.filter(lambda pl: pl[0] == pl[1]).count() / test.count()
    print("model accuracy {}".format(accuracy))

    # Save and load model
    output_dir = "target/tmp/myNaiveBayesModel"
    shutil.rmtree(output_dir, ignore_errors=True)
    model.save(sc, output_dir)
    sameModel = NaiveBayesModel.load(sc, output_dir)
    predictionAndLabel = test.map(lambda p: (sameModel.predict(p.features), p.label))
    accuracy = 1.0 * predictionAndLabel.filter(lambda pl: pl[0] == pl[1]).count() / test.count()
    print("sameModel accuracy {}".format(accuracy))

    # $example off$
    label = float(parts[0])
    features = Vectors.dense([float(x) for x in parts[1].split(' ')])
    return LabeledPoint(label, features)


# $example off$

if __name__ == "__main__":

    sc = SparkContext(appName="PythonNaiveBayesExample")

    # $example on$
    data = sc.textFile('data/mllib/sample_naive_bayes_data.txt').map(parseLine)

    # Split data approximately into training (60%) and test (40%)
    training, test = data.randomSplit([0.6, 0.4], seed=0)

    # Train a naive Bayes model.
    model = NaiveBayes.train(training, 1.0)

    # Make prediction and test accuracy.
    predictionAndLabel = test.map(lambda p:
                                  (model.predict(p.features), p.label))
    accuracy = 1.0 * predictionAndLabel.filter(
        lambda pl: pl[0] == pl[1]).count() / test.count()

    # Save and load model
    model.save(sc, "target/tmp/myNaiveBayesModel")
    sameModel = NaiveBayesModel.load(sc, "target/tmp/myNaiveBayesModel")
    # $example off$
def main():
    sc = SparkContext(appName="BayesClassifer")
    htf = HashingTF(50000)
    data = sc.textFile(
        '/home/varshav/work/PycharmProjects/Sentiment/cleaned_bayes_labels1.csv'
    )
    data_cleaned = data.map(lambda line: line.split(","))
    # Create an RDD of LabeledPoints using category labels as labels and tokenized, hashed text as feature vectors
    data_hashed = data_cleaned.map(
        lambda lt: LabeledPoint(lt[0], htf.transform(lt[1])))
    data_hashed.persist()
    # data = sc.textFile('/home/admin/work/spark-1.4.1-bin-hadoop2.4/data/mllib/sample_naive_bayes_data.txt').map(parseLine)
    #print data
    # Split data approximately into training (60%) and test (40%)
    training, test = data_hashed.randomSplit([0.70, 0.30], seed=0)

    sameModel = NaiveBayesModel.load(
        sc, "/home/varshav/work/PycharmProjects/StockAnalysis/myModel")

    print "----------"
    print sameModel.predict(htf.transform("posts jump in net profit"))

    predictionAndLabel = test.map(lambda p:
                                  (sameModel.predict(p.features), p.label))
    predictionAndLabel1 = training.map(
        lambda p: (sameModel.predict(p.features), p.label))
    prediction = 1.0 * predictionAndLabel.filter(
        lambda pl: pl[0] == pl[1]).count() / test.count()
    prediction1 = 1.0 * predictionAndLabel1.filter(
        lambda pl: pl[0] == pl[1]).count() / training.count()

    buy_buy = 1.0 * predictionAndLabel.filter(
        lambda pl: pl[0] == 1 and pl[1] == 1).count()
    sell_sell = 1.0 * predictionAndLabel.filter(
        lambda pl: pl[0] == 2 and pl[1] == 2).count()
    hold_hold = 1.0 * predictionAndLabel.filter(
        lambda pl: pl[0] == 3 and pl[1] == 3).count()

    print(buy_buy)
    print(sell_sell)
    print(hold_hold)

    # Statistics by class
    labels = data_hashed.map(lambda lp: lp.label).distinct().collect()
    print(labels)
    print(type(labels[0]))
    '''
    for label in sorted(labels):
        print("Class %s precision = %s" % (label, metrics.precision(label)))
        print("Class %s recall = %s" % (label, metrics.recall(label)))
        print("Class %s F1 Measure = %s" % (label, metrics.fMeasure(label, beta=1.0)))
    '''
    '''
    print("Class %s precision = %s" % (1, metrics.precision(1)))
    print("Class %s recall = %s" % (1, metrics.recall(1)))
    print("Class %s F1 Measure = %s" % (1, metrics.fMeasure()))

    print("Class %s precision = %s" % (2, metrics.precision(2)))
    print("Class %s recall = %s" % (2, metrics.recall(2)))
    print("Class %s F1 Measure = %s" % (2, metrics.fMeasure(2)))
    '''
    # Weighted stats
    '''
    print("Weighted recall = %s" % metrics.weightedRecall)
    print("Weighted precision = %s" % metrics.weightedPrecision)
    print("Weighted F(1) Score = %s" % metrics.weightedFMeasure())
    print("Weighted F(0.5) Score = %s" % metrics.weightedFMeasure(beta=0.5))
    print("Weighted false positive rate = %s" % metrics.weightedFalsePositiveRate)
    '''

    sc.stop()
Example no. 8
    sorted_dict = sorted(dictionary_RDD_IDFs_Weights.items(),
                         key=operator.itemgetter(1))

    # Set to max of N words for corresponding number of features for which the model is trained
    Dictionary = []
    for key, value in sorted_dict:
        Dictionary.append(key)

    print(len(Dictionary))

    # Create a broadcast variable for the Dictionary
    Dictionary_BV = sc.broadcast(sorted(Dictionary))

    # Load Naive Bayes Model
    model_path = "/Users/path/to/twitter_analytics/NB_model"
    sameModel = NaiveBayesModel.load(sc, model_path)

    # Start intro Video -  make sure to first run "chmod a+x play.sh" otherwise --> permission denied exception
    video = "Users:path:to:vids:intro.mp4"
    video_1 = subprocess.Popen("osascript runner.scpt " + "'" + video + "'",
                               shell=True)

    # Get user twitter-handle
    x = int(
        input(
            "Do you have a twitter account? \n(1) Yes \n(2) No \nYour choice: "
        ))
    if x == 1:
        user_handle = input("Please provide user twitter handle: ")
        friends = get_friends(user_handle, api)
    from pyspark import SparkContext
    from pyspark.mllib.feature import HashingTF
    from pyspark.mllib.regression import LabeledPoint
    from pyspark.mllib.classification import NaiveBayes
    from pyspark import SparkConf
    from pyspark.mllib.classification import NaiveBayes, NaiveBayesModel
    from pyspark.mllib.linalg import Vectors

    print("Successfully imported Spark Modules")

except ImportError as e:
    print("Can not import Spark Modules", e)
    sys.exit(1)

sc = SparkContext(appName="Test")
sameModel = NaiveBayesModel.load(
    sc, "/home/sparkCluster/work/PycharmProjects/StockAnalysis/myModel")
htf = HashingTF(50000)

pg_no = 1
company_name = []
company_list = []
companies = []
codes_list = []
comp = []
experts = []
stopwords = []
exclude = []


def getCompanyName():
    global comp
Example no. 10
    #Tokenize the text

    tokenhtml = tokenize(a)
    print(tokenhtml)

    # Put the text into a list of dict [{"text": body}]

    body = ''
    for i in range(0, len(tokenhtml)):
        body += tokenhtml[i] + ' '
    html_dict.append({"text": body})

    # Load the data into spark

    sc = SparkContext()
    htmldata = sc.parallelize(html_dict)

    # Calculate TF-IDF

    tf = HashingTF().transform(htmldata.map(lambda doc: doc["text"], preservesPartitioning=True))
    idf = IDF().fit(tf)
    tfidf = idf.transform(tf)

    # Load the Naive Bayes model and predict

    sameModel = NaiveBayesModel.load(sc, "/Users/apple/Dropbox/2016Spring/COSC526/MacHW1/mymodel")
    predictionAndLabel = tfidf.map(lambda p: (sameModel.predict(p)))

    # Check the result

    print(format(predictionAndLabel.take(1)))
	def getModel(self, path):
		if self.type == 'NaiveBayes':
			return NaiveBayesModel.load(self.sc, path)
		elif self.type == 'DecisionTree':
			return DecisionTreeModel.load(self.sc, path)
Example no. 12
# **************************************************************************************************************
# Clothing category prediction
# **************************************************************************************************************

from pyspark.mllib.feature import HashingTF, IDF
from pyspark.mllib.classification import NaiveBayes, NaiveBayesModel
from pyspark.mllib.linalg import Vectors

pdf = pd.read_csv('file:///home/ec2-user/data/parseData.csv',encoding='utf-8')
df = sqlContext.createDataFrame(pdf)

#df.show()

htf = HashingTF(10000)
categoryModel = NaiveBayesModel.load(sc, "target/tmp/parseModel")

# **************************************************************************************************************
# Classification
# **************************************************************************************************************

# labelDf.show()

def getGenderLabelCode(rdd, label):
    GenderRdd = rdd.map(lambda row: row.gender).distinct()

    def getGenderCode(rdd):
        dic = {'etc': 'e'}
        for feature in rdd.collect():
            uniToStr = str(feature)
            dic[uniToStr] = uniToStr[0]
Example no. 13
from pyspark.mllib.classification import NaiveBayes, NaiveBayesModel
from pyspark.mllib.linalg import Vectors
from pyspark.mllib.regression import LabeledPoint


def parseLine(line):
    parts = line.split(',')
    label = float(parts[0])
    features = Vectors.dense([float(x) for x in parts[1].split(' ')])
    return LabeledPoint(label, features)
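
# an existing SparkContext named sc is assumed below (e.g. when run from the pyspark shell)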


data = sc.textFile('data/mllib/sample_naive_bayes_data.txt').map(parseLine)

# Split data approximately into training (60%) and test (40%)
training, test = data.randomSplit([0.6, 0.4], seed=0)

# Train a naive Bayes model.
model = NaiveBayes.train(training, 1.0)

# Make prediction and test accuracy.
predictionAndLabel = test.map(lambda p: (model.predict(p.features), p.label))
accuracy = 1.0 * predictionAndLabel.filter(
    lambda pl: pl[0] == pl[1]).count() / test.count()

# Save and load model
model.save(sc, "myModelPath")
sameModel = NaiveBayesModel.load(sc, "myModelPath")
Example no. 14
}

from pyspark.streaming.kafka import KafkaUtils
directKafkaStream = KafkaUtils.createDirectStream(
    ssc, kafka_configuration_params["topic"],
    {"metadata.broker.list": kafka_configuration_params["connectionstring"]})

from pyspark.mllib.classification import SVMModel, LogisticRegressionModel, NaiveBayesModel

# LR_model = LogisticRegressionModel.load(sc, "../../notebooks/LR_model")
# SVM_model = SVMModel.load(sc, "../../notebooks/SVM_model")
# NB_model = NaiveBayesModel.load(sc, "../../notebooks/NB_model")

LR_model = LogisticRegressionModel.load(sc, "<path-to-LR-model>")
SVM_model = SVMModel.load(sc, "<path-to-SVM-model>")
NB_model = NaiveBayesModel.load(sc, "<path-to-NB-model>")

import nltk
import random
from nltk.tokenize import word_tokenize

#allowed_word_types = ["JJ"]
allowed_word_types = ["<selected-word-type>"]

#rdd_all_words = sc.textFile("../../notebooks/all_words/part-00000")
rdd_all_words = sc.textFile("<path-to-words-file>")
rdd_broadcast_all_words = sc.broadcast(rdd_all_words.collect())


def convert_tweet_to_instance(tweets):
Example no. 15
def load(sc, filename):
    model = NaiveBayesModel.load(sc, filename)
    return sc, model
Example no. 16
app = Flask(__name__)
conf = SparkConf()
conf.setAppName("Classification")
try:
    sc.stop()
except:
    pass
sc = SparkContext(pyFiles=[
    '/home/ubuntu/project_src/flaskapp/createLabeledPoint.py',
    '/home/ubuntu/project_src/flaskapp/ClassSet.py',
    '/home/ubuntu/project_src/flaskapp/FuncSet.py',
    '/home/ubuntu/project_src/flaskapp/hello.py'
]).getOrCreate(conf=conf)
testm = DecisionTreeModel.load(
    sc, "hdfs://*****:*****")


@app.route('/')
def hello_world():
    return 'From python hello!'


@app.route('/index')
def index():
    return render_template("index.html")


@app.route('/train')
def trainodule():
    pass
Example no. 17
    def getModel(self, path):
        if self.type == 'NaiveBayes':
            return NaiveBayesModel.load(self.sc, path)
        elif self.type == 'DecisionTree':
            return DecisionTreeModel.load(self.sc, path)
    def classify(self, transformer):
        votes = []
        for c in self._classifiers:
            v = c.predict(transformer)
            votes.append(v)
        return mode(votes)
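
# Note: 'mode' is not defined in this fragment; presumably statistics.mode (or an
# equivalent majority-vote helper) is imported elsewhere and returns the majority vote.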


conf = SparkConf()
conf.setAppName("TA")
sc = SparkContext(conf=conf)
tre = StreamingContext(sc, 10)
htf = HashingTF(50000)

NB_directory = 'hdfs://master:9000/user/hadoop/NaiveBayes'
NB_model = NaiveBayesModel.load(sc, NB_directory)

LR_directory = 'hdfs://master:9000/user/hadoop/LogisticRegression'
LR_model = LogisticRegressionModel.load(sc, LR_directory)

DT_output_dir = 'hdfs://master:9000/user/hadoop/DT'
DT_model = DecisionTreeModel.load(sc, DT_output_dir)

voted_classifier = VoteClassifier(NB_model, LR_model, DT_model)


def sentiment(test_sample):
    sample_data_test = test_sample.split(" ")
    cli = htf.transform(sample_data_test)
    return voted_classifier.classify(cli)
Example no. 19
if __name__ == "__main__":

    sc = SparkContext(appName="PythonNaiveBayesExample")

    # $example on$
    # Load and parse the data file.
    data = MLUtils.loadLibSVMFile(sc, "data/mllib/sample_libsvm_data.txt")

    # Split data approximately into training (60%) and test (40%)
    training, test = data.randomSplit([0.6, 0.4])

    # Train a naive Bayes model.
    model = NaiveBayes.train(training, 1.0)

    # Make prediction and test accuracy.
    predictionAndLabel = test.map(lambda p: (model.predict(p.features), p.label))
    accuracy = 1.0 * predictionAndLabel.filter(lambda pl: pl[0] == pl[1]).count() / test.count()
    print('model accuracy {}'.format(accuracy))

    # Save and load model
    output_dir = 'target/tmp/myNaiveBayesModel'
    shutil.rmtree(output_dir, ignore_errors=True)
    model.save(sc, output_dir)
    sameModel = NaiveBayesModel.load(sc, output_dir)
    predictionAndLabel = test.map(lambda p: (sameModel.predict(p.features), p.label))
    accuracy = 1.0 * predictionAndLabel.filter(lambda pl: pl[0] == pl[1]).count() / test.count()
    print('sameModel accuracy {}'.format(accuracy))

    # $example off$
if ascontext:
    if ascontext.isComputeDataModelOnly():
        ascontext.setSparkOutputSchema(output_schema)
        sys.exit(0)
    else:
        modelpath = ascontext.getModelContentToPath("model")
        model_metadata = json.loads(ascontext.getModelContentToString("model.metadata"))

# create a DataModelTools to handle data model and data conversions
datamodel = model_metadata["datamodel"]
dmt = DataModelTools(datamodel)
predictors = model_metadata["predictors"]
DataModelTools.checkPredictors(datamodel,predictors,df)

from pyspark.mllib.classification import NaiveBayesModel
model = NaiveBayesModel.load(sc, modelpath)

# to score the model, we need an RDD of DenseVector (the numeric encoded values of the predictors), use DataModelTools to do this
dv = dmt.extractDenseVector(df,predictors).map(lambda x:x[1])

# scoring generates an RDD of predictions (but not the original features)
predictions = model.predict(dv)

# now we need to zip together the original rows from the DataFrame and the RDD of predictions
# we end up with an RDD containing the list of values from the original dataframe plus the predicted class, converted from the encoded number to the original string
def rowToList(row):
    result = []
    for idx in range(0, len(row)):
        result.append(row[idx])
    return result
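
# The fragment ends before the zip step described in the comment above; a minimal
# sketch of how it could look, assuming df and predictions as defined earlier
# (decoding the predicted number back to its original label string via the
# datamodel is left out here):
rows_with_prediction = df.rdd.map(rowToList).zip(predictions) \
    .map(lambda row_pred: row_pred[0] + [row_pred[1]])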
Example no. 21
    def init_spark_components(self):
        print("Loading Model")
        self.model = NaiveBayesModel.load(sc,
                                          path.join(self.base_path, 'model'))
        self.tf = HashingTF()
Example no. 22
    def loadModelFromDisk(self, sc):
        print("Loading pretrained model from disk \n")
        model = NaiveBayesModel.load(
            sc, "hdfs://192.168.1.33:9000//NaiveBayes.model")
        print("Complete \n")
        return model
from pyspark import SparkConf, SparkContext
from pyspark.sql import SparkSession
import time as tm
from threading import Thread
import numpy as np

conf = SparkConf().setAppName("appName").setMaster("local")
conf.set("spark.executor.memory", "2g")
sc = SparkContext(conf=conf)
spark = SparkSession(sc)

#Load pretrained models
output_dir1 = '/home/emmittxu/Desktop/Stock-Sentiment-alalysis/Models/myNaiveBayesModel'
output_dir2 = '/home/emmittxu/Desktop/Stock-Sentiment-alalysis/Models/sent_stockModel'
print("Loading model.......")
model1 = NaiveBayesModel.load(sc, output_dir1)
model2 = NaiveBayesModel.load(sc, output_dir2)
print("Models successfully loaded......")

#Global variables to record the number of positive and negative sentiments
negative = 0.0
neutral = 0.0
positive = 0.0


#Do feature extraction using TF-IDF and feed feature vectors to the sentiment classifier
def vectorize_feature(training):
    try:
        global positive
        global negative
        positive = 0
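
# The body of vectorize_feature is cut off above; a rough, self-contained sketch of
# the described flow (hashed TF -> IDF -> Naive Bayes prediction). The function and
# variable names below are assumptions, not taken from the original:
from pyspark.mllib.feature import HashingTF, IDF

def classify_texts(texts_rdd, model):
    words = texts_rdd.map(lambda t: t.split(" "))   # tokenize on whitespace
    tf = HashingTF(50000).transform(words)          # hashed term frequencies
    tfidf = IDF().fit(tf).transform(tf)             # reweight by inverse document frequency
    return model.predict(tfidf)                     # one predicted label per input text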
Example no. 24
__author__ = 'ruben'

from pyspark.mllib.classification import NaiveBayes, NaiveBayesModel
from pyspark.mllib.linalg import Vectors
from pyspark.mllib.regression import LabeledPoint

def parseLine(line):
    parts = line.split(',')
    label = float(parts[0])
    features = Vectors.dense([float(x) for x in parts[1].split(' ')])
    return LabeledPoint(label, features)

data = sc.textFile('data/mllib/sample_naive_bayes_data.txt').map(parseLine)

# Split data approximately into training (60%) and test (40%)
training, test = data.randomSplit([0.6, 0.4], seed = 0)

# Train a naive Bayes model.
model = NaiveBayes.train(training, 1.0)

# Make prediction and test accuracy.
predictionAndLabel = test.map(lambda p: (model.predict(p.features), p.label))
accuracy = 1.0 * predictionAndLabel.filter(lambda pl: pl[0] == pl[1]).count() / test.count()

# Save and load model
model.save(sc, "myModelPath")
sameModel = NaiveBayesModel.load(sc, "myModelPath")
import os
import sys

os.environ['SPARK_HOME'] = 'spark/spark'
sys.path.append('spark/spark/python/')


try:
   from pyspark import SparkContext
   from pyspark import SparkConf
   print("Successfully imported Spark Modules")

except ImportError as e:
   print("Can not import Spark Modules", e)
   sys.exit(1)


config = SparkConf().setMaster('local[*]').setAppName('SparkService')
sc = SparkContext(conf=config)
sc.setLogLevel("ERROR")


from pyspark.mllib.feature import HashingTF
from pyspark.mllib.classification import NaiveBayesModel

hashingTF = HashingTF()

sameModel = NaiveBayesModel.load(sc, "spark/nbm")

print(sameModel.predict(hashingTF.transform("This is good place".split(" "))))

Example no. 26
def save_model(model, model_name):
    output_dir = model_name
    shutil.rmtree(output_dir, ignore_errors=True)
    model.save(sc, output_dir)


print('*' * 50, 'MODELS_TRAIN', '*' * 50)
iris = datasets.load_iris()
data_set = iris.data
Y = iris.target
data_set = pd.DataFrame(data_set)
data_set['labels'] = Y
print(data_set.head(5))
print(data_set.shape)

s_df = sqlContext.createDataFrame(data_set)
train_dataset = s_df.rdd.map(lambda x: LabeledPoint(x[-1], x[:4]))
training, test = train_dataset.randomSplit([0.6, 0.4])

model = NaiveBayes.train(training, 0.7)
predictionAndLabel = test.map(lambda p: (model.predict(p.features), p.label))
accuracy(predictionAndLabel)

################################################SAVE_LOAD###############################################################
print('*' * 50, 'SAVE_LOAD', '*' * 50)
save_model(model, 'myNaiveBayesModel')
sameModel = NaiveBayesModel.load(sc, 'myNaiveBayesModel')
predictionAndLabel_1 = test.map(lambda p: (sameModel.predict(p.features), p.label))
accuracy(predictionAndLabel_1)
    def load_model(cls, path="$SPARK_HOME/NaiveBayes"):
        """Load a previously saved NaiveBayesModel from path.

        Note: "$SPARK_HOME" in the default path is not expanded automatically.
        """
        return NaiveBayesModel.load(sc, path)
Example no. 28
    def load_model(self, context, path):
        return NaiveBayesModel.load(context, path)
Example no. 29
    average = numpy.average(score)
    deviation = numpy.std(score)
    return 50 + 10 * ((score - average) / deviation)
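
# The original definition of likelihood() is truncated above; a plausible reconstruction
# (an assumption, not the author's code) scores each class from the model's log priors
# (pi) and log conditional probabilities (theta), both exposed by NaiveBayesModel:
def likelihood(self, vector):
    # one log-likelihood per class: log P(class) + vector . log P(features|class)
    return numpy.array([self.pi[i] + vector.dot(self.theta[i])
                        for i in range(len(self.labels))])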

# mix-in
NaiveBayesModel.likelihood = likelihood

conf = SparkConf().setAppName("sample").setMaster("local")
sc = SparkContext(conf=conf)

path = os.path.abspath(os.path.dirname(__file__))
texts = pickle.load(open("%s/model/texts.pick" % path))
labels = pickle.load(open("%s/model/labels.pick" % path))

texts = sc.parallelize(texts)
htf = HashingTF(1000)  # Warning!! default value is 2^20
htf.transform(texts)

words = sys.argv[1].split()
test_tf = htf.transform(words)

model = NaiveBayesModel.load(sc, "%s/model" % path)
test = model.predict(test_tf)

likelihoods = model.likelihood(test_tf)
print "likelihoods: %s" % likelihoods
print "standard scores: %s" % standard_score(likelihoods)
print "label: %s" % labels[int(test)].encode('utf-8')
# json_data = {"likelihood": likelihoods[int(test)], "label": labels[int(test)].encode('utf-8')}
# print json.dumps(json_data)
Example no. 30
    terms = tags.split()

    # filter words that not exist in the vocabulary
    terms = [x for x in list(set(terms)) if x in list(set(vocabulary))]

    indices = list(map(lambda x: vocabulary.index(x), list(set(terms))))
    indices.sort()
    occurrences = list(
        map(lambda x: float(terms.count(vocabulary[x])), indices))

    return [len(vocabulary), indices, occurrences]


conf = SparkConf()
conf.setAppName("NaiveBaye")
conf.set('spark.driver.memory', '6g')
conf.set('spark.executor.memory', '6g')
conf.set('spark.cores.max', 156)

#load tags passed as parameter
tags = sys.argv[1]
bow = bow(tags)  #bag of words of that tags

sc = SparkContext(conf=conf)  # SparkContext

model = NaiveBayesModel.load(sc, "model")

result = model.predict(SparseVector(bow[0], bow[1], bow[2]))

print(str(classValues[result]))
def main(sc, sqlContext):

    #start = timer()

    #print '---Fetching user, posts, tokens and categories from MongoDB---'
    #start_i = timer()
    user = findUserById(iduser)
    posts = findPosts(user) 
    
    tokens, category, categoryAndSubcategory = getTokensAndCategories()
    postsRDD = (sc.parallelize(posts).map(lambda s: (s[0], word_tokenize(s[1].lower()), s[2], s[3]))
                    .map(lambda p: (p[0], [x for x in p[1] if x in tokens] ,p[2], p[3]))
                    .cache())

    

    #print '####took %d seconds' % (timer() - start_i)

    #print '---Fetching products from MongoDB---'
    #start_i = timer()

    #print '####took %d seconds' % (timer() - start_i)

    #print '---Creating corpusRDD---'
    #start_i = timer()
    stpwrds = stopwords.words('portuguese')
    corpusRDD = (postsRDD.map(lambda s: (s[0], [PorterStemmer().stem(x) for x in s[1] if x not in stpwrds], s[2], s[3]))
                         .filter(lambda x: len(x[1]) >= 20 or (x[2] == u'Post' and len(x[1])>0))
                         .cache())
    #print '####took %d seconds' % (timer() - start_i)

    #print '---Computing TF-IDF---'
    #start_i = timer()
    wordsData = corpusRDD.map(lambda s: Row(label=int(s[0]), words=s[1], type=s[2]))
    wordsDataDF = sqlContext.createDataFrame(wordsData).unionAll(sqlContext.read.parquet("/home/ubuntu/recsys-tcc-ml/parquet/wordsDataDF.parquet"))


    numTokens = len(tokens)
    hashingTF = HashingTF(inputCol="words", outputCol="rawFeatures", numFeatures=numTokens)
    idf = IDF(inputCol="rawFeatures", outputCol="features")

    featurizedData = hashingTF.transform(wordsDataDF)

    idfModel = idf.fit(featurizedData)
    tfIDF = idfModel.transform(featurizedData).cache()

    postTFIDF = (tfIDF
                    .filter(tfIDF.type==u'Post')
                    #.map(lambda s: Row(label=s[0], type=s[1], words=s[2], rawFeatures=s[3], features=s[4], sentiment=SVM.predict(s[4])))
                    .cache())

    #postTFIDF = postTFIDF.filter(lambda p: p.sentiment == 1)
    #print '####took %d seconds' % (timer() - start_i)

    #print '---Loading model---'
    #start_i = timer()
    NB = NaiveBayesModel.load(sc, '/home/ubuntu/recsys-tcc-ml/models/naivebayes/modelo_categoria')
    SVM = SVMModel.load(sc, "/home/ubuntu/recsys-tcc-ml/models/svm")
    #print '####took %d seconds' % (timer() - start_i)

    #print '---Using the model---'
    #start_i = timer()
    predictions = (postTFIDF
                        .map(lambda p: (NB.predict(p.features), p[0], SVM.predict(p.features)))
                        .filter(lambda p: p[2]==1)
                        .map(lambda p: (p[0], p[1]))
                        .groupByKey()
                        .mapValues(list)
                        .collect())

    #print '####took %d seconds' % (timer() - start_i)
    #print '---Computing similarities---'
    #start_i = timer()
    suggestions = []

    for prediction in predictions:
        category_to_use = category[int(prediction[0])]
        #print ' Computing similarities for category: {}'.format(category_to_use)
        tf = tfIDF.filter(tfIDF.type==category_to_use).cache()
        for post in prediction[1]:
            postVector = postTFIDF.filter(postTFIDF.label == post).map(lambda x: x.features).collect()[0]
            sim = (tf
                    .map(lambda x: (post, x.label, cossine(x.features, postVector)))
                    .filter(lambda x: x[2]>=threshold)
                    .collect())
            if len(sim) > 0:
                suggestions.append(sim)

    #print '####took %d seconds' % (timer() - start_i)

    if len(suggestions) > 0:
        #print '---Inserting recommendations into MongoDB---'
        #start_i = timer()
        insertSuggestions(suggestions, iduser, posts)
    def classify(self, transformer):
        votes = []
        for c in self._classifiers:
            v = c.predict(transformer)
            votes.append(v)
        return mode(votes)

conf = SparkConf()
conf.setAppName("TA") 
sc = SparkContext(conf=conf)
ssc = StreamingContext(sc,10)
htf = HashingTF(50000)

NB_output_dir = 'hdfs://master:9000/user/hadoop/NaiveBayes'
NB_load_model = NaiveBayesModel.load(sc, NB_output_dir)

LR_output_dir = 'hdfs://master:9000/user/hadoop/LogisticRegression'
LR_load_model = LogisticRegressionModel.load(sc, LR_output_dir)

DT_output_dir = 'hdfs://master:9000/user/hadoop/DT'
DT_load_model = DecisionTreeModel.load(sc, DT_output_dir)


voted_classifier = VoteClassifier(NB_load_model, LR_load_model, DT_load_model)

def sentiment(test_sample):
    
    test_sample_sp = test_sample.split(" ")
    trans = htf.transform(test_sample_sp)
    return voted_classifier.classify(trans)