Example #1
# Import paths assumed from the other examples in this listing; the module
# providing ApeicDBHelper is not shown, so its import is omitted.
from predictor.predictor import split
from predictor.mfu_predictor import MFUPredictor


def main():
    predictor = MFUPredictor()

    db_helper = ApeicDBHelper()
    users = db_helper.get_users()
    for user in users:
        logs = db_helper.get_logs(user)  # fetched but unused in this example

        # Split each user's aggregated session logs into training and
        # testing portions.
        sessions = db_helper.get_sessions(user)
        training_logs, testing_logs = split(sessions, aggregated=True)

        # Train on the training logs, predict an application for every
        # testing log, and score the ranked predictions.
        predictor.train(training_logs)
        launches = map(lambda x: x['application'], testing_logs)
        predictions = map(lambda x: predictor.predict(x), testing_logs)
        hr, mrr = predictor.test(launches, predictions)
        print hr, mrr  # hit rate and mean reciprocal rank
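The listing never shows MFUPredictor itself. Going by the name (most frequently used), a minimal sketch of the interface this example relies on could look like the following; the class body and the top-1 hit-rate convention are assumptions, only the method names and the (hr, mrr) return value come from the example above.

from collections import Counter


class MFUPredictor(object):
    """Sketch: rank applications by training-set launch frequency."""

    def train(self, logs):
        self.counts = Counter(log['application'] for log in logs)

    def predict(self, log):
        # The query log is ignored; the ranking is the same for every query.
        return [app for app, _ in self.counts.most_common()]

    def test(self, launches, predictions):
        # Hit rate: fraction of launches whose app tops the ranking.
        # MRR: mean reciprocal rank of the true app in each ranking.
        launches, predictions = list(launches), list(predictions)
        hits, rr = 0, 0.0
        for app, ranking in zip(launches, predictions):
            if ranking and ranking[0] == app:
                hits += 1
            if app in ranking:
                rr += 1.0 / (ranking.index(app) + 1)
        n = len(launches)
        return hits / float(n), rr / n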
Example #2
# Import paths assumed, following the other examples in this listing.
from predictor.predictor import split
from predictor.lu_predictor import LUPredictor


def main():
    predictor = LUPredictor()

    db_helper = ApeicDBHelper()
    users = db_helper.get_users()

    for user in users:
        sessions = db_helper.get_sessions(user)
        training_logs, testing_logs = split(sessions, aggregated=True)

        predictor.train(training_logs)
        # Each query carries the two most recently used applications
        # (lu1, lu2), so scoring starts from the third testing log.
        launches = map(lambda x: x['application'], testing_logs[2:])
        predictions = map(
            lambda i: predictor.predict({
                'lu1': testing_logs[i - 1]['application'],
                'lu2': testing_logs[i - 2]['application'],
            }),
            xrange(2, len(testing_logs)))
        hr, mrr = predictor.test(launches, predictions)
        print hr, mrr
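LUPredictor is not shown either. The {'lu1': ..., 'lu2': ...} queries above suggest it conditions on the two last-used applications; a plausible sketch, assuming a conditional frequency table with a global most-frequent fallback (a test() like the MFU sketch's would complete the interface):

from collections import Counter, defaultdict


class LUPredictor(object):
    """Sketch: rank apps by how often they followed the (lu1, lu2) pair."""

    def train(self, logs):
        self.tables = defaultdict(Counter)
        self.fallback = Counter(log['application'] for log in logs)
        for i in xrange(2, len(logs)):
            key = (logs[i - 1]['application'], logs[i - 2]['application'])
            self.tables[key][logs[i]['application']] += 1

    def predict(self, query):
        table = self.tables[(query['lu1'], query['lu2'])] or self.fallback
        return [app for app, _ in table.most_common()]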
Example #3
from predictor.preprocessor import Preprocessor
from predictor.mfu_predictor import MFUPredictor
from sklearn.naive_bayes import MultinomialNB, BernoulliNB
from sklearn.naive_bayes import GaussianNB
from sklearn.feature_extraction import DictVectorizer
from sklearn import tree
from termcolor import colored  # assumed source of colored()
import sys


def split(sessions, ratio=0.8):
    # Split the session list at the given ratio (default 80% train, 20% test).
    split_index = int(len(sessions) * ratio)
    return sessions[:split_index], sessions[split_index:]


if __name__ == '__main__':
    db_helper = ApeicDBHelper()
    users = db_helper.get_users()

    accuracies = []
    for user in users:
        print colored(user, attrs=['blink'])

        sessions = db_helper.get_sessions(user)
        training_sessions, testing_sessions = split(sessions, 0.8)
        preprocessor = Preprocessor([])

        # Keep only testing sessions that contain a log at position `start`,
        # then aggregate the training sessions together with the first log
        # of each testing session.
        start = 0
        testing_sessions = filter(lambda x: len(x) > start, testing_sessions)
        logs = preprocessor.aggregate_sessions(
            training_sessions +
            map(lambda x: [x[start]], testing_sessions))
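        # The snippet breaks off here in the original listing.  A purely
        # hypothetical continuation, suggested by the sklearn imports at the
        # top: vectorize the aggregated log dicts with DictVectorizer and fit
        # one of the imported classifiers.  The lu1/lu2 feature keys are
        # assumptions borrowed from the LUPredictor example.
        features = map(lambda x: {'lu1': x['lu1'], 'lu2': x['lu2']}, logs)
        labels = map(lambda x: x['application'], logs)

        vectorizer = DictVectorizer()
        X = vectorizer.fit_transform(features).toarray()

        clf = GaussianNB()
        clf.fit(X, labels)
        accuracies.append(clf.score(X, labels))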
Example #4
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler

from numpy import array
from predictor.predictor import Predictor, split


##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=100, centers=centers, cluster_std=0.4,
                            random_state=0)
X = StandardScaler().fit_transform(X)

db_helper = ApeicDBHelper()
for user in db_helper.get_users()[7:]:
    sessions = db_helper.get_sessions(user)
    training_logs, testing_logs = split(sessions, aggregated=True)

    # Drop logs without valid coordinates, then collect the distinct
    # (latitude, longitude) pairs seen in training.
    training_logs = filter(
        lambda x: x['latitude'] != 0 and x['longitude'] != 0, training_logs)
    latlng_pairs = list(
        set(map(lambda x: (x['latitude'], x['longitude']), training_logs)))
    print latlng_pairs
    print len(latlng_pairs)
    # X = array(latlng_pairs)
    # print X.size
    # print X
    result = []
    for la1, ln1 in latlng_pairs:
        dists = []
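        # The listing breaks off here.  A hypothetical continuation, assuming
        # the goal is to cluster the training locations with the imported
        # DBSCAN: record each point's nearest-neighbour distance, then
        # cluster the distinct coordinate pairs.  The eps and min_samples
        # values are illustrative, not from the original.
        for la2, ln2 in latlng_pairs:
            dists.append(((la1 - la2) ** 2 + (ln1 - ln2) ** 2) ** 0.5)
        if len(dists) > 1:
            result.append(min(d for d in dists if d > 0))

    db = DBSCAN(eps=0.01, min_samples=3).fit(array(latlng_pairs))
    print db.labels_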