Example #1
    def prepare_features(self, train_data, term_vectors):

        # Main runner for feature extraction
        print("... Preparing dataframe including all words and tf-idf.")
        article_word_count, word_tfidf, publishers, article_numbers = self.prepare_dictionary_article(term_vectors)

        # Normalize Osfamily and Publisher
        train_data, os_list, publisher_list = self.normalize_features(train_data)
        DataHandling.store_data(Constants.model_path + "os_list.pickle", os_list)
        DataHandling.store_data(Constants.model_path + "publisher_list.pickle", publisher_list)

        # Calculate article popularity
        article_popularity = self.article_popularity(train_data, article_word_count, article_numbers)
        DataHandling.store_data(Constants.model_path + "article_popularity.pickle", article_popularity)

        # Calculate the distance array of articles
        print("... Preparing training data including the article distances between the original article and the recommended one.")
        train_data = self.prepare_training_article_distance(train_data, article_word_count, word_tfidf)
        DataHandling.store_data(Constants.model_path + "train_data_with_article_distances.pickle", train_data)

        return train_data, article_popularity, os_list, publisher_list
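
DataHandling itself is not shown in these examples; a minimal pickle-based sketch consistent with the store_data(path, obj) / load_data(path) calls above could look like this (the implementation details are an assumption):

import pickle

class DataHandling:
    @staticmethod
    def store_data(path, obj):
        # Serialize any Python object to disk.
        with open(path, 'wb') as f:
            pickle.dump(obj, f)

    @staticmethod
    def load_data(path):
        # Restore a previously pickled object.
        with open(path, 'rb') as f:
            return pickle.load(f)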
Example #2
# Keep only users that have historically clicked on an ad for training.
train_data = train_data.loc[train_data['UserClicksAd'] > 0]

# Balancing training data
# Since the dataset is imbalanced, we balance negative and positive samples for training.
# positive_samples = train_data.loc[train_data['Output'] == 1]
# positive_samples_count = positive_samples.shape[0]
# negative_samples = train_data.loc[train_data['Output'] == 0]
# rows = random.sample(list(negative_samples.index), positive_samples_count)
# negative_samples = negative_samples.loc[rows]
# train_data = pd.concat([positive_samples, negative_samples])
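
The same downsampling can be written more compactly with current pandas; a minimal sketch, assuming the binary label lives in the 'Output' column as above (balance_classes and the random_state are illustrative choices, not names from the project):

import pandas as pd

def balance_classes(df, label='Output'):
    # Downsample the majority (negative) class to the size of the positive class.
    positives = df[df[label] == 1]
    negatives = df[df[label] == 0].sample(n=len(positives), random_state=42)
    # Shuffle so the two classes are interleaved.
    return pd.concat([positives, negatives]).sample(frac=1, random_state=42)

# train_data = balance_classes(train_data)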

"""
prepare the model and test it
"""
model = MYModel()
model.trainBatch(train_data)
DataHandling.store_data(Constants.model_path + 'Model_NB.pickle', model)


# Cross-fold validation of the trained model
scores = model.crossfoldValidation(train_data, 10)
print(scores)
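
crossfoldValidation's internals are not shown here; a plausible sketch built on scikit-learn's cross_val_score (the feature/label split and the Naive Bayes estimator are assumptions for illustration, suggested by the 'Model_NB' naming above):

from sklearn.model_selection import cross_val_score
from sklearn.naive_bayes import GaussianNB

def crossfold_validation(train_data, folds=10):
    # Assumed layout: 'Output' is the label, everything else is a feature.
    X = train_data.drop(columns=['Output'])
    y = train_data['Output']
    return cross_val_score(GaussianNB(), X, y, cv=folds)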


"""
#Pickle or CSV final training data (For testing and Matlab)
# train_data = DataHandling.load_data(Constants.model_path + 'train_data_with_prediction_prob.pickle')
# train_data.to_csv('prediction_file.csv')
"""
Example #3
    def predict_prob(self, data, load_classifier=False):
        # Optionally reload the pickled classifier before predicting class probabilities.
        if load_classifier:
            self.classifier = DataHandling.load_data(Constants.model_path + 'classifier_NB.pickle')

        return self.classifier.predict_prob(data)
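
A minimal usage sketch of this method (test_features is a hypothetical DataFrame carrying the same feature columns the classifier was trained on):

# test_features: hypothetical DataFrame with the training-time feature columns.
model = MYModel()
probabilities = model.predict_prob(test_features, load_classifier=True)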
Example #4
    def trainBatch(self, train_data):
        # Train the wrapped classifier and persist it to disk.
        self.classifier.train(train_data)
        print('... Pickling the model')
        DataHandling.store_data(Constants.model_path + 'classifier_NB.pickle', self.classifier)
Example #5
# fileNameTestData = "data/test.csv"  # well, obviously we don't hand this one out
topN = 5

with open(fileNameTermVectors) as f:
    termVectors = json.load(f)

train_data = pd.read_csv(fileNameTrainData)
""":type: pd.DataFrame"""

# testData = pd.read_csv(fileNameTestData)
"""
Test model on unseen data. After each prediction step, you may update you model. This is not mandatory though.
"""

# Load the trained model in case we do not want to retrain on new data
model = DataHandling.load_data(Constants.model_path + 'Model_NB.pickle')
os_list = DataHandling.load_data(Constants.model_path + 'os_list.pickle')
publisher_list = DataHandling.load_data(Constants.model_path + 'publisher_list.pickle')

# If there is a new termVectors file, we use it to re-extract features (presumably because articles get updated).
feature_extraction = FeatureExtraction()
article_word_count, word_tfidf, publishers, article_numbers = feature_extraction.prepare_dictionary_article(termVectors)
# For testing, use the training data as the test data
testData = train_data

article_popularity = DataHandling.load_data(Constants.model_path + 'article_popularity.pickle')
train_data = DataHandling.load_data(Constants.model_path + 'train_data_with_article_distances.pickle')

for (rowNum, row) in testData.iterrows():
    inputFeatures = row[["Publisher", "Osfamily", "ItemSrc", "UserID", "UserClicksAd"]]