Example 1
    def save_read_article_feedback(self, article, score):
        '''
        Saves a feedback by user: read article.

        NOTE: a feedback for one article might be added twice.
        '''
        r = ReadArticleFeedback(user_id=self.mongodb_user.id,
                                article=article, score=score)
        r.save()
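The NOTE above means nothing guards against duplicates. A sketch of a duplicate-safe variant, reusing the ReadArticleFeedback lookup that Example 7 performs (an illustration, not the project's code):

    def save_read_article_feedback_once(self, article, score):
        #Only save if no feedback for this (user, article) pair exists yet.
        feedback = ReadArticleFeedback.objects(user_id=self.mongodb_user.id,
                                               article=article).first()
        if feedback is None:
            ReadArticleFeedback(user_id=self.mongodb_user.id,
                                article=article, score=score).save()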
Example 2
def ajax_add_user():
    '''
    Called remotely to add a new user.
    '''
    if not current_user.is_authenticated():
        abort(403)

    name = request.form['name']
    email = request.form['email'].lower()
    new_password = request.form['new_password']
    new_password_repeat = request.form['new_password_repeat'] 
        
    if current_user.mongodb_user.email != "*****@*****.**":
        abort(403)
        
    #check passwords
    if new_password != new_password_repeat:
        abort(400)
        
    if new_password == "":
        abort(400)
        
    #hash password
    m = hashlib.sha256()
    m.update(new_password.encode("UTF-8"))
    m.update(SALT.encode("UTF-8"))
        
    #check if user with email address already exists
    users_with_same_email = User.objects(email=email)
    if len(users_with_same_email) > 0:
        abort(400)
        
    try:
        app.logger.debug("Adding new user %s" % name)
            
        #just pick the first article as feedback
        first_article = Article.objects().first()
        first_profile = LearnedProfile(features = first_article.features)
            
        new_user = User(name=name, password=m.hexdigest(),
                        email=email,
                        learned_profile=[first_profile])
        new_user.save(safe=True)

        first_feedback = ReadArticleFeedback(user_id=new_user.id,
                                             article=first_article,
                                             score=1.0)
        first_feedback.save()
            
        app.logger.debug("...done.")
    except Exception as inst:
        app.logger.error("Could not add new user: %s: %s" % (type(inst), inst))
        abort(500)
        
    return ""
Example 3
    def train(self, read_article_ids=None, unread_article_ids=None):
        #Load user feedback if needed
        if read_article_ids is None:
            read_article_ids = (r.article.id
                                for r in ReadArticleFeedback.objects(user_id=self.user.id).only("article"))

        user_feedback = Article.objects(id__in=read_article_ids)

        #TODO: cluster feedback articles and save more than one profile

        num_loaded_articles = 0
        centroid = numpy.zeros(self.num_features_, dtype=numpy.float32)

        for article in user_feedback:
            try:
                article_features_as_full_vec = self.get_features(article)
            except Exception as inst:
                logger.error("Could not get features for article %s: %s" %
                             (article.id, inst))
                continue

            #normalize to unit length (do we need this?)
            tmp_doc = matutils.unitvec(article_features_as_full_vec)

            #add up tmp_doc
            centroid = numpy.add(centroid, tmp_doc)
            num_loaded_articles += 1

        #average each element
        if num_loaded_articles != 0:
            centroid = centroid / num_loaded_articles

        centroid = matutils.full2sparse(centroid)

        #set user model data
        self.user_model_features = [centroid]
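The learned profile is simply the mean of the unit-normalized article vectors, converted to gensim's sparse format. A self-contained sketch of that computation, with toy vectors standing in for get_features output (assumes numpy and gensim are installed):

import numpy
from gensim import matutils

docs = [numpy.array([1.0, 0.0, 0.0, 0.0], dtype=numpy.float32),
        numpy.array([0.0, 2.0, 0.0, 0.0], dtype=numpy.float32),
        numpy.array([3.0, 3.0, 0.0, 0.0], dtype=numpy.float32)]

centroid = numpy.zeros(4, dtype=numpy.float32)
for doc in docs:
    centroid += matutils.unitvec(doc)  #normalize each vector to unit length
centroid /= len(docs)                  #average each element

#full2sparse drops (near-)zero entries and returns (index, weight) pairs
print(matutils.full2sparse(centroid))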
Example 4
def get_article_samples(config_):
    #Connect to mongo database
    logger.info("Connect to database...")
    connect(config_['database']['db-name'],
            username=config_['database']['user'],
            password=config_['database']['passwd'],
            port=config_['database']['port'])

    #get user
    user = User.objects(email=u"*****@*****.**").first()

    ranked_article_ids = (a.article.id
                          for a
                          in RankedArticle.objects(user_id=user.id).only("article"))
    all_article_ids = set(a.id
                          for a
                          in Article.objects(id__in=ranked_article_ids).only("id"))

    read_article_ids = set(a.article.id
                           for a
                           in ReadArticleFeedback.objects(user_id=user.id).only("article"))

    unread_article_ids = all_article_ids - read_article_ids

    #sample test articles
    X, y = get_samples(extractor, read_article_ids, unread_article_ids)

    return X, y
Example 5
    def train(self, read_article_ids=None, unread_article_ids=None):
        """
        Trains the DecisionTree Classifier.
        read_article_ids should be an iterable over read article ids
        unread_article_ids should be an iterable over unread article ids
        
        If one is None it will be loaded from database.
        """
        
        #Load user feedback if needed
        if read_article_ids is None:
            read_article_ids = set(r.article.id
                                   for r in ReadArticleFeedback.objects(user_id=self.user.id).only("article"))
        else:
            read_article_ids = set(read_article_ids)

        #Get all articles the user did not read.
        if unread_article_ids is None:
            ranked_article_ids = (a.article.id
                                  for a in RankedArticle.objects(user_id=self.user.id).only("article"))
            all_article_ids = set(a.id
                                  for a in Article.objects(id__in=ranked_article_ids).only("id"))
            unread_article_ids = all_article_ids - read_article_ids
        
        #convert all article features
        all_articles, marks = self._get_samples(read_article_ids, 
                                                unread_article_ids,
                                                p_synthetic_samples=self.p_synthetic_samples,
                                                p_majority_samples=self.p_majority_samples)

        logger.debug("Learn on %d samples." % len(marks))            

        self.clf = tree.DecisionTreeClassifier()
        self.clf.fit(all_articles, marks)
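Everything project-specific happens in _get_samples; the classifier itself is stock scikit-learn. A toy sketch of the final two lines, with random vectors standing in for article features (the 1 = read, 0 = unread label scheme is assumed):

import numpy as np
from sklearn import tree

rng = np.random.RandomState(0)
all_articles = rng.rand(20, 5)         #20 samples with 5 features each
marks = np.array([1] * 10 + [0] * 10)  #assumed label scheme

clf = tree.DecisionTreeClassifier()
clf.fit(all_articles, marks)
print(clf.predict(all_articles[:3]))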
Example 6
    def train(self, read_article_ids=None, unread_article_ids=None):
        """
        Trains the Bayes Classifier.
        read_article_ids should be an iterable over read article ids
        unread_article_ids should be an iterable over unread article ids
        
        If one is None it will be loaded from database.
        """
        
        #Load user feedback if needed
        if read_article_ids is None:
            read_article_ids = set(r.article.id for r
                                   in ReadArticleFeedback.objects(user_id=self.user.id).only("article"))
        else:
            read_article_ids = set(read_article_ids)
        
        logger.info("Use %d read articles for learning." % len(read_article_ids))
        read_articles = Article.objects(id__in=read_article_ids)

        #Get all articles the user did not read.
        if unread_article_ids is None:
            ranked_article_ids = (a.article.id for a in RankedArticle.objects(user_id=self.user.id).only("article"))
            all_article_ids = set(a.id for a in Article.objects(id__in=ranked_article_ids).only("id"))
            unread_article_ids = all_article_ids - read_article_ids
            
        #undersample unreads
        logger.info("Use %d unread articles for learning." % (len(unread_article_ids)))
        
        unread_articles = Article.objects(id__in=unread_article_ids)
        
        #convert all article features
        all_articles = UserModelBayes.AllArticles(read_articles, unread_articles, self.get_features)

        self.clf.fit(np.array(list(all_articles)), np.array(list(all_articles.get_marks())))
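UserModelBayes.AllArticles is not shown in these snippets; judging from the fit call, iterating it yields one feature vector per article and get_marks() yields the labels in the same order. A hypothetical minimal reconstruction:

class AllArticles(object):
    '''Hypothetical sketch: iterate read then unread articles as feature
    vectors; get_marks() returns the matching class labels.'''

    def __init__(self, read_articles, unread_articles, get_features):
        self.read_articles = list(read_articles)
        self.unread_articles = list(unread_articles)
        self.get_features = get_features

    def __iter__(self):
        for article in self.read_articles + self.unread_articles:
            yield self.get_features(article)

    def get_marks(self):
        #label scheme assumed: 1 = read, 0 = unread
        return [1] * len(self.read_articles) + [0] * len(self.unread_articles)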
Example 7
    def get_articles(self, date):
        '''
        Returns the list of articles between 0:00 (inclusive) and 24:00
        (exclusive) of the given date.
        '''

        #use select_related = 2 to fetch all vendor data
        articles_ = Article.objects(vendor__in=current_user.mongodb_user.subscriptions,
                                    date__gte=date.date(),
                                    date__lt=date.date() + timedelta(days=1)).select_related(2)

        #mark articles as read/unread and add id field
        articles_as_dict = []
        for a in articles_:
            #check in database if article has Read Feedback
            feedback = ReadArticleFeedback.objects(user_id=self.mongodb_user.id,
                                                   article=a).first()

            tmp_article = a._data

            if feedback is None:
                tmp_article['read'] = False
            else:
                tmp_article['read'] = True

            tmp_article['id'] = a.id

            articles_as_dict.append(tmp_article)

        return articles_as_dict
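The query bounds form a half-open interval [day 0:00, next day 0:00), so an article stamped at midnight is counted exactly once. The boundary computation in isolation:

from datetime import datetime, timedelta

date = datetime(2014, 5, 17, 15, 30)     #any moment on the target day
lower = date.date()                      #inclusive lower bound: the day itself
upper = date.date() + timedelta(days=1)  #exclusive upper bound: the next day
print(lower, upper)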
Example 8
    def train(self, read_article_ids=None, unread_article_ids=None):
        '''
        Trains several SVM and Naive Bayes classifiers.
        read_article_ids should be an iterable over read article ids
        unread_article_ids should be an iterable over unread article ids

        If one is None it will be loaded from database.
        '''

        #Load user feedback if needed
        if read_article_ids is None:
            read_article_ids = set(r.article.id
                                   for r
                                   in ReadArticleFeedback.objects(user_id=self.user.id).only("article"))
        else:
            read_article_ids = set(read_article_ids)

        #Get all articles the user did not read.
        if unread_article_ids is None:
            ranked_article_ids = (a.article.id
                                  for a
                                  in RankedArticle.objects(user_id=self.user.id).only("article"))
            all_article_ids = set(a.id
                                  for a
                                  in Article.objects(id__in=ranked_article_ids).only("id"))
            unread_article_ids = all_article_ids - read_article_ids
        
        classifiers = ([lambda: svm.SVC(kernel='rbf')] * 5
                       + [GaussianNB] * 4)

        #(p_synthetic_samples, p_majority_samples) per classifier:
        #five SVM settings followed by four Naive Bayes settings
        sampling_settings = [#SVM
                             (100, 200), (200, 300), (300, 400),
                             (400, 500), (500, 600),
                             #Naive Bayes
                             (100, 100), (100, 200), (300, 500),
                             (600, 600)]

        parameters = [{'read_article_ids': read_article_ids,
                       'unread_article_ids': unread_article_ids,
                       'p_synthetic_samples': p_synthetic,
                       'p_majority_samples': p_majority,
                       'k': 10}
                      for p_synthetic, p_majority in sampling_settings]

        self._call_classifiers(classifiers, parameters)
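The p_synthetic_samples/p_majority_samples/k arguments suggest SMOTE-style synthetic over-sampling of the read class, paired with undersampling of the unread majority. A sketch of the core SMOTE step, interpolating between a minority sample and one of its k nearest minority neighbors; this illustrates the technique, not the project's _get_samples:

import numpy as np

def smote_like(minority, n_synthetic, k=10, seed=0):
    '''Generate n_synthetic points on segments between random minority
    samples and one of their k nearest minority neighbors.'''
    rng = np.random.RandomState(seed)
    synthetic = []
    for _ in range(n_synthetic):
        x = minority[rng.randint(len(minority))]
        distances = np.linalg.norm(minority - x, axis=1)
        neighbors = np.argsort(distances)[1:k + 1]  #skip x itself
        nn = minority[rng.choice(neighbors)]
        gap = rng.rand()  #random position along the segment
        synthetic.append(x + gap * (nn - x))
    return np.array(synthetic)

minority = np.random.RandomState(1).rand(20, 5)
print(smote_like(minority, n_synthetic=40).shape)  #(40, 5)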
Example 9
    # Connect to mongo database
    connect(config_['database']['db-name'],
            username=config_['database']['user'],
            password=config_['database']['passwd'],
            port=config_['database']['port'])

    # Load feature extractor
    # feature_extractor = EsaFeatureExtractor(prefix = config_['prefix'])
    # feature_extractor = TfidfFeatureExtractor(prefix = config_['prefix'])
    # feature_extractor = LdaFeatureExtractor(prefix = config_['prefix'])
    # feature_extractor = LdaBowFeatureExtractor(prefix = config_['prefix'])
    feature_extractor = cEsaFeatureExtractor(prefix=config_["prefix"])

    # get user
    user = User.objects(email=u"*****@*****.**").first()

    ranked_article_ids = (a.article.id for a in RankedArticle.objects(user_id=user.id).only("article"))
    all_article_ids = set(a.id for a in Article.objects(id__in=ranked_article_ids).only("id"))

    read_article_ids = set(a.article.id for a in ReadArticleFeedback.objects(user_id=user.id).only("article"))

    unread_article_ids = all_article_ids - read_article_ids

    for p_synthetic in xrange(100, 700, 100):
        for p_majority in xrange(100, 700, 100):

            logger.info("Synthetic over-sampling %d and majority undersampling %d" % (p_synthetic, p_majority))

            # run test N_ITERATIONS
            precisions_read = np.zeros((N_ITERATIONS))
            recalls_read = np.zeros((N_ITERATIONS))
            f1_scores_read = np.zeros((N_ITERATIONS))
            precisions_unread = np.zeros((N_ITERATIONS))
            recalls_unread = np.zeros((N_ITERATIONS))
            f1_scores_unread = np.zeros((N_ITERATIONS))
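The snippet breaks off after allocating the per-iteration result arrays; presumably each test run fills one slot and the arrays are averaged afterwards. A hypothetical sketch of that pattern using scikit-learn's metrics:

import numpy as np
from sklearn.metrics import precision_recall_fscore_support

N_ITERATIONS = 10
precisions_read = np.zeros(N_ITERATIONS)
recalls_read = np.zeros(N_ITERATIONS)
f1_scores_read = np.zeros(N_ITERATIONS)

rng = np.random.RandomState(0)
for i in range(N_ITERATIONS):
    y_true = rng.randint(0, 2, size=50)
    y_pred = rng.randint(0, 2, size=50)  #stand-in for classifier predictions
    p, r, f1, _ = precision_recall_fscore_support(y_true, y_pred,
                                                  labels=[1], average=None)
    precisions_read[i], recalls_read[i], f1_scores_read[i] = p[0], r[0], f1[0]

print(precisions_read.mean(), recalls_read.mean(), f1_scores_read.mean())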
Example 10
    #feature_extractor = LdaBowFeatureExtractor(prefix = config_['prefix'])
    feature_extractor = cEsaFeatureExtractor(prefix = config_['prefix'])
    
    #get user
    user = User.objects(email=u"*****@*****.**").first()
    
    ranked_article_ids = (a.article.id
                          for a
                          in RankedArticle.objects(user_id=user.id).only("article"))
    all_article_ids = set(a.id
                          for a
                          in Article.objects(id__in=ranked_article_ids).only("id"))

    read_article_ids = set(a.article.id
                           for a
                           in ReadArticleFeedback.objects(user_id=user.id).only("article"))
    
    unread_article_ids = all_article_ids - read_article_ids

    for p_synthetic in xrange(100, 700, 100):
        for p_majority in xrange(100, 700, 100): 
            
            logger.info("Synthetic over-sampling %d and majority undersampling %d" %
                        (p_synthetic, p_majority))
            
            #run test N_ITERATIONS
            precisions_read = np.zeros((N_ITERATIONS))
            recalls_read = np.zeros((N_ITERATIONS))
            f1_scores_read = np.zeros((N_ITERATIONS))
            precisions_unread = np.zeros((N_ITERATIONS))
            recalls_unread = np.zeros((N_ITERATIONS))
Example 11
    def test_constructor_with_file_wikicorpus(self):
        
        #load tf-idf model
        tfidf_model = tfidfmodel.TfidfModel.load("/media/sdc1/test_dump/result/test_tfidf.model")
        extractor = TfidfFeatureExtractor("/media/sdc1/test_dump/result/test")
        
        #load tf-idf corpus
        tfidf_corpus = MmCorpus('/media/sdc1/test_dump/result/test_tfidf_corpus.mm')
        
        #load lda corpus
        #lda_corpus = MmCorpus('/media/sdc1/test_dump/result/test_lda_corpus.mm')
        
        #load dictionary
        id2token = Dictionary.load("/media/sdc1/test_dump/result/test_wordids.dict")
        
        #load article titles
        document_titles = DocumentTitles.load("/media/sdc1/test_dump/result/test_articles.txt")
        
        #Connect to mongo database
        connect(self.config_['database']['db-name'],
                username=self.config_['database']['user'],
                password=self.config_['database']['passwd'],
                port=self.config_['database']['port'])
        
        #Load articles as test corpus
        user = User.objects(email=u"*****@*****.**").first()
        
        ranked_article_ids = (a.article.id
                              for a
                              in RankedArticle.objects(user_id=user.id).only("article"))
        all_article_ids = set(a.id
                              for a
                              in Article.objects(id__in=ranked_article_ids).only("id"))

        read_article_ids = set(a.article.id
                               for a
                               in ReadArticleFeedback.objects(user_id=user.id).only("article"))
        
        unread_article_ids = all_article_ids - read_article_ids

        #sample test articles
        X, y = get_samples(extractor, read_article_ids, unread_article_ids)
        
        s, f = X.shape
        logger.debug("Training with %d samples, %d features, %d marks" %
                     (s, f, len(y)))

        #train esa model
        esa_model = CosineEsaModel(tfidf_corpus,
                                   document_titles=document_titles,
                                   test_corpus=X,
                                   test_corpus_targets=y,
                                   num_test_corpus=len(y),
                                   num_best_features=15,
                                   num_features=len(id2token))
        
        print esa_model
        
        esa_model.save('/media/sdc1/test_dump/result/test_cesa.model')
        
        tmp_esa = CosineEsaModel.load('/media/sdc1/test_dump/result/test_cesa.model') 
        print tmp_esa