Example #1
def newChat():
    database, collection = connectCollection('chats', 'chateo')
    new_id = max(collection.distinct("idChat")) + 1
    name = str(request.forms.get("name", f"chat{new_id}"))
    # "idChat" matches the key queried everywhere else
    new_chat = {"idChat": new_id, "name": name}
    collection.insert_one(new_chat)
    print("Chat created")
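connectCollection is imported from the project's own connect module (see Example #8) and its source is not shown here; a minimal sketch of what such a helper might look like, assuming it simply wraps pymongo's MongoClient around the URLMONGO connection string used in Example #8:

# Hypothetical sketch of connect.connectCollection -- not the project's actual code.
import os

from pymongo import MongoClient


def connectCollection(database_name, collection_name):
    # Assumes URLMONGO holds the MongoDB connection string (as in Example #8)
    client = MongoClient(os.getenv('URLMONGO'))
    database = client[database_name]
    collection = database[collection_name]
    return database, collection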
Example #2
def chatsWant(x, y):
    """Returns the userName and text of every message in chats x (inclusive) to y (exclusive)."""
    x = int(x)
    y = int(y)
    database, collection = connectCollection('chats', 'chateo')
    chats_want = []
    for a in range(x, y):
        chats_want.append(
            dumps(collection.find({"idChat": a}, {"userName": 1, "text": 1})))
    return chats_want
Example #3
def createUser():
    database, collection = connectCollection('chats', 'users')
    name = str(request.forms.get("name"))
    new_id = max(collection.distinct("idUser")) + 1
    user_new = {"idUser": new_id, "userName": name}
    takenNames = list(collection.aggregate([{'$project': {'userName': 1}}]))
    if user_new['userName'] in [e['userName'] for e in takenNames]:
        return {"Error!": "Username already in use"}
    else:
        collection.insert_one(user_new)
        return {
            'message': 'NEW USER CREATED!',
            'userName': name,
            'idUser': int(new_id)
        }
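As a design note, the username-uniqueness check above could also be delegated to MongoDB itself with a unique index; a minimal sketch, assuming the same users collection (the inserted values are made up):

from pymongo.errors import DuplicateKeyError

# One-time setup: let MongoDB reject duplicate userNames on insert
collection.create_index("userName", unique=True)

try:
    collection.insert_one({"idUser": 42, "userName": "ana"})  # hypothetical values
except DuplicateKeyError:
    print("Username already in use")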
Example #4
def recommending_user(userName):
    database, collection = connectCollection('chats', 'chateo')
    query = list(collection.find({}, {'userName': 1, "text": 1, '_id': 0}))
    diccionario = getting_every_sentence(query)
    count_vectorizer = CountVectorizer(stop_words='english')
    sparse_matrix = count_vectorizer.fit_transform(diccionario.values())
    doc_term_matrix = sparse_matrix.todense()
    # get_feature_names() was removed in scikit-learn 1.2; use get_feature_names_out() there
    df = pd.DataFrame(doc_term_matrix,
                      columns=count_vectorizer.get_feature_names(),
                      index=diccionario.keys())
    # Cosine similarity between every pair of users' vocabularies
    similarity_matrix = distance(df, df)
    sim_df = pd.DataFrame(similarity_matrix,
                          columns=diccionario.keys(),
                          index=diccionario.keys())
    np.fill_diagonal(sim_df.values, 0)
    # The three users whose messages are closest to userName's
    recommended = list(sim_df.sort_values(by=userName, ascending=False).index[0:3])
    return json.dumps(recommended)
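For reference, a standalone toy run of the same CountVectorizer + cosine_similarity flow used above; the user names and texts are invented for illustration:

import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity

# Hypothetical per-user corpora, shaped like the output of getting_every_sentence
corpora = {
    "ana": "pizza pasta football",
    "bob": "pizza pasta wine",
    "eva": "tennis golf swimming",
}

vectorizer = CountVectorizer(stop_words='english')
matrix = vectorizer.fit_transform(corpora.values())
sim = pd.DataFrame(cosine_similarity(matrix),
                   index=corpora.keys(), columns=corpora.keys())
np.fill_diagonal(sim.values, 0)  # a user should never be their own best match

# Users most similar to "ana": "bob" ranks first, sharing two of her three words
print(list(sim.sort_values(by="ana", ascending=False).index[0:3]))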
Example #5
def getChats():
    """Returns every distinct message text in the chat collection."""
    database, collection = connectCollection('chats', 'chateo')
    return dumps(collection.distinct("text"))
Example #6
def getUsers():
    """Returns every registered userName."""
    database, collection = connectCollection('chats', 'users')
    return dumps(collection.distinct("userName"))
Example #7
def getAllUsers():
    """Returns the userName and text of every stored message."""
    database, collection = connectCollection('chats', 'chateo')
    return dumps(collection.find({}, {'userName': 1, "text": 1, '_id': 0}))
Example #8
import os
import json
import re

import dns  # dnspython; required by pymongo for mongodb+srv:// connection strings
import numpy as np
import pandas as pd
from bottle import get, run, request
from bson.json_util import dumps, loads
from dotenv import load_dotenv
from pymongo import MongoClient
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity as distance

from connect import connectCollection

load_dotenv()
url = os.getenv('URLMONGO')
client = MongoClient(url)

database, collection = connectCollection('chats', 'chateo')

def getting_every_sentence(lista):
    """Concatenates all messages of each user into one cleaned string per user."""
    users_dict = dict()
    for dicc in lista:
        if dicc['userName'] not in users_dict:
            users_dict[dicc['userName']] = dicc['text']
        else:
            users_dict[dicc['userName']] += ' ' + dicc['text']
    for e in users_dict:
        # Keep only alphanumerics so the vectorizer sees clean tokens
        users_dict[e] = re.sub(r"[^a-zA-Z0-9]+", ' ', users_dict[e])
    return users_dict
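# Example with made-up data: messages are grouped per user and punctuation stripped, e.g.
#   getting_every_sentence([
#       {"userName": "ana", "text": "hi there!"},
#       {"userName": "ana", "text": "how are you?"},
#   ])
#   returns {"ana": "hi there how are you "}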


@get('/recommendation/user=<userName>')
def recommending_user(userName):