Exemplo n.º 1
0
import json
import logging

from bottle import (delete, error, get, install, post, put, redirect, request,
                    response, route, run, static_file)

from config import Config
from contact import Contact, MalformedContactError
from database import (ConflictError, ContactExistsError, Database,
                      UnauthorizedError)

# Cookie lifetime in seconds (30 days).
COOKIE_MAX_AGE = 2592000

# Module-level singletons: configuration plus the database built from it.
cfg = Config()
db = Database(cfg)


# https://stackoverflow.com/questions/17262170/
class EnableCors(object):
    name = 'enable_cors'
    api = 2

    def apply(self, fn, context):
        def _enable_cors(*args, **kwargs):
            # set CORS headers
            response.headers['Access-Control-Allow-Origin'] = '*'
            response.headers['Access-Control-Allow-Methods'] = 'DELETE, GET, POST, PUT, OPTIONS'
            response.headers['Access-Control-Allow-Headers'] = 'Origin, Accept, Content-Type, X-Requested-With, X-CSRF-Token'
            response.headers['Access-Control-Allow-Credentials'] = 'true'

            if request.method != 'OPTIONS':
Exemplo n.º 2
0
def _standalone():
    """Run a single IBGE tracking pass against a fresh Database."""
    database = Database()
    tracker = IbgeTracker()
    tracker.track(database)
Exemplo n.º 3
0
def _get_namad_history_row_data_offline(id):
    """Look up the stored history record for *id*.

    Returns the record's ``history`` payload when found, otherwise the
    falsy lookup result itself.
    """
    record = Database().get_history(id)
    return record.history if record else record
Exemplo n.º 4
0
# Train the greeting bot on the stock English corpora; blockPrint()
# silences the trainer's console output during training.
greet_trainer = ChatterBotCorpusTrainer(greetbot)
blockPrint()
greet_trainer.train("chatterbot.corpus.english.greetings",
                    "chatterbot.corpus.english.conversations")

# Locate the training JSON file next to the database module on disk.
database_path = os.path.dirname(
    os.path.realpath(inspect.getfile(Database.__init__)))
training_database = json.load(
    open(database_path + "/" + "query_adapter_training.json"))['data']

# Flatten the two labelled buckets ("0" and "1") into (text, label) pairs.
training_data = [(data, int(classe)) for classe in ["0", "1"]
                 for data in training_database[classe]]

nb_clf = NaiveBayesClassifier(training_data)

# NOTE(review): parse_db=True presumably pre-parses this database -- confirm.
qdata = Database('query_data', database_path, parse_db=True)

fields = Database('fields', database_path)

clf_trained, vectorizer, class_names = train_feature_finder(
    fields.db, RandomForestClassifier(n_estimators=20))


def run():
    say_it("Can i help you find joining dates of personnel ?")

    recognizer = sr.Recognizer()
    microphone = sr.Microphone(device_index=1)

    guess = recognize_speech_from_mic(recognizer, microphone)
Exemplo n.º 5
0
 def setUp(self):
     """Create throwaway temp files, settings and a Database for each test."""
     self.temp_database_file = tempfile.NamedTemporaryFile()
     # Suffix mimics a movie filename ending in digits.
     self.temp_movie_file = tempfile.NamedTemporaryFile(suffix="12345")
     self.settings = Settings({'interactive': False})
     self.database = Database(self.temp_database_file.name, self.settings)
Exemplo n.º 6
0
from models.patch import Patch
from models.spoiler import Spoiler
from models.result import Result

# Log everything (DEBUG and up) to output.log.
logging.basicConfig(level=logging.DEBUG, filename="output.log")

app = Flask(__name__)
app.config['CORS_HEADERS'] = 'Content-Type'
app.json_encoder = JSONEncoder

# Allow any origin to call the v1 API endpoints.
cors = CORS(app, resources={
    r"/v1/*": {"origins": "*"}
})

config = Config()
database = Database(logging, config)

@app.route("/v1/seed/generate", methods=["POST"])
@expects_json(SeedRequest.schema)
def generateSeed() -> Response:
    request_data = SeedRequest(request.get_json())
    settings = Settings(request_data.seed, request_data.difficulty, request_data.goal,
                        request_data.logic,
                        request_data.statues, request_data.enemizer, request_data.start_location, request_data.firebird,
                        request_data.ohko, request_data.red_jewel_madness, request_data.allow_glitches,
                        request_data.boss_shuffle, request_data.open_mode, request_data.z3_mode, request_data.overworld_shuffle, request_data.entrance_shuffle)

    randomizer = Randomizer("./data/gaia.bin")
    result = __generate(randomizer, settings, request_data.generate_race_rom, 0)
    if result is None:
        return make_response("Failed to generate a seed", 500)
Exemplo n.º 7
0
import socket
from pathlib import Path
from utils import extract_route, read_file
from views import index, edit, notfound
from database import Database, Note

CUR_DIR = Path(__file__).parent
print(CUR_DIR)
# Listen on all interfaces.
SERVER_HOST = '0.0.0.0'
SERVER_PORT = 8080
db = Database('banco')

# Plain blocking TCP server; SO_REUSEADDR lets the port be rebound quickly
# after a restart.
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind((SERVER_HOST, SERVER_PORT))
server_socket.listen()

print(
    f'Servidor escutando em (ctrl+click): http://{SERVER_HOST}:{SERVER_PORT}')

while True:
    client_connection, client_address = server_socket.accept()

    request = client_connection.recv(1024).decode()

    route = extract_route(request)
    filepath = CUR_DIR / route
    print('route', route)
    if filepath.is_file():
        response = read_file(filepath)
    elif route == '':
Exemplo n.º 8
0
 def add_library(self, view):
     ''' Add entry to view table in jellyfin database.

         view is a mapping expected to carry 'Id', 'Name' and 'Media' keys.
     '''
     with Database('jellyfin') as jellyfindb:
         jellyfin_db.JellyfinDatabase(jellyfindb.cursor).add_view(
             view['Id'], view['Name'], view['Media'])
Exemplo n.º 9
0
 def __init__(self, nome, cpf, data_nascimento, multa=0.0):
     """Store the person's basic data and open a Database handle.

     multa (fine) defaults to zero for a new record.
     """
     self.nome = nome
     self.cpf = cpf
     self.data_nascimento = data_nascimento
     self.multa = multa
     self.db = Database()
Exemplo n.º 10
0
    def get_nodes(self):
        ''' Set up playlists, video nodes, window prop.

            Builds playlist/node entries for every whitelisted library plus a
            fixed set of "favorites" nodes, then refreshes window properties
            via window_nodes().
        '''
        node_path = xbmc.translatePath("special://profile/library/video")
        playlist_path = xbmc.translatePath("special://profile/playlists/video")
        index = 0

        # Kodi 19 doesn't seem to create this directory on it's own
        if not os.path.isdir(node_path):
            os.makedirs(node_path)

        with Database('jellyfin') as jellyfindb:
            db = jellyfin_db.JellyfinDatabase(jellyfindb.cursor)

            for library in self.sync['Whitelist']:

                library = library.replace('Mixed:', "")
                view = db.get_view(library)

                if view:
                    view = {
                        'Id': library,
                        'Name': view.view_name,
                        'Tag': view.view_name,
                        'Media': view.media_type
                    }

                    if view['Media'] == 'mixed':
                        # A mixed library is expanded into one movies and one
                        # tvshows entry.
                        for media in ('movies', 'tvshows'):

                            temp_view = dict(view)
                            temp_view['Media'] = media
                            self.add_playlist(playlist_path, temp_view, True)
                            self.add_nodes(node_path, temp_view, True)

                        index += 1  # Compensate for the duplicate.
                    else:
                        if view['Media'] in ('movies', 'tvshows',
                                             'musicvideos'):
                            self.add_playlist(playlist_path, view)

                        # Music libraries get no video node.
                        if view['Media'] not in ('music', ):
                            self.add_nodes(node_path, view)

                    index += 1

        # Static favourites nodes appended after the synced libraries.
        for single in [{
                'Name': translate('fav_movies'),
                'Tag': "Favorite movies",
                'Media': "movies"
        }, {
                'Name': translate('fav_tvshows'),
                'Tag': "Favorite tvshows",
                'Media': "tvshows"
        }, {
                'Name': translate('fav_episodes'),
                'Tag': "Favorite episodes",
                'Media': "episodes"
        }]:

            self.add_single_node(node_path, index, "favorites", single)
            index += 1

        self.window_nodes()
Exemplo n.º 11
0
    def window_nodes(self):
        ''' Just read from the database and populate based on SortedViews
            Setup the window properties that reflect the jellyfin server views and more.

            index counts Jellyfin.nodes entries, windex counts Jellyfin.wnodes
            entries; totals are published at the end.
        '''
        self.window_clear()
        self.window_clear('Jellyfin.wnodes')

        with Database('jellyfin') as jellyfindb:
            libraries = jellyfin_db.JellyfinDatabase(
                jellyfindb.cursor).get_views()

        libraries = self.order_media_folders(libraries or [])
        index = 0
        windex = 0

        try:
            self.media_folders = self.get_libraries()
        except IndexError as error:
            # Best-effort: log and carry on without media folders.
            LOG.exception(error)

        for library in libraries:
            view = {
                'Id': library.view_id,
                'Name': library.view_name,
                'Tag': library.view_name,
                'Media': library.media_type
            }

            if library.view_id in [
                    x.replace('Mixed:', "") for x in self.sync['Whitelist']
            ]:  # Synced libraries

                if view['Media'] in ('movies', 'tvshows', 'musicvideos',
                                     'mixed'):

                    if view['Media'] == 'mixed':
                        # Expand a mixed library into movies + tvshows nodes.
                        for media in ('movies', 'tvshows'):

                            for node in NODES[media]:

                                temp_view = dict(view)
                                temp_view['Media'] = media
                                temp_view['Name'] = "%s (%s)" % (
                                    view['Name'], translate(media))
                                self.window_node(index, temp_view, *node)
                                self.window_wnode(windex, temp_view, *node)

                            # Add one to compensate for the duplicate.
                            index += 1
                            windex += 1
                    else:
                        for node in NODES[view['Media']]:

                            self.window_node(index, view, *node)

                            if view['Media'] in ('movies', 'tvshows'):
                                self.window_wnode(windex, view, *node)

                        if view['Media'] in ('movies', 'tvshows'):
                            windex += 1

                elif view['Media'] == 'music':
                    self.window_node(index, view, 'music')
            else:  # Dynamic entry
                if view['Media'] in ('homevideos', 'books', 'playlists'):
                    self.window_wnode(windex, view, 'browse')
                    windex += 1

                self.window_node(index, view, 'browse')

            index += 1

        # Static favourites entries appended after the server views.
        for single in [{
                'Name': translate('fav_movies'),
                'Tag': "Favorite movies",
                'Media': "movies"
        }, {
                'Name': translate('fav_tvshows'),
                'Tag': "Favorite tvshows",
                'Media': "tvshows"
        }, {
                'Name': translate('fav_episodes'),
                'Tag': "Favorite episodes",
                'Media': "episodes"
        }]:

            self.window_single_node(index, "favorites", single)
            index += 1

        window('Jellyfin.nodes.total', str(index))
        window('Jellyfin.wnodes.total', str(windex))
Exemplo n.º 12
0
def generate_changes_database(db, feature_list=feature_list):
    """
    Build per-feature "changes" tables from the pandas_features table.

    For every browser (grouped by browserid) whose fingerprint changed at
    least once, record each feature transition (from/to value, timestamps,
    client metadata), then export one <feature>changes table per feature
    into a fresh 'filteredchanges<browserid>' database.

    The database must be populated before this is called; only users seen
    more than 3 times are kept. Returns the per-feature transition maps.

    NOTE: the feature_list parameter is retained for interface
    compatibility but is immediately replaced by the live column list.
    """
    browserid = 'browserid'

    # The caller-supplied list is intentionally overridden by the schema.
    feature_list = db.get_column_names('pandas_features')
    df = db.load_data(feature_list=["*"], table_name="pandas_features")
    df = filter_less_than_n(df, 3)

    # Make sure label transitions are tracked as well.
    if 'label' not in feature_list:
        feature_list.append('label')

    # One accumulator (column name -> list of values) per feature.
    maps = {}
    for feature in feature_list:
        maps[feature] = {
            'browserid': [],
            "clientid": [],
            "IP": [],
            "from": [],
            "to": [],
            "fromtime": [],
            "totime": [],
            "browser": [],
            "os": []
        }

    grouped = df.groupby(browserid)
    pre_fingerprint = ""
    pre_row = []
    for cur_key, cur_group in tqdm(grouped):
        # Browsers whose fingerprint never changed contribute nothing.
        if cur_group['browserfingerprint'].nunique() == 1:
            continue
        pre_fingerprint = ""
        for idx, row in cur_group.iterrows():
            if pre_fingerprint == "":
                # First row of a group only seeds the comparison state.
                pre_fingerprint = row['browserfingerprint']
                pre_row = row
                continue
            for feature in feature_list:
                if feature not in row:
                    continue
                if pre_row[feature] != row[feature]:
                    maps[feature]['browserid'].append(row[browserid])
                    maps[feature]['clientid'].append(row['clientid'])
                    maps[feature]['IP'].append(row['IP'])
                    maps[feature]["from"].append(pre_row[feature])
                    maps[feature]['to'].append(row[feature])
                    maps[feature]['fromtime'].append(pre_row['time'])
                    maps[feature]['totime'].append(row['time'])
                    maps[feature]['browser'].append(row['browser'])
                    maps[feature]['os'].append(get_os_from_agent(row['agent']))
            pre_row = row
            pre_fingerprint = row['browserfingerprint']
    db = Database('filteredchanges{}'.format(browserid))
    for feature in feature_list:
        print(feature)
        try:
            df = pd.DataFrame.from_dict(maps[feature])
            db.export_sql(df, '{}changes'.format(feature))
            print('success')
        except Exception as export_error:
            # Was a bare `except:` that also swallowed SystemExit and
            # KeyboardInterrupt and hid the actual failure; narrow it and
            # surface the error alongside the column-length diagnostic.
            print(export_error)
            print(len(maps[feature]['from']), len(maps[feature]['to']),
                  len(maps[feature]['fromtime']), len(maps[feature]['totime']))
    return maps
Exemplo n.º 13
0
import os

import psycopg2.errors

from database import Database
from database.config import tables


def generate_tables(database):
    """Create every configured table that does not already exist.

    Stops at the first SQL syntax error after printing the failure; prints
    a status line for every table either way.
    """
    for name, schema in tables.items():
        if database.find_table(name):
            print(f"Table {name} already exists.")
            continue
        try:
            database.create_table(name, schema)
        except psycopg2.errors.SyntaxError as error:
            print(f"Fail: {error}.")
            return
        print(f"Table {name} created.")


if __name__ == "__main__":
    # Connection string comes from the environment; raises KeyError if unset.
    db = Database(os.environ["DATABASE_URL"])
    generate_tables(db)
Exemplo n.º 14
0
    def save(self, cursor):
        """Insert this message into the messages table.

        Returns True when the cursor reports a non-empty status message.
        """
        sql = "INSERT INTO messages(from_id,to_id,text,creation_date,title) VALUES(%s, %s, %s, %s, %s);"
        values = (self.from_id, self.to_id, self.text, self.creation_date,
                  self.title)
        cursor.execute(cursor.mogrify(sql, values))
        return cursor.statusmessage != ""

    def __str__(self):
        """Return a human-readable multi-line rendering of the message."""
        s = f"""
            Message from: \t{self.from_id}
            \t\tto: \t{self.to_id}
            Posted at: \t{self.creation_date}
            Message:  \t#__{self.title}__#
            \t\t{self.text}
            """
        return s


if __name__ == '__main__':
    # Manual smoke test: connect to the messenger DB and dump every message.
    d = Database()
    d.configure_connection('postgres', 'dupa', database='messenger')
    d.connect()
    d.enable_cursor()
    # Example of saving a new message (kept commented for reference):
    #m = Message()
    #m.load_from_list([1,2,'Już cię więcej nie kocham seksistowska świnio!', datetime.now(), 'Nieudana miłość'])
    #print(m.save(d.cursor))
    for m in Message.load_all_messages(d.cursor):
        print(m)
Exemplo n.º 15
0
        return "\"" + string + "\""
    else:
        return string


# Timestamped dump name, e.g. 2020-01-01_12:00:00_dump.csv.lz4 (UTC).
outfile = time.strftime("%Y-%m-%d_%H:%M:%S_dump.csv.lz4", time.gmtime())
dldir = "static/downloads/"

# Only ever keep the newest dump in the downloads directory.
print("Deleting existing dumps")
for file in os.listdir(dldir):
    if file.endswith("_dump.csv.lz4"):
        os.remove(os.path.join(dldir, file))

print("Export started, connecting to databases...")

db = Database(config.DB_CONN_STR)
es = ElasticSearchEngine("od-database")

# Stream every ES document joined with its website URL from the database.
docs_with_url = db.join_website_url(es.stream_all_docs())

print("Connected, writing to csv")

with lz4.frame.open(outfile + ".part", mode='wb',
                    compression_level=9,
                    block_size=lz4.frame.BLOCKSIZE_MAX4MB) as fp:
    fp.write((",".join(
        ["website_id", "website_url", "path", "name", "ext", "size", "mtime"]
    ) + "\n").encode())

    for doc in docs_with_url:
        try:
Exemplo n.º 16
0
 def db_query():
     """Return the full student list from a fresh Database connection."""
     return Database().list_students()
Exemplo n.º 17
0
from pyrogram.errors import FloodWait, InputUserDeactivated, UserIsBlocked, PeerIdInvalid
from pyrogram.errors.exceptions.bad_request_400 import UserNotParticipant, UsernameNotOccupied, ChatAdminRequired, PeerIdInvalid
from configs import Config
from database import Database

## --- Sub Configs --- ##
# Local aliases for the bot's static configuration values.
BOT_USERNAME = Config.BOT_USERNAME
BOT_TOKEN = Config.BOT_TOKEN
API_ID = Config.API_ID
API_HASH = Config.API_HASH
DB_CHANNEL = Config.DB_CHANNEL
ABOUT_BOT_TEXT = Config.ABOUT_BOT_TEXT
ABOUT_DEV_TEXT = Config.ABOUT_DEV_TEXT
HOME_TEXT = Config.HOME_TEXT
BOT_OWNER = Config.BOT_OWNER
db = Database(Config.DATABASE_URL, BOT_USERNAME)
# Per-broadcast progress/state while a broadcast is running.
broadcast_ids = {}
Bot = Client(BOT_USERNAME,
             bot_token=BOT_TOKEN,
             api_id=API_ID,
             api_hash=API_HASH)


async def send_msg(user_id, message):
    try:
        await message.forward(chat_id=user_id)
        return 200, None
    except FloodWait as e:
        await asyncio.sleep(e.x)
        return send_msg(user_id, message)
    except InputUserDeactivated:
Exemplo n.º 18
0
 def db_query():
     """Return the full class list from a fresh Database connection."""
     return Database().list_classes()
Exemplo n.º 19
0
    if chat_id in USERS_CONTEXTS:
        USERS_CONTEXTS[chat_id].selected_month = month
        USERS_CONTEXTS[chat_id].selected_year = year


def deleteLastCalendarMessage(chat_id):
    """Delete the chat's last calendar message, ignoring Telegram errors
    (the message was most likely already deleted)."""
    context = USERS_CONTEXTS.get(chat_id)
    if context is None or context.last_calendar_message is None:
        return
    try:
        BOT.deleteMessage((chat_id, context.last_calendar_message))
    except telepot.exception.TelegramError:
        print(
            'Erro ao apagar a mensagem. Provavelmente já foi apagada. Ignorando comando...'
        )


def setLastCalendarMessageId(chat_id, message_id):
    """Record the id of the chat's most recent calendar message, if the chat
    has a known context."""
    context = USERS_CONTEXTS.get(chat_id)
    if context is not None:
        context.last_calendar_message = message_id


# Wire everything up: database, bot token from argv, and the message loop.
BOT_DATABASE = Database()
API_TOKEN = sys.argv[1]
BOT = telepot.Bot(API_TOKEN)
MessageLoop(BOT, handle).run_as_thread()
print('Working...')

# Keep the main thread alive while the message loop runs in the background.
while 1:
    time.sleep(10)
Exemplo n.º 20
0
def track_ibge():
    """Run one IBGE tracking cycle, logging start and finish."""
    logger.info('Iniciando tracking do IBGE...')
    database = Database()
    tracker = IbgeTracker()
    tracker.track(database)
    logger.info('Finalizou o tracking do IBGE')
#Imported Classes
from database import Database
#Creates the Database object used by the schema helpers below.
edit = Database()

#Used for basic construction of the database, ensuring that all primary and foreign keys are set in place.
#As well as making sure that the tables are independent and reach the 3SF
#Makes handling the database information much easier and more efficient
def create_relationships():
    """Normalise the schema: drop redundant columns, set primary keys, and
    create the junction/history tables with their foreign keys."""
    # Remove columns that are superseded by the junction tables below.
    edit.delete_column("users","ProfileID")

    edit.delete_column("profiles","History")

    edit.create_primarykey("users","UserID")

    edit.create_primarykey("profiles","ProfileID")

    # Many-to-many link between users and profiles.
    edit.create_table("usersprofiles", "UserID INT(10) NOT NULL, ProfileID INT(10) NOT NULL, "
                                       "PRIMARY KEY (UserID, ProfileID), "
                                       "FOREIGN KEY (UserID) REFERENCES users(UserID), "
                                       "FOREIGN KEY (ProfileID) REFERENCES profiles(ProfileID)")

    edit.create_table("history", "HistoryID INT(10) NOT NULL, Filename VARCHAR(255), "
                                       "PRIMARY KEY (HistoryID)")

    # Many-to-many link between profiles and history entries.
    edit.create_table("profileshistory", "ProfileID INT(10) NOT NULL, HistoryID INT(10) NOT NULL, "
                                       "PRIMARY KEY (ProfileID, HistoryID), "
                                       "FOREIGN KEY (ProfileID) REFERENCES profiles(ProfileID), "
                                         "FOREIGN KEY (HistoryID) REFERENCES history(HistoryID)")

    # Remove the placeholder user with id "0".
    edit.delete_user("0")
Exemplo n.º 22
0
import time
import env
from database import Database

# Connection parameters come from the env module.
db = Database(
    host=env.DATABASE_HOST,
    port=env.DATABASE_PORT,
    user=env.DATABASE_USER,
    password=env.DATABASE_PASSWORD,
    databaseName=env.DATABASE_NAME
)
db.connect()

# Seed books Buku-51 .. Buku-100.
# NOTE(review): SQL is built by %-interpolation; the values are generated
# locally so injection is not a live risk here, but a parameterized insert
# would be safer if the inputs ever become external.
for i in range(50, 100):
    query = "INSERT INTO tb_buku(nama_buku) VALUES ('%s')" % ("Buku-"+str(i+1))
    print(query)
    db.insert(query)
Exemplo n.º 23
0
Author: Nick Russo
Purpose: A simple Flask web app that demonstrates the Model View Controller
(MVC) pattern in a meaningful and somewhat realistic way.
"""

from flask import Flask, render_template, request
from database import Database

# Create Flask object
app = Flask(__name__)

# Toggle between JSON, YAML, and XML for testing
# path = "data/db.yml"
# path = "data/db.xml"
path = "data/db.json"
# The "model" layer in MVC, built from the chosen backing file.
db = Database(path)


@app.route("/", methods=["GET", "POST"])
def index():
    """
    This is a view function which responds to requests for the top-level
    URL. It serves as the "controller" in MVC as it accesses both the
    model and the view.
    """

    # The button click within the view kicks off a POST request ...
    if request.method == "POST":

        # This collects the user input from the view. The controller's job
        # is to process this information, which includes using methods from
Exemplo n.º 24
0
 def __init__(self):
     """Set up the helper Functions object and open the server database."""
     self.func = Functions()
     self.db = Database('server.db')
Exemplo n.º 25
0
import jieba.posseg as pseg
import codecs
from gensim import corpora, models, similarities
from database import Database
from demo import Demo
import datetime

demo = Demo()
today = datetime.date.today()
print(today)
# Yesterday's date, formatted as YYYY-MM-DD.
preone_time = today + datetime.timedelta(days=-1)
preone_time_nyr = preone_time.strftime('%Y-%m-%d')  # formatted output
print(preone_time_nyr)

# Connect to the crawl database.
db = Database()
db.connect('crawl_data')
sql_saved = "select content from finance_news where date >= '%s'" % (
    preone_time_nyr)
saved_files = list(db.query(sql_saved))
print(saved_files)

sql_craw = "select * from finance_old_news where date >= '%s'" % (
    preone_time_nyr)
#craw_file = list(db.query(sql_craw))
craw_file = db.query(sql_craw)
print(len(craw_file))
db.close()

# Chinese stop-word list used for tokenisation.
stop_words = 'stop_words_ch.txt'
Exemplo n.º 26
0
from database import Database
from table import createTable
import requests
import json
import pprint
import threading
import time
import re
import pyttsx3

# ParseHub credentials/tokens.
# NOTE(review): secrets are hard-coded; move them to environment variables
# or a config file before publishing this code.
API_KEY = "tfA5-hX-EHFT"
PROJECT_TOKEN = "tkAYM1OPy9vX"
RUN_TOKEN = "tM1AQmiR0Vwo"
db = Database('store.db')
# Latest scraped dataset -- presumably populated by the data thread; confirm.
graph_result = {}


class DataAPI:
    def __init__(self, api_key, project_token):
        """Store ParseHub credentials and fetch the latest run's data."""
        self.api_key = api_key
        self.project_token = project_token
        self.params = {"api_key": self.api_key}

        self.data = self.get_data()

    def get_data(self):
        response = requests.get(
            f'https://www.parsehub.com/api/v2/projects/{self.project_token}/last_ready_run/data',
            params=self.params)

        data = json.loads(response.text)
Exemplo n.º 27
0
import sys, threading
from web.crockpi.controller import Controller

from database import Database

# Expect exactly one CLI argument: the target temperature in Fahrenheit.
if len(sys.argv) != 2:
    print("usage:", sys.argv[0], "<target temp (degrees fahrenheit)>")
    sys.exit(0)

target_temp = int(sys.argv[1])
# NOTE(review): Database is handed a Lock -- presumably shared with other
# threads elsewhere; confirm before changing.
controller = Controller(database=Database(threading.Lock()))
controller.run(target_temp)

Exemplo n.º 28
0
import json
from random import randint


async_mode = None
app = Flask(__name__)

app.config['SECRET_KEY'] = 'secret!'

# https://stackoverflow.com/questions/43801884/how-to-run-python-socketio-in-thread
# switching to threading mode in Flask + python-socketio as documented here:
# http://python-socketio.readthedocs.io/en/latest/#standard-threading-library
socketio = SocketIO(app, async_mode='threading')
thread = None

myDataBase = Database()
# Modules registered at runtime; rendered by the index view.
myModuleList = []


@app.route('/')
def index():
    """Render the dashboard, passing every module's property list as JSON."""
    # Iterate the list directly instead of the original
    # range(0, myModuleList.__len__()) indexing with an explicit dunder call.
    json_list = [module.get_property_list() for module in myModuleList]

    return render_template('index.html', modules_property_list=json.dumps(json_list))


def background_thread():
    """Example of how to send server generated events to clients."""
Exemplo n.º 29
0
 def lazy_create(self, monster_name):
     """Insert a monster character row with no owning user."""
     database = Database()
     database.query(
         'INSERT INTO rpg_characters (character_name, username) VALUES (?, NULL)',
         [monster_name])
     database.commit()
Exemplo n.º 30
0
def callback_feeder(bot, job):
    """Scheduled job callback: refresh stored feed posts in the database."""
    db = Database()
    db.update_post()
    logging.info("Bot already reload new feed to db")