Example #1
import os
import re
import datetime
import time

from console_logging.console import Console

console = Console()


def make_sitemap(urls):
    entries = []
    now = datetime.datetime.fromtimestamp(time.time())
    offset_seconds = int(now.astimezone().utcoffset().total_seconds())  # total_seconds() also handles negative offsets
    sign = '+' if offset_seconds >= 0 else '-'
    offset_seconds = abs(offset_seconds)
    # W3C datetime with zero-padded fields, e.g. 2021-01-05T09:03:07+01:00
    timestamp = "{year}-{month:02d}-{day:02d}T{hour:02d}:{minute:02d}:{second:02d}{sign}{shift}".format(
        year=now.year,
        month=now.month,
        day=now.day,
        hour=now.hour,
        minute=now.minute,
        second=now.second,
        sign=sign,
        shift='%02d:%02d' % (offset_seconds // 3600, (offset_seconds % 3600) // 60))
    # TODO: tidy up this section. (Work out exactly what sitemap.xml requires.)
    for url in urls:
        sitemap_entry = "<url>\n<loc>{url}</loc>\n<lastmod>{timestamp}</lastmod>\n<priority>0.8</priority></url>".format(
            url='http://masq.gq%s' % url, timestamp=timestamp)
        entries.append(sitemap_entry)
    # Assumed completion of the truncated string: join the entries and
    # close the <urlset> root element.
    sitemap_xml = '''<?xml version="1.0" encoding="UTF-8"?>
<urlset
    xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"
    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
    xsi:schemaLocation="http://www.sitemaps.org/schemas/sitemap/0.9
            http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd">
{entries}
</urlset>'''.format(entries='\n'.join(entries))
    return sitemap_xml
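
A side note on the timestamp: since Python 3.6 the same W3C-style string, local UTC offset included, comes from a single call; a minimal sketch:

import datetime

# e.g. '2021-01-05T09:03:07+01:00'
timestamp = datetime.datetime.now().astimezone().isoformat(timespec='seconds')
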
Example #2

from streaming_event_compliance.services import setup
from streaming_event_compliance.services.visualization import visualization_deviation_automata
from streaming_event_compliance.services.compliance_check import case_thread_cc
from streaming_event_compliance.objects.variable.globalvar import gVars, CCM, CTM
from streaming_event_compliance import app
import threading
from streaming_event_compliance.database import dbtools
from streaming_event_compliance.objects.exceptions.exception import ThreadException
from streaming_event_compliance.objects.logging.server_logging import ServerLogging
import traceback
import json
import os
import sys
from console_logging.console import Console

console = Console()
console.setVerbosity(5)
# The misspelled config keys ('MAXIMUN', 'CLEINT') match their definitions
# in the app configuration and are kept as-is.
MAXIMUN_WINDOW_SIZE = app.config['MAXIMUN_WINDOW_SIZE']
THRESHOLD = app.config['THRESHOLD']
CLEINT_DATA_PATH = app.config['CLEINT_DATA_PATH']
AUTOMATA_FILE = app.config['AUTOMATA_FILE']
FILE_TYPE = app.config['FILE_TYPE']
threads_index = 0


def compliance_checker(client_uuid, event):
    """
    Description:
        This function will do compliance checking for each event from the streaming data provided from client_uuid.
        It will first check the global variable 'autos', to check if tt's status is true,
        if it's false, that means the automata has not built, return this information into user;
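
The guard described in that docstring, sketched (hypothetical; the attribute name follows gVars.auto_status, which appears in Example #15):

def automata_ready():
    # gVars.auto_status == 0 means the automata has not been built yet.
    return gVars.auto_status != 0
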
Example #3
import os

import dataset
from voiceit2 import VoiceIt2
from console_logging.console import Console

console = Console()

console.log("Starting....")

apiKey = "   "  # credentials left blank in the original
apiToken = "  "

my_voiceit = VoiceIt2(apiKey, apiToken)

try:
    # Prefer a connection string from the environment when one is set
    # (restoring the intent of the commented-out lines in the original).
    ENDPOINT_DB = os.environ['ENDPOINT_DB']
    db = dataset.connect(ENDPOINT_DB)
except Exception:
    # Fall back to a local SQLite file.
    db = dataset.connect('sqlite:///tovivo.db')


class CRUD:
    @staticmethod
    def cadastrar(data):
        """Register the user with VoiceIt and persist the returned userId."""
        table = db['user']
        user = my_voiceit.create_user()
        print(user)
        data['userId'] = user['userId']
        table.insert(data)
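
A minimal usage sketch of the class above (field names hypothetical):

# VoiceIt creates the user; its userId is stored alongside the client data.
CRUD.cadastrar({'nome': 'Maria', 'telefone': '+55 11 99999-0000'})
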
Example #4
import json
import os
from lxml import html
import requests
import unicodedata
from console_logging.console import Console
console = Console()

job_data = None

with open('jobs.json') as f:
    job_data = json.load(f)
    console.info("Crawling %d career pages." % len(job_data))
    i = 0
    for job_entry in job_data:
        try:
            url = job_entry['link']
            page = requests.get(url)
            tree = html.fromstring(page.content)
            links = tree.xpath('//a')
            job_postings = []
            for link in links:
                job_title = link.text_content().strip()  # strip() already trims both ends
                if 'intern' in job_title.lower():  # only classify internship postings
                    res = requests.post(
                        'http://127.0.0.1:8000/predict', json={'title': job_title})
                    prediction = res.text.strip()
                    if prediction in ['IT/Software Development', 'Engineering']:
                        job_postings.append(job_title)
            job_entry['positions'] = job_postings
        except Exception as e:
            # Assumed handling (the listing breaks off here): log and move on.
            console.error("Failed to crawl %s: %s" % (job_entry.get('link'), e))
Example #5
from __future__ import absolute_import, division, print_function
import tensorflow as tf
import numpy as np
import os
import sys
from console_logging.console import Console
from sys import argv

usage = "\nUsage:\npython neuralnet/main.py path/to/dataset.csv path/to/crossvalidation_dataset.csv #MAX_GPA #MAX_TEST_SCORE\n\nExample:\tpython main.py harvard.csv harvard_cv.csv 6.0 2400\n\nThe dataset should have one column of GPA and one column of applicable test scores, no headers."

console = Console()
console.setVerbosity(3)  # only logs success and error
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

try:
    script, dataset_filename, test_filename, maxgpa, maxtest = argv
except ValueError:
    # Wrong number of command-line arguments.
    console.error(str(sys.exc_info()[0]))
    print(usage)
    exit(1)

dataset_filename = str(dataset_filename)
maxgpa = float(maxgpa)
maxtest = int(maxtest)

if not dataset_filename.endswith(".csv"):
    console.error("Filetype not recognized as CSV.")
    print(usage)
    exit(1)

# Data sets
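
The excerpt stops at the data-set definitions. The MAX_GPA / MAX_TEST_SCORE arguments suggest min-max scaling of the two feature columns; a sketch of that assumed step:

import numpy as np

# Hypothetical normalization implied by maxgpa/maxtest; columns assumed
# to be (gpa, test_score) as described in the usage string.
data = np.loadtxt(dataset_filename, delimiter=',')
data[:, 0] /= maxgpa   # GPA scaled into [0, 1]
data[:, 1] /= maxtest  # test score scaled into [0, 1]
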
Example #6
import numpy as np
import os
import re
import pickle as pkl
from console_logging.console import Console
console = Console()
'''
Preprocessing:
remove everything except letters, spaces, exclamation marks, question marks, and the @ symbol

Features:
one-hot encoded words
one-hot encoded capitalized words (0 if there are no capitals)
counts of exclamation marks (!) and question marks (?)
Later: one-hot encoded mentions (@username)
'''

# Debugging
console.setVerbosity(4)
# Training
# console.setVerbosity(3)
# Staging
# console.setVerbosity(2)
# Production
# console.mute()
# Neater logging inside VS Code
console.timeless()
console.monotone()

DATASET_FILEPATH = 'data/text_emotion.csv'
dataset_path = os.path.join(os.getcwd(), DATASET_FILEPATH)
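
The cleanup and count rules from the docstring above, as a minimal sketch:

def clean_text(raw_text):
    # Keep only letters, spaces, '!', '?' and '@' (the stated rule).
    return re.sub(r"[^A-Za-z\s!?@]", " ", raw_text)

def punctuation_counts(raw_text):
    # Feature: counts of '!' and '?'.
    return raw_text.count('!'), raw_text.count('?')
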
Example #7
from voiceit2 import VoiceIt2
from console_logging.console import Console
console = Console()

console.log("Stating....")

# developerId : ff4a62e3f7014748b75085b17dde1f01

apiKey = "key_aaf0da565b3b41ac8f6de78213f93e52"
apiToken = "tok_d096d530e9374df481ffbe966dfdbd44"

my_voiceit = VoiceIt2(apiKey, apiToken)

id_user = '******'

cadastro_img = "https://observatoriodocinema.uol.com.br/wp-content/uploads/2021/01/Renato-Aragao-1.jpg"

verifica_img = "https://stcotvfoco.com.br/2021/01/renato-aragao-didi-carreira-trapalhoes-filmes-1.jpg"

image_fake = "https://conexao.segurosunimed.com.br/wp-content/uploads/2021/01/Capa-idoso-2.0.jpg"

voz_url = "https://to-vivo-app.s3.amazonaws.com/users/usr_54fbb7f880214222958ce92aef0f22f2/output+(2).flac"
#print(my_voiceit.check_user_exists(id_user))

#print(my_voiceit.create_face_enrollment_by_url(id_user, cadastro_img))

console.info("Verifica...do......")

r = my_voiceit.face_verification_by_url(id_user, verifica_img)
console.info(r['faceConfidence'])
Example #8
from __future__ import absolute_import, division, print_function
import tensorflow as tf
import numpy as np
import os
from sys import argv
from console_logging.console import Console

console = Console()

usage = "You shouldn't be running this file."

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
console.setVerbosity(3)  # only error, success, log

script = 'predict.py'
dataset_filename = './neuralnet/corpus/carnegie_mellon.csv'
maxgpa = 5.0
maxtest = 2400
dataset_filename = str(dataset_filename)
maxgpa = float(maxgpa)
maxtest = int(maxtest)
if not dataset_filename.endswith(".csv"):
    console.error("Filetype not recognized as CSV.")
    print(usage)
    exit(1)

# Data sets
DATA_TRAINING = dataset_filename
DATA_TEST = dataset_filename
''' We are expecting features that are floats (gpa, sat, act) and outcomes that are integers (0 for reject, 1 for accept) '''
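
Given that schema, loading such a CSV could look like this (a sketch; the original presumably uses TensorFlow's own CSV loader):

# Hypothetical loader: float feature columns, integer label in the last column.
data = np.loadtxt(DATA_TRAINING, delimiter=',')
features, outcomes = data[:, :-1], data[:, -1].astype(int)
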
Example #9
import utils
from classifiers import JobTitle
from console_logging.console import Console

console = Console()

train = utils.load_dataset('features')
console.info("Loaded training dataset.")
test = utils.load_dataset('test')
console.info("Loaded testing dataset.")
pipe = JobTitle.pipe(train)
console.success("Finished training pipe.")

t = [entry['title'] for entry in test]
e = [entry['categories'][0] for entry in test]

accuracy = utils.evaluate(pipe, t, e)
console.success("%f accuracy" % accuracy)


def get_analytics():
    analytics = utils.analyze(pipe, t, e, utils.categories(test))
    # console.log('\n'+str(analytics))
    return analytics
Example #10
import train_jobtitle
pipe = train_jobtitle.pipe

from console_logging.console import Console
console = Console()

from sanic import Sanic
from sanic.response import json, text
app = Sanic(__name__)


@app.route('/')
async def hello(request):
    return text('', status=200)


@app.route('/predict', methods=['POST'])
async def predict(request):
    try:
        return text(str(pipe.predict([request.json['title']])[0]))
    except Exception as e:
        console.error(str(e))
        return text(str(e), status=500)  # text() needs a str, not the exception object


@app.route('/predict_many', methods=['POST'])
async def predict_many(request):
    try:
        return json(list(pipe.predict(request.json['titles'])))
    except Exception as e:
        console.error(str(e))
        # Assumed completion, mirroring the /predict handler above:
        return text(str(e), status=500)
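
The client side of the /predict route appears in Example #4; a minimal call:

import requests

res = requests.post('http://127.0.0.1:8000/predict',
                    json={'title': 'Software Engineering Intern'})
print(res.text)  # predicted category, e.g. 'IT/Software Development'
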
Example #11
from flask import Flask
from requests import post
from flask_restful import Resource, Api, reqparse
from lsuinox.Banco.Database import *
from console_logging.console import Console
console = Console()

#db = dataset.connect('sqlite:///:memory:')

db = Banco()
app = Flask(__name__)
api = Api(app)


class CADASTRAR_USUARIO(Resource):
    def post(self):
        argumentos = reqparse.RequestParser()
        argumentos.add_argument("id")
        argumentos.add_argument("nome")
        argumentos.add_argument("cpf")
        argumentos.add_argument("plano")
        argumentos.add_argument("cidade")
        argumentos.add_argument("bairro")
        argumentos.add_argument("rua")
        argumentos.add_argument("cep")
        argumentos.add_argument("quant_porcos")
        argumentos.add_argument("datetime")
        argumentos.add_argument("carencia")
        argumentos.add_argument("status_solicitacao")
        argumentos.add_argument("Forma_pagamento")
        argumentos.add_argument("Fatura")
Example #12
import os
import pickle

from console_logging.console import Console
from kombu import Connection, Exchange, Queue
from kombu.mixins import ConsumerMixin

import dataset

print("import dataset heheheheh")
# https://dataset.readthedocs.io/en/latest/ setar para falar com banco
db = dataset.connect('mysql://*****:*****@192.168.0.108:49153/COUNTER_TBL')
tabela = db['EVENTOS']

console = Console()
queue = "contador-carro-exchange"
exchange = "contador-carro-exchange"
routing_key = "contador-carro-exchange"
rabbit_url = "amqp://*****:*****@192.168.0.108:5672//"

# Rabbit config
conn = Connection(rabbit_url)
channel_ = conn.channel()
exchange_ = Exchange(exchange, type="direct", delivery_mode=1)


class Worker(ConsumerMixin):
    def __init__(self, connection, queues):
        self.connection = connection
        self.queues = queues
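
ConsumerMixin subclasses must provide get_consumers; the standard kombu pattern for this Worker, with a hypothetical message handler, would be:

    def get_consumers(self, Consumer, channel):
        # Standard kombu hook: bind our queues to a callback.
        return [Consumer(queues=self.queues, callbacks=[self.on_message])]

    def on_message(self, body, message):
        # Hypothetical handler: record the event and acknowledge it.
        tabela.insert(dict(evento=str(body)))
        message.ack()
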
Example #13
from sanic import Sanic
from sanic.response import json
import json as j
from console_logging.console import Console

app = Sanic()
console = Console()

routing_table = dict()
with open('paths.json') as f:
    for d in j.load(f):
        routing_table[d["passkey"]] = d["url"]

console.info("Compiled routing table of %d routes." %
             len(routing_table.keys()))


@app.middleware('response')
async def all_cors(r, s):
    s.headers['Access-Control-Allow-Origin'] = '*'
    s.headers['Access-Control-Allow-Headers'] = '*'


@app.route("/knock", methods=['POST', 'OPTIONS'])
async def whos_there(r):
    if r.method == 'OPTIONS': return json({}, status=200)
    if 'name' not in r.json.keys(): return json({}, status=500)
    console.log("%s@%s is knocking." % (r.json['name'], r.ip))
    if r.json['name'] in routing_table.keys():
        p = routing_table[r.json['name']]
        console.log("%s is answering." % p)
        return json({"url": p}, status=200)
Example #14
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from time import sleep
import numpy as np
from console_logging.console import Console
import json
import os

console = Console()

curated_lists = []

browser = webdriver.Chrome()
console.info("Initialized Chrome Webdriver.")


def get_repos(pages=10):

    console.log("Now entering signup process.")

    # Page 1 of Signup

    browser.get('https://github.com/')

    input('Log in, then press ENTER.')

    browser.get(
        'https://github.com/search?o=desc&p=1&q=curated+list&s=stars&type=Repositories&utf8=%E2%9C%93')
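
The p=1 query parameter and the pages argument suggest iterating over result pages; a hedged sketch of that assumed loop:

    for page in range(1, pages + 1):
        browser.get('https://github.com/search?o=desc&p=%d&q=curated+list&s=stars&type=Repositories' % page)
        sleep(2)  # be gentle with GitHub while scraping
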
Example #15

from streaming_event_compliance import app, db
import time, traceback
from streaming_event_compliance.objects.logging.server_logging import ServerLogging
from streaming_event_compliance.objects.exceptions.exception import ThreadException, ReadFileException
from console_logging.console import Console
import sys
# resource.setrlimit(resource.RLIMIT_NOFILE, (2000, -1))
console = Console()
console.setVerbosity(5)

if __name__ == '__main__':
    func_name = sys._getframe().f_code.co_name
    try:
        db.create_all()
        ServerLogging().log_info(func_name, "Created all db tables")
    except Exception as ec:
        console.error('Error: Database connection!' + str(ec.__class__) + traceback.format_exc())
        ServerLogging().log_error(func_name, "Database connection error!")
        exit(1)

    from streaming_event_compliance.objects.variable.globalvar import gVars
    from streaming_event_compliance.services import setup
    from streaming_event_compliance.services.build_automata import build_automata
    from streaming_event_compliance.database import dbtools

    dbtools.empty_tables()
    setup.init_automata()
    if gVars.auto_status == 0:
        start = time.perf_counter()  # time.clock() was removed in Python 3.8
        console.secure("Start time: ", start)
        try:
            # The excerpt ends here; presumably the build_automata service
            # imported above runs inside this try block.
            pass
Example #16
import os

import autogluon as ag
from autogluon import ObjectDetection as task
from console_logging.console import Console

console = Console()

console.log("Downloading dataset...")
root = './'
filename_zip = ag.download(
    'https://autogluon.s3.amazonaws.com/datasets/tiny_motorbike.zip',
    path=root)
filename = ag.unzip(filename_zip, root=root)

console.log("Creating the training task")
data_root = os.path.join(root, filename)
dataset_train = task.Dataset(data_root, classes=('motorbike', ))

console.info("TRAINING DATA MODEL...")
time_limits = 5 * 60 * 60  # 5 hours
epochs = 30
detector = task.fit(dataset_train,
                    num_trials=2,
                    epochs=epochs,
                    lr=ag.Categorical(5e-4, 1e-4),
                    ngpus_per_trial=1,
                    time_limits=time_limits)
console.success("TRAINING DONE !")
console.log("START TEST MODEL ")
dataset_test = task.Dataset(data_root,
                            index_file_name='test',
                            classes=('motorbike', ))
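
The AutoGluon tiny_motorbike tutorial that this mirrors typically closes by evaluating on the test split; sketched under that assumption:

# Assumed continuation, following the upstream tutorial:
test_map = detector.evaluate(dataset_test)
console.success("mAP on test set: %s" % str(test_map))
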
Example #17

from colorama import Fore
from datetime import datetime, timedelta
from console_logging.console import Console

import schedule
import time
import platform
import os
import json

# @todo: implement uploading the json file to Confluence (embed confluence.py),
#        then automatically delete the file after x days
# @todo: look into creating a new directory to store the image and html files
# @todo: see if it's possible to add job ids to track and log the job queue

console = Console()


def get_recent_epic():

    total_epic = 0
    new_issues = {}

    now = datetime.now()
    week_ago = now - timedelta(days=7)

    """
    #################################################################
    #### Search for new story epics since last week from today   ####
    #################################################################
    """
Example #18
import argparse
import time
from random import choice
from uuid import uuid4

import boto3
import cv2
import cv2 as cv
import cvlib
import numpy as np
from cvlib.object_detection import draw_bbox
from requests import get
from voiceit2 import VoiceIt2
from console_logging.console import Console

console = Console()

apiKey = "key_aaf0da565b3b41ac8f6de78213f93e52"
apiToken = "tok_d096d530e9374df481ffbe966dfdbd44"

BODY_PARTS = {
    "Nose": 0,
    "Neck": 1,
    "RShoulder": 2,
    "RElbow": 3,
    "RWrist": 4,
    "LShoulder": 5,
    "LElbow": 6,
    "LWrist": 7,
    "RHip": 8,
    "RKnee": 9,
    # Completed from the standard OpenPose COCO mapping (listing breaks off above):
    "RAnkle": 10,
    "LHip": 11,
    "LKnee": 12,
    "LAnkle": 13,
    "REye": 14,
    "LEye": 15,
    "REar": 16,
    "LEar": 17,
    "Background": 18,
}
Example #19
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
import os, configparser, re
from console_logging.console import Console
console = Console()
console.setVerbosity(5)
app = Flask(__name__)

# Default Configuration:
deploy = True
if deploy:
    DATABASE_PATH = 'mysql+pymysql://compliancechecker:compliancechecker@mysqldb:3306/compliancechecker'
    app.config['BASE_DIR'] = '/StreamingEventCompliance/'

else:
    DATABASE_PATH = 'mysql+pymysql://compliancechecker:compliancechecker@localhost/compliancechecker'
    app.config['BASE_DIR'] = os.path.dirname(__file__) + os.sep + '..' + os.sep

app.config['LOG_LEVEL'] = 'DEBUG'
app.config['LOG_FORMAT'] = '%(asctime)-15s %(message)s'

app.config['SQLALCHEMY_DATABASE_URI'] = DATABASE_PATH
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['AUTOS_DEFAULT'] = False
app.config['THRESHOLD'] = 0.2
app.config['AUTOMATA_FILE'] = 'automata'
app.config['CLEINT_DATA_PATH'] = app.config['BASE_DIR'] + 'p_automata' + os.sep
app.config['FILE_TYPE'] = '.pdf'
app.config['TRAINING_EVENT_LOG_PATH'] = app.config['BASE_DIR'] + 'data' + os.sep + \
                                        'Simple_Training2.xes'
# Extracts the digits from the literal, yielding [1, 2, 3, 4].
app.config['WINDOW_SIZE'] = list(map(int, re.findall(r"\d+", '[1,2,3,4]')))