Example No. 1
    def __init__(self, todo_settings=None):
        """
        Todo constructor.
        """
        # avoid a mutable default argument; fall back to an empty dict
        todo_settings = todo_settings or {}

        # Create logger
        self.log = log_util.get_logger(logger_name="Todo", level="INFO")

        # get path to sublime package folder
        path_todo_package = todo_settings.get('path_todo_package', '')

        # init path to resources
        self.settings['path_pom'] = osp.join(path_todo_package,
                                             self.settings['name_pom'])
        self.settings['path_db'] = osp.join(path_todo_package,
                                            self.settings['name_db'])
        self.settings['path_menu'] = osp.join(path_todo_package,
                                              self.settings['name_menu'])
        self.settings['path_base_menu'] = osp.join(
            path_todo_package, self.settings['name_base_menu'])

        # add user settings (possibly overwrite path to resources)
        self.settings.update(todo_settings)

        # load/create Todo db
        self.todo_db_link()
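
Across the examples on this page, log_util.get_logger is called with several different signatures (logger_name= and level=, tag=, a module name plus a level, or a file path plus a log directory and filename). The module itself is not shown anywhere here; a minimal sketch of such a helper built on the standard logging package, with hypothetical parameter names, could look like this:

# Hypothetical sketch of a log_util.get_logger (not the original implementation).
import logging
import sys

def get_logger(name='root', level=None):
    """Return a console logger, creating its handler only once."""
    logger = logging.getLogger(name)
    if not logger.handlers:  # avoid duplicate handlers on repeated calls
        handler = logging.StreamHandler(sys.stdout)
        handler.setFormatter(logging.Formatter(
            '%(asctime)s %(name)s %(levelname)s %(message)s'))
        logger.addHandler(handler)
    logger.setLevel(level or 'INFO')
    return logger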
Example No. 2
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import sys
import flask
from flask import make_response, render_template, request, redirect, url_for
from kafka_util import KafkaHelper
import log_util

app = flask.Flask(__name__)
log = log_util.get_logger(tag='kafka_console')


def get_kafka_topic_view(consumer, topic):
    log.info("consumer: %s, topic: %s", consumer, topic)
    with KafkaHelper() as h:
        lags = []
        log.info('partition, offset, logsize, lag')
        partitions = h.get_partitions(topic)
        for p in partitions:
            offset = h.consumer_offset(consumer, topic, p)
            logsize = h.current_offset(topic, p)
            lag = logsize - offset
            log.info("%s, %s, %s, %s", p, offset, logsize, lag)
            lags.append(lag)

        log.info("total lags: %s", sum(lags))


@app.route("/reset/<consumer_group>/<topic>")
Example No. 3
# Copyright (c) 2021, Oracle and/or its affiliates.
# All rights reserved. The Universal Permissive License (UPL), Version 1.0 as shown at http://oss.oracle.com/licenses/upl

from os import environ

import cx_Oracle

from log_util import get_logger

logger = get_logger(__name__, environ.get('LOG_LEVEL'))


def atp_setup(connection, username, password):

    cursor = connection.cursor()
    # use a bind variable instead of interpolating the username into the query
    cursor.execute("""
    SELECT * FROM dba_tables
    WHERE table_name = 'MESSAGES' AND owner = :owner
    """, owner=username.upper())
    rows = cursor.fetchall()

    if len(rows) == 0:
        sql = f"""CREATE USER {username}
        IDENTIFIED BY "{password}"
        QUOTA UNLIMITED ON DATA"""
        cursor.execute(sql)
        connection.commit()
        sql = f"""
        CREATE TABLE {username}.messages (
            id RAW(16) DEFAULT SYS_GUID() NOT NULL PRIMARY KEY,
            rcvd_at_ts TIMESTAMP WITH TIME ZONE,
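
The example is cut off mid-statement. A hedged sketch of how such a setup function might be invoked, with the ADMIN user, DSN, and environment variable names assumed for illustration:

# Hypothetical caller; the environment variable names are illustrative only.
if __name__ == '__main__':
    admin_conn = cx_Oracle.connect(
        user='ADMIN',
        password=environ.get('ADMIN_PASSWORD'),
        dsn=environ.get('DB_DSN'))
    atp_setup(admin_conn, environ.get('APP_USERNAME'),
              environ.get('APP_PASSWORD'))
    admin_conn.close()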
Example No. 4
# coding=utf-8
"""
Thread Task
Author: Seaven
"""

import threading
import log_util
import traceback

task_log = log_util.get_logger('ThreadTask')

TASK_STATUS_READY = 1
TASK_STATUS_RUNNING = 2
TASK_STATUS_FINISH = 3
TASK_STATUS_ERROR = 4


class ThreadTask(object):
    """
    build a task, run a group function by multi thread
    """
    def __init__(self, run_func_list, end_func=lambda: "Task End!"):
        self.end_func = end_func
        self.thread_list = list()
        self.__init_threads(run_func_list)

    def __init_threads(self, run_func_list):
        # use enumerate instead of Python 2's xrange so this runs on Python 3
        for i, run_func in enumerate(run_func_list):
            self.thread_list.append(ExecuteThread(i, run_func))
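
ExecuteThread is referenced but not included in the excerpt. Given the imports (threading, traceback) and the status constants above, a minimal sketch of what it might look like:

# Hypothetical ExecuteThread; the original class is not in the excerpt.
class ExecuteThread(threading.Thread):
    def __init__(self, index, run_func):
        super(ExecuteThread, self).__init__(name='task-%d' % index)
        self.run_func = run_func
        self.status = TASK_STATUS_READY

    def run(self):
        self.status = TASK_STATUS_RUNNING
        try:
            self.run_func()
            self.status = TASK_STATUS_FINISH
        except Exception:
            self.status = TASK_STATUS_ERROR
            task_log.error(traceback.format_exc())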
Example No. 5
"""A Python module that provides the API client component for the elsapy package.
    Additional resources:
    * https://github.com/ElsevierDev/elsapy
    * https://dev.elsevier.com
    * https://api.elsevier.com"""

import requests, json, time, urllib
import log_util
from abc import ABCMeta, abstractmethod

logger = log_util.get_logger(__name__)


class ElsClient:
    """A class that implements a Python interface to api.elsevier.com"""

    # class variables
    __url_base = "https://api.elsevier.com/"  # base URL for later use
    __user_agent = "elsapy-v0.4.6"            # helps track library use
    __min_req_interval = 0.23                 # min. request interval, in seconds
    __ts_last_req = time.time()               # timestamp tracker for throttling

    # constructors
    def __init__(self, api_key, inst_token=None, num_res=25):
        """Initializes a client with a given API key and, optionally, an
            institutional token and a number of results per request."""
        self.api_key = api_key
        self.inst_token = inst_token
        self.num_res = num_res

    # properties
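
The excerpt stops at the properties. Given __min_req_interval and __ts_last_req above, a hedged sketch of how a throttled GET might use them (the method name and header names are assumptions, not elsapy's confirmed API):

    # Hypothetical throttled request method (sketch only).
    def exec_request(self, url):
        # sleep just long enough to respect the minimum request interval
        elapsed = time.time() - ElsClient.__ts_last_req
        if elapsed < ElsClient.__min_req_interval:
            time.sleep(ElsClient.__min_req_interval - elapsed)
        headers = {'X-ELS-APIKey': self.api_key,
                   'User-Agent': ElsClient.__user_agent}
        if self.inst_token:
            headers['X-ELS-Insttoken'] = self.inst_token
        response = requests.get(url, headers=headers)
        ElsClient.__ts_last_req = time.time()  # update the shared tracker
        return json.loads(response.text)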
Example No. 6
from log_util import get_logger

# This has a vocab id, which is used to pad the encoder input, decoder input
# and target sequence
PAD_TOKEN = '[PAD]'
# This has a vocab id, which is used to represent out-of-vocabulary words
UNKNOWN_TOKEN = '[UNK]'
# This has a vocab id, which is used at the start of every decoder input
# sequence
START_DECODING = '[START]'
# This has a vocab id, which is used at the end of untruncated target sequences
STOP_DECODING = '[STOP]'

# Note: none of <s>, </s>, [PAD], [UNK], [START], [STOP] should appear in
# the vocab file.

LOGGER = get_logger('pointer.generator.data')


class Vocab(object):
    def __init__(self, vocab_file, max_size):
        self._word_to_id = {}
        self._id_to_word = {}
        # keeps track of total number of words in the Vocab
        self._count = 0

        # [UNK], [PAD], [START] and [STOP] get the ids 0,1,2,3.
        for w in [UNKNOWN_TOKEN, PAD_TOKEN, START_DECODING, STOP_DECODING]:
            self._word_to_id[w] = self._count
            self._id_to_word[self._count] = w
            self._count += 1
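
The constructor goes on to read vocab_file, but that part is cut. A hedged sketch of the lookup helpers such a class typically pairs with the two tables above (the method names are assumptions):

    # Hypothetical lookup helpers built on the two tables above.
    def word2id(self, word):
        # out-of-vocabulary words map to the [UNK] id
        return self._word_to_id.get(word, self._word_to_id[UNKNOWN_TOKEN])

    def id2word(self, word_id):
        if word_id not in self._id_to_word:
            raise ValueError('Id not found in vocab: %d' % word_id)
        return self._id_to_word[word_id]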
Example No. 7
import torch
from torch.autograd import Variable

from data_util import config, data
from data_util.batcher import Batcher
from data_util.data import Vocab
from data_util.utils import rouge_eval, rouge_log, write_for_rouge
from log_util import get_logger
from model import Model
from train_util import get_input_from_batch

use_cuda = config.use_gpu and torch.cuda.is_available()
LOGGER = get_logger('pointer.generator.decode')


class Beam(object):
    def __init__(self, tokens, log_probs, state, context, coverage):
        self.tokens = tokens
        self.log_probs = log_probs
        self.state = state
        self.context = context
        self.coverage = coverage

    def extend(self, token, log_prob, state, context, coverage):
        return Beam(
            tokens=self.tokens + [token],
            log_probs=self.log_probs + [log_prob],
            state=state,
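
The extend method is cut off, and beam search also needs a way to rank hypotheses. A hedged sketch of the usual length-normalized score as properties on Beam (not shown in the excerpt):

    # Hypothetical ranking helpers for Beam.
    @property
    def latest_token(self):
        return self.tokens[-1]

    @property
    def avg_log_prob(self):
        # length-normalized score used to sort candidate beams
        return sum(self.log_probs) / len(self.tokens)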
Example No. 8
LOG_DIR = FLAGS.log_dir
if not os.path.exists(LOG_DIR):
    os.makedirs(LOG_DIR)

OUTPUT_DIR = os.path.join(LOG_DIR, 'test_results')
if not os.path.exists(OUTPUT_DIR):
    os.makedirs(OUTPUT_DIR)

VIS_DIR = os.path.join(LOG_DIR, 'vis_results')
if FLAGS.verbose and not os.path.exists(VIS_DIR):
    os.makedirs(VIS_DIR)

# back up this script next to the logs for reproducibility
if os.path.exists('test.py'):
    os.system('cp test.py %s' % LOG_DIR)

logger = get_logger(__file__, LOG_DIR, 'log_inference.txt')
logger.info(str(FLAGS) + '\n')

MAX_NUM_POINT = 4096
NUM_CLASSES = 13
NEW_NUM_CLASSES = 13

HOSTNAME = socket.gethostname()

EXT_LEN = 4
if FILE_TYPE == 'hdf5':
    EXT_LEN = 3
elif FILE_TYPE == 'numpy':
    EXT_LEN = 4
else:
    raise ValueError('Unsupported file type: %s' % FILE_TYPE)
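
Hard-coding extension lengths like this is brittle; a sketch of an alternative that derives the stem with os.path.splitext, assuming filenames such as scene0.h5 or scene0.npy (the helper name is made up):

# Hypothetical alternative to the fixed EXT_LEN arithmetic.
import os.path

def strip_extension(filename):
    stem, _ext = os.path.splitext(filename)  # e.g. ('scene0', '.npy')
    return stem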
Example No. 9
MAX_NUM_POINT = 4096
NUM_CLASSES = 13

BN_INIT_DECAY = 0.5
BN_DECAY_DECAY_RATE = 0.5
BN_DECAY_DECAY_STEP = float(DECAY_STEP)
BN_DECAY_CLIP = 0.99

HOSTNAME = socket.gethostname()

# backup model
os.system('cp model.py {}'.format(LOG_DIR))
os.system('cp train.py {}'.format(LOG_DIR))

logger = get_logger(__file__, LOG_DIR, 'log_train.txt')
logger.info(str(FLAGS) + '\n')


def get_learning_rate(batch):
    learning_rate = tf.train.exponential_decay(
        BASE_LEARNING_RATE,  # Base learning rate.
        batch,  # Current index into the dataset.
        DECAY_STEP,  # Decay step.
        DECAY_RATE,  # Decay rate.
        staircase=True)
    learning_rate = tf.maximum(learning_rate,
                               0.00001)  # CLIP THE LEARNING RATE!!
    return learning_rate
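
The BN_* constants above suggest a companion batch-norm decay schedule in the same TF1 style; the actual function is not in the excerpt, and BATCH_SIZE is assumed to be another flag-derived constant like DECAY_STEP:

# Hypothetical companion schedule for the BN_* constants (sketch only).
def get_bn_decay(batch):
    bn_momentum = tf.train.exponential_decay(
        BN_INIT_DECAY,
        batch * BATCH_SIZE,  # BATCH_SIZE is an assumption, like DECAY_STEP above
        BN_DECAY_DECAY_STEP,
        BN_DECAY_DECAY_RATE,
        staircase=True)
    return tf.minimum(BN_DECAY_CLIP, 1 - bn_momentum)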

Example No. 10
import datetime
import logging
import os

import pyrouge
import tensorflow as tf

from log_util import get_logger

LOGGER = get_logger('pointer.generator.utils')

intervals = (
    ('w', 604800),  # 60 * 60 * 24 * 7
    ('d', 86400),  # 60 * 60 * 24
    ('h', 3600),  # 60 * 60
    ('m', 60),
    ('s', 1),
)


def display_time(seconds, granularity=2):
    result = []

    for name, count in intervals:
        value = int(seconds) // count  # integer division; '/' would leave floats
        if value:
            seconds -= value * count
            result.append("{}{}".format(value, name))
    return ' '.join(result[:granularity]) or '0s'
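
For instance, with the integer-division fix above, 100000 seconds breaks down into 1 day, 3 hours, 46 minutes, and 40 seconds:

print(display_time(100000))                 # -> '1d 3h'
print(display_time(100000, granularity=4))  # -> '1d 3h 46m 40s'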

Example No. 11
import os
import time

import torch
from torch.nn.utils import clip_grad_norm_

from custom_adagrad import AdagradCustom
from data_util import config
from data_util.batcher import Batcher
from data_util.data import Vocab
from data_util.utils import (calc_running_avg_loss, get_time,
                             time_diff_as_minutes)
from decode import BeamSearch
from log_util import get_logger
from model import Model
from train_util import get_input_from_batch, get_output_from_batch

USE_CUDA = config.use_gpu and torch.cuda.is_available()
LOGGER = get_logger('pointer.generator.train')


class Train(object):
    def __init__(self):
        self.vocab = Vocab(config.vocab_path, config.vocab_size)
        self.batcher = Batcher(config.train_data_path,
                               self.vocab,
                               mode='train',
                               batch_size=config.batch_size,
                               single_pass=False)
        time.sleep(15)  # presumably to let the batcher's threads queue batches

        train_dir = os.path.join(config.log_root,
                                 'train_%d' % (int(time.time())))
        if not os.path.exists(train_dir):
Example No. 12
    def __init__(self):
        self.helper = KafkaHelper()
        self.msg_logger = log_util.get_logger(tag="kafka_msg")
Example No. 13
import os
import sys
import time

import tensorflow as tf
import torch

from data_util import config
from data_util.batcher import Batcher
from data_util.data import Vocab
from data_util.utils import calc_running_avg_loss
from log_util import get_logger
from model import Model
from train_util import get_input_from_batch, get_output_from_batch

use_cuda = config.use_gpu and torch.cuda.is_available()
LOGGER = get_logger('pointer.generator.eval', run_type='evaluation')


class Evaluate(object):
    def __init__(self, model_file_path):
        self.vocab = Vocab(config.vocab_path, config.vocab_size)
        self.batcher = Batcher(config.eval_data_path,
                               self.vocab,
                               mode='eval',
                               batch_size=config.batch_size,
                               single_pass=True)
        time.sleep(15)
        model_name = os.path.basename(model_file_path)

        eval_dir = os.path.join(config.log_root, 'eval_%s' % (model_name))
        if not os.path.exists(eval_dir):
Example No. 14
# Copyright (c) 2021, Oracle and/or its affiliates.
# All rights reserved. The Universal Permissive License (UPL), Version 1.0 as shown at http://oss.oracle.com/licenses/upl

import ssl
from time import sleep
from os import environ

import cx_Oracle
from kafka import KafkaConsumer

from log_util import get_logger


# Override kafka logger
kafka_logger = get_logger('kafka', environ.get('KAFKA_LOG_LEVEL'))
# set local logger
logger = get_logger(__name__, environ.get('LOG_LEVEL'))


def get_consumer():
    sasl_mechanism = 'PLAIN'
    security_protocol = 'SASL_SSL'

    # Create a new context using system defaults; disable everything below TLS 1.2.
    # Note: the flags must be OR-ed in; '&=' would clear options rather than set them.
    context = ssl.create_default_context()
    context.options |= ssl.OP_NO_TLSv1
    context.options |= ssl.OP_NO_TLSv1_1
    message_endpoint = environ.get('messageEndpoint')
    kafka_brokers = f"{message_endpoint}:9092"
    username = environ.get('USERNAME')
    stream_pool_id = environ.get('streamPoolId')
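
The function is cut off while the connection settings are being gathered. A hedged sketch of how they might feed into kafka-python's KafkaConsumer; the topic and auth-token variable names and the OCI-style SASL username format are assumptions:

    # Hypothetical continuation; variable names and formats are assumptions.
    auth_token = environ.get('AUTH_TOKEN')
    consumer = KafkaConsumer(
        environ.get('TOPIC_NAME'),
        bootstrap_servers=kafka_brokers,
        security_protocol=security_protocol,
        sasl_mechanism=sasl_mechanism,
        sasl_plain_username=f"{username}/{stream_pool_id}",
        sasl_plain_password=auth_token,
        ssl_context=context)
    return consumer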
Example No. 15
Visualization
'''

import imageio
import matplotlib
matplotlib.use('Agg')  # select the non-interactive backend before pylab is imported
import matplotlib.pylab as plt
import matplotlib.patches as mpatches
import numpy as np
from PIL import Image, ImageDraw, ImageFont
import visdom
from log_util import get_logger
from tile_images import tile_raster_images

visualizer = None
logger = get_logger('visdom')

_options = dict(use_tanh=False,
                quantized=False,
                img=None,
                label_names=None,
                is_caption=False,
                is_attribute=False)

CHAR_MAP = {
    0: '_',
    1: '\n',
    2: ' ',
    3: '!',
    4: '"',
    5: '%',