Example #1
import logging

_instance = logging.Logger("CS", logging.DEBUG)
log_fmt = "%(asctime)s   %(name)s\t%(levelname)5s   %(message)s"
logging.basicConfig(filename="/tmp/bt_edison.log", level=logging.DEBUG, format=log_fmt)


def Logger(name):
    """
    defines a logger
    :param name: name of the logger
    :type name: str
    :return: the logger instance
    :rtype: Logger
    """
    return _instance.getChild(name)
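
Note that logging.basicConfig() only configures the root logger, while a Logger built directly with logging.Logger(...) is detached from the logging hierarchy (its parent is None), so records from _instance and its children never reach the file handler installed above. A minimal sketch of the registered alternative, keeping the same names:

import logging

log_fmt = "%(asctime)s   %(name)s\t%(levelname)5s   %(message)s"
logging.basicConfig(filename="/tmp/bt_edison.log", level=logging.DEBUG, format=log_fmt)

# getLogger() registers the logger, so its records propagate to the root
# handlers set up by basicConfig() above.
_instance = logging.getLogger("CS")
_instance.setLevel(logging.DEBUG)


def Logger(name):
    return _instance.getChild(name)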

Example #2
import os
import djcelery
import logging
djcelery.setup_loader()

logger = logging.Logger(name='kepi')

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

ROOT_URLCONF = 'kepi.kepi.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'kepi.kepi.wsgi.application'

# SECURITY WARNING: keep the secret key used in production secret!
Example #3
import argparse
import csv
import collections
import random
import sys
import math
import logging
import os
import copy
import itertools

import util

log = logging.Logger('lookup')
ch = logging.StreamHandler()
#ch.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
ch.setFormatter(logging.Formatter('%(message)s'))
fh = logging.FileHandler("make_split.log")
#fh.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
fh.setFormatter(logging.Formatter('%(message)s'))

log.setLevel(logging.DEBUG)
log.addHandler(ch)
log.addHandler(fh)

TRAIN = "train"
TEST = "test"
VALIDATION = "validation"

config = {}
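
Both handlers above are added without an explicit level, so they stay at NOTSET and emit whatever the logger lets through; the only filter is log.setLevel(logging.DEBUG). A small follow-on sketch, assuming you wanted the console quieter than the log file:

ch.setLevel(logging.WARNING)   # console handler: warnings and above only
fh.setLevel(logging.DEBUG)     # file handler: keep the full DEBUG stream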
Example #4
    help="Set a logging level from 0,1,2,3,4,5"
)
parser.add_argument(
    '-v',
    '--version',
    action='store_true',
    help="Display guiscrcpy version"
)
args = parser.parse_args()

# set argument debug level
if args.debug:
    logging_priority = int(args.debug) * 10
else:
    logging_priority = 30
logger = logging.Logger('guiscrcpy', logging_priority)

# try using pynput, if exception handling not done here, it might fail in CI
try:
    from pynput import keyboard
except Exception as e:
    logger.warning("Running from tty, pass. E:{}".format(e))
    keyboard = None

logger.debug("Received flag {}".format(args.start))

Header(VERSION)

if args.version:
    sys.exit(0)
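
The int(args.debug) * 10 conversion works because the standard levels are spaced by ten (DEBUG=10, INFO=20, WARNING=30, ERROR=40, CRITICAL=50), and the fallback of 30 is WARNING. A minimal sketch of the same mapping written with named constants (level_from_flag is illustrative, not part of guiscrcpy):

import logging

LEVELS = [logging.NOTSET, logging.DEBUG, logging.INFO,
          logging.WARNING, logging.ERROR, logging.CRITICAL]


def level_from_flag(flag):
    # flag is the 0-5 value from the command line; default to WARNING
    return LEVELS[int(flag)] if flag else logging.WARNING


logger = logging.Logger('guiscrcpy', level_from_flag(None))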
Example #5
from gmailmanager import GmailClient
from main_form import Ui_MainWindow
from MsgBox import MsgBox
from preview import PreviewForm
from scannermanager import ScannerManager
from Settings import SettingsManager

# Global vars
ERR_STYLE = "background-color: rgb(255, 0, 0); color: rgb(255, 255, 255);"
COLUMN_COUNT = 7
MAILBOX = 0
PROJECTS = 1
SCAN_TARGET = "*.pdf"
SCAN_SOURCE = "*.png"

logging.Logger("dannyslog")
logging.basicConfig(
    filename="scampy.log.txt",
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    level=logging.DEBUG)


# Main GUI class for SCAMPY
class SCaMPy(QMainWindow, Ui_MainWindow):
    # Private values for last inserted record id's
    _lastIndexOfMail = 0
    _lastIndexOfProject = 0
    _lastIndexOfScans = 0

    # Name of scan target base folder
    _scan_base_folder = ""
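
The bare logging.Logger("dannyslog") call creates a logger object and immediately discards it, so it has no effect; the basicConfig() call is what actually routes records into scampy.log.txt via the root logger. A minimal sketch that keeps a named, registered logger instead:

import logging

logging.basicConfig(
    filename="scampy.log.txt",
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    level=logging.DEBUG)

logger = logging.getLogger("dannyslog")   # registered; propagates to the root handler
logger.info("SCaMPy starting up")         # ends up in scampy.log.txt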
Example #6
def train(
    datasetdir="data/Electronics/",
    train_file="/training_set",
    test_file="/test_set",
    uid_voc="/uid_voc.pkl",
    mid_voc="/mid_voc.pkl",
    cat_voc="/cat_voc.pkl",
    batch_size=128,
    maxlen=30,
    matrix_width=36,
    test_iter=100,
    save_iter=4000000,
    model_type='DRIIN',
    seed=2,
):
    train_file = datasetdir + train_file
    test_file = datasetdir + test_file
    uid_voc = datasetdir + uid_voc
    mid_voc = datasetdir + mid_voc
    cat_voc = datasetdir + cat_voc

    model_path = datasetdir + "/dnn_save_path/ckpt_noshuff" + model_type + str(
        seed)
    best_model_path = datasetdir + "/dnn_best_model/ckpt_noshuff" + model_type + str(
        seed)
    gpu_options = tf.GPUOptions(allow_growth=True)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:

        train_data = DataIterator(train_file,
                                  uid_voc,
                                  mid_voc,
                                  cat_voc,
                                  batch_size,
                                  maxlen,
                                  shuffle_each_epoch=True,
                                  datasetdir=datasetdir)
        test_data = DataIterator(test_file,
                                 uid_voc,
                                 mid_voc,
                                 cat_voc,
                                 batch_size,
                                 maxlen,
                                 datasetdir=datasetdir)
        n_uid, n_mid, n_cat = train_data.get_n()

        model = Model(n_uid,
                      n_mid,
                      n_cat,
                      EMBEDDING_DIM,
                      matrix_width=matrix_width)
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())

        start_time = time.time()

        file1 = logging.FileHandler(
            filename=datasetdir + '/my_logs/' + "model_" +
            str(time.asctime(time.localtime(start_time))) + '.txt',
            mode='a',
            encoding='utf-8')
        logger_accuracy = logging.Logger(name='name_accuracy',
                                         level=logging.INFO)
        logger_accuracy.addHandler(file1)

        logger_accuracy.info("start_time:" +
                             time.asctime(time.localtime(start_time)) + "\r\n")
        logger_accuracy.info(model_type + " " + datasetdir + " maxlen:" +
                             str(maxlen) + " batch_size:" + str(batch_size) +
                             "\r\n")

        file2 = logging.FileHandler(
            filename=datasetdir + '/loss_logs/' + "model_test_" +
            str(time.asctime(time.localtime(start_time))) + '.txt',
            mode='a',
            encoding='utf-8')
        logger_test_loss = logging.Logger(name='name_loss', level=logging.INFO)
        logger_test_loss.addHandler(file2)

        file3 = logging.FileHandler(
            filename=datasetdir + '/loss_logs/' + "model_train_" +
            str(time.asctime(time.localtime(start_time))) + '.txt',
            mode='a',
            encoding='utf-8')
        logger_train_loss = logging.Logger(name='name_loss',
                                           level=logging.INFO)
        logger_train_loss.addHandler(file3)

        iter = 0
        lr = 0.01
        global best_auc
        breakflag = False

        test_auc_log, loss_sum_log, accuracy_sum_log, aux_loss_sum_log, loss_without_aux = eval_DRIIN(
            sess, test_data, model, best_model_path, maxlen)
        logger_accuracy.info(
            'test_auc: %.4f - test_loss: %.4f - test_accuracy: %.4f - test_aux_loss: %.4f - loss_without_aux: %.4f *best_auc: %.4f \r\n'
            % (test_auc_log, loss_sum_log, accuracy_sum_log, aux_loss_sum_log,
               loss_without_aux, best_auc))
        # writer.add_summary(summary, iter)
        print(
            'test_auc: %.4f - test_loss: %.4f - test_accuracy: %.4f - test_aux_loss: %.4f - loss_without_aux: %.4f  *best_auc: %.4f'
            % (test_auc_log, loss_sum_log, accuracy_sum_log, aux_loss_sum_log,
               loss_without_aux, best_auc))
        logger_test_loss.info(
            '%d,%.4f,%.4f,%.4f' % \
            (iter, loss_sum_log, aux_loss_sum_log, loss_without_aux))

        logger_train_loss.info(
            '%d,%.4f,%.4f,%.4f' % \
            (iter, loss_sum_log, aux_loss_sum_log, loss_without_aux))

        for epoch in range(5):
            loss_sum = 0.0
            accuracy_sum = 0.
            aux_loss_sum = 0.
            if breakflag:
                break
            print("epoch:", epoch)
            logger_accuracy.info('epoch: %d\r\n' % epoch)
            for src, tgt in train_data:
                iter += 1
                uids, mids, cats, mid_his, cat_his, mid_mask, target, sl, noclk_mids, noclk_cats = prepare_data_DRIIN(
                    src, tgt, maxlen, return_neg=True)
                loss, acc, aux_loss = model.train(sess, [
                    uids, mids, cats, mid_his, cat_his, mid_mask, target, sl,
                    lr, noclk_mids, noclk_cats
                ])
                loss_sum += loss
                accuracy_sum += acc
                aux_loss_sum += aux_loss

                sys.stdout.flush()
                if (iter % test_iter) == 0:
                    logger_accuracy.info(
                        'iter: %d ----> train_loss: %.4f ---- train_accuracy: %.4f ---- tran_aux_loss: %.4f \r\n' % \
                        (iter, loss_sum / test_iter, accuracy_sum / test_iter, aux_loss_sum / test_iter))
                    print('iter: %d ----> train_loss: %.4f ---- train_accuracy: %.4f ---- tran_aux_loss: %.4f' % \
                          (iter, loss_sum / test_iter, accuracy_sum / test_iter, aux_loss_sum / test_iter))

                    logger_train_loss.info(
                        '%d,%.4f,%.4f,%.4f' % \
                        (iter, loss_sum / test_iter, aux_loss_sum / test_iter, (loss_sum - aux_loss_sum) / test_iter, ))

                    test_auc_log, loss_sum_log, accuracy_sum_log, aux_loss_sum_log, loss_without_aux = eval_DRIIN(
                        sess, test_data, model, best_model_path, maxlen)
                    logger_accuracy.info(
                        'test_auc: %.4f -test_loss: %.4f -test_accuracy: %.4f -test_aux_loss: %.4f -loss_without_aux: %.4f *best_auc: %.4f \r\n'
                        % (test_auc_log, loss_sum_log, accuracy_sum_log,
                           aux_loss_sum_log, loss_without_aux, best_auc))
                    print(
                        'test_auc: %.4f - test_loss: %.4f - test_accuracy: %.4f - test_aux_loss: %.4f - loss_without_aux: %.4f  *best_auc: %.4f'
                        % (test_auc_log, loss_sum_log, accuracy_sum_log,
                           aux_loss_sum_log, loss_without_aux, best_auc))

                    logger_test_loss.info(
                        '%d,%.4f,%.4f,%.4f' % \
                        (iter, loss_sum_log, aux_loss_sum_log, loss_without_aux))

                    loss_sum = 0.0
                    accuracy_sum = 0.0
                    aux_loss_sum = 0.0
                    # if test_auc_log > 0.87:
                    #     test_iter = 10
                    # if iter >= test_iter:
                    #     test_iter = 10
                # if iter == 2500:
                #     test_iter = 100
                # if iter == 6000:
                #     breakflag = True
                #     break

                if (iter % save_iter) == 0:
                    print('save model iter: %d' % (iter))
                    model.save(sess, model_path + "--" + str(iter))
                # if iter == 3000:
                #     lr *= 2

            test_time = time.time()
            print("test interval: " + str((test_time - start_time) / 60.0) +
                  " min")
            logger_accuracy.info("test interval: " +
                                 str((test_time - start_time) / 60.0) +
                                 " min" + "\r\n")

        logger_accuracy.info("end_time:" +
                             time.asctime(time.localtime(time.time())) +
                             "\r\n")
Example #7
import xml.etree.cElementTree as ET
from xml.dom import minidom
from zeep import Client
from lxml import etree

from medicamentos.models import Product, Med, Warehouse
import logging

logger = logging.Logger(__name__)

# client = Client('https://webservices3.ocp.pt/banco_saude/inventory?wsdl')


def soap_auth_header():
    auth = etree.Element("Authentication", xmlns="http://service.bs.ocp.com/")
    username = etree.SubElement(auth, "Username", xmlns="")
    password = etree.SubElement(auth, "Password", xmlns="")

    username.text = "000000"
    password.text = "UeE9BD1"
    return auth


AUTH_HEADER = soap_auth_header()


def get_inventory_level_OCP(med_id="", warehouse_id=""):
    # return works like json object
    '''Example return
    {
    'TransactionState': {
Example #8
import logging
from typing import List

import utils_logging
from eval_db.database import Database

INSERT_STATEMENT = "INSERT INTO single_image_based_distances ('setting_id', 'row_id', 'is_crash', 'vae_loss', 'cae_loss', 'dae_loss', 'sae_loss', 'deeproad_loss') values (?,?,?,?,?,?,?,?);"
logger = logging.Logger("SingleImgDistance")
utils_logging.log_info(logger)


class SingleImgDistance:
    def __init__(self, setting_id: int, row_id: int, is_crash: bool,
                 vae_loss: float, cae_loss: float, dae_loss: float,
                 sae_loss: float, deeproad_loss: float):
        self.setting_id = setting_id
        self.row_id = row_id
        self.is_crash = is_crash
        self.vae_loss = vae_loss
        self.cae_loss = cae_loss
        self.dae_loss = dae_loss
        self.sae_loss = sae_loss
        self.deeproad_loss = deeproad_loss
        self.true_label = None
        self.count_to_crash = None

    def insert_into_db(self, db: Database) -> None:
        int_is_crash = 0
        if self.is_crash:
            int_is_crash = 1
        db.cursor.execute(
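
INSERT_STATEMENT declares eight '?' placeholders, one per column. The sketch below shows how such a statement is typically completed with a DB-API parameter tuple; it is an illustration, not the project's actual continuation of the truncated execute() call:

values = (self.setting_id, self.row_id, int_is_crash, self.vae_loss,
          self.cae_loss, self.dae_loss, self.sae_loss, self.deeproad_loss)
db.cursor.execute(INSERT_STATEMENT, values)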
Example #9
        return all(
            getattr(self, field) == getattr(other, field) for field in fields)

    def __ne__(self, other):
        return not self.__eq__(other)

    @property
    def enabled(self):
        return self._enabled

    @enabled.setter
    def enabled(self, value):
        self._enabled = bool(value)


CONTEXT_LOGGER = logging.Logger("%s.Context" % __name__)


class Context(object):
    def __init__(self, variables=None):
        if not variables:
            variables = {}
        self._variables = dict(variables)
        self._logger = CONTEXT_LOGGER  # This object could be replaced if required
        self._log_target = None

    @classmethod
    def from_variables(cls, **variables):
        """
        Create Context from variables.
Example #10
import subprocess
import os
import mako
import time
import threading
import logging
import weakref
import base64
import traceback
import shutil
import socket
import uuid

from src import widgets

logger = logging.Logger("plugins.dlnarender")

templateGetter = TemplateLookup(os.path.dirname(__file__))

defaultSubclassCode = """
class CustomDeviceType(DeviceType):
    pass
"""


class DLNARenderAgent(devices.Device):
    deviceTypeName = 'DLNARenderAgent'
    readme = os.path.join(os.path.dirname(__file__), "README.md")
    defaultSubclassCode = defaultSubclassCode
    description = "Create an instance of gmediarender to recieve media. Audio is piped to JACK."
Example #11
import logging
from robotlab.dev_utils.plotink import ebb_motion, ebb_serial

from robotlab.math_utils import Vector

slide_logger = logging.Logger("main.device.slide")


class SlideMixIn(object):
    def __init__(self, name):
        self.name = name
        self.port = ebb_serial.openPort()
        if self.port is None:
            slide_logger.error("Can not open the port of slide.")

    def _move(self, v, t):
        if int(v.length) == 0:
            return
        x, y = (v * 100).astype(int)  # mm to um
        t = int(t * 1000)  # s to ms
        ebb_motion.doABMove(self.port, -x, y, t)

    def _up(self):
        ebb_motion.sendPenUp(self.port, 100)

    def _down(self):
        ebb_motion.sendPenDown(self.port, 100)
Example #12
import subprocess


freemem_command = """free | awk 'FNR == 2 {print ($7/1048576)"GB / "($2/1048576)"GB" }'"""
loadavg_command = 'cat /proc/loadavg | cut -d" " -f1-3'
uptime_command = 'cat /proc/uptime | cut -d" " -f1'
diffindex = ['pst', 'prs', 'ftr']
diffindex_full = ['Past', 'Present', 'Future']
banned_userids = ['000000001', '000000002']

mplistener = Listener()
bot = Client(
    bot_name,
    bot_token=bot_token
)
logger = logging.Logger('arcbot')


def pickrandom(mode, filter, count=1, level=''):
    chosen = []
    if mode == 'diff':
        diff = filter
        while len(chosen) < count:
            dup = False
            song = random.choice(songlist)
            for s in chosen:
                if song['id'] == s[3]:
                    dup = True
            if not dup:
                chosen.append([song['title_ja'] if song['title_ja'] else song['title'],
                               diff, song['levels'][diffindex.index(diff)], song['id']])
Example #13
    def __init__(self, config, model_args=[]):
        '''
        Initialize a TFModelABC.

        model_args is a list of strings specifying the names of required
        arguments specific to the model.
        '''

        # Validate and load args
        for arg_name in self.global_args + model_args:
            assert (arg_name in config.keys())

        self.config = deepcopy(config)

        # Set up logging
        self.checkpoint_dir = os.path.join(config['save_dir'], 'checkpoints/')
        self.tf_log_dir = os.path.join(config['save_dir'], 'tflogs/')
        os.makedirs(self.checkpoint_dir, exist_ok=True)
        os.makedirs(self.tf_log_dir, exist_ok=True)

        self.logger = logging.Logger(config['model_name'] + '_logger',
                                     level=logging.DEBUG)
        formatter = logging.Formatter(
            '%(asctime)s - %(levelname)s: %(message)s')
        log_fh = logging.FileHandler(
            os.path.join(config['save_dir'], 'logs.log'))
        log_fh.setLevel(logging.INFO)
        log_fh.setFormatter(formatter)
        debug_fh = logging.FileHandler(
            os.path.join(config['save_dir'], 'debug.log'))
        debug_fh.setLevel(logging.DEBUG)
        debug_fh.setFormatter(formatter)
        print_fh = logging.StreamHandler()
        print_fh.setFormatter(formatter)
        print_fh.setLevel(
            logging.DEBUG if self.config['debug_mode'] else logging.INFO)
        self.logger.addHandler(debug_fh)
        self.logger.addHandler(log_fh)
        self.logger.addHandler(print_fh)

        self.logger.debug('loading card ID mappings')
        map_base = 'datasets/code_mappings/{}_{}.pkl'.format(
            '{}', config['n_cards'])
        with open(map_base.format('encoding'), 'rb') as f:
            self.name_to_id = pickle.load(f)
        with open(map_base.format('decoding'), 'rb') as f:
            self.id_to_name = pickle.load(f)

        self.logger.debug('configuring session and base graph')
        self.graph = tf.Graph()
        tf.set_random_seed(config['random_seed'])

        session_config = tf.ConfigProto(gpu_options=tf.GPUOptions())

        with self.graph.as_default():
            self.sess = tf.Session(graph=self.graph, config=session_config)

            if self.config['debug_mode']:
                self.sess = tf_debug.LocalCLIDebugWrapperSession(self.sess)

            self.logger.debug('building base ops')
            self.global_step = tf.Variable(0,
                                           name='global_step',
                                           trainable=False)

            # TODO: remove when done
            with tf.name_scope('debug'):
                self.debug_feed = tf.placeholder(tf.int32, [], 'debug_feed')

            self.logger.debug('building model graph')
            self.build_graph()

            self.logger.debug('initializing all variables')
            self.sess.run(tf.global_variables_initializer())

            try:
                self.init()
                self.logger.debug('running model-defined init()')
            except NotImplementedError:
                self.logger.warning('no model-defined init() found')
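
The logger itself is created at DEBUG and each handler filters on its own: logs.log keeps INFO and above, debug.log keeps everything, and the console shows DEBUG only when config['debug_mode'] is set. A short usage sketch under those assumptions:

self.logger.debug('batch shapes validated')   # debug.log (and console in debug_mode)
self.logger.info('starting training run')     # logs.log, debug.log and the console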
Example #14
    def __init__(self, settings):
        if not _Thread:
            _late_import()

        self.logger = logging.Logger("unique name", level=logging.INFO)
        self.logger.addHandler(make_log_from_settings(settings))
Example #15
def create_connector(configuration, logger=None):
    """Creator function for the database connection. It necessitates the following information from
    the json_conf dictionary:

    - dbtype (one of sqlite, mysql, postgresql)
    - db (name of the database file, for sqlite, otherwise name of the database)

    If the database is MySQL/PostGreSQL, the method also requires:

    - dbuser
    - dbhost
    - dbpasswd
    - dbport

    These are controlled and added automatically by the json_utils functions.

    :param configuration: configuration dictionary

    :param logger: a logger instance
    :type logger: logging.Logger

    :rtype : MySQLdb.connect | sqlite3.connect | psycopg2.connect

    """

    if logger is None:
        # Create a default null handler
        logger = logging.Logger("null")
        logger.addHandler(logging.NullHandler())

    db_settings = configuration.db_settings
    assert isinstance(db_settings, DBConfiguration)

    func = None
    if db_settings.dbtype == "sqlite":
        if not database_exists("sqlite:///{}".format(db_settings.db)):
            logger.debug("No database found, creating a mock one")
            create_database("sqlite:///{}".format(db_settings.db))
        logger.debug("Connecting to %s", db_settings.db)
        func = sqlite3.connect(database=db_settings.db,
                               check_same_thread=False)
    elif db_settings.dbtype in ("mysql", "postgresql"):
        if db_settings.dbpasswd != '':
            passwd = ":{0}".format(db_settings.dbpasswd)
        else:
            passwd = ''
        url = "{dialect}://{user}{passwd}@{host}:{port}/{db}".format(
            dialect=db_settings.dbtype,
            host=db_settings.dbhost,
            user=db_settings.dbuser,
            passwd=passwd,
            db=db_settings.db,
            port=db_settings.dbport)
        if database_exists(url) is False:
            create_database(url)

        if db_settings.dbtype == "mysql":
            import MySQLdb
            logger.debug("Connecting to MySQL %s", db_settings.db)
            func = MySQLdb.connect(host=db_settings.dbhost,
                                   user=db_settings.dbuser,
                                   passwd=db_settings.dbpasswd,
                                   db=db_settings.db,
                                   port=db_settings.dbport)
        elif db_settings.dbtype == "postgresql":
            import psycopg2
            logger.debug("Connecting to PSQL %s", db_settings.db)
            func = psycopg2.connect(host=db_settings.dbhost,
                                    user=db_settings.dbuser,
                                    password=db_settings.dbpasswd,
                                    database=db_settings.db,
                                    port=db_settings.dbport)
    else:
        raise ValueError("DB type not supported! {0}".format(
            db_settings.dbtype))
    return func
Example #16
    def __init__(self, name):
        self.name = name
        self.platform_log = logging.Logger(name)
        self.task_log = None
Example #17
# Local Modules
import os, logging, signal
from datetime import datetime

# 3rd Party Modules
from pymongo import MongoClient

# Create our logging
logging.basicConfig(format='%(message)s', level=logging.DEBUG)
log = logging.Logger('OCTODB')
log.setLevel(30)

# Create our database data
DB_NAME = os.environ['MONGO_NAME']
DB_HOST = 'ds053439.mlab.com'
DB_PORT = 53439
DB_USER = os.environ['MONGO_USER']
DB_PASS = os.environ['MONGO_PASS']

DATABASE_SCHEMA = {
    'username': None,
    'alias': None,
    'repos': None,
    # repo: { repo_name, repo_url }
    'repo_data': None,
    # repo_data: {'judymoses.github.io': {'.py': 50}}
    'badges': {},
    # badges: {'.py': (svg_file, datetime)}
    'sessions': {
        'email_code': (-1, datetime(1, 1, 1))
        # email_code: (code, datetime)
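
log.setLevel(30) is the numeric form of logging.WARNING. Note also that basicConfig() configures the root logger, while logging.Logger('OCTODB') returns a detached instance with no handlers, so its records never reach that configuration and instead fall back to logging's last-resort stderr handler (warnings and above). A minimal registered-logger sketch:

import logging

logging.basicConfig(format='%(message)s', level=logging.DEBUG)
log = logging.getLogger('OCTODB')
log.setLevel(logging.WARNING)   # same threshold as setLevel(30)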
Example #18
    def add_task_log(self, handler):
        if self.task_log:
            return
        self.task_log = logging.Logger(self.name)
        self.task_log.setLevel(logging.DEBUG)
        self.task_log.addHandler(handler)
Example #19
def main():
    root_logger = logging.Logger("hang_analyzer", level=logging.DEBUG)

    handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(logging.Formatter(fmt="%(message)s"))
    root_logger.addHandler(handler)

    root_logger.info("Python Version: %s" % sys.version)
    root_logger.info("OS: %s" % platform.platform())

    try:
        distro = platform.linux_distribution()
        root_logger.info("Linux Distribution: %s" % str(distro))
    except AttributeError:
        root_logger.warning(
            "Cannot determine Linux distro since Python is too old")

    try:
        uid = os.getuid()
        root_logger.info("Current User: %s" % str(uid))
        current_login = os.getlogin()
        root_logger.info("Current Login: %s" % current_login)
    except OSError:
        root_logger.warning("Cannot determine Unix Current Login")
    except AttributeError:
        root_logger.warning(
            "Cannot determine Unix Current Login, not supported on Windows")

    interesting_processes = [
        "mongo", "mongod", "mongos", "_test", "dbtest", "python", "java"
    ]
    go_processes = []
    process_ids = []

    parser = OptionParser(description=__doc__)
    parser.add_option('-p',
                      '--process-names',
                      dest='process_names',
                      help='Comma separated list of process names to analyze')
    parser.add_option(
        '-g',
        '--go-process-names',
        dest='go_process_names',
        help='Comma separated list of go process names to analyze')
    parser.add_option(
        '-d',
        '--process-ids',
        dest='process_ids',
        default=None,
        help=
        'Comma separated list of process ids (PID) to analyze, overrides -p & -g'
    )
    parser.add_option('-c',
                      '--dump-core',
                      dest='dump_core',
                      action="store_true",
                      default=False,
                      help='Dump core file for each analyzed process')
    parser.add_option(
        '-s',
        '--max-core-dumps-size',
        dest='max_core_dumps_size',
        default=10000,
        help='Maximum total size of core dumps to keep in megabytes')
    parser.add_option(
        '-o',
        '--debugger-output',
        dest='debugger_output',
        action="append",
        choices=['file', 'stdout'],
        default=None,
        help="If 'stdout', then the debugger's output is written to the Python"
        " process's stdout. If 'file', then the debugger's output is written"
        " to a file named debugger_<process>_<pid>.log for each process it"
        " attaches to. This option can be specified multiple times on the"
        " command line to have the debugger's output written to multiple"
        " locations. By default, the debugger's output is written only to the"
        " Python process's stdout.")

    (options, args) = parser.parse_args()

    if options.debugger_output is None:
        options.debugger_output = ['stdout']

    if options.process_ids is not None:
        # process_ids is an int list of PIDs
        process_ids = [int(pid) for pid in options.process_ids.split(',')]

    if options.process_names is not None:
        interesting_processes = options.process_names.split(',')

    if options.go_process_names is not None:
        go_processes = options.go_process_names.split(',')
        interesting_processes += go_processes

    [ps, dbg, jstack] = get_hang_analyzers()

    if ps is None or (dbg is None and jstack is None):
        root_logger.warning("hang_analyzer.py: Unsupported platform: %s" %
                            (sys.platform))
        exit(1)

    all_processes = ps.dump_processes(root_logger)

    # Find all running interesting processes:
    #   If a list of process_ids is supplied, match on that.
    #   Otherwise, do a substring match on interesting_processes.
    if process_ids:
        processes = [(pid, pname) for (pid, pname) in all_processes
                     if pid in process_ids and pid != os.getpid()]

        running_pids = set([pid for (pid, pname) in all_processes])
        missing_pids = set(process_ids) - running_pids
        if missing_pids:
            root_logger.warning(
                "The following requested process ids are not running %s" %
                list(missing_pids))
    else:
        processes = [
            (pid, pname) for (pid, pname) in all_processes
            if any(pname.find(ip) >= 0
                   for ip in interesting_processes) and pid != os.getpid()
        ]
    root_logger.info("Found %d interesting processes %s" %
                     (len(processes), processes))

    max_dump_size_bytes = int(options.max_core_dumps_size) * 1024 * 1024

    # Dump all other processes including go programs, except python & java.
    for (pid, process_name) in [(p, pn) for (p, pn) in processes
                                if not re.match("^(java|python)", pn)]:
        process_logger = get_process_logger(options.debugger_output, pid,
                                            process_name)
        dbg.dump_info(
            root_logger, process_logger, pid, process_name, options.dump_core
            and check_dump_quota(max_dump_size_bytes, dbg.get_dump_ext()))

    # Dump java processes using jstack.
    for (pid, process_name) in [(p, pn) for (p, pn) in processes
                                if pn.startswith("java")]:
        process_logger = get_process_logger(options.debugger_output, pid,
                                            process_name)
        jstack.dump_info(root_logger, process_logger, pid, process_name)

    # Signal go processes to ensure they print out stack traces, and die on POSIX OSes.
    # On Windows, this will simply kill the process since python emulates SIGABRT as
    # TerminateProcess.
    # Note: The stacktrace output may be captured elsewhere (i.e. resmoke).
    for (pid, process_name) in [(p, pn) for (p, pn) in processes
                                if pn in go_processes]:
        root_logger.info(
            "Sending signal SIGABRT to go process %s with PID %d" %
            (process_name, pid))
        signal_process(root_logger, pid, signal.SIGABRT)

    # Dump python processes after signalling them.
    for (pid, process_name) in [(p, pn) for (p, pn) in processes
                                if pn.startswith("python")]:
        root_logger.info(
            "Sending signal SIGUSR1 to python process %s with PID %d" %
            (process_name, pid))
        signal_process(root_logger, pid, signal.SIGUSR1)
        process_logger = get_process_logger(options.debugger_output, pid,
                                            process_name)
        dbg.dump_info(root_logger,
                      process_logger,
                      pid,
                      process_name,
                      take_dump=False)

    root_logger.info("Done analyzing all processes for hangs")
Example #20
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.
"""
An addon for development and test of the generic proxy mechanism
"""
import bpy
import logging
import os
import time

from mixer.blender_data import blenddata

logger = logging.Logger(__name__, logging.INFO)
default_test = "test_module.TestCase.test_name"


class DebugDataProperties(bpy.types.PropertyGroup):
    profile_cumulative: bpy.props.BoolProperty(name="ProfileCumulative",
                                               default=False)
    profile_callers: bpy.props.BoolProperty(name="ProfileCallers",
                                            default=False)
    test_names: bpy.props.StringProperty(name="TestNames",
                                         default=default_test)


proxy = None

Example #21
import pika
import time
import json
import pandas as pd
import re
import os
from datetime import datetime
from downloadfile import download_file_from_google_drive
from send_result import send_analyze_result
import logging

logger = logging.Logger('catch_all')

time.sleep(10)
connection = pika.BlockingConnection(pika.ConnectionParameters(host='rabbitmq', heartbeat=0))
channel = connection.channel()

channel.queue_declare(queue='addNewCsv')

def generate_dataframe_from_file(filename):
  type_word = ['tweet', 'post', 'comment', 'reply', 'reply-comment', 'reply-reply-comment', 'news']
  def repl_func_start_content(matchobj):
      return f'''""'''
  def repl_func_end_content(matchobj):
      return f'"{matchobj.group()}'
  def repl_func_quote(matchobj):
      return f'""'
  def repl_func_content_comma(matchobj):
      return matchobj.group().replace(',', '')
  def repl_func_name(matchobj):
      text = matchobj.group().split(',')
Example #22
    def from_chemsys(cls,
                     chemsys,
                     prefix="proto-dft-2/runs",
                     n_max_atoms=20,
                     agent=None,
                     analyzer=None,
                     experiment=None,
                     log_file="campaign.log",
                     cloudwatch_group="/camd/worker/dev/"):
        """
        Class factory method for constructing campaign from
        chemsys.

        Args:
            chemsys (str): chemical system for the campaign
            prefix (str): prefix for s3
            n_max_atoms (int): number of maximum atoms
            agent (Agent): agent for stability campaign
            analyzer (Analyzer): analyzer for stability campaign
            experiment (Agent): experiment for stability campaign
            log_file (str): log filename
            cloudwatch_group (str): cloudwatch group to log to

        Returns:
            (ProtoDFTCampaign): Standard proto-dft campaign from
                the chemical system

        """
        logger = logging.Logger("camd")
        logger.setLevel("INFO")
        file_handler = logging.FileHandler(log_file)
        cw_handler = CloudWatchLogHandler(log_group=cloudwatch_group,
                                          stream_name=chemsys)
        logger.addHandler(file_handler)
        logger.addHandler(cw_handler)
        logger.addHandler(logging.StreamHandler())

        logger.info(
            "Starting campaign factory from_chemsys {}".format(chemsys))
        s3_prefix = "{}/{}".format(prefix, chemsys)

        # Initialize s3
        dumpfn({
            "started": datetime.now().isoformat(),
            "version": __version__
        }, "start.json")
        s3_sync(s3_bucket=CAMD_S3_BUCKET, s3_prefix=s3_prefix, sync_path='.')

        # Get structure domain
        # Check cache
        cache_key = "protosearch_cache/v1/{}/{}/candidates.pickle".format(
            chemsys, n_max_atoms)
        # TODO: create test of isfile
        if s3_key_exists(bucket=CAMD_S3_BUCKET, key=cache_key):
            logger.info("Found cached protosearch domain.")
            candidate_data = pd.read_pickle("s3://{}/{}".format(
                CAMD_S3_BUCKET, cache_key))
            logger.info("Loaded cached {}.".format(cache_key))
        else:
            logger.info(
                "Generating domain with max {} atoms.".format(n_max_atoms))
            element_list = chemsys.split('-')
            max_coeff, charge_balanced = heuristic_setup(element_list)
            domain = StructureDomain.from_bounds(
                element_list,
                charge_balanced=charge_balanced,
                n_max_atoms=n_max_atoms,
                **{'grid': range(1, max_coeff)})
            candidate_data = domain.candidates()
            logger.info("Candidates generated")
            candidate_data.to_pickle("s3://{}/{}".format(
                CAMD_S3_BUCKET, cache_key))
            logger.info("Cached protosearch domain at {}.".format(cache_key))

        # Dump structure/candidate data
        candidate_data.to_pickle("candidate_data.pickle")
        s3_sync(s3_bucket=CAMD_S3_BUCKET, s3_prefix=s3_prefix, sync_path='.')

        # Set up agents and loop parameters
        agent = agent or AgentStabilityAdaBoost(
            model=MLPRegressor(hidden_layer_sizes=(84, 50)),
            n_query=10,
            hull_distance=0.2,
            exploit_fraction=1.0,
            uncertainty=True,
            alpha=0.5,
            diversify=True,
            n_estimators=20)
        analyzer = analyzer or StabilityAnalyzer(hull_distance=0.2)
        experiment = experiment or OqmdDFTonMC1(timeout=30000,
                                                prefix_append="proto-dft")
        seed_data = load_dataframe("oqmd1.2_exp_based_entries_featurized_v2")

        # Load cached experiments
        logger.info("Loading cached experiments")
        cached_experiments = experiment.fetch_cached(candidate_data)
        logger.info("Found {} experiments.".format(len(cached_experiments)))
        if len(cached_experiments) > 0:
            summary, seed_data = analyzer.analyze(cached_experiments,
                                                  seed_data)
            # Remove cached experiments from candidate_data
            candidate_space = candidate_data.index.difference(
                cached_experiments.index, sort=False).tolist()
            candidate_data = candidate_data.loc[candidate_space]
            logger.info("Cached experiments added to seed.")

        # Construct and start loop
        return cls(candidate_data=candidate_data,
                   agent=agent,
                   experiment=experiment,
                   analyzer=analyzer,
                   seed_data=seed_data,
                   heuristic_stopper=5,
                   s3_prefix=s3_prefix,
                   logger=logger)
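
setLevel() accepts a level name as well as the numeric constant, so the setLevel("INFO") call above is equivalent to:

logger.setLevel(logging.INFO)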
Example #23
import pathlib
import os
import logging
from dataclasses import dataclass, field
import uuid
from typing import Callable, Dict, Type

import utils
import actions
from execution_graph import ExecutionGraph, GraphNode
from actions import Action
import node_parsers
import errors
from execution_context import ExecutionContext
import const

logger = logging.Logger(__file__)

SPECIAL_KEYS = [const.PYRS_DIR_NODE]

KLASS_MAP = {
    "file_syncs": actions.FileSync,
    "installation": actions.Installation,
    "environment_condition": actions.EnvironmentCondition,
    "installations": actions.Installation,
}

PARSER_MAP: Dict[Type, Callable] = {
    actions.FileSync: node_parsers.parse_file_sync,
    actions.Installation: node_parsers.parse_installation,
}
Example #24
#!/usr/bin/env python
# -*- coding:utf-8 -*-

import sys
import os
import time
import datetime
import fabric2
import logging
from logging import handlers
import yaml


# Logger initialization
opslog = logging.Logger('deployer')
fmt = logging.Formatter(fmt='%(asctime)s - %(name)s - %(levelname)s : %(message)s',
                        datefmt='%Y%m%d %H:%M:%S,')
hd = handlers.TimedRotatingFileHandler('logs/deployer.log', when='midnight')
hd.setLevel(logging.INFO)
hd.setFormatter(fmt)
hd.suffix='%Y%m%d-%H%M%S'
opslog.addHandler(hd)
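
No level is set on the 'deployer' logger itself (it defaults to NOTSET), so filtering happens at the handler: hd.setLevel(logging.INFO) drops DEBUG records, and hd.suffix controls the timestamp appended to rotated files. A short usage sketch:

opslog.info('deploy started')     # written to logs/deployer.log
opslog.debug('verbose detail')    # dropped by the INFO-level handler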


#TODO get svn repo with url

#TODO maven  /opt/apache-maven-3.6.0/bin/mvn  clean package -Pqa  -Dmaven.test.skip=true  -Dmaven.compile.fork=true

#TODO deployer with host

Example #25
class WSGIRequestHandlerLogging(WSGIRequestHandler, Logger.ClassLogger):
    """
    """
    def log_message(self, format, *args):
        """
        """
        try:
            self.trace("%s %s %s" % args)
        except BaseException:
            print(args)


if sys.version_info > (3, ):
    _my_logger = None
else:
    _my_logger = logging.Logger("LOG")
    _my_logger.setLevel(logging.INFO)
    _hnd = logging.StreamHandler(sys.stdout)
    _my_logger.addHandler(_hnd)
"""
Webservices routing
"""


class _WebServices(WSGI):
    # This allows * on all Handlers
    headers = [
        ("Access-Control-Allow-Origin", "*"),
        ("Access-Control-Allow-Methods", "*"),
        ("Access-Control-Allow-Headers",
         "X-Requested-With, Content-Type, Origin, Authorization, Accept, Client-Security-Token, Accept-Encoding"
Example #26
    def __init__(self):
        self._configure_logger()
        self._logger = logging.Logger("Application")
        # TODO: Figure out why this is necessary
        self._logger.addHandler(stdout)
        self.loop = asyncio.get_event_loop()
Example #27
import logging

import requests

from . import errors

logger = logging.Logger('yadisk-api')

_CODE_TO_ERROR = {
    401: errors.UnauthorizedError,
    403: errors.ForbiddenError,
    409: errors.DiskPathError,
    404: errors.NotFoundError,
    412: errors.PreconditionFailed,
    413: errors.PayloadTooLarge,
    500: errors.InternalServerError,
    503: errors.ServiceUnavailable,
    507: errors.InsufficientStorageError,
}

STATUS_OK = 200
STATUS_CREATED = 201
STATUS_ACCEPTED = 202
STATUS_NO_CONTENT = 204

OK_STATUSES = {
    STATUS_OK,
    STATUS_CREATED,
    STATUS_ACCEPTED,
    STATUS_NO_CONTENT,
}
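
A sketch of how a table like _CODE_TO_ERROR is typically consumed; raise_for_status_code below is hypothetical and not part of the module shown:

def raise_for_status_code(response):
    # Hypothetical helper: translate API status codes into the exceptions above.
    if response.status_code in OK_STATUSES:
        return response
    error_cls = _CODE_TO_ERROR.get(response.status_code)
    if error_cls is not None:
        raise error_cls(response.text)
    response.raise_for_status()   # fall back to requests' own HTTPError
    return response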
Example #28
from level_2_optionals_cdsu_options import project_id
from modules.level_1_a_data_acquisition import sql_retrieve_df
from modules.level_1_b_data_processing import null_analysis, options_scraping_v2, remove_zero_price_total_vhe, lowercase_column_conversion, remove_rows, remove_columns, string_replacer, color_replacement, new_column_creation, score_calculation, duplicate_removal, total_price, margin_calculation, new_features, column_rename
from modules.level_1_d_model_evaluation import data_grouping_by_locals_temp
from modules.level_1_e_deployment import sql_inject, sql_delete, sql_date_comparison
from modules.level_0_performance_report import performance_info_append, error_upload, log_record, project_dict, performance_info
pd.set_option('display.expand_frame_repr', False)

logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s %(levelname)s %(message)s',
    datefmt='%H:%M:%S @ %d/%m/%y',
    filename=level_2_optionals_cdsu_options.log_files['full_log'],
    filemode='a')
logging.Logger('errors')
# logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))  # Allows the stdout to be seen in the console
logging.getLogger().addHandler(logging.StreamHandler(
    sys.stderr))  # Allows the stderr to be seen in the console

configuration_parameters = level_2_optionals_cdsu_options.selected_configuration_parameters

dict_sql_upload_flag = 0


def main():
    log_record('Projeto: Sugestão Encomenda CDSU - Viaturas', project_id)

    query_filters = {'NLR_CODE': '4R0', 'Franchise_Code_DW': '43'}

    df = data_acquisition(query_filters)
Example #29
"""
Keepalive worker
"""

import logging
from ..utilities.address import Address
from ..utilities.status import Status
from ..utilities.tag import Tag
from ..utilities.data_indexes import SubprocessorIndex

_SubprocessorIndex = SubprocessorIndex()

logger = logging.Logger('workers.utim_worker_keepalive')


def process(utim, data):
    """
    Run process
    """

    logger.info('Got keepalive!')
    outbound_item = [
        Address.ADDRESS_UTIM, Address.ADDRESS_UHOST, Status.STATUS_PROCESS,
        Tag.UCOMMAND.KEEPALIVE_ANSWER
    ]
    return outbound_item
Example #30
from sentry.tasks.base import instrumented_task, retry
from sentry.utils.http import absolute_uri
from sentry.api.serializers import serialize, AppPlatformEvent
from sentry.models import (
    SentryAppInstallation,
    Group,
    Project,
    Organization,
    User,
    ServiceHook,
    ServiceHookProject,
    SentryApp,
)
from sentry.models.sentryapp import VALID_EVENTS

logger = logging.Logger('sentry.tasks.sentry_apps')

TASK_OPTIONS = {
    'queue': 'app_platform',
    'default_retry_delay': (60 * 5),  # Five minutes.
    'max_retries': 3,
}

# We call some models by a different name, publicly, than their class name.
# For example the model Group is called "Issue" in the UI. We want the Service
# Hook events to match what we externally call these primitives.
RESOURCE_RENAMES = {
    'Group': 'issue',
}

TYPES = {