Example #1
def _check_dependency_version(name, version):
    import sys
    from distutils.version import StrictVersion

    from core.preferences import prefs
    from utils.logger import get_logger
    logger = get_logger(__name__)

    module = sys.modules[name]
    if not isinstance(module.__version__, basestring):  # mocked module
        return
    if not StrictVersion(module.__version__) >= StrictVersion(version):
        message = '%s is outdated (got version %s, need version %s)' % (name,
                                                                        module.__version__,
                                                                        version)
        if prefs.core.outdated_dependency_error:
            raise ImportError(message)
        else:
            logger.warn(message, 'outdated_dependency')
Example #2
import sys
import os

sys.path.append(os.path.abspath("."))
sys.dont_write_bytecode = True

__author__ = "bigfatnoob"

from store import base_store
from utils import lib, logger, cache
import properties

LOGGER = logger.get_logger(os.path.basename(__file__.split(".")[0]))


class InputStore(base_store.InputStore):
  def __init__(self, dataset, **kwargs):
    base_store.InputStore.__init__(self, dataset, **kwargs)

  def load_inputs(self, args_key):
    args_index = cache.load_json(lib.get_dataset_arg_index(self.dataset))
    if args_key not in args_index:
      return None
    arg_files_name = os.path.join(lib.get_dataset_args_folder(self.dataset), "%s.json" % args_index[args_key])
    arguments = cache.load_json(arg_files_name)
    assert len(arguments) == properties.FUZZ_ARGUMENT_SIZE
    if self.is_array(arguments):
      key_args = arguments
    else:
      key_args = [[] for _ in range(len(arguments[0]))]
      for i in range(len(arguments[0])):
Example #3
from django.http.response import Http404
from django.shortcuts import get_object_or_404, redirect
from django.core.exceptions import PermissionDenied
from django.urls import reverse

from core import models as core_models
from review import models as review_models
from production import models as production_models
from submission import models
from copyediting import models as copyediting_models
from proofing import models as proofing_models
from security.logic import can_edit_file, can_view_file_history, can_view_file, is_data_figure_file
from utils import setting_handler
from utils.logger import get_logger

logger = get_logger(__name__)

# General role-based security decorators


def base_check(request):
    if request is None or request.user is None:
        return False

    if request.user.is_anonymous() or not request.user.is_active:
        return False

    return True


def editor_is_not_author(func):
Example #4
    def __init__(self, args, queue=None, mode='train', elmo=None):
        self.logger = logger = get_logger(__name__)
        self.config = config = args.config
        self.elmo = elmo

        # Epoch variable and its update op
        self.epoch = tf.Variable(0, trainable=False)
        self.epoch_incr = self.epoch.assign(self.epoch + 1)
        self.queue = queue
        self.embedding_size = e_size = config.embedding_size
        self.num_classes = num_classes = config.num_classes
        # We can keep a larger batch size during evaluation to speed up computation
        if mode == 'train':
            self.batch_size = batch_size = config.batch_size
        else:
            self.batch_size = batch_size = config.eval_batch_size
        self.keep_prob = keep_prob = config.keep_prob
        self.clipped_norm = clipped_norm = config.clipped_norm

        # Learning rate variable and its update op
        self.learning_rate = tf.get_variable(
            "lr", shape=[], dtype=tf.float32, trainable=False,
            initializer=tf.constant_initializer(config.lr)
        )
        self.global_step = tf.Variable(0, trainable=False)

        # Feeding inputs for evaluation
        self.inputs = tf.placeholder(tf.int64, [batch_size, args.config.seq_len])
        self.labels = tf.placeholder(tf.int64, [batch_size])
        self.segment_id = tf.placeholder(tf.int64, [batch_size])

        # Logic for embeddings
        self.w2v_embeddings = tf.placeholder(tf.float32, [args.vocab_size, e_size])
        if config.cnn_mode == 'static':
            embeddings = tf.get_variable(
                "embedding", [args.vocab_size, e_size],
                initializer=random_uniform(0.25),
                trainable=False
            )
        else:
            embeddings = tf.get_variable(
                "embedding", [args.vocab_size, e_size],
                initializer=random_uniform(0.25),
                trainable=True
            )
        # Used in the static / non-static configurations
        self.load_embeddings = embeddings.assign(self.w2v_embeddings)
        # Looking up input embeddings
        self.embedding_lookup = tf.nn.embedding_lookup(embeddings, self.inputs)

        if config.elmo is True:
            # Load the embeddings from the feed_dict
            self.input_strings = tf.placeholder(tf.string, [batch_size])
            self.embedding_lookup = elmo(self.input_strings, signature='default', as_dict=True)['elmo']
            self.input_vectors = input_vectors = tf.expand_dims(
                self.embedding_lookup, axis=3
            )
            self.embedding_size = e_size = 1024
        else:
            self.input_vectors = input_vectors = tf.expand_dims(self.embedding_lookup, axis=3)

        # Apply a convolutional layer
        conv_outputs = []
        self.debug = []
        for i, filter_specs in enumerate(config.conv_filters):
            size = filter_specs['size']
            channels = filter_specs['channels']
            debug = {}
            with tf.variable_scope("conv%d" % i):
                # Convolution Layer begins
                debug['filter'] = conv_filter = tf.get_variable(
                    "conv_filter%d" % i, [size, e_size, 1, channels],
                    initializer=random_uniform(0.01)
                )
                debug['bias'] = bias = tf.get_variable(
                    "conv_bias%d" % i, [channels],
                    initializer=tf.zeros_initializer()
                )
                debug['conv_out'] = output = tf.nn.conv2d(input_vectors, conv_filter, [1, 1, 1, 1], "VALID") + bias
                # Applying non-linearity
                output = tf.nn.relu(output)
                # Pooling layer, max over time for each channel
                debug['output'] = output = tf.reduce_max(output, axis=[1, 2])
                conv_outputs.append(output)
                self.debug.append(debug)

        # Concatenate all different filter outputs before fully connected layers
        conv_outputs = tf.concat(conv_outputs, axis=1)
        total_channels = conv_outputs.get_shape()[-1]

        # Adding a dropout layer during training
        # tf.nn.dropout is an inverted dropout implementation
        if mode == 'train':
            conv_outputs = tf.nn.dropout(conv_outputs, keep_prob=keep_prob)

        # Apply a fully connected layer
        with tf.variable_scope("full_connected"):
            self.W = W = tf.get_variable(
                "fc_weight", [total_channels, num_classes],
                initializer=random_uniform(math.sqrt(6.0 / (total_channels.value + num_classes)))
            )
            self.clipped_W = clipped_W = tf.clip_by_norm(W, clipped_norm)
            self.b = b = tf.get_variable(
                "fc_bias", [num_classes],
                initializer=tf.zeros_initializer()
            )
            self.logits = tf.matmul(conv_outputs, W) + b

        # Declare the vanilla cross-entropy loss function
        self.softmax = tf.nn.softmax(self.logits)
        self.one_hot_labels = tf.one_hot(self.labels, num_classes)

        # TODO :- For compatibility with future versions, stop gradient for label tensors
        self.loss1 = tf.nn.softmax_cross_entropy_with_logits(
            logits=self.logits,
            labels=self.one_hot_labels
        )
        self.cost1 = tf.reduce_sum(self.loss1) / batch_size

        # Declare the soft-label distillation loss function
        self.soft_labels = tf.placeholder(tf.float32, [batch_size, num_classes])
        self.loss2 = tf.nn.softmax_cross_entropy_with_logits(
            logits=self.logits,
            labels=self.soft_labels
        )
        self.cost2 = tf.reduce_sum(self.loss2) / batch_size

        # Interpolate the loss functions
        self.l1_weight = tf.placeholder(tf.float32)
        if config.iterative is False:
            self.final_cost = self.cost1
        else:
            self.final_cost = self.l1_weight * self.cost1 + (1.0 - self.l1_weight) * self.cost2

        if config.optimizer == 'adadelta':
            opt = tf.train.AdadeltaOptimizer(
                learning_rate=self.learning_rate,
                rho=0.95,
                epsilon=1e-6
            )
        else:
            opt = tf.train.AdamOptimizer(
                learning_rate=self.learning_rate
            )

        if mode == 'train':
            for variable in tf.trainable_variables():
                logger.info("%s - %s", variable.name, str(variable.get_shape()))
        # Apply optimizer to minimize loss
        self.updates = opt.minimize(self.final_cost, global_step=self.global_step)

        # Clip fully connected layer's norm
        with tf.control_dependencies([self.updates]):
            self.clip = W.assign(clipped_W)

        self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=1)
        self.best_saver = tf.train.Saver(tf.global_variables(), max_to_keep=1)
Example #5
# -*- coding: utf-8 -*-
import torch
from torch.nn.utils.convert_parameters import vector_to_parameters, parameters_to_vector

from baselines import base
from utils.logger import get_logger
logger = get_logger()


class Agent(base.Agent):
    # References:
    # [1] Lillicrap T P, Hunt J J, Pritzel A, et al. Continuous control with deep reinforcement learning[J].
    #     arXiv preprint arXiv:1509.02971, 2015.
    def __init__(self,
                 actor,
                 target_actor,
                 optimizer_actor,
                 critic,
                 target_critic,
                 optimizer_critic,
                 loss,
                 replay_module,
                 noise_generator,
                 reward_gamma=0.99,
                 tau=1e-3,
                 warmup_size=100,
                 explore_fraction=0.3):
        """ Just follow the `Algorithm 1` in [1], suppose any element of action in [-1, 1]
        Args:
            actor: actor network
            target_actor: target actor network
Example #6
This functionality can be handled using Logstash.

"""
import pymongo
from elasticsearch import Elasticsearch

from newscrapy import settings
from utils.schedule import Scheduling
from utils.logger import get_logger
from utils.mongo import MongDAO

es = Elasticsearch(host=settings.ELASTIC_SERVER,
                   port=settings.ELASTIC_PORT,
                   timeout=60)

logger = get_logger('mig')

mongdao = MongDAO(settings)


def entries_to_remove(entries, the_dict):
    for key in entries:
        if key in the_dict:
            del the_dict[key]


def all_unset():
    mongdao.update_many('articles', {}, {"$set": {"elas": 0}})


cond = {'$or': [{'elas': None}, {'elas': {'$exists': False}}, {'elas': 0}]}
Example #7
from PyQt5.QtMultimedia import QMediaPlayer, QMediaContent

from utils import logger

log = logger.get_logger(__name__)


player = QMediaPlayer(flags=QMediaPlayer.LowLatency)


def play(mp3_data: QMediaContent, volume: int = 100):
    try:
        player.setMedia(mp3_data)
        player.setVolume(volume)
        player.play()
    except Exception as e:
        log.warning(f"Unable to play sound. {e}", exc_info=True)
Example #8
from annotators.conf import TITLE_TO_ID, ENDPOINTS
from utils.html import check_json_response, post_request
from utils.text import remove_illegal_chars
from utils.store import get_wiki_store
from utils.logger import get_logger


logger = get_logger()
wiki_id_db = get_wiki_store(TITLE_TO_ID["spotlight"])


def get_entities(text, conf=0.45):
    """
    A function to get annotations.
    :param text: str: text to annotate
    :param conf: float: confidence
    :return: list
    """

    payload = {"text": remove_illegal_chars(text), "confidence": conf}
    spotlight_headers = {'accept': 'application/json',
                         'content-type': 'application/x-www-form-urlencoded'}

    response = post_request(ENDPOINTS["spotlight"], payload, spotlight_headers)
    return check_json_response(response)


def format_data(json_response):
    """
    A function for formatting output data in a dexter-eval format.
    :param json_response: dict: JSON response returned by the annotator
Example #9
import multiprocessing
import time

import cv2
import numpy as np
import requests
from pyzbar.pyzbar import decode
from pyzbar.pyzbar import ZBarSymbol

from qr_scanner import decoder
from utils.logger import get_logger

logger = get_logger('JFA REST Server Gateway')


class QR_Scanner(multiprocessing.Process):
    def __init__(self, task_queue, result_queue, camera_id):
        multiprocessing.Process.__init__(self)
        self.camera_id = camera_id
        self.task_queue = task_queue
        self.result_queue = result_queue

    @staticmethod
    def decode(image):
        gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # convert to grayscale before decoding
        barcode = decode(gray_img, symbols=[ZBarSymbol.QRCODE])
Example #10
from typing import Dict

import torch.nn as nn
import torchvision.models as models

from utils.paths import Paths
from utils.logger import setup_logger, get_logger
from dataloader import DataLoader
from metric import Metric
from executor.trainer import Trainer

from .base_model import BaseModel
from .common.device import setup_device, data_parallel
from .common.criterion import make_criterion
from .common.optimizer import make_optimizer
from .common.ckpt import load_ckpt

LOG = get_logger(__name__)


class VGG(BaseModel):
    """VGG Model Class"""
    def __init__(self, config: Dict):
        super().__init__(config)
        self.model = None
        self.model_name = self.config.model.name
        self.n_classes = self.config.model.n_classes
        self.classes = self.config.data.classes
        self.batch_size = self.config.train.batch_size
        self.n_gpus = self.config.train.n_gpus
        self.resume = self.config.model.resume

        # dataloader
Example #11
import sys

from .base_model import BaseModel
import pandas as pd
import numpy as np
sys.path.append('.')

# external
# TODO:
# get the model library for your project

# internal
from utils.logger import get_logger
from utils.dataloader import DataLoader
from executor.model_trainer import ModelTrainer
from executor.model_evaluator import ModelEvaluator

LOG = get_logger('modelname')


class ModelName(BaseModel):
    """Model Class"""
    def __init__(self, config):
        super().__init__(config)

        self.model = None
        self.init_model = None
        self.dataset = None
        self.info = None
        self.model = None
        self.X_pipeline = []
        self.y_pipeline = []
        self.X_train = None
Example #12
from django.shortcuts import render
from rest_framework.response import Response

from assessment.models import AssessSurveyRelation, AssessProject, AssessUser, AssessSurveyUserDistribute
from front.models import PeopleSurveyRelation, SurveyInfo, UserQuestionAnswerInfo
from front.serializers import PeopleSurveyResultSimpleSerializer
from front.tasks import algorithm_task
from front.views import people_login
from survey.models import Survey
from utils.logger import get_logger
from utils.response import ErrorCode
from utils.views import WdTemplateView, AuthenticationExceptView
from wduser.models import People, AuthUser
from wduser.user_utils import UserAccountUtils

logger = get_logger("operation")


class SurveyAnswerCheckView(AuthenticationExceptView, WdTemplateView):
    # Survey answer check page
    template_name = 'answer_check.html'


class OperationIndexView(AuthenticationExceptView, WdTemplateView):
    # Operations index page
    template_name = 'ops_index.html'


def answer_check_rst(request):
    ctx = []
    if request.POST:
Example #13
    def __init__(self, node_name, *a, **kw):
        self.node_name = node_name
        self.log = logger.get_logger('http', node=self.node_name)

        super(BOSNetHTTPServerRequestHandler, self).__init__(*a, **kw)
Example #14
import datetime
import json
from django.core.management import BaseCommand
from django.db import transaction

from assessment.models import AssessOrganization, AssessProject
from front.models import PeopleSurveyRelation, UserQuestionAnswerInfo, SurveyInfo, SurveyQuestionInfo
from front.serializers import PeopleSurveySerializer
from front.tasks import algorithm_task, get_report
from question.models import Question
from research.models import ResearchDimension, ResearchSubstandard, ResearchModel
from survey.models import SurveyQuestionRelation, Survey, SurveyQuestionResult
from utils import time_format
from utils.logger import get_logger
from wduser.models import People, PeopleOrganization, EnterpriseInfo, Organization

logger = get_logger("ops_etl")


def utf8_more_info(more_info):
    mi = json.loads(more_info)
    nmi = []

    for o in mi:
        nmi.append({"key_name": o["key_name"], "key_value": o["key_value"]})
    return json.dumps(nmi, ensure_ascii=False)


def etl_people_info(project_id, survey_id, enterprise_info, t, limit_num=None):
    if not project_id:
        return
    ename = enterprise_info.cn_name
Example #15
import time
import requests

from utils.logger import get_logger

# log api calls
logger_api = get_logger('api')

BASE_URL = 'https://api.helium.io/v1'

headers = {'user-agent': 'python-requests/2.25.1'}


def get_account(account_address):
    '''
    Gets information on an account to retrieve the current balance and the current height.

    Args:
        account_address: string of account address

    Returns:
        account: dict of the account; balance and height are inside
    '''

    url = f'{BASE_URL}/accounts/{account_address}'

    i = 0
    timeout = 0
    while i < 10:
        # relax
        time.sleep(max(2**i - 1, timeout / 1000))
Example #16
    def __init__(self, *args, **kwargs):
        super(CommonHandler, self).__init__(*args, **kwargs)
        self.logger = get_logger('wineshop')
        self._session = None
Example #17
# -*- coding: utf-8 -*-
import argparse
import os
import pandas as pd
import yaml
from tqdm import tqdm

from utils.db_connection import get_connection
from utils.create_dir import create_dir
from utils.logger import get_logger
from utils.read_file import read_file

logging = get_logger()


def download_dataset(sql_file, conn, output_file):
    sql = read_file(sql_file)
    logging.info(sql)

    df = pd.read_sql_query(sql, con=conn, chunksize=100000)
    chunk = next(df)
    chunk.drop_duplicates().to_csv(output_file, mode='w', index=None)
    for chunk in tqdm(df):
        chunk.drop_duplicates().to_csv(output_file, mode='a', index=None, header=None)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('--config_file', type=str, default='./etc/foo.cfg')

    args = parser.parse_args()
Example #18
import inspect
import os
import sys

CURRENTDIR = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
PARENTDIR = os.path.dirname(CURRENTDIR)
sys.path.insert(0, PARENTDIR)

import django

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
django.setup()

from utils import utils
from utils.logger import get_logger


Logger = get_logger("workflow")


def main():
    """
        Create worker event for workflow
    """
    Logger.debug(unicode("Starting follow_the_sun"))
    utils.default_queue.enqueue("ticket.follow_the_sun")
    Logger.debug(unicode("Starting update_waiting"))
    utils.default_queue.enqueue("ticket.update_waiting")
    Logger.debug(unicode("Starting update_paused"))
    utils.default_queue.enqueue("ticket.update_paused")


if __name__ == "__main__":
Example #19
import inspect
import os
import sys

CURRENTDIR = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
PARENTDIR = os.path.dirname(CURRENTDIR)
sys.path.insert(0, PARENTDIR)

import django
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
django.setup()

import logutils
from utils import utils
from utils.logger import get_logger


Logger = get_logger('threshold')


def main():
    """
        Create worker event for report threshold
    """
    Logger.debug(unicode('Starting report threshold checks'))
    utils.default_queue.enqueue('report.create_ticket_with_threshold')

    for handler in Logger.handlers:
        if isinstance(handler, logutils.queue.QueueHandler):
            handler.queue.join()

if __name__ == "__main__":
    main()
Example #20
        self.__dict__.update(kwds)


try:
    ###
    # workaround for solving the issue of multi-worker
    # https://github.com/pytorch/pytorch/issues/973
    import resource

    rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
    resource.setrlimit(resource.RLIMIT_NOFILE, (10000, rlimit[1]))
    ###
except:
    pass

logger = get_logger('exp_logger')
__all__ = ['GranRunner', 'compute_edge_ratio', 'get_graph', 'evaluate']

NPR = np.random.RandomState(seed=1234)


def compute_edge_ratio(G_list):
    num_edges_max, num_edges = .0, .0
    for gg in G_list:
        num_nodes = gg.number_of_nodes()
        num_edges += gg.number_of_edges()
        num_edges_max += num_nodes**2

    ratio = (num_edges_max - num_edges) / num_edges
    return ratio
Example #21
import logging
import os

import torch
from theconf import Config as C, ConfigArgumentParser
from torch import nn, optim
from torch.nn.parallel.data_parallel import DataParallel
from tqdm import tqdm
from warmup_scheduler import GradualWarmupScheduler

from data.data import get_dataloaders
from models.networks import get_model, num_class
from utils.config import *
from utils.logger import get_logger
from utils.lr_scheduler import adjust_learning_rate_resnet
from utils.metrics import accuracy, Accumulator

logger = get_logger('Athena')
logger.setLevel(logging.INFO)


def run_epoch(model,
              loader,
              loss_fn,
              optimizer,
              desc_default='',
              epoch=0,
              writer=None,
              verbose=1,
              scheduler=None):
    tqdm_disable = bool(os.environ.get('TASK_NAME',
                                       ''))  # KakaoBrain Environment
    if verbose:
Example #22
import sys
from pathlib import Path

import click
import pandas as pd
from dotenv import find_dotenv, load_dotenv

project_dir = Path(__file__).resolve().parents[2]
# find .env automagically by walking up directories until it's found, then
# load up the .env entries as environment variables
load_dotenv(find_dotenv())
sys.path.append(str(project_dir / "src"))
from utils.path import get_suffix
from utils.logger import create_logger, get_logger


@click.command()
@click.argument('input_filepath', type=click.Path(exists=True))
@click.argument('output_filepath', type=click.Path())
def raw2feather(input_filepath, output_filepath):
    if get_suffix(input_filepath) not in [
            'csv', 'tsv', 'gzip', 'bz2', 'zip', 'xz'
    ]:
        raise ValueError("Unsupported file format.")
    pd.read_csv(input_filepath).to_feather(output_filepath)


def main():
    raw2feather()


if __name__ == '__main__':
    logfile = "raw2feather"
    create_logger(logfile)
    get_logger(logfile).info("Convert file")
    main()
    get_logger(logfile).info("Saved")
Example #23
import json

from django.http import HttpResponseRedirect
from rest_framework.response import Response

from WeiDuAdmin import settings
from assessment.models import AssessSurveyRelation, AssessProject, AssessUser, AssessSurveyUserDistribute
from front.models import PeopleSurveyRelation
from front.tasks import send_one_user_survey
from front.views import people_login
from survey.models import Survey
from utils.logger import get_logger
from utils.response import ErrorCode
from utils.views import WdTemplateView, AuthenticationExceptView
from wduser.models import People, AuthUser
from wduser.user_utils import UserAccountUtils

logger = get_logger("front")


def open_people_assess_survey_user_distribute(assess_id, survey_id, people_id):
    # Project survey user distribution array
    try:
        a = AssessSurveyUserDistribute.objects.get(assess_id=assess_id, survey_id=survey_id)
        a_list = json.loads(a.people_ids)
        if people_id not in a_list:
            a_list.append(people_id)
            a.people_ids = json.dumps(a_list)
            a.save()
    except:
        a = AssessSurveyUserDistribute.objects.create(assess_id=assess_id, survey_id=survey_id)
        a.people_ids = json.dumps([people_id])
        a.save()
Example #24
import functools
import multiprocessing
import pathlib
import re
from typing import Dict

import requests

from utils import downloader, logger
from . import reader as flickr_reader

logger = logger.get_logger('flickr.utils')


def get_author_id(url):
    """Get flick author id from url.
    Parameters
    ----------
    url : str
        Image url.
    Returns
    -------
    str
        Flickr author id
    """
    regex = re.compile(r'(photos)(\/)([a-zA-Z0-9]+([@_ -]?[a-zA-Z0-9])*)(\/)')
    return regex.search(url).group(3)


def get_author_info(url, api_url, api_key):
    """Get flick author info from url.
Example #25
                'Dr',
                'Major',
                'Rev',
                'Sir',
                'Jonkheer',
                'Dona'
            ], 'Rare') \
            .replace(['Mlle', 'Ms', 'Mme'], ['Miss', 'Miss', 'Mrs'])
        self.test['Title'] = self.test['Title'].map(title_mapping).fillna(0)
        self.train['Title'] = self.train['Title']
        self.test['Title'] = self.test['Title']


train = None
test = None


@click.command()
@click.argument('train_file', type=click.Path(exists=True))
@click.argument('test_file', type=click.Path(exists=True))
@click.option('--force', is_flag=True)
def main(train_file, test_file, force):
    global train
    global test
    train = pd.read_feather(train_file)
    test = pd.read_feather(test_file)
    features = list(get_features(globals()))
    generate_features(features, force)


VERSION = "0.0.1"

if __name__ == '__main__':
    create_logger(VERSION)
    get_logger(VERSION).info("Start feature generation.")
    main()
Example #26
def identity_loader(app):
    @identity_loaded.connect_via(app)
    def on_identity_loaded(sender, identity):
        # Set the identity user object
        identity.user = current_user
        # Add the UserNeed to the identity
        if hasattr(current_user, 'id'):
            identity.provides.add(UserNeed(current_user.id))
        # Assuming the User model has a list of roles, update the
        # identity with the roles that the user provides
        if hasattr(current_user, 'role_id'):
            identity.provides.add(
                RoleNeed(Role.get(current_user.role_id, 'staff')))


record_logger = get_logger('record')


def request_record(app):
    @app.before_request
    def record():
        record_logger.info(request.path)


def permission_check(app):
    @app.before_request
    def check_perm():
        is_perm_required = Permission.query.filter_by(
            content_type=request.path).first()
        if is_perm_required:
            permissions = set()
Example #27
def main():
    args = parser.parse_args()
    config = get_config(args.config)

    # CUDA configuration
    cuda = config['cuda']
    device_ids = config['gpu_ids']
    if cuda:
        os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(
            str(i) for i in device_ids)
        device_ids = list(range(len(device_ids)))
        config['gpu_ids'] = device_ids
        cudnn.benchmark = True

    # Configure checkpoint path
    checkpoint_path = os.path.join(
        'checkpoints', config['dataset_name'],
        config['mask_type'] + '_' + config['expname'])
    if not os.path.exists(checkpoint_path):
        os.makedirs(checkpoint_path)
    shutil.copy(args.config,
                os.path.join(checkpoint_path, os.path.basename(args.config)))
    writer = SummaryWriter(logdir=checkpoint_path)
    logger = get_logger(
        checkpoint_path)  # get logger and configure it at the first call

    logger.info("Arguments: {}".format(args))
    # Set random seed
    if args.seed is None:
        args.seed = random.randint(1, 10000)
    logger.info("Random seed: {}".format(args.seed))
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    if cuda:
        torch.cuda.manual_seed_all(args.seed)

    # Log the configuration
    logger.info("Configuration: {}".format(config))

    try:  # for unexpected error logging
        # Load the dataset
        logger.info("Training on dataset: {}".format(config['dataset_name']))
        train_dataset = Dataset(data_path=config['train_data_path'],
                                with_subfolder=config['data_with_subfolder'],
                                image_shape=config['image_shape'],
                                random_crop=config['random_crop'])
        # val_dataset = Dataset(data_path=config['val_data_path'],
        #                       with_subfolder=config['data_with_subfolder'],
        #                       image_size=config['image_size'],
        #                       random_crop=config['random_crop'])
        train_loader = torch.utils.data.DataLoader(
            dataset=train_dataset,
            batch_size=config['batch_size'],
            shuffle=True,
            num_workers=config['num_workers'])
        # val_loader = torch.utils.data.DataLoader(dataset=val_dataset,
        #                                           batch_size=config['batch_size'],
        #                                           shuffle=False,
        #                                           num_workers=config['num_workers'])

        # Define the trainer
        trainer = Trainer(config)
        logger.info("\n{}".format(trainer.netG))
        logger.info("\n{}".format(trainer.localD))
        logger.info("\n{}".format(trainer.globalD))

        if cuda:
            trainer = nn.parallel.DataParallel(trainer, device_ids=device_ids)
            trainer_module = trainer.module
        else:
            trainer_module = trainer

        # Get the resume iteration to restart training
        start_iteration = trainer_module.resume(
            config['resume']) if config['resume'] else 1

        iterable_train_loader = iter(train_loader)

        time_count = time.time()

        for iteration in range(start_iteration, config['niter'] + 1):
            try:
                ground_truth = next(iterable_train_loader)
            except StopIteration:
                iterable_train_loader = iter(train_loader)
                ground_truth = next(iterable_train_loader)

            # Prepare the inputs
            bboxes = random_bbox(config, batch_size=ground_truth.size(0))
            x, mask = mask_image(ground_truth, bboxes, config)
            if cuda:
                x = x.cuda()
                mask = mask.cuda()
                ground_truth = ground_truth.cuda()

            ###### Forward pass ######
            compute_g_loss = iteration % config['n_critic'] == 0
            losses, inpainted_result, offset_flow = trainer(
                x, bboxes, mask, ground_truth, compute_g_loss)
            # Scalars from different devices are gathered into vectors
            for k in losses.keys():
                if not losses[k].dim() == 0:
                    losses[k] = torch.mean(losses[k])

            ###### Backward pass ######
            # Update D
            trainer_module.optimizer_d.zero_grad()
            losses['d'] = losses[
                'wgan_d'] + losses['wgan_gp'] * config['wgan_gp_lambda']
            losses['d'].backward()
            trainer_module.optimizer_d.step()

            # Update G
            if compute_g_loss:
                trainer_module.optimizer_g.zero_grad()
                losses['g'] = losses['l1'] * config['l1_loss_alpha'] \
                              + losses['ae'] * config['ae_loss_alpha'] \
                              + losses['wgan_g'] * config['gan_loss_alpha']
                losses['g'].backward()
                trainer_module.optimizer_g.step()

            # Log and visualization
            log_losses = ['l1', 'ae', 'wgan_g', 'wgan_d', 'wgan_gp', 'g', 'd']
            if iteration % config['print_iter'] == 0:
                time_count = time.time() - time_count
                speed = config['print_iter'] / time_count
                speed_msg = 'speed: %.2f batches/s ' % speed
                time_count = time.time()

                message = 'Iter: [%d/%d] ' % (iteration, config['niter'])
                for k in log_losses:
                    v = losses.get(k, 0.)
                    writer.add_scalar(k, v, iteration)
                    message += '%s: %.6f ' % (k, v)
                message += speed_msg
                logger.info(message)

            if iteration % (config['viz_iter']) == 0:
                viz_max_out = config['viz_max_out']
                if x.size(0) > viz_max_out:
                    viz_images = torch.stack([
                        x[:viz_max_out], inpainted_result[:viz_max_out],
                        offset_flow[:viz_max_out]
                    ],
                                             dim=1)
                else:
                    viz_images = torch.stack(
                        [x, inpainted_result, offset_flow], dim=1)
                viz_images = viz_images.view(-1, *list(x.size())[1:])
                vutils.save_image(viz_images,
                                  '%s/niter_%03d.png' %
                                  (checkpoint_path, iteration),
                                  nrow=3 * 4,
                                  normalize=True)

            # Save the model
            if iteration % config['snapshot_save_iter'] == 0:
                trainer_module.save_model(checkpoint_path, iteration)

    except Exception as e:  # for unexpected error logging
        logger.error("{}".format(e))
        raise e
Example #28
    from builtins import map
    from builtins import object
    reload(sys)
    sys.setdefaultencoding('utf8')

import requests
import os
import configparser
import argparse
import re
from collections import OrderedDict
from bs4 import BeautifulSoup
import logging

import utils.logger as log_manager
logger = log_manager.get_logger(__name__)
# downgrading logging level for requests
logging.getLogger("requests").setLevel(logging.WARNING)


#################################-MAIN CLASSES-###########################################
class PacktAccountData(object):
    """Contains all needed urls, creates a http session and logs int your account"""
    def __init__(self, cfgFilePath):
        self.cfgFilePath = cfgFilePath
        self.configuration = configparser.ConfigParser()
        if not self.configuration.read(self.cfgFilePath):
            raise configparser.Error('{} file not found'.format(
                self.cfgFilePath))
        self.bookInfoDataLogFile = self.__getEbookExtraInfoLogFilename()
        self.packtPubUrl = "https://www.packtpub.com"
Example #29
def run(args):
    logger = get_logger()

    news_list_path = "./results/dataset/news_list_%s.json" % args.target

    if args.list_crawl_again and path.exists(news_list_path):
        os.remove(news_list_path)

    if not path.exists(news_list_path):
        logger.info("[Crawl::News List] Crawling news list.")

        list_parser = NewsListParser(logger)

        if args.list_start:
            start_date = datetime.strptime(args.list_start, "%Y%m%d")
            parsed_list = list_parser.parse_until(args.limit, start_date)

        else:
            parsed_list = list_parser.parse_until(args.limit)

        f = open(news_list_path, 'w')
        f.write(json.dumps(parsed_list))
        f.close()

    else:
        logger.info(
            "[Crawl] Using existing news list. Please add --force flag to re-crawl"
        )

    news_crawler = NewsParser(logger)

    f = open(news_list_path, 'r')
    news_list = json.loads(f.read())
    f.close()

    start_index = 0

    if args.info_start:
        start_index = news_list.index(args.info_start)

    total_amount = len(news_list) - start_index

    for news_index in range(start_index, len(news_list)):
        news = news_list[news_index]

        logger.info('[Crawl::News Info] Parsing info of %s (%d / %d)' %
                    (news, news_index, total_amount))

        file_location = "./results/dataset/%s/%s.json" % (args.target, news)

        if path.exists(file_location):
            logger.info(
                "[Crawl::News Info] Skipping already crawled news: %s" % news)
            continue

        news_info = news_crawler.parse(news)

        if news_info is None:
            continue

        file = open(file_location, "w")
        file.write(json.dumps(news_info))
        file.close()
Example #30
class ClipboardWriter(object):
    # Clipboard's timer countdown
    CLIP_TIMER = 1
    logger = get_logger('ClipboardWriter')

    def __init__(self, filepath):
        self._filepath = filepath
        self._temp_clip = ""
        # clear clipboard
        self.root = tk.Tk()
        self.root.withdraw()
        self.root.selection_clear()

    def run(self):
        """Run clipboard writer.
        Read and write url from clipboard every CLIP_TIMER tick.

        """
        try:
            while True:
                if self.is_has_new_clipboard(self._temp_clip):
                    url = self.read_clipboard()
                    self.logger.info("Has new url: %s", url)
                    self.write_to_file(url)
                self.logger.info("Waiting...")
                time.sleep(self.CLIP_TIMER)
        except KeyboardInterrupt:
            pass

    def is_has_new_clipboard(self, temp_clip):
        """Detech new url from clipboard.

        Parameters
        ----------
        temp_clip : str
            Url from clipboard.
        """
        try:
            return temp_clip != self.root.selection_get()
        except tk.TclError:
            return False

    def read_clipboard(self):
        """Set new clipboard to temp

        Returns
        -------
        _temp_clip : str
            Temporary clipboard
        """
        self._temp_clip = self.root.selection_get()
        return self._temp_clip

    def write_to_file(self, url):
        """Write url to file  

        Parameters
        ----------
        url : str
            Url
        """
        try:
            with open(self._filepath, 'a') as f:
                f.write(url + '\n')
        except Exception as ex:
            self.logger.error(ex)
Example #31
import argparse
import inspect
import os
import sys

CURRENTDIR = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
PARENTDIR = os.path.dirname(CURRENTDIR)
sys.path.insert(0, PARENTDIR)

import django
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
django.setup()

from django.conf import settings
from redis import StrictRedis
from rq import Connection, Queue, Worker

from utils.logger import get_logger

Logger = get_logger(os.path.basename('worker'))


def main():
    """
        Init workers
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--queues', nargs='+', type=unicode, dest='queues', required=True)
    args = parser.parse_args()

    with Connection(connection=StrictRedis(**settings.REDIS)):

        qs = map(Queue, args.queues) or [Queue()]
        worker = Worker(qs)
        worker.work()
Example #32
#!/usr/bin/env python
import Command
import batoceraFiles
from generators.Generator import Generator
from utils.logger import get_logger
import controllersConfig
import os

eslog = get_logger(__name__)
BIN_PATH = "/userdata/bios/pico-8/pico8"
CONTROLLERS = "/userdata/system/.lexaloffle/pico-8/sdl_controllers.txt"


# Generator for the official pico8 binary from Lexaloffle
class LexaloffleGenerator(Generator):
    def generate(self, system, rom, playersControllers, gameResolution):
        if not os.path.exists(BIN_PATH):
            eslog.error(
                "Lexaloffle official pico-8 binary not found at {}".format(
                    BIN_PATH))
            return -1
        if not os.access(BIN_PATH, os.X_OK):
            eslog.error("File {} is not set as executable".format(BIN_PATH))
            return -1

        # the command to run
        commandArray = [BIN_PATH]
        commandArray.extend(["-desktop",
                             "/userdata/screenshots"])  # screenshots
        commandArray.extend(["-windowed", "0"])  # full screen
        # Display FPS
Example #33
PARENTDIR = os.path.dirname(CURRENTDIR)
sys.path.insert(0, PARENTDIR)

import django
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
django.setup()

from django.conf import settings

from adapters.services.storage.abstract import StorageServiceException
from factory.factory import ImplementationFactory
from utils import utils
from utils.logger import get_logger


Logger = get_logger(os.path.basename(__file__))

CHARSETS = ('iso-8859-1', 'iso-8859-15', 'utf-8', 'ascii', 'utf-16', 'windows-1252', 'cp850', 'iso-8859-11')
HOST = settings.EMAIL_FETCHER['host']
PORT = settings.EMAIL_FETCHER['port']
USER = settings.EMAIL_FETCHER['login']
PASS = settings.EMAIL_FETCHER['pass']


def push_email(uid, email, queue):
    """
        Push to Storage Service
        Add a worker task
    """
    filename = hashlib.sha256(email).hexdigest()
    Logger.debug(unicode('New mail - UID %s - HASH %s' % (uid, filename)), extra={'hash': filename})
Example #34
    with open(args.config) as fp:
        cfg = yaml.load(fp)

    if cfg['resume']:
        clean_dir(cfg['checkpoint_dir'])

    print(cfg)

    # Setup the log
    run_id = random.randint(1, 100000)
    logdir = os.path.join(cfg['checkpoint_dir'],
                          os.path.basename(args.config)[:-4] + str(run_id))
    ensure_dir(logdir)
    print("RUNDIR: {}".format(logdir))
    shutil.copy(args.config, logdir)
    logger = get_logger(logdir)
    logger.info("Let the games begin")

    # Setup the Visualizer
    if cfg['vis']['use']:
        vis = Visualizer(cfg['vis']['env'])
    else:
        vis = None

    torch.multiprocessing.freeze_support()
    if cfg['model'] in ['rescan', 'did_mdn']:
        train(cfg, logger, vis)
    elif cfg['model'] in ['pix2pix']:
        train_gan(cfg, logger, vis)
Example #35
from django.contrib.auth.models import User
from django.core.exceptions import (FieldError, ObjectDoesNotExist,
                                    ValidationError)
from django.core.validators import validate_ipv46_address
from django.db import IntegrityError
from django.db.models import Count, FieldDoesNotExist, Q
from django.forms.models import model_to_dict

from abuse.models import (AbusePermission, Category, MassContact,
                          MassContactResult, Profile, Report, ReportItem,
                          Resolution, Tag, Ticket, Operator, Role)
from factory.factory import TicketSchedulingAlgorithmFactory
from utils import logger, utils
from worker import database

Logger = logger.get_logger(__name__)
CRYPTO = utils.Crypto()

DASHBOARD_STATUS = {
    'idle': ('Open', 'Reopened'),
    'waiting': ('WaitingAnswer', 'Paused'),
    'pending': ('Answered', 'Alarm'),
}

CHECK_PERM_DEFENDANT_LEVEL = ('Beginner', 'Advanced', 'Expert')
MASS_CONTACT_REQUIRED = ('{{ service }}', '{{ publicId }}', '{% if lang ==')

SEARCH_EXTRA_FIELDS = ['defendantTag', 'providerTag', 'defendant', 'defendantCountry', 'providerEmail', 'item', 'fulltext']
SEARCH_REPORT_FIELDS = list(set([f.name for f in Report._meta.fields] + SEARCH_EXTRA_FIELDS + ['reportTag']))
SEARCH_TICKET_FIELDS = list(set([f.name for f in Ticket._meta.fields] + SEARCH_EXTRA_FIELDS + ['ticketTag', 'attachedReportsCount', 'ticketIds']))
Example #36
import time
from console.models import CleanTask, SurveyOverview
from console.tasks import etl_start
# from console.ssh_linux import con_linux
from utils.cache.cache_utils import NormalHashSet
from survey.models import Survey, SurveyQuestionRelation
from front.models import PeopleSurveyRelation, UserQuestionAnswerInfo
from utils.logger import get_logger
from wduser.models import People, Organization, EnterpriseAccount, EnterpriseInfo
from assessment.models import AssessProject
from research.models import ResearchModel, ResearchDimension, ResearchSubstandard
from console.write_hive_utils import (
    etl_people_info, etl_answer_question_info, etl_company_info,
    etl_dimension_substandard_info, etl_model_info, etl_org_info, etl_people_survey_result,
    etl_project_info, etl_survey_info, etl_write_file, utf8_more_info,
    etl_write_people_info_file, etl_write_sign_info_file,
)

logger = get_logger("etl")


class EtlBase(object):
    u"""ETL基类"""
    STATUS_WAITING = u"waiting"
    STATUS_ONGOING = u"ongoing"
    STATUS_STOP = u"stop"
    STATUS_FINISHED = u"finished"
    STATUS_FAILED = u"failed"

    def __init__(self, etl_key, **kwargs):
        self.etl_key = etl_key
        self.kwargs = kwargs
        self.etl_store = NormalHashSet(etl_key)
Example #37
# -*- coding: utf-8 -*-
import cx_Oracle
from pyhive import hive

from utils.decorators import static_vars
from utils.load_config import load_config
from utils.logger import get_logger

logging = get_logger()


@static_vars(oracle_conn=None, hive_conn=None)
def get_connection(db_type='oracle'):

    logging.info("Connecting to db.")
    config = load_config()

    if db_type == 'oracle':
        if get_connection.oracle_conn is None:
            config = config['oracle_db']
            dsn_config = config['dsn']
            dsn_tns = cx_Oracle.makedsn(**dsn_config)

            connection_config = config['connection']
            connection_config['dsn'] = dsn_tns
            get_connection.oracle_conn = cx_Oracle.connect(**connection_config)
        return get_connection.oracle_conn
    elif db_type == 'hive':
        if get_connection.hive_conn is None:
            config = config['hive_db']
            get_connection.hive_conn = hive.Connection(**config)
Example #38
# -*- coding: utf-8 -*-
# Time   : 2019/10/30 10:44 PM
# Author : Eylaine
# File   : install.py

from threading import Thread

from utils.logger import get_logger
from utils.adb import Adb
from config import Config
from setup.step import Step
from setup import Android
from utils.shell import Shell

logger = get_logger("install")


class Install(Thread):
    def __init__(self, device_id, apk_name, brand):
        super(Install, self).__init__()
        self.device_id = device_id
        self.apk_name = apk_name
        self.brand = brand
        self.package = Config.get_package_info()[0]

    def run(self) -> None:
        Adb.install(self.device_id, self.apk_name)
        Shell.start_appium()
        driver = Android(self.device_id).get_driver()
        Step.start_page(driver)
        Step.permission(driver)
Example #39
        else:
            logger.warn(message, 'outdated_dependency')


def _check_dependency_versions():
    for name, version in [('numpy',  '1.10'),
                          ('sympy',  '0.7.6'),
                          ('jinja2', '2.7')]:
        _check_dependency_version(name, version)

_check_dependency_versions()

# Initialize the logging system
BrianLogger.initialize()
logger = get_logger(__name__)

# Check the caches
def _get_size_recursively(dirname):
    import os
    total_size = 0
    for dirpath, _, filenames in os.walk(dirname):
        for fname in filenames:
            total_size += os.path.getsize(os.path.join(dirpath, fname))
    return total_size

#: Stores the cache directory for code generation targets
_cache_dirs_and_extensions = {}

def check_cache(target):
    cache_dir, _ = _cache_dirs_and_extensions.get(target, (None, None))
Example #40
    def open_spider(self, spider):
        self.data = []
        self.dbUtils = DbHelper()
        self.logger = get_logger(self.__class__.__name__)