예제 #1
0
from collections import namedtuple
# Sequence lives in collections.abc since Python 3.3 and was *removed* from
# collections in Python 3.10 -- import from abc with a fallback for old
# interpreters so this module keeps working on both.
try:
    from collections.abc import Sequence
except ImportError:  # Python < 3.3
    from collections import Sequence
from functools import wraps
from time import time, sleep

from PIL import ImageOps, Image

from helpers import setup_logger

logger = setup_logger(__name__, "info")

# (class name, function name, UI element name) triples that have already been
# warned about, so each suppressed call is logged only once per process.
to_be_foreground_warnings = []


def to_be_foreground(func):
    """Decorator: execute the wrapped UI-element method only when the element
    is in the foreground (``self.in_foreground``) or the caller explicitly
    passes ``bypass_to_be_foreground=True``; otherwise skip the call and log
    a one-time warning."""
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        # The bypass flag is popped so the wrapped function never sees it.
        bypass = False
        if "bypass_to_be_foreground" in kwargs:
            if kwargs.pop("bypass_to_be_foreground"):
                bypass = True
        if bypass or self.in_foreground:
            return func(self, *args, **kwargs)
        else:
            # Deduplicate warnings: each (class, method, element-name) triple
            # is appended to the module-level list and logged at most once.
            data = (self.__class__.__name__, func.__name__,
                    getattr(self, "name", None))
            if data not in to_be_foreground_warnings:
                to_be_foreground_warnings.append(data)
                logger.warning("{}.{} (UI el {}) was prevented from being executed " \
                               "by to_be_foreground!".format(*data) )
    # NOTE(review): "return wrapper" is not visible in this view -- confirm the
    # decorator returns the wrapper below, otherwise it would decorate to None.
예제 #2
0
파일: server.py 프로젝트: Issam-b/triple-T
#! /usr/bin/python3

import socket
import ssl
from helpers import setup_logger
from helpers import game_config

logger = setup_logger('settings.conf', 'server')
KEY = game_config.get('SSL', 'KEY')
CERT = game_config.get('SSL', 'CERT')


class Server:
    """Server class to handle connection related calls."""
    def __init__(self):
        """Initializes the server object with a server socket."""
        # AF_INET = IPv4, SOCK_STREAM = TCP.  This is the plain socket; it is
        # wrapped with SSL later, in bind_server().
        self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

    def bind_server(self, host, port):
        """Bind the server socket to ``(host, port)``, start listening
        (backlog of 5) and wrap the listening socket in SSL using the
        module-level KEY/CERT.  Loops until the bind succeeds."""
        while True:
            try:
                self.server_socket.bind((host, int(port)))
                logger.info('Bind successful to port ' + str(port))
                self.server_socket.listen(5)
                # NOTE(review): ssl.wrap_socket is deprecated since Python 3.7
                # (removed in 3.12) -- consider SSLContext.wrap_socket; confirm
                # the target Python version before changing.
                self.socket_ssl = ssl.wrap_socket(self.server_socket,
                                                  keyfile=KEY,
                                                  certfile=CERT,
                                                  server_side=True)
                logger.info('Listening on port ' + str(port))
                break
                # NOTE(review): the matching "except" clause for this try is
                # outside this view -- retry behaviour depends on it.
예제 #3
0
from helpers import setup_logger
from pydbus import SystemBus

from operator import itemgetter

logger = setup_logger(__name__, "warning")

# See D-Bus documentation here:
#     https://www.freedesktop.org/wiki/Software/systemd/dbus/

bus = SystemBus()
systemd = bus.get(".systemd1")


def list_units(unit_filter_field=None, unit_filter_values=[]):
    """
    This function lists units, optionally filtering them by a certain parameter.
    It returns a list of dictionaries, with following fields:

    * ``"name"``: Full unit name, in the "name.type" format - i.e. "zpui.service"
    * ``"basename"``: Unit name without unit type - i.e. "zpui"
    * ``"unit_type"``: Unit type - i.e. "service", "socket" or "target"
    * ``"description"``: Unit's human-readable description
    * ``"active"``: Whether the unit is now active - i.e. "active", "failed", "inactive"
    * ``"load"``: Whether the unit is now loaded - i.e. "loaded", "masked", "not found"
    * ``"sub"``: Type-specific unit state - i.e.  "running", "listening", "mounted"
    * ``"follower"``: A unit that is being followed in its state by this unit, if there is any, otherwise an empty string.
    * ``"unit_object_path"``: The unit object path
    * ``"job_queued"``: If there is a job queued for the job unit - the numeric job id, 0 otherwise
    * ``"job_object_path"``: The job object path
    * ``"job_type"``: The job type as string
예제 #4
0
#! /usr/bin/python3

import threading
import time
import sys
from helpers import setup_logger
from helpers import game_config
from helpers import cmd
from server import Server
from server_player import Player
import socket
import ssl

logger = setup_logger("settings.conf", "server")
# Game-wide tunables, read once at import time from settings.conf.
ECHO_FREQUENCY = int(game_config.get("OTHER", "ECHO_FREQUENCY"))
TIMEOUT = int(game_config.get("OTHER", "TIMEOUT"))
echo_enable = game_config.get("OTHER", "echo")
server2_address = game_config.get("connection", "address2")
server2_port = game_config.get("connection", "port2")
# The comparison already yields a bool; "True if ... else False" was redundant.
DEBUG = game_config.get("DEBUG", "DEBUG") == "True"
message_length = int(game_config.get("OTHER", "message_length"))
CERT = game_config.get("SSL", "CERT")


class GameServer(Server):
    """Handle game start and clients management."""
    # NOTE(review): these are *class-level* (shared) attributes, not
    # per-instance state -- every GameServer instance sees the same lists.
    run_once = False  # guard flag; exact use is not visible in this view
    players_waitlist = []  # players queued for matchmaking (presumably)
    active_players = []  # currently connected players (presumably)
    all_players_count = 0  # running count of players; verify against usage
    active_games = []  # games currently in progress (presumably)
예제 #5
0
import logging
from helpers import setup_logger

from image_category_tokenizer import ImageCategoryTokenizer
from image_category_vectorizer import ImageCategoryVectorizer

from generation_environment import GenerationEnvironment, WikipediaBigGANGenerationEnviornment

setup_logger()
logger = logging.getLogger(__name__)


class ImageCategories:
    def __init__(self, tokenizer=None, vectorizer=None, gen_env=None):
        """Set up the generation environment.

        gen_env -- a GenerationEnvironment instance; when omitted, a
        WikipediaBigGANGenerationEnviornment is created as the default.
        Raises ValueError when gen_env is given but is not a
        GenerationEnvironment.

        NOTE(review): tokenizer/vectorizer handling is not visible in this
        view; the body continues past it.
        """
        # NOTE(review): message says "Lyric class" -- looks copied from
        # another module; confirm.
        logger.debug('Started initialization of Lyric class.')

        if not gen_env:
            logger.info(
                'No passed enviornment class. Defaulting to Wikipedia2Vec and BigGAN.'
            )

            gen_env = WikipediaBigGANGenerationEnviornment()

        else:
            if not isinstance(gen_env, GenerationEnvironment):
                logger.error(
                    'Argument gen_env is not a GenerationEnvironment instance. You must pass the appropriate object here.'
                )
                raise ValueError('Not a Generation Environment instance.')
            else:
                logger.info('Custom enviornment class passed.')
예제 #6
0
        return False


# Return a list of image names that should be uploaded right now
def uploadable_images():
    """Return the names of files in config["image_folder"] that pass
    valid_image().

    Creates the image folder first if it does not exist, so a fresh
    install simply yields an empty list instead of crashing.
    """
    # makedirs(..., exist_ok=True) replaces the old "is the name in
    # os.listdir()?" check: it is race-free, independent of the current
    # working directory's contents, and creates missing parents too.
    os.makedirs(config["image_folder"], exist_ok=True)

    images = os.listdir(config["image_folder"])
    return [image for image in images if valid_image(image)]


if __name__ == "__main__":
    log = helpers.setup_logger(os.path.basename(__file__))
    log.info("Starting execution")

    if os.path.exists("/boot/trailpi_config.json"):
        config_file = "/boot/trailpi_config.json"
    else:
        config_file = "trailpi_config.json"

    config = json.load(open(config_file))

    while True:
        for image_name in uploadable_images():
            response = upload_image(image_name)

            # Delete image if successfully uploaded
            if response.status_code == 200:
예제 #7
0
파일: app.py 프로젝트: soffokl/api
from functools import wraps

from ip import mask_ip_partially
from models import db, Node, Session, NodeAvailability, Identity
from datetime import datetime
import json
import helpers
import logging

from queries import filter_active_nodes
from signature import (recover_public_address, ValidationError as
                       SignatureValidationError)
import base64
import settings

helpers.setup_logger()
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://{}:{}@{}/{}'.format(
    settings.USER, settings.PASSWD, settings.DB_HOST, settings.DB_NAME)

migrate = Migrate(app, db)


def is_json_dict(data):
    """Return True if *data* is a JSON document whose top level is an
    object (dict); False for any other JSON value, malformed JSON, or
    input that is not str/bytes at all."""
    try:
        parsed = json.loads(data)
    except (ValueError, TypeError):
        # ValueError: malformed JSON.  TypeError: data is not str/bytes
        # (e.g. None) -- previously this crashed instead of returning False.
        return False
    return isinstance(parsed, dict)
예제 #8
0
파일: tryKeras.py 프로젝트: cs16s027/DL
def trainModel():
    """Parse CLI arguments, load the dataset, then train the Keras CNN
    with per-epoch shuffling, on-the-fly augmentation, train/validation
    logging, checkpointing on best validation loss and early stopping.

    Returns 1 when early stopping triggers; otherwise returns None after
    num_epochs epochs.
    """
    # Parse args
    parser = argparse.ArgumentParser(description='Train the CNN')
    parser.add_argument('--expt_dir', default='./logs',
                        help='save dir for experiment logs')
    parser.add_argument('--train', default='./data',
                        help='path to training set')
    parser.add_argument('--val', default='./data',
                        help='path to validation set')
    parser.add_argument('--test', default='./data',
                        help='path to test set')
    parser.add_argument('--save_dir', default='./models',
                        help='path to save model')
    parser.add_argument('--arch', default='models/cnn.json',
                        help = 'path to model architecture')
    parser.add_argument('--model_name', default = 'model',
                        help = 'name of the model to save logs, weights')
    parser.add_argument('--lr', default = 0.001,
                        help = 'learning rate')
    parser.add_argument('--init', default = '1',
                        help = 'initialization')
    parser.add_argument('--batch_size', default = 20,
                        help = 'batch_size')
    args = parser.parse_args()

    # Load data
    train_path, valid_path, test_path = args.train, args.val, args.test
    logs_path = args.expt_dir
    model_path, model_name = args.save_dir, args.model_name
    model_path = os.path.join(model_path, model_name)
    if not os.path.isdir(model_path):
        os.mkdir(model_path)
    # CLI values arrive as strings; coerce to the numeric types used below.
    lr, batch_size, init = float(args.lr), int(args.batch_size), int(args.init)

    data = loadData(train_path, valid_path, test_path)
    train_X, train_Y, valid_X, valid_Y, test_X, test_Y = data['train']['X'], data['train']['Y'],\
                                                         data['valid']['X'], data['valid']['Y'],\
                                                         data['test']['X'], data['test']['Y'],


    # Logging: one train and one validation log file per model name.
    train_log_name = '{}.train.log'.format(model_name)
    valid_log_name = '{}.valid.log'.format(model_name)
    train_log = setup_logger('train-log', os.path.join(logs_path, train_log_name))
    valid_log = setup_logger('valid-log', os.path.join(logs_path, valid_log_name))

    # Train
    num_epochs = 500
    num_batches = int(float(train_X.shape[0]) / batch_size)
    steps = 0
    patience = 100
    early_stop=0

    model = getModel(lr)
    # Seed the history with +inf so the first validation loss always counts
    # as an improvement.
    loss_history = [np.inf]
    for epoch in range(num_epochs):
        print 'Epoch {}'.format(epoch)
        steps = 0
        # Reshuffle the training set (in lockstep with the labels) each epoch.
        indices = np.arange(train_X.shape[0])
        np.random.shuffle(indices)
        train_X, train_Y = train_X[indices], train_Y[indices]
        for batch in range(num_batches):
            start, end = batch * batch_size, (batch + 1) * batch_size
            # Augment each mini-batch on the fly; inputs are reshaped to
            # 1x28x28 below, so presumably MNIST-like images -- confirm.
            x, y = Augment(train_X[range(start, end)]).batch, train_Y[range(start, end)]
            model.fit(x.reshape((-1, 1, 28, 28)), y, batch_size = batch_size, verbose = 0)
            steps += batch_size
            # Evaluate/log when steps reaches a multiple of the training-set
            # size (at most once per epoch).
            if steps % train_X.shape[0] == 0 and steps != 0:
                train_loss, train_acc = model.evaluate(train_X.reshape((-1, 1, 28, 28)), train_Y)
                train_log.info('Epoch {}, Step {}, Loss: {}, Accuracy: {}, lr: {}'.format(epoch, steps, train_loss, train_acc, lr))
                valid_loss, valid_acc = model.evaluate(valid_X.reshape((-1, 1, 28, 28)), valid_Y)
                valid_log.info('Epoch {}, Step {}, Loss: {}, Accuracy: {}, lr: {}'.format(epoch, steps, valid_loss, valid_acc, lr))
                # Checkpoint whenever validation loss hits a new best.
                if valid_loss < min(loss_history):
                    save_path = os.path.join(model_path, 'model')
                    model.save(save_path)
                    early_stop = 0
                # NOTE(review): this increment also runs right after the reset
                # above, so even an improving eval counts 1 toward patience --
                # confirm that is intended.
                early_stop += 1
                if (early_stop >= patience):
                    print "No improvement in validation loss for " + str(patience) + " steps - stopping training!"
                    print("Optimization Finished!")
                    return 1
                loss_history.append(valid_loss)
    print("Optimization Finished!")
#from dynamodb_json import json_util as json
import os
import boto3
from datetime import datetime
from helpers import setup_logger

logger = setup_logger()

dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table(os.environ["DYNAMODB_TABLE"])


def import_user(github_username, slack_id):
    """Create (or overwrite) the DynamoDB record for a user.

    github_username -- GitHub login, stored under 'ghusername'
    slack_id        -- Slack user id notifications are delivered to

    All event notifications start enabled, the reminder window defaults to
    hour 14 and the repo blacklist starts empty.
    """
    # Use the module-level logger instead of print so this message goes
    # through the configured logging setup like the rest of the module.
    logger.info('Executing import_user ...')
    user_json = {
        'ghusername': github_username,
        'slack_id': slack_id,
        'events_enabled': {
            'pull_request': True,
            'issue_comment': True,
            'pull_request_review': True,
            'pr_reminders': True
        },
        'reminder_window_hour': 14,
        'repo_blacklist': []
    }

    # put_item overwrites any existing item with the same key.
    table.put_item(Item=user_json)


def delete_user(slack_id):
예제 #10
0
def trainModel():
    """Parse CLI arguments, load the data and the CNN architecture (JSON),
    then train the TensorFlow CNN with per-epoch shuffling, augmentation,
    train/validation logging, checkpointing on best validation loss and
    early stopping.

    Returns 1 when early stopping triggers; otherwise returns None after
    num_epochs epochs.
    """
    # Parse args
    parser = argparse.ArgumentParser(description='Train the CNN')
    parser.add_argument('--expt_dir',
                        default='./logs',
                        help='save dir for experiment logs')
    parser.add_argument('--train',
                        default='./data',
                        help='path to training set')
    parser.add_argument('--val',
                        default='./data',
                        help='path to validation set')
    parser.add_argument('--test', default='./data', help='path to test set')
    parser.add_argument('--save_dir',
                        default='./models',
                        help='path to save model')
    parser.add_argument('--arch',
                        default='models/cnn.json',
                        help='path to model architecture')
    parser.add_argument('--model_name',
                        default='model',
                        help='name of the model to save logs, weights')
    parser.add_argument('--lr', default=0.001, help='learning rate')
    parser.add_argument('--init', default='1', help='initialization')
    parser.add_argument('--batch_size', default=20, help='batch_size')
    args = parser.parse_args()

    # Load data
    train_path, valid_path, test_path = args.train, args.val, args.test
    logs_path = args.expt_dir
    model_path, model_arch, model_name = args.save_dir, args.arch, args.model_name
    model_path = os.path.join(model_path, model_name)
    if not os.path.isdir(model_path):
        os.mkdir(model_path)
    # CLI values arrive as strings; coerce to the numeric types used below.
    lr, batch_size, init = float(args.lr), int(args.batch_size), int(args.init)

    data = loadData(train_path, valid_path, test_path)
    train_X, train_Y, valid_X, valid_Y, test_X, test_Y = data['train']['X'], data['train']['Y'],\
                                                         data['valid']['X'], data['valid']['Y'],\
                                                         data['test']['X'], data['test']['Y'],

    # Load architecture (JSON description consumed by the CNN class)
    arch = loadArch(model_arch)

    # Logging: one train and one validation log file per model name.
    train_log_name = '{}.train.log'.format(model_name)
    valid_log_name = '{}.valid.log'.format(model_name)
    train_log = setup_logger('train-log',
                             os.path.join(logs_path, train_log_name))
    valid_log = setup_logger('valid-log',
                             os.path.join(logs_path, valid_log_name))

    # GPU config: allow TF to claim the entire GPU memory up front.
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1.0)

    # Train
    num_epochs = 100
    num_batches = int(float(train_X.shape[0]) / batch_size)
    steps = 0
    patience = 50
    early_stop = 0

    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as session:
        model = CNN(arch, session, logs_path, init, lr)
        # Seed with +inf so the first validation loss counts as an improvement.
        loss_history = [np.inf]
        for epoch in range(num_epochs):
            print 'Epoch {}'.format(epoch)
            steps = 0
            # Reshuffle the training set (in lockstep with labels) each epoch.
            indices = np.arange(train_X.shape[0])
            np.random.shuffle(indices)
            train_X, train_Y = train_X[indices], train_Y[indices]
            for batch in range(num_batches):
                start, end = batch * batch_size, (batch + 1) * batch_size
                x, y = Augment(train_X[range(start,
                                             end)]).batch, train_Y[range(
                                                 start, end)]
                try:
                    model.step(x, y)
                except MemoryError:
                    print 'Memory error in step'
                    exit()
                steps += batch_size
                # Evaluate/log when steps reaches a multiple of the
                # training-set size (at most once per epoch).
                if steps % train_X.shape[0] == 0 and steps != 0:
                    try:
                        train_loss, train_acc = testModel(
                            model, train_X, train_Y, batch_size)
                    except MemoryError:
                        print 'Memory error in test for train'
                        exit()
                    train_log.info(
                        'Epoch {}, Step {}, Loss: {}, Accuracy: {}, lr: {}'.
                        format(epoch, steps, train_loss, train_acc, model.lr))
                    try:
                        valid_loss, valid_acc = testModel(
                            model, valid_X, valid_Y, batch_size)
                    except MemoryError:
                        print 'Memory error in test for valid'
                        exit()
                    valid_log.info(
                        'Epoch {}, Step {}, Loss: {}, Accuracy: {}, lr: {}'.
                        format(epoch, steps, valid_loss, valid_acc, model.lr))
                    # Checkpoint whenever validation loss hits a new best.
                    if valid_loss < min(loss_history):
                        save_path = os.path.join(model_path, 'model')
                        model.save(save_path)
                        early_stop = 0
                    # NOTE(review): incremented even right after the reset, so
                    # an improving eval still counts 1 toward patience.
                    early_stop += 1
                    if (early_stop >= patience):
                        print "No improvement in validation loss for " + str(
                            patience) + " steps - stopping training!"
                        print("Optimization Finished!")
                        return 1
                    loss_history.append(valid_loss)
        print("Optimization Finished!")
예제 #11
0
menu_name = "WiFi repair"

from time import sleep

from ui import LoadingBar, PrettyPrinter as Printer, TextReader, Listbox, DialogBox
from helpers import setup_logger

from libs import dmesg, dkms_debug
from libs.rpi import vcgencmd, rpiinfo, config as rpi_config

import sdio_debug

logger = setup_logger(__name__, "debug")

# kmodpy is optional (it may be missing on dev machines or non-Linux
# systems); fall back to None so callers can feature-check before using it.
try:
    import kmodpy
except Exception:
    # "except Exception" instead of the previous bare "except:" so that
    # SystemExit/KeyboardInterrupt are no longer swallowed, while still
    # tolerating failure modes other than ImportError (e.g. errors raised
    # while the module loads its native library).
    kmodpy = None
    logger.exception("Kmodpy module not found!")

i = None
o = None

dkms_fail_text = """DKMS fail - driver not installed! Run:
   apt install --reinstall esp8089-dkms
to fix the problem (sorry, we can't do that automatically yet)."""

otp_fail_text = """Your Pi has an OTP fault!
Visit bit.ly/pi_bad_otp for more info.
Press LEFT to pick your hardware."""
예제 #12
0
    pretrained_path = args.pretrain

momentum = args.momentum
# Paths
train_path, valid_path, test_path = args.train, args.val, args.test
model_path = args.save_dir
logs_path = args.expt_dir

# Logging
train_log_name = '{}-{}-{}-{}-{}-{}-{}-{}.train.log'.format(
    num_hidden, ','.join([str(word) for word in sizes]), activation,
    output_choice, batch_size, loss, opt, lr)
valid_log_name = '{}-{}-{}-{}-{}-{}-{}-{}.valid.log'.format(
    num_hidden, ','.join([str(word) for word in sizes]), activation,
    output_choice, batch_size, loss, opt, lr)
train_log = setup_logger('train-log', os.path.join(logs_path, train_log_name))
valid_log = setup_logger('valid-log', os.path.join(logs_path, valid_log_name))
# Load data
data = loadData(train_path, valid_path, test_path)
train_X, train_Y, valid_X, valid_Y, test_X, test_Y = data['train']['X'], data['train']['Y'],\
                                                     data['valid']['X'], data['valid']['Y'],\
                                                     data['test']['X'], data['test']['Y'],

# Initialize network
np.random.seed(1234)
network = Network(num_hidden,
                  sizes,
                  activation_choice=activation,
                  output_choice=output_choice,
                  loss_choice=loss)
model_name = '{}-{}-{}-{}-{}-{}-{}-{}.npy'.format(
예제 #13
0
import pandas as pd
import pathlib
import scdb  # database class
import scwds  # wds class
import zipfile

WORK_DIR = str(pathlib.Path(__file__).parent.absolute())  # current script path

# Helper-file paths built with pathlib so they are portable; the previous
# "\\" string concatenation only produced valid paths on Windows.
default_chart_json = str(pathlib.Path(WORK_DIR) / "product_defaults.json")  # default chart info for specific products
products_to_merge_json = str(pathlib.Path(WORK_DIR) / "products_to_merge.json")  # products to be merged to a single IndicatorThemeID

# Products w/ mixed geographies need special handling of reference periods (can/prov/region - all data, others 2017+)
# Note only the master product id is included here when it is a merged product. TODO --> find a cleaner way to do this
mixed_geo_justice_pids = [35100177, 35100002, 35100026, 35100068]

logger = h.setup_logger(WORK_DIR,
                        "etl_log")  # set up logging to file and console

arg = arguments.argParser()  # get CLI arguments
arg_status = arg.check_valid_parse_args()
if arg_status != "":
    arg.show_help_and_exit_with_msg("\nArgument Error: " + arg_status)
start_date = arg.get_arg_value("start")
end_date = arg.get_arg_value("end")
prod_id = arg.get_arg_value("prodid")
insert_new_table = arg.get_arg_value("insert_new_table")
from_file = arg.get_arg_value("from_file")
use_json_meta = arg.get_arg_value("use_json_meta")
use_db_pass = arg.get_arg_value("use_db_pass")
min_ref_year = arg.get_arg_value("minrefyear")

if __name__ == "__main__":
예제 #14
0
from datetime import datetime, timedelta, time, date
import re
import logging
import helpers

# Supported date formats: each entry pairs a strptime pattern ('fmt') with a
# human-readable label ('txt').
is1_date = {'fmt': '%Y-%m-%d', 'txt': 'ISO1 YYYY-MM-DD'}
# Label fixed: the pattern is day-first (%d-%m-%Y), not YYYY-MM-DD.
is2_date = {'fmt': '%d-%m-%Y', 'txt': 'ISO2 DD-MM-YYYY'}
us_date = {'fmt': '%m-%d-%Y', 'txt': 'US MM-DD-YYYY'}

date_formats = (is1_date, is2_date, us_date)
default_format = date_formats[0]  # ISO YYYY-MM-DD is the preferred format

logger = logging.getLogger(__name__)
helpers.setup_logger(logger, stdout=True)


def validate_date_format(sdate, default_format):
    """Parse *sdate* into a ``datetime``, trying *default_format* first and
    then falling back to every other entry in ``date_formats``.

    default_format -- either a format dict ({'fmt': ..., 'txt': ...}) or a
                      bare strptime pattern string.
    Returns the parsed ``datetime``, or ``None`` when no format matches.
    """
    if isinstance(default_format, str):
        default_format = {'fmt': default_format}

    # Bug fix: the old loop ignored its loop variable and re-tried
    # default_format['fmt'] on every iteration, so the alternative formats
    # were never actually attempted.  Try the preferred format first (so
    # every previously-accepted input still parses), then the rest.
    candidates = [default_format]
    candidates.extend(f for f in date_formats if f is not default_format)

    valid_date = None
    for fmt in candidates:
        try:
            valid_date = datetime.strptime(sdate, fmt['fmt'])
            break
        except ValueError:
            pass

    return valid_date

예제 #15
0
파일: client.py 프로젝트: hpagseddy/ZPUI
from libs.matrix_client.matrix_client.client import MatrixClient
from libs.matrix_client.matrix_client.api import MatrixRequestError
from libs.matrix_client.matrix_client.user import User
from requests.exceptions import MissingSchema

from helpers import setup_logger

logger = setup_logger(__name__, 'info')


class Client():
    def __init__(self,
                 username,
                 password=None,
                 token=None,
                 server="matrix.org"):
        """Create a Matrix client for *username* on *server*.

        Either *password* (fresh login) or *token* (session reuse) is
        expected; the login/token handling continues past this view.
        """
        self.username = username
        self.server = server
        self.server_url = "https://{}".format(self.server)
        self.token = None
        # NOTE(review): logged_in is set True before any login attempt --
        # confirm it is corrected on failure further down.
        self.logged_in = True

        # Create the matrix client (password-based login path)
        # NOTE(review): prefer "token is None and password is not None";
        # kept as-is since this is a documentation-only pass.
        if token == None and password != None:
            self.matrix_client = MatrixClient(self.server_url)

            # Try logging in the user
            try:
                self.token = self.matrix_client.login(username=username,
                                                      password=password)
예제 #16
0
#! /usr/bin/python3

import ssl
import socket
import time
from helpers import setup_logger
import client_game as cg
import helpers as constants
from helpers import cmd
from helpers import game_config

logger = setup_logger("settings.conf", "client")
# Client-side connection constants, read once at import time from settings.conf.
CERT = game_config.get("SSL", "CERT")
# The comparison already yields a bool; "True if ... else False" was redundant.
DEBUG = game_config.get("DEBUG", "DEBUG") == "True"
message_length = int(game_config.get("OTHER", "message_length"))
TIMEOUT = int(game_config.get("OTHER", "TIMEOUT"))


class ClientConnection():
    """class to handle connection to server, receive and send"""
    # buffer for received data
    cmd_buffer = constants.cmd_buffer

    def __init__(self):
        """Initialize the connection object.

        NOTE(review): socket creation is commented out below -- confirm the
        socket is (re)created in client_connect() instead, otherwise no
        instance socket exists.
        """
        # 1st parameter: IPv4 networking
        # 2nd parameter: socket type, SOCK_STREAM = TCP
        # self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

    def client_connect(self):
        """connect to server function"""