Code example #1
    def test_log(self):
        logger = Logger()
        with pytest.raises(LoggerException) as ex:
            logger.log('wrong', 'test')
        assert "Module 'wrong' not allowed" in str(ex)

        assert 0 == self.db.fetch_one("SELECT COUNT(id) FROM logs")

        logger.log('main', 'test string')

        assert self.db.fetch_one("SELECT COUNT(id) FROM logs") == 1
        assert self.db.fetch_one("SELECT message FROM logs") == 'test string'
        assert self.db.fetch_one("SELECT module FROM logs") == 'main'
        assert int(self.db.fetch_one("SELECT `timestamp` FROM logs")) > 0
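
The assertions above pin down the Logger contract: log(module, message) must reject modules outside a whitelist and persist each entry with its module, message, and timestamp. A minimal sketch of a class that would satisfy this test — the whitelist, the Registry lookup, and the insert API are assumptions, not the project's actual code:

import time


class LoggerException(Exception):
    pass


class Logger:
    # Assumed whitelist; the real project presumably configures this elsewhere.
    ALLOWED_MODULES = ('main',)

    def __init__(self):
        # Assumption: the shared DB handle comes from the same Registry
        # singleton the hbs.py example below uses.
        self.db = Registry().get('db')

    def log(self, module, message):
        if module not in self.ALLOWED_MODULES:
            raise LoggerException("Module '%s' not allowed" % module)
        # Assumed schema and DB-API-style call; the project's wrapper
        # (which exposes fetch_one) may name its insert method differently.
        self.db.execute(
            "INSERT INTO logs (module, message, `timestamp`) VALUES (%s, %s, %s)",
            (module, message, int(time.time())))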
Code example #2
File: main.py Project: Sts0mrg0/ws-cli
project = sys.argv[1]  # assumed: `project` is used below but never defined in the excerpt
module_name = sys.argv[2]
action = sys.argv[3]

base = WSBase()

logging.captureWarnings(True)

try:
    module = base.load_module(module_name)
except WSException:
    print " ERROR: Module '{0}' not exists!".format(module_name)
    exit(0)

if module.logger_enable:
    Registry().set('logger',
                   Logger(module.logger_name, module.logger_have_items))

if module.time_count:
    print "Started module work at " + time.strftime("%Y-%m-%d %H:%M:%S",
                                                    time.localtime())
    start_time = int(time.time())

try:
    module.prepare(action)
except WSException as e:
    print " " + str(e)
    exit(0)

Projects = ProjectsModel()
if not Projects.exists(project) and action not in ('add', 'list'):
    print " ERROR: Project '{0}' not exists!".format(project)
Code example #3
File: create.py Project: inhueman/auto-project
import os.path
import json
import sys

from classes.Logger import Logger
from selenium import webdriver
from time import sleep

log = Logger().log

if not os.path.exists('config/config.json'):
    log("Configuration file not found! Exiting...", 'red')
    exit()

with open('config/config.json') as config_file:
    log("Configuration file loaded.", 'green')
    config = json.load(config_file)


def create(project_name):

    log("Project Name: %s" % project_name, 'cyan')
    project_path = "{}\{}".format(config['defaults']['project_path'],
                                  project_name)
    try:
        os.mkdir(project_path)
        log("Project folder created at path: %s" % project_path, 'green')
    except Exception as e:
        log(e, 'red')
        exit()
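
The log callable above takes a message and a colour name. A minimal sketch of what classes.Logger.Logger could look like for this call pattern, using raw ANSI escape codes — the colour table is an assumption; the real class may rely on a library such as colorama:

class Logger:
    # Hypothetical ANSI colour table covering the colours used above.
    _COLORS = {'red': '\033[31m', 'green': '\033[32m', 'cyan': '\033[36m'}
    _RESET = '\033[0m'

    def log(self, message, color=None):
        prefix = self._COLORS.get(color, '')
        suffix = self._RESET if prefix else ''
        print("%s%s%s" % (prefix, message, suffix))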
Code example #4
import os
from flask_cors import CORS
from flask_compress import Compress
from flask import Flask, send_from_directory

from classes.Logger import Logger

import views.api_calls
import views.index

logger = Logger().getLogger()

app = Flask(__name__)
CORS(app)
Compress(app)

app.config.from_pyfile('config.py')
app.secret_key = app.config['FLASK_SECRET_KEY']

app.register_blueprint(views.api_calls.app)
app.register_blueprint(views.index.app)


@app.after_request
def add_header(response):
    response.headers['X-Content-Type-Options'] = 'nosniff'
    response.headers['Content-Type'] = 'application/json'
    response.headers['Expires'] = '-1'
    response.headers['Pragma'] = 'no-cache'
    response.headers['X-XSS-Protection'] = '1; mode=block'
    response.headers[
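
The excerpt is cut off mid-statement. Whatever the remaining headers are, an @app.after_request hook only takes effect if it returns the response object. A minimal complete version of the same pattern — the Cache-Control header is an assumed stand-in for whatever the original sets:

@app.after_request
def add_header(response):
    response.headers['X-Content-Type-Options'] = 'nosniff'
    response.headers['Cache-Control'] = 'no-store'  # assumed final header
    return response  # the hook must return the (possibly modified) response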
Code example #5
import os
import json
from flask import escape
from threading import Thread
from datetime import datetime
from classes.Logger import Logger
from classes.MongoManager import MongoManager

logger = Logger().getLogger()


class Operations():
    def __init__(self):
        self.mongo_client = MongoManager().getClient()
        self.petrol_collec = MongoManager().getCollec()

    def get_all_information(self):
        try:
            result = json.dumps(list(self.petrol_collec.find({}, {'_id': 0})),
                                ensure_ascii=False)
            self.mongo_client.close()

            return result

        except Exception:
            logger.exception("get_all_information failed")
            return

    def get_filter_distinct(self, filter):
        try:
            result = json.dumps(list(
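
get_filter_distinct is likewise cut off. Judging from its name and the shape of get_all_information, a plausible completion serialises a MongoDB distinct() query — a sketch under that assumption, with the parameter renamed from filter to avoid shadowing the builtin:

    def get_filter_distinct(self, field):
        try:
            result = json.dumps(list(self.petrol_collec.distinct(field)),
                                ensure_ascii=False)
            self.mongo_client.close()
            return result
        except Exception:
            logger.exception("distinct query on %r failed", field)
            return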
Code example #6
File: ConDeBot.py Project: DasFranck/ConDeBot
class ConDeBot(discord.Client):
    def __init__(self, *args, **kwargs):
        self.NAME = config.NAME
        self.SHME = config.SHORT_NAME
        self.DESC = config.DESCRIPTION
        self.PREF = config.CMD_PREFIX
        self.DATA_PATH = config.DATA_PATH
        self.OPS_FILE_PATH = config.DATA_PATH + "/ops.json"
        self.CDB_PATH = "./"
        self.VERSION = "1.1"

        self._reserved_keywords = {}
        self._plugin_metadata = {}

        super().__init__(*args, **kwargs)

        self.logger = Logger()
        self.plugin_manager = PluginManager(self)
        self.plugin_manager.load_all()

    def add_plugin_metadata(self, metaname, metadata, plugin_name):
        try:
            self._plugin_metadata[plugin_name][metaname] = metadata
        except KeyError:
            self._plugin_metadata[plugin_name] = {metaname: metadata}

    def add_plugin_usage(self, usage, plugin_name):
        self.add_plugin_metadata("Usage", usage, plugin_name)

    def add_plugin_description(self, description, plugin_name):
        self.add_plugin_metadata("Description", description, plugin_name)

    def reserve_keyword(self, keyword, plugin_name):
        """ Manage reserved keywords """
        if keyword not in self._reserved_keywords:
            self._reserved_keywords[keyword] = [plugin_name]
        else:
            print("Warning: Conflicting with the plugins {} for the keyword {}".format(self._reserved_keywords[keyword], keyword))
            self._reserved_keywords[keyword].append(plugin_name)
        pass

    def unreserve_keyword(self, keyword):
        del self._reserved_keywords[keyword]

    def reserve_keywords(self, keyword_list, plugin_name):
        for keyword in keyword_list:
            self.reserve_keyword(keyword, plugin_name)

    def unreserve_keywords(self, keyword_list):
        for keyword in keyword_list:
            self.unreserve_keyword(keyword)

    # Aliases to self.logger functions
    def log_error_command(self, *args, **kwargs):
        self.logger.log_error_command(*args, **kwargs)

    def log_warn_command(self, *args, **kwargs):
        self.logger.log_warn_command(*args, **kwargs)

    def log_info_command(self, *args, **kwargs):
        self.logger.log_info_command(*args, **kwargs)

    async def on_ready(self):
        """Triggered when the bot is ready"""
        self.logger.info("Sucessfully connected as %s (%s)" % (self.user.name,
                                                               self.user.id))
        self.logger.info("------------")

    async def on_message(self, message):
        cmd = get_meta(self, message)
        if not cmd:
            return
        for plugin in self.plugins:
            self.loop.create_task(plugin.on_message(message, cmd))

    async def on_message_edit(self, before, after):
        for plugin in self.plugins:
            self.loop.create_task(plugin.on_message_edit(before, after))

    async def on_message_delete(self, message):
        for plugin in self.plugins:
            self.loop.create_task(plugin.on_message_delete(message))

    async def on_reaction_add(self, reaction, user):
        for plugin in self.plugins:
            self.loop.create_task(plugin.on_reaction_add(reaction, user))

    async def on_reaction_remove(self, reaction, user):
        for plugin in self.plugins:
            self.loop.create_task(plugin.on_reaction_remove(reaction, user))

    async def on_reaction_clear(self, message, reactions):
        for plugin in self.plugins:
            self.loop.create_task(plugin.on_reaction_clear(message, reactions))

    async def on_channel_create(self, channel):
        for plugin in self.plugins:
            self.loop.create_task(plugin.on_channel_create(channel))

    async def on_channel_update(self, before, after):
        for plugin in self.plugins:
            self.loop.create_task(plugin.on_channel_update(before, after))

    async def on_channel_delete(self, channel):
        for plugin in self.plugins:
            self.loop.create_task(plugin.on_channel_delete(channel))

    async def on_member_join(self, member):
        for plugin in self.plugins:
            self.loop.create_task(plugin.on_member_join(member))

    async def on_member_remove(self, member):
        for plugin in self.plugins:
            self.loop.create_task(plugin.on_member_remove(member))

    async def on_member_update(self, before, after):
        for plugin in self.plugins:
            self.loop.create_task(plugin.on_member_update(before, after))

    async def on_server_join(self, server):
        for plugin in self.plugins:
            self.loop.create_task(plugin.on_server_join(server))

    async def on_server_remove(self, server):
        for plugin in self.plugins:
            self.loop.create_task(plugin.on_server_remove(server))

    async def on_server_update(self, before, after):
        for plugin in self.plugins:
            self.loop.create_task(plugin.on_server_update(before, after))

    async def on_server_role_create(self, server, role):
        for plugin in self.plugins:
            self.loop.create_task(plugin.on_server_role_create(server, role))

    async def on_server_role_delete(self, server, role):
        for plugin in self.plugins:
            self.loop.create_task(plugin.on_server_role_delete(server, role))

    async def on_server_role_update(self, server, role):
        for plugin in self.plugins:
            self.loop.create_task(plugin.on_server_role_update(server, role))

    async def on_voice_state_update(self, before, after):
        pass

    async def on_member_ban(self, member):
        for plugin in self.plugins:
            self.loop.create_task(plugin.on_member_ban(member))

    async def on_member_unban(self, member):
        for plugin in self.plugins:
            self.loop.create_task(plugin.on_member_unban(member))

    async def on_typing(self, channel, user, when):
        pass

    def isop_user(self, user_id):
        """ Check if user is op """
        if os.path.isfile(self.OPS_FILE_PATH):
            with open(self.OPS_FILE_PATH, encoding="utf8") as ops_file:
                ops = json.load(ops_file)
            return ops["global"] and user_id in ops["global"]
        return False
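
A plugin would typically register itself through the methods above; the load_all() call in __init__ presumably triggers this. A hypothetical registration sequence, shown only to illustrate the API:

# `bot` stands for an instantiated ConDeBot; the plugin name is made up.
bot.reserve_keywords(["ping", "echo"], "ExamplePlugin")
bot.add_plugin_usage("!ping", "ExamplePlugin")
bot.add_plugin_description("Replies with pong.", "ExamplePlugin")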
Code example #7
File: hbs.py Project: hack4sec/hbs-cli
if not os.path.exists("{0}/{1}".format(config['main']['path_to_hc'],
                                       config['main']['hc_bin'])):
    print "ERROR: HC bin {0}/{1} is not exists!".format(
        config['main']['path_to_hc'], config['main']['hc_bin'])
    exit(0)

version_output = subprocess.check_output("{0}/{1} --version".format(
    config['main']['path_to_hc'], config['main']['hc_bin']),
                                         shell=True)
if version_output[0] != '2':
    print "ERROR: HBS support only HashCat v2, but you have {0} ({1})".format(
        version_output[0], version_output)
    exit(0)

Registry().set('db', db)

logger = Logger()
Registry().set('logger', logger)

Registry().get('logger').log("main", "Started")

threads = {}

hashlists_loader_thrd = HashlistsLoaderThread()
hashlists_loader_thrd.start()
threads['hashlists_loader_thrd'] = hashlists_loader_thrd

result_parse_thrd = ResultParseThread()
result_parse_thrd.start()
threads['result_parse_thrd'] = result_parse_thrd

hashlists_by_alg_loader_thrd = HashlistsByAlgLoaderThread()
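
Registry() is evidently a process-wide key-value singleton: every call site constructs a fresh Registry() yet reads back objects stored elsewhere. A minimal sketch of that pattern — the real hack4sec implementation may differ:

class Registry(object):
    _storage = {}  # class-level dict shared by every Registry() instance

    def set(self, key, value):
        Registry._storage[key] = value

    def get(self, key):
        return Registry._storage[key]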
Code example #8
    # Model saving
    ckpt = tf.train.Checkpoint(transformer=transformer, optimizer=optimizer)

    # Load last checkpoint
    ckpt_manager = tf.train.CheckpointManager(ckpt,
                                              CHECKPOINT_PATH,
                                              max_to_keep=999)

    # If a checkpoint exists, restore the latest checkpoint.
    if ckpt_manager.latest_checkpoint:
        ckpt.restore(ckpt_manager.latest_checkpoint).expect_partial()
        print(f"Restored {CHECKPOINT_PATH} checkpoint!\n")

    # Set the logger to report to the correct file
    logger = Logger(MODEL_NAME)

    def evaluate(inp_sentence):
        start_token = [tokenizer_txt.vocab_size]
        end_token = [tokenizer_txt.vocab_size + 1]

        # The input is a MWP, hence adding the start and end token
        inp_sentence = start_token + \
            tokenizer_txt.encode(inp_sentence) + end_token
        encoder_input = tf.expand_dims(inp_sentence, 0)

        # The target is an equation, the first word to the transformer should be the
        # equation start token.
        decoder_input = [tokenizer_eq.vocab_size]
        output = tf.expand_dims(decoder_input, 0)
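
        # --- Sketch of the usual continuation, which the excerpt omits ---
        # MAX_LENGTH and the bare transformer(...) call signature are
        # assumptions based on the standard TensorFlow transformer tutorial;
        # the project's real loop (padding/look-ahead masks) may differ.
        for _ in range(MAX_LENGTH):
            predictions, _ = transformer(encoder_input, output, training=False)
            # Pick the highest-scoring token at the last decoded position
            predicted_id = tf.argmax(predictions[:, -1:, :], axis=-1)
            if int(predicted_id) == tokenizer_eq.vocab_size + 1:  # equation end token
                break
            output = tf.concat([output, tf.cast(predicted_id, output.dtype)],
                               axis=-1)
        return tf.squeeze(output, axis=0)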
Code example #9
File: trainer.py Project: alex-ten/MSC
def main(_):

    config = Configs(
        batch_size=20,
        hidden_size=1500,
        init_scale=0.04,
        keep_prob=.35,
        learning_rate=1.0,
        lr_decay=1 / 1.15,
        max_epoch=14,
        max_grad_norm=10,
        max_max_epoch=55,
        model=FLAGS.arch.lower(),  # available models: 'LSTM', 'RNN', 'SRN'
        num_layers=1,
        num_steps=35,
        vocab_size=10000)
    eval_config = config.clone()
    eval_config.batch_size = 1
    eval_config.num_steps = 1

    if FLAGS.train_data:
        path = PDPATH('/train_data/' + FLAGS.train_data)
    else:
        print('Provide a path to training data, e.g.: train.py --train_data=\'path\'')
        return

    logger = Logger()

    raw_data = reader.raw_data(path)
    train_data, valid_data, test_data, _ = raw_data

    with tf.Graph().as_default():
        initializer = tf.random_uniform_initializer(-config.init_scale,
                                                    config.init_scale,
                                                    seed=None)
        with tf.name_scope("Train"):
            train_input = InputData(config=config,
                                    data=train_data,
                                    name="TrainInput")
            with tf.variable_scope("Model",
                                   reuse=None,
                                   initializer=initializer):
                m = get_model(config.model,
                              is_training=True,
                              config=config,
                              input_=train_input)
            print(m)
            tf.summary.scalar("Training Loss", m.cost)
            tf.summary.scalar("Learning Rate", m.lr)

        with tf.name_scope("Valid"):
            valid_input = InputData(config=config,
                                    data=valid_data,
                                    name="ValidInput")
            with tf.variable_scope("Model",
                                   reuse=True,
                                   initializer=initializer):
                mvalid = get_model(config.model,
                                   is_training=False,
                                   config=config,
                                   input_=valid_input)
            tf.summary.scalar("Validation Loss", mvalid.cost)

        with tf.name_scope("Test"):
            test_input = InputData(config=eval_config,
                                   data=test_data,
                                   name="TestInput")
            with tf.variable_scope("Model",
                                   reuse=True,
                                   initializer=initializer):
                mtest = get_model(config.model,
                                  is_training=False,
                                  config=eval_config,
                                  input_=test_input)

        logger.make_child_i(logger.logs_path, 'RNNlog')
        saver = tf.train.Saver(var_list=tf.get_collection(
            tf.GraphKeys.GLOBAL_VARIABLES, scope='Model'),
                               sharded=False,
                               write_version=tf.train.SaverDef.V2)
        sv = tf.train.Supervisor(logdir=logger.logs_child_path, saver=saver)
        train_log = []
        valid_log = []
        out = []

        # Session runs here
        # Setup session configs
        sess_config = tf.ConfigProto(log_device_placement=False)
        sess_config.gpu_options.allow_growth = True
        # Start session context manager by calling to tf.train.Supervisor's managed_session
        with sv.managed_session(config=sess_config) as session:
            print('Starting on: {} (GMT)'.format(str(
                datetime.datetime.today())))
            print(banner(s='begin'))
            start = time.time()
            if FLAGS.prog:
                printProgress(0,
                              config.max_max_epoch,
                              'Training',
                              'Complete',
                              barLength=60)
            for i in range(config.max_max_epoch):
                fin = i + 1
                valid_perplexity, _ = run_epoch(session, mvalid)
                valid_log.append(valid_perplexity)
                if len(valid_log) >= 2:
                    if valid_log[-1] > valid_log[-2]:
                        elapsed = time.time() - start
                        break

                lr_decay = config.lr_decay**max(i + 1 - config.max_epoch, 1)
                m.assign_lr(session, config.learning_rate * lr_decay)

                train_perplexity, _ = run_epoch(session, m, eval_op=m.train_op)
                train_log.append(train_perplexity)

                output_density = 10
                output_frequency = config.max_max_epoch // output_density
                # Guard on output_frequency itself: for runs shorter than
                # output_density epochs it is 0 and `i % 0` would raise.
                if output_frequency > 0:
                    if i % output_frequency == 0 or i == config.max_max_epoch - 1:
                        print_(i, train_perplexity, valid_perplexity)
                else:
                    print_(i, train_perplexity, valid_perplexity)

                if i == config.max_max_epoch - 1:
                    elapsed = time.time() - start

                if FLAGS.prog:
                    printProgress(i + 1,
                                  config.max_max_epoch,
                                  'Training',
                                  'Complete',
                                  barLength=60)

            test_perplexity, outputs = run_epoch(session, mtest)
            print('\nStopped training on epoch {}:'.format(fin))
            print(
                "    Train PPL = {:.4f}\n    Valid PPL = {:.4f}\n    Test PPL  = {:.4f}"
                .format(train_perplexity, valid_perplexity, test_perplexity))
            print('    Stopped {} (GMT)'.format(str(
                datetime.datetime.today())))
            mins, secs = divmod(elapsed, 60)  # avoid reusing `m`, the model variable
            hours, mins = divmod(mins, 60)
            print('    Elapsed time {}:{}:{}'.format(int(hours), int(mins),
                                                     int(secs)))

            if FLAGS.save_as:
                if FLAGS.name:
                    save_to = logger.make_child(logger.trained_path,
                                                FLAGS.name)
                else:
                    save_to = logger.make_child_i(logger.trained_path, 'model')

                spath = save_to + '/' + FLAGS.save_as
                print("\nSaving model to {}.".format(spath))
                saver.save(session, spath, global_step=sv.global_step)
                save_config(config, filename=spath)
                save_plot('Learning curves from {}'.format(FLAGS.save_as),
                          save_to, train_log, valid_log)
Code example #10
File: azureiot.py Project: bhupendra592/azureiot
import shlex
import threading
import can_send
import csv
import yaml
from classes.Logger import Logger  #import logger
from pymemcache.client import base
import config as config_param  #import configuration parameter file
from azure.iot.device import Message, IoTHubDeviceClient, MethodResponse
from azure.core.exceptions import AzureError
from azure.storage.blob import BlobClient
import json
import datetime

#Get the logger azureiot
log_obj = Logger("azureiot")
logger = log_obj.get()

# memcache client for the local memcached server at 0.0.0.0:11211
memcache_client = base.Client(('0.0.0.0', 11211))

CONNECTION_STRING = config_param.CONNECTION_STRING  # connection string for the Azure device
MESSAGE_TIMEOUT = config_param.MESSAGE_TIMEOUT  #message timeout setting

#timestamp for the canbus and gps
canbus_timestamp = 0
gps_timestamp = 0

in_port = config_param.zmq_out_port  #IN port for data communication between azure iot, canbus program
#out_port = "5557"
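
The pymemcache client above presumably shares the latest CAN bus and GPS samples between processes. A hypothetical round-trip with the same client — the key name is made up:

memcache_client.set('canbus_latest', json.dumps({'speed': 42}))
raw = memcache_client.get('canbus_latest')  # returns bytes, or None on a miss
latest = json.loads(raw) if raw else None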
Code example #11
class GameLogger:
    """
    Hardcoded column values whose key map towards a piece's UID so we can "random" access files for "random"-ness sake
    """
    __map = [
        119, 124, 129, 134, 139, 144, 149, 154,
        159, 164, 169, 174, 179, 184, 189, 194,  # Black pieces
        22, 27, 32, 37, 42, 47, 52, 57,
        62, 67, 72, 77, 82, 87, 92, 97,  # White pieces
    ]

    def __init__(self):
        self.logger = Logger()

    def action_to_file(self, origin_piece, destination_piece=None):
        """
        Given 2 pieces, set their positions in the file

        :param origin_piece: Piece
        :param destination_piece: Piece
        :return: void
        """
        stream = self.logger.stream()
        offset = self.get_piece_offset(origin_piece)

        stream.seek(offset)
        stream.write(Visualiser.to_algebraic(*origin_piece.get_pos()))

        if destination_piece:
            """ Likely a capture here, but let's make sure it is """
            destination_piece_state = "XX"
            offset = self.get_piece_offset(destination_piece)

            if not (origin_piece.is_white() ^ destination_piece.is_white()):
                """ Highly likely castling in here. Set destination piece's state instead of marking them captured """
                destination_piece_state = Visualiser.to_algebraic(
                    *destination_piece.get_pos())

            stream.seek(offset)
            stream.write(destination_piece_state)

        # Reset seek
        stream.seek(0)

    def get_piece_offset(self, piece):
        """
        Get our piece's column offset and take into consideration all quirks
        :param piece:
        :return:
        """

        # "+2" skips the piece name and the equals sign (e.g. K=e1)
        offset = self.__map[piece.uid] + 2

        if not piece.is_white():
            """
            Black magic voodoo caused by \n needs an offset like this even though
            file.read() wont tell you a \n exists
            """
            offset += 2

        return offset

    def state_to_file(self, board: Board):
        """
        Persist current state to file in logger

        :param board: The current game board
        :return:
        """
        self.logger.clear()
        stream = self.logger.stream()

        char_offset = 5

        # Sort pieces nicely by camo and valuation
        [white, black] = self.group_by_camo(board.pieces)

        # Set seek column number
        col = 0

        for camo in [white, black]:
            # Set preface text to clearly indicate which camo the following pieces are
            preface = " (by valuation): "
            preface = ("White" if camo[0].is_white() else "Black") + preface
            stream.write(preface)
            col += len(preface)  # account for the text just written

            for piece in self.sort_by_rank(camo):
                [x, y] = piece.get_pos()
                algebraic_pos = Visualiser.to_algebraic(*[x, y])
                string = piece.name.strip() + "=" + algebraic_pos  # e.g. "K=e1"

                stream.write(string.ljust(char_offset))

                col += char_offset

            # Space out next columns to make way for next camo
            stream.write("\n")

    @staticmethod
    def group_by_camo(pieces):
        """
        Group pieces by camo(uflage)

        :param pieces:
        :return: List of two lists: the first contains the white pieces, the second the black
        """
        camos = [[], []]

        for piece in pieces:
            camo = 0 if piece.is_white() else 1

            camos[camo].append(piece)

        return camos

    @staticmethod
    def sort_by_rank(pieces):
        """
        Sort pieces by their valuations (rank)

        :param pieces:
        :return: Pieces sorted by valuation
        """
        return sorted(pieces, key=lambda piece: piece.value)
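
In use, the logger mirrors the board into its backing file. A hypothetical call sequence — Piece and Board construction belongs to the surrounding project and is not shown:

game_log = GameLogger()
game_log.state_to_file(board)                   # snapshot every piece by camo and rank
game_log.action_to_file(knight, captured_pawn)  # record a single move/capture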