Example #1
def main():
    parser = OptionParser()
    parser.add_option("-d", "--debug", dest="debug_level",
                      help="set log level to LEVEL", metavar="LEVEL")
    parser.add_option("-c", "--conf", dest="conf_filename",
                      help="configuration FILENAME", metavar="FILENAME")
    parser.add_option("-k", "--check", dest="check", action="store_true",
                      default=False,
                      help="only checks that shell app works correctly")
    (options, args) = parser.parse_args()
    args = args  # positional args are unused; assignment only quiets linters
    if options.debug_level is not None:
        level = int(options.debug_level)
    else:
        level = logging.CRITICAL

    basicConfig(level=level, format=FORMAT)

    try:
        shell = PyZigBeeShell(conf_filename=get_conf_filename(options))
        if options.check:
            sys.exit(0)
        shell.cmdloop()
    except PyZigBeeException as error:
        print("Error: %s" % error)
        sys.exit(2)
    except KeyboardInterrupt:
        print("Bye!")
        sys.exit(0)
    except Exception as error:
        print("Uncaught error: %s" % error)
        sys.exit(1)
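
optparse (used above) has been deprecated since Python 2.7; as a sketch, the same option setup in argparse (option names taken from the snippet, the rest hypothetical) could look like:

import argparse
import logging

parser = argparse.ArgumentParser()
parser.add_argument("-d", "--debug", dest="debug_level", metavar="LEVEL",
                    help="set log level to LEVEL")
parser.add_argument("-c", "--conf", dest="conf_filename", metavar="FILENAME",
                    help="configuration FILENAME")
parser.add_argument("-k", "--check", action="store_true", default=False,
                    help="only checks that shell app works correctly")
options = parser.parse_args()

# Same level-selection logic as in the snippet above.
level = int(options.debug_level) if options.debug_level else logging.CRITICAL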
Example #2
def start_logging():
    try:
        import colorlog
        colorlog.basicConfig(
            format='%(log_color)s%(levelname)s%(reset)s:%(name)s:%(message)s')
    except ImportError:
        import logging
        logging.basicConfig()
        del logging
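
The try/except ImportError above is the usual way to degrade gracefully when colorlog is not installed; a self-contained sketch of the same pattern:

import logging

try:
    import colorlog
    colorlog.basicConfig(
        format='%(log_color)s%(levelname)s%(reset)s:%(name)s:%(message)s',
        level=logging.INFO)
except ImportError:
    # Same layout, minus the color escape codes.
    logging.basicConfig(format='%(levelname)s:%(name)s:%(message)s',
                        level=logging.INFO)

logging.getLogger(__name__).info("works with or without colorlog")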
Example #3
def metatest(config, workers, log_level):
    colorlog.basicConfig(level=log_level.upper(), format=LOG_FORMAT)

    tasks = json.load(config)
    runner = TaskRunner([Task.from_description(t) for t in tasks])

    log = logging.getLogger('metatest')
    log.info('Running {} tasks with {} workers'.format(
        runner.tasks.qsize(), workers))

    for i in range(workers):
        threading.Thread(target=runner).start()
    runner.join()

    log.info('Finished running {} tasks'.format(runner.tasks_done.qsize()))
Example #4
def main():
    colorlog.basicConfig(
        filename=None,
        level=logging.INFO,
        format="%(log_color)s[%(levelname)s:%(asctime)s]%(reset)s %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S")

    if not os.path.exists(CAPTION_OUTPUT_PATH):
        colorlog.info("Create directory %s" % (CAPTION_OUTPUT_PATH))
        os.makedirs(CAPTION_OUTPUT_PATH)
    if not os.path.exists(HASHTAG_OUTPUT_PATH):
        colorlog.info("Create directory %s" % (HASHTAG_OUTPUT_PATH))
        os.makedirs(HASHTAG_OUTPUT_PATH)

    # Load raw data
    caption_train_json = load_json(CAPTION_TRAIN_JSON_FNAME)
    caption_test1_json = load_json(CAPTION_TEST1_JSON_FNAME)
    caption_test2_json = load_json(CAPTION_TEST2_JSON_FNAME)
    hashtag_train_json = load_json(HASHTAG_TRAIN_JSON_FNAME)
    hashtag_test1_json = load_json(HASHTAG_TEST1_JSON_FNAME)
    hashtag_test2_json = load_json(HASHTAG_TEST2_JSON_FNAME)

    # Tokenize all
    caption_counter, caption_train_tokens, caption_test1_tokens, \
        caption_test2_tokens = tokenize_all(
            caption_train_json,
            caption_test1_json,
            caption_test2_json,
            'caption'
        )
    hashtag_counter, hashtag_train_tokens, hashtag_test1_tokens, \
        hashtag_test2_tokens = tokenize_all(
            hashtag_train_json,
            hashtag_test1_json,
            hashtag_test2_json,
            'tags'
        )

    # Create vocabulary
    caption_vocab, caption_rev_vocab = create_vocabulary(
        caption_counter, CAPTION_VOCAB_FNAME, CAPTION_VOCAB_SIZE)
    hashtag_vocab, hashtag_rev_vocab = create_vocabulary(
        hashtag_counter, HASHTAG_VOCAB_FNAME, HASHTAG_VOCAB_SIZE)

    # Get tfidf weighted tokens
    caption_train_tfidf_tokens, caption_test1_tfidf_tokens, \
        caption_test2_tfidf_tokens = get_tfidf_words(
            caption_train_tokens,
            caption_test1_tokens,
            caption_test2_tokens,
            caption_vocab,
            caption_rev_vocab
        )
    hashtag_train_tfidf_tokens, hashtag_test1_tfidf_tokens, \
        hashtag_test2_tfidf_tokens = get_tfidf_words(
            hashtag_train_tokens,
            hashtag_test1_tokens,
            hashtag_test2_tokens,
            hashtag_vocab,
            hashtag_rev_vocab
        )

    # Save data
    save_data((caption_train_tokens, caption_train_tfidf_tokens),
              (caption_test1_tokens, caption_test1_tfidf_tokens),
              (caption_test2_tokens, caption_test2_tfidf_tokens),
              CAPTION_OUTPUT_PATH, caption_rev_vocab)
    save_data((hashtag_train_tokens, hashtag_train_tfidf_tokens),
              (hashtag_test1_tokens, hashtag_test1_tfidf_tokens),
              (hashtag_test2_tokens, hashtag_test2_tfidf_tokens),
              HASHTAG_OUTPUT_PATH, hashtag_rev_vocab)
Example #5
#   MAIN
formatter = colorlog.ColoredFormatter(
    "%(log_color)s%(levelname)-8s %(message)s %(reset)s",
    datefmt=None,
    reset=True,
    log_colors={
        'DEBUG': 'cyan',
        'INFO': 'green',
        'WARNING': 'yellow',
        'ERROR': 'red',
        'CRITICAL': 'red,bg_white',
    },
    secondary_log_colors={},
    style='%')
if ('--debug' in sys.argv):
    colorlog.basicConfig(stream=sys.stdout, level=logging.DEBUG)
else:
    colorlog.basicConfig(stream=sys.stdout,
                         level=logging.INFO)  # default log level

stream = colorlog.root.handlers[0]
stream.setFormatter(formatter)

if (len(sys.argv) < 2):
    logging.error("Expected input file path as parameter")
    exit(1)

# read Pcolony from file
pObj = sim.readInputFile(sys.argv[1])
# if the p object read from the input file is a Pswarm
if (type(pObj) == sim.Pswarm):
Example #6
formatter = colorlog.ColoredFormatter(
        "%(log_color)s%(levelname)-8s %(message)s %(reset)s",
        datefmt=None,
        reset=True,
        log_colors={
                'DEBUG':    'cyan',
                'INFO':     'green',
                'WARNING':  'yellow',
                'ERROR':    'red',
                'CRITICAL': 'red,bg_white',
        },
        secondary_log_colors={},
        style='%'
)
if ('--debug' in sys.argv):
    colorlog.basicConfig(stream = sys.stdout, level = logging.DEBUG)
else:
    colorlog.basicConfig(stream = sys.stdout, level = logging.INFO) # default log level

stream = colorlog.root.handlers[0]
stream.setFormatter(formatter)

if (len(sys.argv) < 2):
    logging.error("Expected input file path as parameter")
    exit(1)

# read Pcolony from file
pObj = sim.readInputFile(sys.argv[1])
# if the p object read from the input file is a Pswarm
if (type(pObj) == sim.Pswarm):
    if (len(sys.argv) < 3):
Example #7
import sys
import logging

sys.path.append(".")
from StreamingOutputSubprocess import StreamingOutputFormattingProcess


def demoChained():
    """Demonstrate monitoring the standard error of a process for the string 'trigger', and when found - launching a second process
    which outlives its parent"""
    class ChainingSOFP(StreamingOutputFormattingProcess):
        def onStdoutLine(self, tag, line):
            super().onStdoutLine(tag, "[{}] {}".format(tag,
                                                       line.decode()).encode())

        def onStderrLine(self, tag, line):
            super().onStderrLine(tag, "[{}] {}".format(tag,
                                                       line.decode()).encode())
            if tag == "main" and b"trigger" in line:
                cmdChained = "bash -c 'echo chained stdout && sleep 2 && echo stderr chained 1>&2 && sleep 1 && echo done chained'"
                self.run(cmdChained, "chained")

    cmdInitial = "bash -c 'echo stdout && sleep 1 && echo trigger chained 1>&2 && sleep 1 && echo more output && sleep 1 && echo done && exit 3'"
    Spec = ChainingSOFP.OutputSpec
    sofp = ChainingSOFP(Spec("STDOUT: {}"), Spec("STDERR: {}", sys.stderr))
    status = sofp.run(cmdInitial, "main")
    print("Initial finished with status: {}.".format(status))


if __name__ == "__main__":
    print("demoChained:", demoChained.__doc__)
    logging.basicConfig(level=logging.DEBUG)
    demoChained()
Example #8
import asyncio
import logging
import colorlog
from threading import Thread

from src import env

getLogger = colorlog.getLogger

colorlog.basicConfig(
    format='%(log_color)s%(asctime)s:%(levelname)s:%(name)s - %(message)s',
    datefmt='%Y-%m-%d-%H:%M:%S',
    level=colorlog.DEBUG if env.DEBUG else colorlog.INFO)

_muted = colorlog.INFO if env.DEBUG else colorlog.WARNING
_shut_upped = colorlog.ERROR if env.DEBUG else colorlog.CRITICAL

getLogger('apscheduler').setLevel(colorlog.WARNING)
getLogger('aiohttp_retry').setLevel(_muted)
getLogger('asyncio').setLevel(_muted)
getLogger('telethon').setLevel(_muted)
getLogger('aiosqlite').setLevel(_muted)
getLogger('tortoise').setLevel(_muted)
getLogger('asyncpg').setLevel(_muted)


# filter log spam from apscheduler.scheduler
class APSCFilter(logging.Filter):
    def __init__(self):
        super().__init__()
        self.count = -3  # first 3 times muted
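
The snippet is cut off before the filter method; a hedged completion, assuming from the comment that the intent is to mute the first three records:

# Hypothetical completion: drop the first 3 records, pass everything after.
class APSCFilterSketch(logging.Filter):
    def __init__(self):
        super().__init__()
        self.count = -3  # first 3 times muted

    def filter(self, record):
        self.count += 1
        return self.count > 0

logging.getLogger('apscheduler.scheduler').addFilter(APSCFilterSketch())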
Example #9
def test_colorlog_basicConfig(test_logger):
    colorlog.basicConfig()
    test_logger(colorlog.getLogger())
Example #10
#!/usr/bin/env python2
"""Unit tests for the food2fork_api module.

Author: Alex Richard Ford ([email protected])
"""

import pytest
import colorlog
import urlparse

from food_finder import food2fork_api

colorlog.basicConfig(level="INFO")
_log = colorlog.getLogger(__name__)

# Attempt to load the Food2Fork API Key from file, if it exists. Otherwise,
# just set the API Key to some fake testing value.
try:
    with open("./secrets/f2f_api_key", "r") as key_file:
        _KEY = key_file.readline().strip()
except (IOError, OSError):
    _KEY = "fakeFood2ForkApiKey"


def test_build_query_endpoint_url():
    api = food2fork_api.Food2ForkAPI(_KEY)

    # Query endpoint requires a string type, so giving it None or some other
    # type should throw an exception
    with pytest.raises(AssertionError):
        urlActual = api._build_query_endpoint_url(None)
Example #11
def train():
    colorlog.basicConfig(
        filename=None,
        level=logging.INFO,
        format="%(log_color)s[%(levelname)s:%(asctime)s]%(reset)s %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S")

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.95)
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                          log_device_placement=False,
                                          gpu_options=gpu_options)) as sess:
        global_step = tf.get_variable('global_step', [],
                                      initializer=tf.constant_initializer(0),
                                      trainable=False)
        num_examples_per_epoch, tower_img_embedding, tower_context_length, \
            tower_caption_length, tower_context_id, tower_caption_id, \
            tower_answer_id, tower_context_mask, \
            tower_caption_mask = enqueue(False)

        # Calculate the learning rate schedule.
        num_batches_per_epoch = (num_examples_per_epoch / FLAGS.batch_size /
                                 FLAGS.num_gpus)
        decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)

        # Decay the learning rate exponentially based on the number of steps.
        lr = tf.train.exponential_decay(FLAGS.init_lr,
                                        global_step,
                                        decay_steps,
                                        LEARNING_RATE_DECAY_FACTOR,
                                        staircase=True)

        # Create an optimizer that performs gradient descent.
        opt = tf.train.AdamOptimizer(lr)
        #opt = tf.train.MomentumOptimizer(lr,momentum=0.7,use_nesterov=True)

        # Calculate the gradients for each model tower.
        tower_grads = []
        with tf.variable_scope(tf.get_variable_scope()) as scope:
            for i in xrange(FLAGS.num_gpus):
                with tf.device('/gpu:%d' % i):
                    with tf.name_scope('%s_%d' % (TOWER_NAME, i)) as scope:
                        # Calculate the loss for one tower of the CIFAR model. This function
                        # constructs the entire CIFAR model but shares the variables across
                        # all towers.
                        inputs = [
                            tower_img_embedding[i], tower_context_length[i],
                            tower_caption_length[i], tower_context_id[i],
                            tower_caption_id[i], tower_answer_id[i],
                            tower_context_mask[i], tower_caption_mask[i]
                        ]
                        loss = _tower_loss(inputs, scope)

                        # Reuse variables for the next tower.
                        tf.get_variable_scope().reuse_variables()

                        # Retain the summaries from the final tower.
                        summaries = tf.get_collection(tf.GraphKeys.SUMMARIES,
                                                      scope)

                        # Calculate the gradients for the batch of data on this CIFAR tower.
                        grads = opt.compute_gradients(loss)

                        # Keep track of the gradients across all towers.
                        tower_grads.append(grads)

        # We must calculate the mean of each gradient. Note that this is the
        # synchronization point across all towers.
        grads = _average_gradients(tower_grads)

        # Add a summary to track the learning rate.
        summaries.append(tf.summary.scalar('learning_rate', lr))
        clipped_grads_and_vars = [(tf.clip_by_norm(gv[0], \
            FLAGS.max_grad_norm), gv[1]) for gv in grads]
        # Apply the gradients to adjust the shared variables.
        apply_gradient_op = opt.apply_gradients(clipped_grads_and_vars,
                                                global_step=global_step)
        # Create a saver.
        saver = tf.train.Saver(tf.global_variables(), max_to_keep=200)

        # Build the summary operation from the last tower summaries.
        summary_op = tf.summary.merge(summaries)

        # Build an initialization operation to run below.
        init = tf.global_variables_initializer()

        sess.run(init)

        ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
        if ckpt and ckpt.model_checkpoint_path:
            # Restores from checkpoint
            #saver.restore(sess, ckpt.model_checkpoint_path)
            pass
        # Start the queue runners.
        tf.train.start_queue_runners(sess=sess)
        summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)

        for step in xrange(FLAGS.max_steps):
            start_time = time.time()
            _, loss_value = sess.run([apply_gradient_op, loss])
            duration = time.time() - start_time
            assert not np.isnan(loss_value), 'Model diverged with loss = NaN'

            if (step + 1) % 100 == 0:
                num_examples_per_step = FLAGS.batch_size * FLAGS.num_gpus
                examples_per_sec = num_examples_per_step / duration
                sec_per_batch = duration / FLAGS.num_gpus

                format_str = (
                    '%s: step %d, loss = %.8f (%.1f examples/sec; %.3f '
                    'sec/batch)')
                c_g_step = int(global_step.eval(session=sess))
                print(format_str % (datetime.now(), c_g_step, loss_value,
                                    examples_per_sec, sec_per_batch))

            if (step + 1) % 500 == 0:
                summary_str = sess.run(summary_op)
                summary_writer.add_summary(summary_str, c_g_step)

            # Save the model checkpoint periodically.
            if (step + 1) % 500 == 0 or (step + 1) == FLAGS.max_steps:
                checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=c_g_step)
Example #12
from config import HOST, PORT, ID, PW
import requests
from bs4 import BeautifulSoup
import colorlog
import logging
import inspect
import sys
import os
import re
from time import sleep

colorlog.basicConfig(level=logging.INFO)


def login():
    frame = inspect.currentframe()
    current_function_name = inspect.getframeinfo(frame).function

    try:
        data = {
            'id': ID,
            'pw': PW,
        }
        r = requests.post('http://{}:{}/login'.format(HOST, PORT),
                          data=data,
                          allow_redirects=False)

        if 'set-cookie' not in r.headers:
            colorlog.error('"{}" failed'.format(current_function_name))
            os._exit(1)
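
A note on the inspect idiom above: getframeinfo(currentframe()) yields the enclosing function's own name, which the error message reuses. A minimal self-contained check:

import inspect

def demo():
    frame = inspect.currentframe()
    return inspect.getframeinfo(frame).function

assert demo() == "demo"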
Example #13
        formatter = colorlog.ColoredFormatter(
            "%(log_color)s%(levelname)-8s %(message)s %(reset)s",
            datefmt=None,
            reset=True,
            log_colors={
                'DEBUG': 'cyan',
                'INFO': 'green',
                'WARNING': 'yellow',
                'ERROR': 'red',
                'CRITICAL': 'red,bg_white',
            },
            secondary_log_colors={},
            style='%')

        colorlog.basicConfig(stream=sys.stdout, level=logLevel)
        stream = colorlog.root.handlers[0]
        stream.setFormatter(formatter)

    # colorlog not available
    except ImportError:
        logging.basicConfig(format='%(levelname)s:%(message)s', level=logLevel)
    if (len(sys.argv) < 2):
        logging.error("Expected input file path as parameter")
        exit(1)

    if (len(sys.argv) < 3):
        logging.error(
            "Expected the path to the file (without extensions) that will be generated"
        )
        exit(1)
Example #14
import logging
import sys

import colorlog

from compiler.compiler import BSCompiler
from compiler.config.compiler_cli import CompilerCLI

def main(args):
    # parse the args.
    cli = CompilerCLI(args)
    compiler = BSCompiler(cli.config)
    compiler.compile()


if __name__ == '__main__':
    colorlog.basicConfig(level=logging.DEBUG,
                         format='%(log_color)s%(levelname)s:\t[%(name)s.%(funcName)s:%(lineno)d]\t %(message)s')
    # We don't need the first argument.
    main(sys.argv[1:])

Example #15
# end class VrepBridge


##########################################################################
#   MAIN
if __name__ == "__main__":
    formatter = colorlog.ColoredFormatter(
        "%(log_color)s%(levelname)-8s %(message)s %(reset)s",
        datefmt=None,
        reset=True,
        log_colors={"DEBUG": "cyan", "INFO": "green", "WARNING": "yellow", "ERROR": "red", "CRITICAL": "red,bg_white"},
        secondary_log_colors={},
        style="%",
    )
    colorlog.basicConfig(level=logging.DEBUG)
    stream = colorlog.root.handlers[0]
    stream.setFormatter(formatter)

    bridge = VrepBridge()

    bridge.spawnRobots(nr=10, spawnType=SpawnType.circular)

    bridge.getState(0)
    bridge.setState(0, Motion.forward, [0, 2, 0])

    bridge.getState(1)
    bridge.setState(1, Motion.left, [2, 0, 0])

    bridge.getState(2)
    bridge.setState(2, Motion.right, [0, 0, 2])
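
Several of these examples share the same trick: colorlog.basicConfig() attaches a handler to the root logger, and the ColoredFormatter is then swapped onto that handler. A minimal standalone sketch, assuming colorlog is installed:

import logging
import sys

import colorlog

formatter = colorlog.ColoredFormatter(
    "%(log_color)s%(levelname)-8s %(message)s %(reset)s")

# basicConfig() installs a handler on the root logger;
# replace its formatter with the colored one.
colorlog.basicConfig(stream=sys.stdout, level=logging.DEBUG)
colorlog.root.handlers[0].setFormatter(formatter)

logging.getLogger(__name__).warning("colored output")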
Example #16
def set_logger(fname=None):
    colorlog.basicConfig(
        filename=fname,
        level=logging.INFO,
        format="%(log_color)s[%(levelname)s:%(asctime)s]%(reset)s %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S")
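
A hypothetical pair of calls to the helper above: with filename=None the output goes to the console; with a path, basicConfig attaches a file handler instead, so the color escape codes may end up in the log file, depending on the colorlog version.

set_logger()             # colored console logging
set_logger("train.log")  # write to train.log instead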
Example #17
def main():
    """
    Main function, called when vipermonkey is run from the command line
    """
    # print banner with version
    print('vmonkey %s - https://github.com/decalage2/ViperMonkey' %
          __version__)
    print('THIS IS WORK IN PROGRESS - Check updates regularly!')
    print(
        'Please report any issue at https://github.com/decalage2/ViperMonkey/issues'
    )
    print('')

    DEFAULT_LOG_LEVEL = "info"  # Default log level
    LOG_LEVELS = {
        'debug': logging.DEBUG,
        'info': logging.INFO,
        'warning': logging.WARNING,
        'error': logging.ERROR,
        'critical': logging.CRITICAL
    }

    usage = 'usage: %prog [options] <filename> [filename2 ...]'
    parser = optparse.OptionParser(usage=usage)
    # parser.add_option('-o', '--outfile', dest='outfile',
    #     help='output file')
    # parser.add_option('-c', '--csv', dest='csv',
    #     help='export results to a CSV file')
    parser.add_option("-r",
                      action="store_true",
                      dest="recursive",
                      help='find files recursively in subdirectories.')
    parser.add_option(
        "-z",
        "--zip",
        dest='zip_password',
        type='str',
        default=None,
        help=
        'if the file is a zip archive, open first file from it, using the provided password (requires Python 2.6+)'
    )
    parser.add_option(
        "-f",
        "--zipfname",
        dest='zip_fname',
        type='str',
        default='*',
        help=
        'if the file is a zip archive, file(s) to be opened within the zip. Wildcards * and ? are supported. (default:*)'
    )
    parser.add_option(
        "-e",
        action="store_true",
        dest="scan_expressions",
        help='Extract and evaluate/deobfuscate constant expressions')
    parser.add_option(
        '-l',
        '--loglevel',
        dest="loglevel",
        action="store",
        default=DEFAULT_LOG_LEVEL,
        help=
        "logging level debug/info/warning/error/critical (default=%default)")
    parser.add_option("-a",
                      action="store_true",
                      dest="altparser",
                      help='Use the alternate line parser (experimental)')

    (options, args) = parser.parse_args()

    # Print help if no arguments are passed
    if len(args) == 0:
        print(__doc__)
        parser.print_help()
        sys.exit()

    # setup logging to the console
    # logging.basicConfig(level=LOG_LEVELS[options.loglevel], format='%(levelname)-8s %(message)s')
    colorlog.basicConfig(level=LOG_LEVELS[options.loglevel],
                         format='%(log_color)s%(levelname)-8s %(message)s')

    for container, filename, data in xglob.iter_files(
            args,
            recursive=options.recursive,
            zip_password=options.zip_password,
            zip_fname=options.zip_fname):
        # ignore directory names stored in zip files:
        if container and filename.endswith('/'):
            continue
        if options.scan_expressions:
            process_file_scanexpr(container, filename, data)
        else:
            process_file(container,
                         filename,
                         data,
                         altparser=options.altparser)
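
Since logging also accepts level names directly, the LOG_LEVELS lookup could arguably be dropped; a hedged simplification of the call above:

# Equivalent, relying on logging accepting names like "INFO" for level:
colorlog.basicConfig(level=options.loglevel.upper(),
                     format='%(log_color)s%(levelname)-8s %(message)s')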
Example #18
def main():
    """
    Main function, called when vbashell is run from the command line
    """
    # print banner with version
    print('vbashell %s - https://github.com/decalage2/ViperMonkey' %
          __version__)
    print('THIS IS WORK IN PROGRESS - Check updates regularly!')
    print(
        'Please report any issue at https://github.com/decalage2/ViperMonkey/issues'
    )
    print('')

    DEFAULT_LOG_LEVEL = "info"  # Default log level
    LOG_LEVELS = {
        'debug': logging.DEBUG,
        'info': logging.INFO,
        'warning': logging.WARNING,
        'error': logging.ERROR,
        'critical': logging.CRITICAL
    }

    usage = 'usage: %prog [options] <filename> [filename2 ...]'
    parser = optparse.OptionParser(usage=usage)
    parser.add_option('-p',
                      '--parse',
                      dest='parse_file',
                      help='VBA text file to be parsed')
    parser.add_option('-e',
                      '--eval',
                      dest='eval_expr',
                      help='VBA expression to be evaluated')
    parser.add_option(
        '-l',
        '--loglevel',
        dest="loglevel",
        action="store",
        default=DEFAULT_LOG_LEVEL,
        help=
        "logging level debug/info/warning/error/critical (default=%default)")

    (options, args) = parser.parse_args()

    # Print help if no arguments are passed
    # if len(args) == 0:
    #     print(__doc__)
    #     parser.print_help()
    #     sys.exit()

    # setup logging to the console
    # logging.basicConfig(level=LOG_LEVELS[options.loglevel], format='%(levelname)-8s %(message)s')

    colorlog.basicConfig(level=LOG_LEVELS[options.loglevel],
                         format='%(log_color)s%(levelname)-8s %(message)s')

    if options.parse_file:
        parse(options.parse_file)

    if options.eval_expr:
        eval_expression(options.eval_expr)

    while True:
        try:
            print("VBA> ", end='')
            cmd = raw_input()

            if cmd.startswith('exit'):
                break

            if cmd.startswith('parse'):
                parse()

            if cmd.startswith('trace'):
                args = cmd.split()
                print('Tracing %s' % args[1])
                vm.trace(entrypoint=args[1])
                # print table of all recorded actions
                print('Recorded Actions:')
                print(vm.dump_actions())

            if cmd.startswith('eval'):
                expr = cmd[5:]
                eval_expression(expr)
        except Exception:
            vmonkey.log.exception('ERROR')
Example #19
import logging
import colorlog

fmt = "{log_color}{levelname} {name}: {message}"
colorlog.basicConfig(level=logging.DEBUG, style="{", format=fmt, stream=None)

log = logging.getLogger()

log.warning("hello")
Example #20
def init_logging(
    flavor: typing.Union[None, str, int, typing.Dict[str, typing.Any]],
    allow_color: bool,
    force_color: bool,
) -> None:
    """Attempt to initialize logging for the user.

    If any handlers already exist, this is ignored entirely. This ensures the
    user can use any existing logging configuration without us interfering.
    You can manually disable this by passing `None` as the `flavor` parameter.

    Parameters
    ----------
    flavor : typing.Optional[builtins.None, builtins.str, typing.Dict[builtins.str, typing.Any]]
        The hint for configuring logging.

        This can be `builtins.None` to not enable logging automatically.

        If you pass a `builtins.str` or a `builtins.int`, it is interpreted as
        the global logging level to use, and should match one of `"DEBUG"`,
        `"INFO"`, `"WARNING"`, `"ERROR"` or `"CRITICAL"`, if `builtins.str`.
        The configuration will be set up to use a `colorlog` coloured logger,
        and to use a sane logging format strategy. The output will be written
        to `sys.stderr` using this configuration.

        If you pass a `builtins.dict`, it is treated as the mapping to pass to
        `logging.config.dictConfig`.
    allow_color : builtins.bool
        If `builtins.False`, no colour is allowed. If `builtins.True`, the
        output device must be supported for this to return `builtins.True`.
    force_color : builtins.bool
        If `builtins.True`, return `builtins.True` always, otherwise only
        return `builtins.True` if the device supports colour output and the
        `allow_color` flag is not `builtins.False`.
    """
    # One observation that has been repeatedly made from seeing beginners writing
    # bots in Python is that most people seem to have no idea what logging is or
    # why it is beneficial to use it. This results in them spending large amounts
    # of time scratching their head wondering why something is not working, staring
    # at a blank screen. If they had enabled logging, they would have immediately
    # known where the issue was. This usually ends up with support servers on Discord
    # being spammed with the same basic questions again and again and again...
    #
    # As part of Hikari's set of opinionated defaults, we turn logging on with
    # a desirable format that is coloured in an effort to draw the user's attention
    # to it, rather than encouraging them to ignore it.

    if len(logging.root.handlers) != 0 or flavor is None:
        # Skip, the user is using something else to configure their logging.
        return

    if isinstance(flavor, dict):
        logging.config.dictConfig(flavor)
        return

    # Apparently this makes logging even more efficient!
    logging.logThreads = False
    logging.logProcesses = False
    if supports_color(allow_color, force_color):
        colorlog.basicConfig(
            level=flavor,
            format=
            "%(log_color)s%(bold)s%(levelname)-1.1s%(thin)s %(asctime)23.23s %(bold)s%(name)s: "
            "%(thin)s%(message)s%(reset)s",
            stream=sys.stderr,
        )
    else:
        logging.basicConfig(
            level=flavor,
            format="%(levelname)-1.1s %(asctime)23.23s %(name)s: %(message)s",
            stream=sys.stderr,
        )

    # DeprecationWarning is disabled by default, but it's useful to have enabled
    warnings.simplefilter("always", DeprecationWarning)
    logging.captureWarnings(True)
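
For the dict flavor, anything accepted by logging.config.dictConfig works; a minimal sketch of such a mapping:

import logging.config

logging.config.dictConfig({
    "version": 1,
    "disable_existing_loggers": False,
    "root": {"level": "INFO"},
})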
Example #21
import pickle
import colorlog, logging
logging.disable(logging.DEBUG)
colorlog.basicConfig(
    filename=None,
    level=logging.NOTSET,
    format="%(log_color)s[%(levelname)s:%(asctime)s]%(reset)s %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S")
from NSC import NSC
from CustomDatset import TrainDataset, EvalDataset
from torch.utils.data import Dataset, DataLoader


class EngineState():
    def __init__(self):
        super(EngineState, self).__init__()
        pass


class Engine():
    def __init__(self, epoch, iteration):
        self.state = EngineState()
        colorlog.info("[Engine Initialized] Epoch {} Iteration {}".format(
            epoch, iteration))
        self.state.epoch = epoch
        self.state.iteration = iteration


class Instructor:
    def __init__(self, args):
        self.args = args
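
Note the interplay above between logging.disable(logging.DEBUG) and the NOTSET level: the module-level disable suppresses DEBUG and below everywhere, regardless of handler levels. A tiny demonstration:

import logging

logging.basicConfig(level=logging.NOTSET)
logging.disable(logging.DEBUG)  # mute DEBUG and everything below it

logging.debug("hidden by the module-level disable")
logging.info("still shown")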
Example #22
#!/usr/bin/env python3

import colorlog
import logging
log = colorlog.getLogger(__name__)
colorlog.basicConfig(level=logging.INFO)

import argparse
from contextlib import contextmanager
import glob
import os
import pprint
import sys

# Or write fallback os.walk code for glob ** usage
assert sys.version_info >= (3,5)

import sh
logging.getLogger('sh').setLevel(logging.WARN)

parser = argparse.ArgumentParser()
parser.add_argument('-X', '--stop', action='store_true',
		help='Stop on first error')
args = parser.parse_args()

# n.b. pushd is coming to sh in v1.10
@contextmanager
def pushd(path):
	cwd = os.getcwd()
	os.chdir(path)
	yield
Example #23
def main():
    """
    Main function, called when vipermonkey is run from the command line
    """

    # Increase recursion stack depth.
    sys.setrecursionlimit(13000)

    # print banner with version
    # Generated with http://www.patorjk.com/software/taag/#p=display&f=Slant&t=ViperMonkey
    print(''' _    ___                 __  ___            __             
| |  / (_)___  ___  _____/  |/  /___  ____  / /_____  __  __
| | / / / __ \/ _ \/ ___/ /|_/ / __ \/ __ \/ //_/ _ \/ / / /
| |/ / / /_/ /  __/ /  / /  / / /_/ / / / / ,< /  __/ /_/ / 
|___/_/ .___/\___/_/  /_/  /_/\____/_/ /_/_/|_|\___/\__, /  
     /_/                                           /____/   ''')
    print('vmonkey %s - https://github.com/decalage2/ViperMonkey' %
          __version__)
    print('THIS IS WORK IN PROGRESS - Check updates regularly!')
    print(
        'Please report any issue at https://github.com/decalage2/ViperMonkey/issues'
    )
    print('')

    DEFAULT_LOG_LEVEL = "info"  # Default log level
    LOG_LEVELS = {
        'debug': logging.DEBUG,
        'info': logging.INFO,
        'warning': logging.WARNING,
        'error': logging.ERROR,
        'critical': logging.CRITICAL
    }

    usage = 'usage: %prog [options] <filename> [filename2 ...]'
    parser = optparse.OptionParser(usage=usage)
    # parser.add_option('-o', '--outfile', dest='outfile',
    #     help='output file')
    # parser.add_option('-c', '--csv', dest='csv',
    #     help='export results to a CSV file')
    parser.add_option("-r",
                      action="store_true",
                      dest="recursive",
                      help='find files recursively in subdirectories.')
    parser.add_option(
        "-z",
        "--zip",
        dest='zip_password',
        type='str',
        default=None,
        help=
        'if the file is a zip archive, open first file from it, using the provided password (requires Python 2.6+)'
    )
    parser.add_option(
        "-f",
        "--zipfname",
        dest='zip_fname',
        type='str',
        default='*',
        help=
        'if the file is a zip archive, file(s) to be opened within the zip. Wildcards * and ? are supported. (default:*)'
    )
    parser.add_option(
        "-e",
        action="store_true",
        dest="scan_expressions",
        help='Extract and evaluate/deobfuscate constant expressions')
    parser.add_option(
        '-l',
        '--loglevel',
        dest="loglevel",
        action="store",
        default=DEFAULT_LOG_LEVEL,
        help=
        "logging level debug/info/warning/error/critical (default=%default)")
    parser.add_option("-a",
                      action="store_true",
                      dest="altparser",
                      help='Use the alternate line parser (experimental)')
    parser.add_option(
        "-s",
        '--strip',
        action="store_true",
        dest="strip_useless_code",
        help='Strip useless VB code from macros prior to parsing.')
    parser.add_option(
        '-i',
        '--init',
        dest="entry_points",
        action="store",
        default=None,
        help=
        "Emulate starting at the given function name(s). Use comma seperated list for multiple entries."
    )
    parser.add_option('-t',
                      '--time-limit',
                      dest="time_limit",
                      action="store",
                      default=None,
                      type='int',
                      help="Time limit (in minutes) for emulation.")

    (options, args) = parser.parse_args()

    # Print help if no arguments are passed
    if len(args) == 0:
        print(__doc__)
        parser.print_help()
        sys.exit()

    # setup logging to the console
    # logging.basicConfig(level=LOG_LEVELS[options.loglevel], format='%(levelname)-8s %(message)s')
    colorlog.basicConfig(level=LOG_LEVELS[options.loglevel],
                         format='%(log_color)s%(levelname)-8s %(message)s')

    for container, filename, data in xglob.iter_files(
            args,
            recursive=options.recursive,
            zip_password=options.zip_password,
            zip_fname=options.zip_fname):
        # ignore directory names stored in zip files:
        if container and filename.endswith('/'):
            continue
        if options.scan_expressions:
            process_file_scanexpr(container, filename, data)
        else:
            entry_points = None
            if (options.entry_points is not None):
                entry_points = options.entry_points.split(",")
            process_file(container,
                         filename,
                         data,
                         altparser=options.altparser,
                         strip_useless=options.strip_useless_code,
                         entry_points=entry_points,
                         time_limit=options.time_limit)
Example #24
    parser.add_argument("--namespace",
                        "-n",
                        help="The namespace name you want to interact with")
    parser.add_argument("-i",
                        "--init",
                        action="store_true",
                        help="Force initialization logic")
    parser.add_argument(
        "--dev",
        action="store_true",
        help="Mount host's aladdin directory onto aladdin container")
    parser.add_argument(
        "--image",
        help="Use the specified aladdin image (if building it yourself)")
    parser.add_argument(
        "--skip-prompts",
        action="store_true",
        help="Skip confirmation prompts during command execution",
    )
    parser.add_argument("--non-terminal",
                        action="store_true",
                        help="Run aladdin container without tty")

    # Initialize logging across python
    colorlog.basicConfig(format="%(log_color)s%(levelname)s:%(message)s",
                         level=logging.INFO)
    logging.getLogger("botocore").setLevel(logging.WARNING)

    args = parser.parse_args()
    args.func(args)
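
Silencing chatty third-party loggers right after basicConfig, as done for botocore here, generalizes; a compact sketch:

import logging

import colorlog

colorlog.basicConfig(format="%(log_color)s%(levelname)s:%(message)s",
                     level=logging.INFO)
for noisy in ("botocore", "urllib3"):
    logging.getLogger(noisy).setLevel(logging.WARNING)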
Example #25
import logging, time, os, fileinput
from jinja2 import Template
from config import *
import colorlog

colorlog.basicConfig(level=logging.DEBUG)
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s %(levelname)s %(message)s',
                    datefmt='%H:%M:%S')
lenetcd = len(etcd)
lst = []
for m in range(lenetcd):
    lst.append("etcd-{1}=https://{0}:2380".format(etcd[m], m))


def etcd_init():
    os.system('rm -rf tmp/etcd*')
    for x in range(lenetcd):
        os.mkdir('tmp/etcd-{0}'.format(x))
        path = 'tmp/etcd-{0}'.format(x)
        f = open(str(path) + '/kubeadmcfg.yml', "w")
        template = Template(""" 
    apiVersion: kubeadm.k8s.io/v1beta2
    kind: ClusterConfiguration
    kubernetesVersion: 1.17.3
    etcd:
        local:
            serverCertSANs:
            - "{{etcd[x]}}"
            peerCertSANs:
            - "{{etcd[x]}}"
Example #26
import sys
import xmltodict
import subprocess
import logging
import colorlog
'''
set up logging (this code is useless)
log.debug is white, info is green, warn is yellow, error is red, critical is red!
'''
LOG_LEVEL = logging.NOTSET
LOGFORMAT = "[%(log_color)s%(levelname)s] [%(log_color)s%(asctime)s] %(log_color)s%(filename)s [line:%(log_color)s%(lineno)d] : %(log_color)s%(message)s%(reset)s"
logging.root.setLevel(LOG_LEVEL)
colorlog.basicConfig(
    format=
    '%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
    filename='myapp.log',
    filemode='w',
    datefmt='%a, %d %b %Y %H:%M:%S',
)
formatter = colorlog.ColoredFormatter(LOGFORMAT)
stream = logging.StreamHandler()
stream.setLevel(LOG_LEVEL)
stream.setFormatter(formatter)
log = logging.getLogger()
log.setLevel(LOG_LEVEL)
log.addHandler(stream)

default_encodeing = 'utf-8'
scan_dict = {}

ipdata = sys.argv[1]