Example #1
 def setUp(self):
     """
     Create a temporary working directory and a virtual environment where
     pip-accel can be tested in isolation (starting with an empty download
     cache, source index and binary index and no installed modules) and make
     sure pip and pip-accel use the directory. Also creates the directories
     for the download cache, the source index and the binary index (normally
     this is done from pip_accel.main).
     """
     coloredlogs.install(level=logging.DEBUG)
     # Create a temporary working directory.
     self.working_directory = tempfile.mkdtemp()
     self.download_cache = os.path.join(self.working_directory, 'download-cache')
     # Create a temporary build directory.
     self.build_directory = os.path.join(self.working_directory, 'build')
     # Create a temporary virtual environment.
     self.virtual_environment = os.path.join(self.working_directory, 'environment')
     python = 'python%i.%i' % (sys.version_info[0], sys.version_info[1])
     assert os.system('virtualenv --python=%s %s' % (pipes.quote(python), pipes.quote(self.virtual_environment))) == 0
     # Make sure pip-accel uses the pip in the temporary virtual environment.
     os.environ['PATH'] = '%s:%s' % (os.path.join(self.virtual_environment, 'bin'), os.environ['PATH'])
     os.environ['VIRTUAL_ENV'] = self.virtual_environment
     # Make pip and pip-accel use the temporary working directory.
     os.environ['PIP_DOWNLOAD_CACHE'] = self.download_cache
     os.environ['PIP_ACCEL_CACHE'] = self.working_directory
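The matching tearDown() is not shown here; a minimal sketch, assuming the attributes set above and that shutil is imported at module level, could simply discard the whole temporary working directory:

 def tearDown(self):
     """Clean up after setUp() (illustrative sketch, not part of the original test case)."""
     # Removing the working directory also removes the virtual environment,
     # the build directory and the download cache created in setUp().
     shutil.rmtree(self.working_directory, ignore_errors=True)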
Example #2
def _configure_logging():
    """
    Configure Python logging using KWIVER / SPROKIT environment variables.

    SeeAlso:
        kwiver/vital/logger: logic for the vital logger
    """
    # Use the C++ logging level by default, but allow python to be different
    cxx_level = os.environ.get('KWIVER_DEFAULT_LOG_LEVEL', 'DEBUG')
    py_level = os.environ.get('KWIVER_PYTHON_DEFAULT_LOG_LEVEL', cxx_level)

    # Option to colorize the python logs (must pip install coloredlogs)
    truthy_values = {'true', 'on', 'yes', '1'}
    use_color_env = os.environ.get('KWIVER_PYTHON_COLOREDLOGS', 'false')

    # Default options
    use_color = use_color_env.strip().lower() in truthy_values
    level = getattr(logging, py_level.upper())
    # Match KWIVER's log prefix: date time level file(lineno)
    logfmt = '%(asctime)s.%(msecs)03d %(levelname)s %(name)s(%(lineno)d): %(message)s'
    datefmt = '%Y-%m-%d %H:%M:%S'

    # Maybe use file based configs in the future?

    if use_color:
        import coloredlogs
        # The colorscheme can be controlled by several environment variables
        # https://coloredlogs.readthedocs.io/en/latest/#environment-variables
        coloredlogs.install(level=level, fmt=logfmt, datefmt=datefmt)
    else:
        logging.basicConfig(format=logfmt, level=level, datefmt=datefmt)
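A minimal usage sketch: the environment variables are read when _configure_logging() is called, so they can be set beforehand from the shell or from Python. The values below are only an illustration.

import os

# Hypothetical driver: ask for INFO-level Python logging and colored output.
os.environ['KWIVER_PYTHON_DEFAULT_LOG_LEVEL'] = 'INFO'
os.environ['KWIVER_PYTHON_COLOREDLOGS'] = 'yes'  # any of: true, on, yes, 1
_configure_logging()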
Example #3
def main():
    coloredlogs.install(level=logging.INFO, show_hostname=False)

    publisher = zmq_context.socket(zmq.PUB)
    publisher.bind('tcp://0.0.0.0:8888')

    poller = zmq.Poller()
    poller.register(publisher, zmq.POLLOUT)
    logger.info("listening on tcp://0.0.0.0:8888")

    MAX = 200
    index = 0
    while True:
        if index > MAX:
            url = 'stop'
        else:
            url = generate_name()
            index += 1

        try:
            socks = dict(poller.poll(0.3))

            if publisher in socks and socks[publisher] == zmq.POLLOUT:
                logger.info("sending data %s", url)
                publisher.send(b" ".join([b'status', url.encode('utf-8')]))
                if url == 'stop':
                    break
                else:
                    time.sleep(.13)
        except KeyboardInterrupt:
            publisher.close()
            zmq_context.term()
            raise SystemExit
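generate_name() is defined elsewhere in the original project; a purely illustrative stand-in that produces random lowercase tokens could be:

import random
import string

def generate_name(length=12):
    """Return a random lowercase token (hypothetical stand-in for the real helper)."""
    return ''.join(random.choice(string.ascii_lowercase) for _ in range(length))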
Example #4
def main():
    """The command line interface for the ``pip-accel`` program."""
    arguments = sys.argv[1:]
    # If no arguments are given, the help text of pip-accel is printed.
    if not arguments:
        usage()
        sys.exit(0)
    # If no install subcommand is given we pass the command line straight
    # to pip without any changes and exit immediately afterwards.
    if 'install' not in arguments:
        # This will not return.
        os.execvp('pip', ['pip'] + arguments)
    else:
        arguments = [arg for arg in arguments if arg != 'install']
    # Initialize logging output.
    coloredlogs.install()
    # Adjust verbosity based on -v, -q, --verbose, --quiet options.
    for argument in list(arguments):
        if match_option(argument, '-v', '--verbose'):
            coloredlogs.increase_verbosity()
        elif match_option(argument, '-q', '--quiet'):
            coloredlogs.decrease_verbosity()
    # Perform the requested action(s).
    try:
        accelerator = PipAccelerator(Config())
        accelerator.install_from_arguments(arguments)
    except Exception:
        logger.exception("Caught unhandled exception!")
        sys.exit(1)
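match_option() is a helper defined elsewhere in pip-accel; a simplified sketch of the check it performs (the real helper may also handle stacked short options such as -vv):

def match_option(argument, short_option, long_option):
    """Return True if `argument` equals the short or the long option name (simplified sketch)."""
    return argument in (short_option, long_option)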
Example #5
def main():
  import argparse
  import coloredlogs

  parser = argparse.ArgumentParser(
      formatter_class=argparse.ArgumentDefaultsHelpFormatter)
  parser.add_argument("--config", "-c", help="file to load configuration from",
                      default="merc.conf")
  parser.add_argument("--verbose", "-v", help="enable verbose (debug) logging",
                      action="store_true", default=False)

  args = parser.parse_args()

  coloredlogs.install(level=logging.DEBUG if args.verbose else logging.INFO)
  logging.getLogger("asyncio").setLevel(logging.WARN)

  try:
    app = Application(args.config)
    app.start()
  except config.ParseError as e:
    logger.fatal('Could not load configuration file, aborting.')
    logger.fatal(e)
  except Exception as e:
    logger.fatal('Could not initialize merc, aborting.')
    logger.fatal(e)
Example #6
def setUpModule():
    """
    Prepare the test suite.

    This function does two things:

    1. Sets up verbose logging to the terminal. When a test fails the logging
       output can help to perform a post-mortem analysis of the failure in
       question (even when it's hard to reproduce locally). This is especially
       useful when debugging remote test failures, whether they happened on
       Travis CI or a user's local system.

    2. Creates temporary directories where the pip download cache and the
       pip-accel binary cache are located. Isolating the pip-accel binary cache
       from the user's system is meant to ensure that the tests are as
       independent from the user's system as possible. The function
       :py:func:`tearDownModule` is responsible for cleaning up the temporary
       directory after the test suite finishes.
    """
    # Initialize verbose logging to the terminal.
    coloredlogs.install()
    coloredlogs.increase_verbosity()
    # Create temporary directories to store the pip download cache and
    # pip-accel's binary cache, to make sure these tests run isolated from the
    # rest of the system.
    os.environ['PIP_DOWNLOAD_CACHE'] = create_temporary_directory()
    os.environ['PIP_ACCEL_CACHE'] = create_temporary_directory()
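create_temporary_directory() is a helper from the same test suite; a plausible sketch, assuming tearDownModule() later removes everything that was registered here:

import tempfile

TEMPORARY_DIRECTORIES = []

def create_temporary_directory():
    """Create a temporary directory and remember it for cleanup (illustrative sketch)."""
    directory = tempfile.mkdtemp()
    TEMPORARY_DIRECTORIES.append(directory)
    return directory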
Example #7
 def test_plain_text_output_format(self):
     """Inspect the plain text output of coloredlogs."""
     logger = VerboseLogger(random_string(25))
     stream = StringIO()
     install(level=logging.NOTSET, logger=logger, stream=stream)
     # Test that filtering on severity works.
     logger.setLevel(logging.INFO)
     logger.debug("No one should see this message.")
     assert len(stream.getvalue().strip()) == 0
     # Test that the default output format looks okay in plain text.
     logger.setLevel(logging.NOTSET)
     for method, severity in ((logger.debug, 'DEBUG'),
                              (logger.info, 'INFO'),
                              (logger.verbose, 'VERBOSE'),
                              (logger.warning, 'WARNING'),
                              (logger.error, 'ERROR'),
                              (logger.critical, 'CRITICAL')):
         # Prepare the text.
         text = "This is a message with severity %r." % severity.lower()
         # Log the message with the given severity.
         method(text)
         # Get the line of output generated by the handler.
         output = stream.getvalue()
         lines = output.splitlines()
         last_line = lines[-1]
         assert text in last_line
         assert severity in last_line
         assert PLAIN_TEXT_PATTERN.match(last_line)
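PLAIN_TEXT_PATTERN is defined elsewhere in the coloredlogs test suite; as an assumption about the default "timestamp hostname name[pid] LEVEL message" layout, a rough approximation could look like:

import re

# Rough approximation (assumption) of the default plain text format emitted by coloredlogs.
PLAIN_TEXT_PATTERN = re.compile(
    r'\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}'  # date and time
    r' \S+'                                  # hostname
    r' \S+\[\d+\]'                           # logger name and process id
    r' [A-Z]+'                               # severity
    r' .*'                                   # message text
)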
Example #8
    def pre_run(self):
        # Hack around http://bugs.python.org/issue9253 ?
        if "--version" in sys.argv:
            import pkg_resources
            version = pkg_resources.get_distribution("elasticluster").version
            print("elasticluster version %s" % version)
            sys.exit(0)

        cli.app.CommandLineApp.pre_run(self)

        # print *all* Python warnings through the logging subsystem
        warnings.resetwarnings()
        warnings.simplefilter('once')
        utils.redirect_warnings(logger='gc3.elasticluster')

        # Set verbosity level
        loglevel = max(1, logging.WARNING - 10 * max(0, self.params.verbose))
        coloredlogs.install(logger=log, level=loglevel)

        # In debug mode, avoid forking
        if self.params.verbose > 3:
            log.DO_NOT_FORK = True
            log.raiseExceptions = True

        if not os.path.isdir(self.params.storage):
            # We do not create *all* the parents, but we do create the
            # directory if we can.
            try:
                os.makedirs(self.params.storage)
            except OSError as ex:
                sys.stderr.write("Unable to create storage directory: "
                                 "%s\n" % (str(ex)))
                sys.exit(1)

        # If no configuration file was specified and the default does not exist...
        if not os.path.isfile(self.params.config):
            if self.params.config == self.default_configuration_file:
                # Copy the default configuration file to the user's home
                if not os.path.exists(os.path.dirname(self.params.config)):
                    os.mkdir(os.path.dirname(self.params.config))
                template = resource_filename(
                    'elasticluster', 'share/etc/config.template')
                log.warning("Deploying default configuration file to %s.",
                            self.params.config)
                shutil.copyfile(template, self.params.config)
            else:
                # Exit if the supplied configuration file does not exist.
                if not os.path.isfile(self.params.config):
                    sys.stderr.write(
                        "Unable to read configuration file `%s`.\n" %
                        self.params.config)
                    sys.exit(1)

        assert self.params.func, "No subcommand defined in `ElastiCluster.setup()`"
        try:
            self.params.func.pre_run()
        except (RuntimeError, ConfigurationError) as ex:
            sys.stderr.write(str(ex).strip())
            sys.stderr.write('\n')
            sys.exit(1)
Example #9
def create_app() -> Callable[[Any, Any], Any]:
    ''' Create a WSGI compatible App object. '''
    validate_config()
    coloredlogs.install(fmt='[%(asctime)-15s] %(name)s %(message)s')
    logging.getLogger('elasticsearch').disabled = True
    if config.config['debug']:
        logging.getLogger('szurubooru').setLevel(logging.INFO)
    if config.config['show_sql']:
        logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)

    purge_thread = threading.Thread(target=purge_old_uploads)
    purge_thread.daemon = True
    purge_thread.start()

    try:
        posts.populate_reverse_search()
        db.session.commit()
    except errors.ThirdPartyError:
        pass

    rest.errors.handle(errors.AuthError, _on_auth_error)
    rest.errors.handle(errors.ValidationError, _on_validation_error)
    rest.errors.handle(errors.SearchError, _on_search_error)
    rest.errors.handle(errors.IntegrityError, _on_integrity_error)
    rest.errors.handle(errors.NotFoundError, _on_not_found_error)
    rest.errors.handle(errors.ProcessingError, _on_processing_error)
    rest.errors.handle(errors.ThirdPartyError, _on_third_party_error)
    rest.errors.handle(sa.orm.exc.StaleDataError, _on_stale_data_error)

    return rest.application
Example #10
 def setUp(self):
     """Enable logging to the terminal and prepare a temporary package cache."""
     coloredlogs.install()
     coloredlogs.set_level(logging.DEBUG)
     self.db_directory = tempfile.mkdtemp()
     self.load_package_cache()
     os.environ['DPT_FORCE_ENTROPY'] = 'yes'
Example #11
    def __init__(self, workspace=None):
        """
        Initialize the Validator.
        A workspace may be provided for an easy parameter configuration,
        such as location and extension of descriptors, verbosity level, etc.
        :param workspace: SONATA workspace object
        """
        self._workspace = workspace
        self._syntax = True
        self._integrity = True
        self._topology = True

        # create "virtual" workspace if not provided (don't actually create
        # file structure)
        if not self._workspace:
            self._workspace = Workspace('.', log_level='info')

        # load configurations from workspace
        self._dext = self._workspace.default_descriptor_extension
        self._dpath = '.'
        self._log_level = self._workspace.log_level

        # configure logs
        coloredlogs.install(level=self._log_level)

        # descriptors storage
        self._storage = DescriptorStorage()

        # syntax validation
        self._schema_validator = SchemaValidator(self._workspace)

        # wrapper to count number of errors and warnings
        log.error = CountCalls(log.error)
        log.warning = CountCalls(log.warning)
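CountCalls wraps log.error and log.warning so the validator can count how many errors and warnings occurred; a minimal sketch of such a wrapper:

class CountCalls(object):
    """Callable proxy that counts invocations of the wrapped function (illustrative sketch)."""

    def __init__(self, func):
        self.func = func
        self.counter = 0

    def __call__(self, *args, **kwargs):
        self.counter += 1
        return self.func(*args, **kwargs)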
Example #12
def cli(debug):
    log_level = logging.INFO
    requests_cache.install_cache('fr_cache', expire_after=60*60*24*3)  # 3 days
    if debug:
        log_level = logging.DEBUG
        sys.excepthook = lambda t, v, tb: ipdb.post_mortem(tb)
    coloredlogs.install(level=log_level, fmt="%(levelname)s %(message)s")
Example #13
    def set(self, debug=None, info=None, timings=None, dry=None, quiet=None, verbosity=None, colors=None, log=None):
        self.log = log if log is not None else os.getenv(MB_LOG, str(DEFAULT_LOG))
        self.quiet = quiet if quiet is not None else lib.str2bool(os.getenv(MB_QUIET, str(DEFAULT_QUIET)))
        self.debug = debug if debug is not None else lib.str2bool(os.getenv(MB_DEBUG, str(DEFAULT_DEBUG)))
        self.info = info if info is not None else lib.str2bool(os.getenv(MB_INFO, str(DEFAULT_INFO)))
        self.timings = timings if timings is not None else lib.str2bool(os.getenv(MB_TIMINGS, str(DEFAULT_TIMINGS)))
        self.dry = dry if dry is not None else lib.str2bool(os.getenv(MB_DRY, str(DEFAULT_DRY)))
        self.colors = colors if colors is not None else lib.str2bool(os.getenv(MB_COLORS, str(DEFAULT_COLORS)))
        self.verbosity = verbosity if verbosity is not None else os.getenv(MB_VERBOSITY, DEFAULT_VERBOSITY)

        if self.timings or self.info:
            self.verbosity = 'info'
            self.quiet = True
        if self.debug:
            self.verbosity = 'debug'
            self.quiet = True

        self.level = verbosities[self.verbosity]
        logging.basicConfig(level=self.level, format=self.fmt)
        if self.colors:
            coloredlogs.install(level=self.level, fmt=self.fmt)

        if self.log is not None:
            if check_file_writable(self.log):
                fh = logging.FileHandler(self.log)
                fh.setLevel(logging.DEBUG)
                logging.getLogger().addHandler(fh)
            else:
                logger.warning('No permission to write to %s for current user %s', self.log, current_user)
        logger.debug(self)
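check_file_writable() and current_user come from elsewhere in the project; a reasonable sketch of the writability check (an assumption, not the project's implementation):

import os

def check_file_writable(path):
    """Return True if `path` (or its parent directory, for a new file) is writable (sketch)."""
    if os.path.exists(path):
        return os.path.isfile(path) and os.access(path, os.W_OK)
    parent = os.path.dirname(path) or '.'
    return os.access(parent, os.W_OK)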
Example #14
 def configure(self, syntax=None, integrity=None, topology=None,
               dpath=None, dext=None, debug=False):
     """
     Configure parameters for validation. It is recommended to call this
     function before performing a validation.
     :param syntax: specifies whether to validate syntax
     :param integrity: specifies whether to validate integrity
     :param topology: specifies whether to validate network topology
     :param dpath: directory to search for function descriptors (VNFDs)
     :param dext: extension of descriptor files (default: 'yml')
     :param debug: increase verbosity level of logger
     """
     # assign parameters
     if syntax is not None:
         self._syntax = syntax
     if integrity is not None:
         self._integrity = integrity
     if topology is not None:
         self._topology = topology
     if dext is not None:
         self._dext = dext
     if dpath is not None:
         self._dpath = dpath
     if debug:
         coloredlogs.install(level='debug')
Example #15
def query_logger():
    if pytest.config.option.verbose > 0:
        import logging
        import coloredlogs
        coloredlogs.install(fmt='[%(asctime)-15s] %(name)s %(message)s', isatty=True)
        logging.basicConfig()
        logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)
Example #16
def main():
    """The command line interface for the ``pip-accel`` program."""
    arguments = sys.argv[1:]
    # If no arguments are given, the help text of pip-accel is printed.
    if not arguments:
        usage()
        sys.exit(0)
    # If no install subcommand is given we pass the command line straight
    # to pip without any changes and exit immediately afterwards.
    if 'install' not in arguments:
        # This will not return.
        os.execvp('pip', ['pip'] + arguments)
    else:
        arguments = [arg for arg in arguments if arg != 'install']
    # Initialize logging output.
    coloredlogs.install()
    # Adjust verbosity based on -v, -q, --verbose, --quiet options.
    for argument in list(arguments):
        if match_option(argument, '-v', '--verbose'):
            coloredlogs.increase_verbosity()
        elif match_option(argument, '-q', '--quiet'):
            coloredlogs.decrease_verbosity()
    # Perform the requested action(s).
    try:
        accelerator = PipAccelerator(Config())
        accelerator.install_from_arguments(arguments)
    except NothingToDoError as e:
        # Don't print a traceback for this (it's not very user friendly) and
        # exit with status zero to stay compatible with pip. For more details
        # please refer to https://github.com/paylogic/pip-accel/issues/47.
        logger.warning("%s", e)
        sys.exit(0)
    except Exception:
        logger.exception("Caught unhandled exception!")
        sys.exit(1)
Example #17
 def test_program_name_filter(self):
     """Make sure :func:`install()` integrates with :class:`~coloredlogs.ProgramNameFilter()`."""
     install(fmt='%(programname)s')
     with CaptureOutput() as capturer:
         logging.info("A truly insignificant message ..")
         output = capturer.get_text()
         assert find_program_name() in output
Example #18
def main():
    description = """This program takes as input a set of OSSOS measurements and adjusts the astrometric and photometric
    entries to be consistent with the current best estimate for the astrometric and photometric calibrations.
    """
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('ast_file', help="An MPC file to update.")
    parser.add_argument('--discovery', help="Only process the discovery images.", action='store_true', default=False)
    parser.add_argument('--result_base_name', help="base name for remeasurement results (defaults to basename of input)",
                        default=None)
    parser.add_argument('--skip-mags', action="store_true", help="Do not recompute magnitudes.", default=False)
    parser.add_argument('--skip-centroids', action="store_true", help="Do not recompute centroids.", default=False)
    parser.add_argument('--compare-orbits', action='store_true', help="Compute/Compare pre and post remeasure orbits?", default=False)
    parser.add_argument('--debug', action='store_true')

    args = parser.parse_args()

    level = logging.INFO
    if args.debug:
        level = logging.DEBUG

    import coloredlogs
    logger = logging.getLogger('update_astrom')
    coloredlogs.install(level=level)

    if args.result_base_name is None:
        base_name = os.path.splitext(os.path.basename(args.ast_file))[0]
    else:
        base_name = args.result_base_name
    run(args.ast_file, base_name, 
        skip_discovery=not args.discovery, 
        skip_mags=args.skip_mags, 
        skip_centroids=args.skip_centroids, 
        compare_orbits=args.compare_orbits)
Example #19
def app_main():
    args = docopt.docopt(__doc__, version=__version__)

    debug = args["-d"]

    if debug:
        log_level = logging.DEBUG
    else:
        log_level = [logging.ERROR, logging.INFO, logging.DEBUG][min(args["-v"], 2)]

    try:
        import coloredlogs

        coloredlogs.install(
            level=log_level, stream=stderr, datefmt=DATEFMT, fmt=LOGFMT
        )
    except ImportError:
        _LOGGER.debug("no colored logs. pip install coloredlogs?")
        logging.basicConfig(
            level=log_level, stream=stderr, datefmt=DATEFMT, format=LOGFMT
        )

    logging.captureWarnings(debug)

    if debug:
        _LOGGER.info("Debug is on")

    try:
        asyncio.run(main(args), debug=debug)  # pylint: disable=no-member
    except KeyboardInterrupt:
        exit()
Example #20
def configure_logging(args):
    level = logging.DEBUG if args['--debug'] else logging.ERROR
    coloredlogs.install(
        level=level,
        show_timestamps=False,
        show_hostname=False,
    )
Example #21
def setUpModule():
    """
    Prepare the test suite.

    Sets up logging to the terminal. When a test fails the logging output can
    help to perform a post-mortem analysis of the failure in question (even
    when it's hard to reproduce locally). This is especially useful when
    debugging remote test failures, whether they happened on Travis CI or a
    user's local system.

    Also makes sure that the Apache web server is installed and running because
    this is required to run the test suite.
    """
    # Set up logging to the terminal.
    coloredlogs.install(level=logging.DEBUG)
    # Make sure Apache is installed and configured.
    try:
        manager = ApacheManager()
        manager.fetch_status_page(manager.text_status_url)
    except Exception as e:
        logger.exception("Failed to connect to local Apache server!")
        raise Exception(compact("""
            Please make sure the Apache web server is installed and configured
            (running) before you run this test suite because this test suite
            tests the actual integration with Apache (it doesn't use mocking)
            and so requires Apache to be installed, configured and running.

            Swallowed exception: {message} ({type})
        """, message=e, type=type(e)))
Example #22
 def __init__(self):
     # make sure the config file has correct permissions (0600)
     self._check_config_chmod()
     coloredlogs.install(show_hostname=False, show_name=False, show_severity=False)
     logging.info('Reading configuration..')
     self.config = ConfigParser.ConfigParser(allow_no_value=True)
     try:
         self.config.readfp(open(os.path.dirname(os.path.realpath(__file__)) + '/CloneRow.cfg'))
     except IOError:
         logging.error('You have not setup a CloneRow.cfg file for your requirements')
         logging.info('take a look at CloneRow.example.cfg')
         logging.info('https://github.com/lathonez/mysql-clone-row#configuration')
         sys.exit(3)
     self.source = {
         'alias': None,
         'connection': None,
         'db_name': None,
         'row': {}
     }
     self.target = {
         'alias': None,
         'backup': None,
         'connection': None,
         'db_name': None,
         'new_insert': False,
         'row': {}
     }
     self.database = {
         'table': None,
         'column': None,
         'filter': None,
         'ignore_columns': [],
         'deltas': {}
     }
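_check_config_chmod() is referenced but not shown; a hypothetical version that enforces the 0600 permissions mentioned in the comment could look like this (written as a method of the same class):

 def _check_config_chmod(self):
     """Refuse to run if CloneRow.cfg is accessible by group/other (illustrative sketch)."""
     # Assumes `import os, stat, sys, logging` at module level, like the rest of the class.
     config_path = os.path.dirname(os.path.realpath(__file__)) + '/CloneRow.cfg'
     if not os.path.isfile(config_path):
         return  # a missing file is reported later when the configuration is read
     mode = stat.S_IMODE(os.stat(config_path).st_mode)
     if mode & (stat.S_IRWXG | stat.S_IRWXO):
         logging.error('CloneRow.cfg must only be readable/writable by its owner (chmod 0600)')
         sys.exit(3)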
Example #23
File: main.py  Project: haum/hms_irc
def main():
    """Entry point of the program."""

    # Logging
    coloredlogs.install(level='INFO')

    # Connect to Rabbit
    rabbit = Client('hms_irc', settings.RABBIT_EXCHANGE,
                    settings.RABBIT_ROUTING_KEYS)

    rabbit.connect(settings.RABBIT_HOST)

    rabbit_thread = Thread(target=rabbit.start_consuming)
    rabbit_thread.setDaemon(True)  # To kill the thread when main is gone

    # IRC bot settings
    bot = MyBot(settings.IRC_CHAN, settings.IRC_NAME, settings.IRC_SERVER)
    bot.rabbit = rabbit

    # Add callbacks
    rabbit.listeners.append(bot.handle_rabbit_msg)

    def chan_joined():
        """Callback that will start the RabbitMQ receive thread."""
        if not rabbit_thread.is_alive():
            get_logger().info('Starting RabbitMQ consume thread...')
            rabbit_thread.start()
        else:
            get_logger().warning("Chan joined but RabbitMQ thread is alive")

    bot.join_callback = chan_joined

    # Create restart thread
    def restart_periodic():
        while True:
            time.sleep(3600)
            bot.reconnect_if_disconnected()

    restart_thread = Thread(target=restart_periodic)
    restart_thread.setDaemon(True)
    restart_thread.start()

    # Start IRC thread that will start Rabbit thread using callback
    try:
        get_logger().info('Starting IRC bot...')
        bot.start()
    except KeyboardInterrupt:
        get_logger().critical("Got a KeyboardInterrupt")
        get_logger().info("Disconnecting from Rabbit")

        # Graceful RabbitMQ shutdown attempt
        rabbit.stop_consuming()
        rabbit.disconnect()

        # Graceful IRC shutdown
        get_logger().info("Disconnecting from IRC")
        bot.die(msg="got a KeyboardInterrupt in my face! >_<")

        # Exit and kill daemon thread
        sys.exit(0)
Example #24
 def __init__(self, prog, plugin):
     os.chdir(os.path.dirname(__file__))
     # parse input arguments
     parser = argparse.ArgumentParser(
         prog=__file__,
         description="Task runner for SCAPL plugin {}.".format(plugin),
         formatter_class=argparse.RawTextHelpFormatter,
     )
     parser.add_argument("-c", "--config", dest="config", help="task configuration dictionary")
     parser.add_argument("-p", "--param", dest="param", help="plugin's specific parameters")
     parser.add_argument("-t", "--task", dest="task", help="task identifier for logging purpose")
     parser.add_argument(
         "-v", dest="verbose", action="count", default=0, help="verbose level [default: 0 (critical)]"
     )
     args = parser.parse_args()
     args.config, args.param = eval(args.config), eval(args.param)
     # configure logging and get the root logger
     args.verbose = args.config["LOG_LEVEL_MAPPING"][min(max(args.config["LOG_LEVEL_MAPPING"].keys()), args.verbose)]
     logging.basicConfig(format="%(name)s - %(asctime)s [%(levelname)s] %(message)s", level=args.verbose)
     self.logger = logging.getLogger(args.task)
     if colored_logs_present:
         coloredlogs.install(args.verbose)
     # set arguments as attributes
     for arg in vars(args):
         setattr(self, arg, getattr(args, arg))
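LOG_LEVEL_MAPPING is expected in the task configuration dictionary and maps the count of -v flags to a logging level; a typical mapping, given here as an assumption about that configuration, would be:

import logging

# Hypothetical example of the LOG_LEVEL_MAPPING entry expected in args.config.
LOG_LEVEL_MAPPING = {
    0: logging.CRITICAL,  # default: only critical messages
    1: logging.ERROR,
    2: logging.WARNING,
    3: logging.INFO,
    4: logging.DEBUG,     # -vvvv and beyond: full debug output
}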
Example #25
def main():
    """Command line interface for the ``dwim`` program."""
    from dwim import DEFAULT_PROFILE, dwim
    # Initialize logging to the terminal.
    coloredlogs.install()
    # Define the command line option defaults.
    profile_script = DEFAULT_PROFILE
    # Parse the command line arguments.
    try:
        options, _ = getopt.getopt(sys.argv[1:], 'c:vqh', [
            'config=', 'verbose', 'quiet', 'help',
        ])
        for option, value in options:
            if option in ('-c', '--config'):
                profile_script = value
            elif option in ('-v', '--verbose'):
                coloredlogs.increase_verbosity()
            elif option in ('-q', '--quiet'):
                coloredlogs.decrease_verbosity()
            elif option in ('-h', '--help'):
                usage(__doc__)
                sys.exit(0)
    except Exception as e:
        warning("Error: Failed to parse command line arguments! (%s)", e)
        sys.exit(1)
    # Execute the requested action(s).
    try:
        dwim(profile_script)
    except Exception:
        logger.exception("Caught a fatal exception! Terminating ..")
        sys.exit(1)
Example #26
def setupLogger():
    # Define the format up front so both branches (file + console, or console only) can use it.
    fmt = '%(asctime)s [%(levelname)s] %(message)s'
    date_fmt = '%Y-%m-%d %H:%M'

    if settings.logfile is not None:
        try:
            consoleFormatter = coloredlogs.ColoredFormatter(fmt, date_fmt)
        except NameError:
            consoleFormatter = logging.Formatter(fmt, date_fmt)
        consoleHandler = logging.StreamHandler()
        consoleHandler.setLevel(settings.loglevel_stderr)
        consoleHandler.setFormatter(consoleFormatter)

        fileFormatter = logging.Formatter(fmt, date_fmt)
        fileHandler = logging.FileHandler(settings.logfile)
        fileHandler.setLevel(settings.loglevel_file)
        fileHandler.setFormatter(fileFormatter)

        logging.getLogger().setLevel(0)
        logging.getLogger().addHandler(fileHandler)
        logging.getLogger().addHandler(consoleHandler)
    else:
        try:
            coloredlogs.install(fmt=fmt, datefmt=date_fmt, level=settings.loglevel_stderr)
        except NameError:
            logging.basicConfig(format=fmt, datefmt=date_fmt, level=settings.loglevel_stderr)
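The NameError fallbacks above only work if coloredlogs was imported conditionally at module level; a sketch of the guarded import this function appears to rely on:

# Module-level guarded import (assumed): when coloredlogs is missing the name stays
# undefined, so setupLogger() falls back to plain logging via the NameError handlers.
try:
    import coloredlogs
except ImportError:
    pass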
Example #27
def main():
    """Command line interface for the ``negotiator-guest`` program."""
    # Initialize logging to the terminal and system log.
    coloredlogs.install(syslog=True)
    # Parse the command line arguments.
    list_commands = False
    execute_command = None
    start_daemon = False
    timeout = DEFAULT_TIMEOUT
    character_device = None
    try:
        options, arguments = getopt.getopt(sys.argv[1:], 'le:dt:c:vqh', [
            'list-commands', 'execute=', 'daemon', 'timeout=',
            'character-device=', 'verbose', 'quiet', 'help'
        ])
        for option, value in options:
            if option in ('-l', '--list-commands'):
                list_commands = True
            elif option in ('-e', '--execute'):
                execute_command = value
            elif option in ('-d', '--daemon'):
                start_daemon = True
            elif option in ('-t', '--timeout'):
                timeout = int(value)
            elif option in ('-c', '--character-device'):
                character_device = value
            elif option in ('-v', '--verbose'):
                coloredlogs.increase_verbosity()
            elif option in ('-q', '--quiet'):
                coloredlogs.decrease_verbosity()
            elif option in ('-h', '--help'):
                usage(__doc__)
                sys.exit(0)
        if not (list_commands or execute_command or start_daemon):
            usage(__doc__)
            sys.exit(0)
    except Exception:
        warning("Error: Failed to parse command line arguments!")
        sys.exit(1)
    # Start the guest daemon.
    try:
        if not character_device:
            channel_name = HOST_TO_GUEST_CHANNEL_NAME if start_daemon else GUEST_TO_HOST_CHANNEL_NAME
            character_device = find_character_device(channel_name)
        ga = GuestAgent(character_device)
        if start_daemon:
            ga.enter_main_loop()
        elif list_commands:
            with TimeOut(timeout):
                print('\n'.join(ga.call_remote_method('list_commands')))
        elif execute_command:
            with TimeOut(timeout):
                timer = Timer()
                output = ga.call_remote_method('execute', *shlex.split(execute_command), capture=True)
                logger.debug("Took %s to execute remote command.", timer)
                print(output.rstrip())
    except Exception:
        logger.exception("Caught a fatal exception! Terminating ..")
        sys.exit(1)
Example #28
 def test_colorama_enabled(self):
     """Test that colorama is enabled (through mocking)."""
     init_function = MagicMock()
     with mocked_colorama_module(init_function):
         # Configure logging to the terminal.
         coloredlogs.install()
         # Ensure that our mock method was called.
         assert init_function.called
Example #29
def main(log_level):
    """
    Pilot Jenkins from the command line.
    """
    coloredlogs.install(
        fmt='%(levelname)s %(message)s',
        level=log_level
    )
Example #30
def main():
    args = parse_args()

    loglevel = logging.INFO if args.verbose is None else logging.DEBUG
    logging.basicConfig(level=loglevel)
    coloredlogs.install()

    import_new_features(args.node_eui, args.exclude)
Example #31
import logging
import math
import base64
import utils
import coloredlogs
import time
import threading
from queue import Queue, Empty as QEmpty
import input_obj
import gzip
import gzipinputstream
from datetime import datetime
from newline_reader import NewlineReader, NewlineIterator


logger = logging.getLogger(__name__)
coloredlogs.install(level=logging.DEBUG)


class EcoRecode(object):
    """
    Processing censys http eco system dataset.
    Recodes dataset to regular snapshots. Generates json, dataset for classification.
    
    The script is designed to run on a big server with a huge amount of RAM.
    One-pass, all-in-RAM method.
    
    In case optimisation is needed:
     - download big cert file to the disk, ungzip
     - do a sort on the fingerprints, disk sort
     - host file, one by one, download, disk sort, find fingerprints in the main big cert file.
    
Example #32
def build_act_with_param_noise(make_obs_ph, q_func, num_actions, scope="deepq", reuse=None, param_noise_filter_func=None):
    """Creates the act function with support for parameter space noise exploration (https://arxiv.org/abs/1706.01905):

    Parameters
    ----------
    make_obs_ph: str -> tf.placeholder or TfInput
        a function that take a name and creates a placeholder of input with that name
    q_func: (tf.Variable, int, str, bool) -> tf.Variable
        the model that takes the following inputs:
            observation_in: object
                the output of observation placeholder
            num_actions: int
                number of actions
            scope: str
            reuse: bool
                should be passed to outer variable scope
        and returns a tensor of shape (batch_size, num_actions) with values of every action.
    num_actions: int
        number of actions.
    scope: str or VariableScope
        optional scope for variable_scope.
    reuse: bool or None
        whether or not the variables should be reused. To be able to reuse the scope must be given.
    param_noise_filter_func: tf.Variable -> bool
        function that decides whether or not a variable should be perturbed. Only applicable
        if param_noise is True. If set to None, default_param_noise_filter is used by default.

    Returns
    -------
    act: (tf.Variable, bool, float, bool, float, bool) -> tf.Variable
        function to select an action given an observation.
        See the top of the file for details.
    """
    logger = logging.getLogger()
    coloredlogs.install(level='DEBUG', fmt='%(asctime)s,%(msecs)03d %(filename)s[%(process)d] %(levelname)s %(message)s')
    logger.setLevel(logging.DEBUG)
    
    if param_noise_filter_func is None:
        param_noise_filter_func = default_param_noise_filter

    with tf.variable_scope(scope, reuse=reuse):
        observations_ph = make_obs_ph("observation")
        stochastic_ph = tf.placeholder(tf.bool, (), name="stochastic")
        update_eps_ph = tf.placeholder(tf.float32, (), name="update_eps")
        update_param_noise_threshold_ph = tf.placeholder(tf.float32, (), name="update_param_noise_threshold")
        update_param_noise_scale_ph = tf.placeholder(tf.bool, (), name="update_param_noise_scale")
        reset_ph = tf.placeholder(tf.bool, (), name="reset")

        eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0))
        param_noise_scale = tf.get_variable("param_noise_scale", (), initializer=tf.constant_initializer(0.01), trainable=False)
        param_noise_threshold = tf.get_variable("param_noise_threshold", (), initializer=tf.constant_initializer(0.05), trainable=False)

        # Unmodified Q.
        q_values = q_func(observations_ph.get(), num_actions, scope="q_func")

        # Perturbable Q used for the actual rollout.
        q_values_perturbed = q_func(observations_ph.get(), num_actions, scope="perturbed_q_func")
        # We have to wrap this code into a function due to the way tf.cond() works. See
        # https://stackoverflow.com/questions/37063952/confused-by-the-behavior-of-tf-cond for
        # a more detailed discussion.
        def perturb_vars(original_scope, perturbed_scope):
            all_vars = scope_vars(absolute_scope_name(original_scope))
            all_perturbed_vars = scope_vars(absolute_scope_name(perturbed_scope))
            assert len(all_vars) == len(all_perturbed_vars)
            perturb_ops = []
            for var, perturbed_var in zip(all_vars, all_perturbed_vars):
                if param_noise_filter_func(perturbed_var):
                    # Perturb this variable.
                    op = tf.assign(perturbed_var, var + tf.random_normal(shape=tf.shape(var), mean=0., stddev=param_noise_scale))
                else:
                    # Do not perturb, just assign.
                    op = tf.assign(perturbed_var, var)
                perturb_ops.append(op)
            assert len(perturb_ops) == len(all_vars)
            return tf.group(*perturb_ops)

        # Set up functionality to re-compute `param_noise_scale`. This perturbs yet another copy
        # of the network and measures the effect of that perturbation in action space. If the perturbation
        # is too big, reduce scale of perturbation, otherwise increase.
        q_values_adaptive = q_func(observations_ph.get(), num_actions, scope="adaptive_q_func")
        perturb_for_adaption = perturb_vars(original_scope="q_func", perturbed_scope="adaptive_q_func")
        kl = tf.reduce_sum(tf.nn.softmax(q_values) * (tf.log(tf.nn.softmax(q_values)) - tf.log(tf.nn.softmax(q_values_adaptive))), axis=-1)
        mean_kl = tf.reduce_mean(kl)
        def update_scale():
            with tf.control_dependencies([perturb_for_adaption]):
                update_scale_expr = tf.cond(mean_kl < param_noise_threshold,
                    lambda: param_noise_scale.assign(param_noise_scale * 1.01),
                    lambda: param_noise_scale.assign(param_noise_scale / 1.01),
                )
            return update_scale_expr

        # Functionality to update the threshold for parameter space noise.
        update_param_noise_threshold_expr = param_noise_threshold.assign(tf.cond(update_param_noise_threshold_ph >= 0,
            lambda: update_param_noise_threshold_ph, lambda: param_noise_threshold))

        # Put everything together.
        deterministic_actions = tf.argmax(q_values_perturbed, axis=1)
        batch_size = tf.shape(observations_ph.get())[0]
        random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64)
        chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps
        stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)

        output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)
        update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps))
        updates = [
            update_eps_expr,
            tf.cond(reset_ph, lambda: perturb_vars(original_scope="q_func", perturbed_scope="perturbed_q_func"), lambda: tf.group(*[])),
            tf.cond(update_param_noise_scale_ph, lambda: update_scale(), lambda: tf.Variable(0., trainable=False)),
            update_param_noise_threshold_expr,
        ]
        _act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph, reset_ph, update_param_noise_threshold_ph, update_param_noise_scale_ph],
                         outputs=output_actions,
                         givens={update_eps_ph: -1.0, stochastic_ph: True, reset_ph: False, update_param_noise_threshold_ph: False, update_param_noise_scale_ph: False},
                         updates=updates)
        def act(ob, reset=False, update_param_noise_threshold=False, update_param_noise_scale=False, stochastic=True, update_eps=-1):
            return _act(ob, stochastic, update_eps, reset, update_param_noise_threshold, update_param_noise_scale)
        return act
Example #33
def build_act(make_obs_ph, q_func, num_actions, scope="deepq", reuse=None):
    """Creates the act function:

    Parameters
    ----------
    make_obs_ph: str -> tf.placeholder or TfInput
        a function that take a name and creates a placeholder of input with that name
    q_func: (tf.Variable, int, str, bool) -> tf.Variable
        the model that takes the following inputs:
            observation_in: object
                the output of observation placeholder
            num_actions: int
                number of actions
            scope: str
            reuse: bool
                should be passed to outer variable scope
        and returns a tensor of shape (batch_size, num_actions) with values of every action.
    num_actions: int
        number of actions.
    scope: str or VariableScope
        optional scope for variable_scope.
    reuse: bool or None
        whether or not the variables should be reused. To be able to reuse the scope must be given.

    Returns
    -------
    act: (tf.Variable, bool, float) -> tf.Variable
        function to select an action given an observation.
        See the top of the file for details.
    """
    logger = logging.getLogger()
    coloredlogs.install(level='DEBUG', fmt='%(asctime)s,%(msecs)03d %(filename)s[%(process)d] %(levelname)s %(message)s')
    logger.setLevel(logging.DEBUG)
    
    with tf.variable_scope(scope, reuse=reuse):
        observations_ph = make_obs_ph("observation")
        stochastic_ph = tf.placeholder(tf.bool, (), name="stochastic")
        update_eps_ph = tf.placeholder(tf.float32, (), name="update_eps")

        eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0))

        q_values = q_func(observations_ph.get(), num_actions, scope="q_func")
        deterministic_actions = tf.argmax(q_values, axis=1)

        batch_size = tf.shape(observations_ph.get())[0]
        random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64)
        chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps
        stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)

        output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)
        update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps))
#        _act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph],
#                         outputs=output_actions,
#                         givens={update_eps_ph: -1.0, stochastic_ph: True},
#                         updates=[update_eps_expr])
#        def act(ob, stochastic=True, update_eps=-1):
#            return _act(ob, stochastic, update_eps)
#        return act
        _act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph],
                         outputs=(output_actions, q_values),
                         givens={update_eps_ph: -1.0, stochastic_ph: True},
                         updates=[update_eps_expr])
        def act(ob, stochastic=True, update_eps=-1):
            return _act(ob, stochastic, update_eps)
        return act
Example #34
def happi_cli(args):
    args = parser.parse_args(args)

    # Logging Level handling
    if args.verbose:
        shown_logger = logging.getLogger()
        level = "DEBUG"
    else:
        shown_logger = logging.getLogger('happi')
        level = "INFO"
    coloredlogs.install(level=level,
                        logger=shown_logger,
                        fmt='[%(asctime)s] - %(levelname)s -  %(message)s')
    logger.debug("Set logging level of %r to %r", shown_logger.name, level)

    # Version endpoint
    if args.version:
        print(f'Happi: Version {happi.__version__} from {happi.__file__}')
        return
    logger.debug('Command line arguments: %r' % args)

    client = happi.client.Client.from_config(cfg=args.path)
    logger.debug("Happi client: %r" % client)
    logger.debug('Happi command: %r' % args.cmd)

    if args.cmd == 'search':
        logger.debug("We're in the search block")

        # Get search criteria into dictionary for use by client
        client_args = {}
        for user_arg in args.search_criteria:
            if '=' in user_arg:
                criteria, value = user_arg.split('=', 1)
            else:
                criteria = 'name'
                value = user_arg
            if criteria in client_args:
                logger.error(
                    'Received duplicate search criteria %s=%r (was %r)',
                    criteria, value, client_args[criteria])
                return
            if value.replace('.', '').isnumeric():
                logger.debug('Changed %s to float', value)
                value = float(value)
            client_args[criteria] = value

        results = client.search(**client_args)
        if results:
            for res in results:
                res.device.show_info()
            return results
        else:
            logger.error('No devices found')
    elif args.cmd == 'add':
        logger.debug('Starting interactive add')
        registry = happi.containers.registry
        if args.clone:
            clone_source = client.find_device(name=args.clone)
            # Must use the same container if cloning
            response = registry.entry_from_class(clone_source.__class__)
        else:
            # Keep Device at registry for backwards compatibility but filter
            # it out of new devices options
            options = os.linesep.join(
                [k for k, _ in registry.items() if k != "Device"])
            logger.info(
                'Please select a container, or press enter for generic '
                'Ophyd Device container: %s%s', os.linesep, options)
            response = input()
            if response and response not in registry:
                logger.info(f'Invalid device container {response}')
                return
            elif not response:
                response = 'OphydItem'

        container = registry[response]
        kwargs = {}
        for info in container.entry_info:
            valid_value = False
            while not valid_value:
                if args.clone:
                    default = getattr(clone_source, info.key)
                else:
                    default = info.default
                logger.info(f'Enter value for {info.key}, default={default}, '
                            f'enforce={info.enforce}')
                item_value = input()
                if not item_value:
                    if info.optional or args.clone:
                        logger.info(f'Selecting default value {default}')
                        item_value = default
                    else:
                        logger.info('Not an optional field!')
                        continue
                try:
                    info.enforce_value(item_value)
                    valid_value = True
                    kwargs[info.key] = item_value
                except Exception:
                    logger.info(f'Invalid value {item_value}')

        device = client.create_device(container, **kwargs)
        logger.info('Please confirm the following info is correct:')
        device.show_info()
        ok = input('y/N\n')
        if 'y' in ok:
            logger.info('Adding device')
            device.save()
        else:
            logger.info('Aborting')
    elif args.cmd == 'edit':
        logger.debug('Starting edit block')
        device = client.find_device(name=args.name)
        for edit in args.edits:
            field, value = edit.split('=', 1)
            logger.info(f'Setting {args.name}.{field} = {value}')
            setattr(device, field, value)
        device.save()
        device.show_info()
    elif args.cmd == 'load':
        logger.debug('Starting load block')
        logger.info(f'Creating shell with devices {args.device_names}')
        devices = {}
        for name in args.device_names:
            devices[name] = client.load_device(name=name)
        start_ipython(argv=['--quick'], user_ns=devices)
Example #35
def build_train(make_obs_ph, q_func, num_actions, optimizer, grad_norm_clipping=None, gamma=1.0,
    double_q=True, scope="deepq", reuse=None, param_noise=False, param_noise_filter_func=None):
    """Creates the train function:

    Parameters
    ----------
    make_obs_ph: str -> tf.placeholder or TfInput
        a function that takes a name and creates a placeholder of input with that name
    q_func: (tf.Variable, int, str, bool) -> tf.Variable
        the model that takes the following inputs:
            observation_in: object
                the output of observation placeholder
            num_actions: int
                number of actions
            scope: str
            reuse: bool
                should be passed to outer variable scope
        and returns a tensor of shape (batch_size, num_actions) with values of every action.
    num_actions: int
        number of actions
    reuse: bool
        whether or not to reuse the graph variables
    optimizer: tf.train.Optimizer
        optimizer to use for the Q-learning objective.
    grad_norm_clipping: float or None
        clip gradient norms to this value. If None no clipping is performed.
    gamma: float
        discount rate.
    double_q: bool
        if true will use Double Q Learning (https://arxiv.org/abs/1509.06461).
        In general it is a good idea to keep it enabled.
    scope: str or VariableScope
        optional scope for variable_scope.
    reuse: bool or None
        whether or not the variables should be reused. To be able to reuse the scope must be given.
    param_noise: bool
        whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
    param_noise_filter_func: tf.Variable -> bool
        function that decides whether or not a variable should be perturbed. Only applicable
        if param_noise is True. If set to None, default_param_noise_filter is used by default.

    Returns
    -------
    act: (tf.Variable, bool, float) -> tf.Variable
        function to select an action given an observation.
        See the top of the file for details.
    train: (object, np.array, np.array, object, np.array, np.array) -> np.array
        optimize the error in Bellman's equation.
        See the top of the file for details.
    update_target: () -> ()
        copy the parameters from optimized Q function to the target Q function.
        See the top of the file for details.
    debug: {str: function}
        a bunch of functions to print debug data like q_values.
    """
    logger = logging.getLogger()
    coloredlogs.install(level='DEBUG', fmt='%(asctime)s,%(msecs)03d %(filename)s[%(process)d] %(levelname)s %(message)s')
    logger.setLevel(logging.DEBUG)
    
    #logger.info("Inside build train function in build_graph")
    if param_noise:
        #logger.info("Inside build train function in build_graph and there is param noise")
        act_f = build_act_with_param_noise(make_obs_ph, q_func, num_actions, scope=scope, reuse=reuse,
            param_noise_filter_func=param_noise_filter_func)
    else:
        #logger.info("Inside build train function in build_graph and there is NOT param noise")
        act_f = build_act(make_obs_ph, q_func, num_actions, scope=scope, reuse=reuse)

    with tf.variable_scope(scope, reuse=reuse):
        # set up placeholders
        #logger.info("Inside build train function in build_graph and setting up placeholders")
        obs_t_input = make_obs_ph("obs_t")
        act_t_ph = tf.placeholder(tf.int32, [None], name="action")
        rew_t_ph = tf.placeholder(tf.float32, [None], name="reward")
        obs_tp1_input = make_obs_ph("obs_tp1")
        done_mask_ph = tf.placeholder(tf.float32, [None], name="done")
        importance_weights_ph = tf.placeholder(tf.float32, [None], name="weight")

        # q network evaluation
        #logger.info("Inside build train function in build_graph and performing q eval")
        q_t = q_func(obs_t_input.get(), num_actions, scope="q_func", reuse=True)  # reuse parameters from act
        q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=tf.get_variable_scope().name + "/q_func")

        # target q network evalution
        #logger.info("Inside build train function in build_graph and performing target q eval")
        q_tp1 = q_func(obs_tp1_input.get(), num_actions, scope="target_q_func")
        target_q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=tf.get_variable_scope().name + "/target_q_func")

        # q scores for actions which we know were selected in the given state.
        #logger.info("Inside build train function in build_graph and collecting q scores")
        q_t_selected = tf.reduce_sum(q_t * tf.one_hot(act_t_ph, num_actions), 1)

        # compute estimate of best possible value starting from state at t + 1
        if double_q:
            #logger.info("Inside build train function in build_graph and double q is yes")
            q_tp1_using_online_net = q_func(obs_tp1_input.get(), num_actions, scope="q_func", reuse=True)
            q_tp1_best_using_online_net = tf.argmax(q_tp1_using_online_net, 1)
            q_tp1_best = tf.reduce_sum(q_tp1 * tf.one_hot(q_tp1_best_using_online_net, num_actions), 1)
        else:
            #logger.info("Inside build train function in build_graph and double q is no")
            q_tp1_best = tf.reduce_max(q_tp1, 1)
        #logger.info("Inside build train function in build_graph and exited double q check")
        q_tp1_best_masked = (1.0 - done_mask_ph) * q_tp1_best

        # compute RHS of bellman equation
        #logger.info("Inside build train function in build_graph and computing RHS of bellman equation")
        q_t_selected_target = rew_t_ph + gamma * q_tp1_best_masked

        # compute the error (potentially clipped)
        #logger.info("Inside build train function in build_graph and computing the error")
        td_error = q_t_selected - tf.stop_gradient(q_t_selected_target)
        errors = U.huber_loss(td_error)
        weighted_error = tf.reduce_mean(importance_weights_ph * errors)

        # compute optimization op (potentially with gradient clipping)
        #logger.info("Inside build train function in build_graph and compute optimization op")
        if grad_norm_clipping is not None:
            #logger.info("Inside build train function in build_graph and grad norm clipping is yes")
            gradients = optimizer.compute_gradients(weighted_error, var_list=q_func_vars)
            for i, (grad, var) in enumerate(gradients):
                if grad is not None:
                    gradients[i] = (tf.clip_by_norm(grad, grad_norm_clipping), var)
            optimize_expr = optimizer.apply_gradients(gradients)
        else:
            #logger.info("Inside build train function in build_graph and grad norm clipping is no")
            optimize_expr = optimizer.minimize(weighted_error, var_list=q_func_vars)

        #logger.info("Inside build train function in build_graph and exited grad norm clipping check")
        # update_target_fn will be called periodically to copy Q network to target Q network
        update_target_expr = []
        for var, var_target in zip(sorted(q_func_vars, key=lambda v: v.name),
                                   sorted(target_q_func_vars, key=lambda v: v.name)):
            update_target_expr.append(var_target.assign(var))
        update_target_expr = tf.group(*update_target_expr)

        # Create callable functions
        train = U.function(
            inputs=[
                obs_t_input,
                act_t_ph,
                rew_t_ph,
                obs_tp1_input,
                done_mask_ph,
                importance_weights_ph
            ],
            outputs=td_error,
            updates=[optimize_expr]
        )
        update_target = U.function([], [], updates=[update_target_expr])

        q_values = U.function([obs_t_input], q_t)
    
        #logger.info("Inside build train function in build_graph and about to return")
        return act_f, train, update_target, {'q_values': q_values}
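
For reference, a minimal NumPy sketch of the target computation above (the function and array names are illustrative, not part of the baselines API): with double Q-learning the online network selects the greedy action and the target network evaluates it, while vanilla Q-learning takes the max over the target network directly.

import numpy as np

def bellman_targets(q_tp1_target, q_tp1_online, rewards, dones, gamma=0.99, double_q=True):
    """Compute r + gamma * Q_target(s', a*) for a batch, masking out terminal states."""
    if double_q:
        # the online network picks the action, the target network scores it
        best_actions = np.argmax(q_tp1_online, axis=1)
        q_tp1_best = q_tp1_target[np.arange(len(best_actions)), best_actions]
    else:
        q_tp1_best = np.max(q_tp1_target, axis=1)
    q_tp1_best_masked = (1.0 - dones) * q_tp1_best
    return rewards + gamma * q_tp1_best_masked
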
Ejemplo n.º 36
0
def log_config():
    coloredlogs.install()
    logging.basicConfig()
    LOGGER.setLevel(logging.DEBUG)
Ejemplo n.º 37
0
def main():
    parser = argparse.ArgumentParser(
        description="Security analysis of Ethereum smart contracts")
    parser.add_argument("solidity_file", nargs="*")

    commands = parser.add_argument_group("commands")
    commands.add_argument("-g",
                          "--graph",
                          help="generate a control flow graph")
    commands.add_argument(
        "-V",
        "--version",
        action="store_true",
        help="print the Mythril version number and exit",
    )
    commands.add_argument(
        "-x",
        "--fire-lasers",
        action="store_true",
        help="detect vulnerabilities, use with -c, -a or solidity file(s)",
    )
    commands.add_argument(
        "-t",
        "--truffle",
        action="store_true",
        help="analyze a truffle project (run from project dir)",
    )
    commands.add_argument("-d",
                          "--disassemble",
                          action="store_true",
                          help="print disassembly")
    commands.add_argument(
        "-j",
        "--statespace-json",
        help="dumps the statespace json",
        metavar="OUTPUT_FILE",
    )

    inputs = parser.add_argument_group("input arguments")
    inputs.add_argument(
        "-c",
        "--code",
        help='hex-encoded bytecode string ("6060604052...")',
        metavar="BYTECODE",
    )
    inputs.add_argument(
        "-f",
        "--codefile",
        help="file containing hex-encoded bytecode string",
        metavar="BYTECODEFILE",
        type=argparse.FileType("r"),
    )
    inputs.add_argument(
        "-a",
        "--address",
        help="pull contract from the blockchain",
        metavar="CONTRACT_ADDRESS",
    )
    inputs.add_argument(
        "-l",
        "--dynld",
        action="store_true",
        help="auto-load dependencies from the blockchain",
    )

    outputs = parser.add_argument_group("output formats")
    outputs.add_argument(
        "-o",
        "--outform",
        choices=["text", "markdown", "json"],
        default="text",
        help="report output format",
        metavar="<text/markdown/json>",
    )
    outputs.add_argument(
        "--verbose-report",
        action="store_true",
        help="Include debugging information in report",
    )

    database = parser.add_argument_group("local contracts database")
    database.add_argument("-s",
                          "--search",
                          help="search the contract database",
                          metavar="EXPRESSION")
    database.add_argument(
        "--leveldb-dir",
        help="specify leveldb directory for search or direct access operations",
        metavar="LEVELDB_PATH",
    )

    utilities = parser.add_argument_group("utilities")
    utilities.add_argument("--hash",
                           help="calculate function signature hash",
                           metavar="SIGNATURE")
    utilities.add_argument(
        "--storage",
        help="read state variables from storage index, use with -a",
        metavar="INDEX,NUM_SLOTS,[array] / mapping,INDEX,[KEY1, KEY2...]",
    )
    utilities.add_argument(
        "--solv",
        help=
        "specify solidity compiler version. If not present, will try to install it (Experimental)",
        metavar="SOLV",
    )
    utilities.add_argument(
        "--contract-hash-to-address",
        help="returns corresponding address for a contract address hash",
        metavar="SHA3_TO_LOOK_FOR",
    )

    options = parser.add_argument_group("options")
    options.add_argument(
        "-m",
        "--modules",
        help="Comma-separated list of security analysis modules",
        metavar="MODULES",
    )
    options.add_argument(
        "--max-depth",
        type=int,
        default=22,
        help="Maximum recursion depth for symbolic execution",
    )

    options.add_argument(
        "--strategy",
        choices=["dfs", "bfs", "naive-random", "weighted-random"],
        default="dfs",
        help="Symbolic execution strategy",
    )
    options.add_argument(
        "--max-transaction-count",
        type=int,
        default=3,
        help="Maximum number of transactions issued by laser",
    )

    options.add_argument(
        "--execution-timeout",
        type=int,
        default=600,
        help="The amount of seconds to spend on symbolic execution",
    )
    options.add_argument(
        "--create-timeout",
        type=int,
        default=10,
        help="The amount of seconds to spend on "
        "the initial contract creation",
    )
    options.add_argument("--solc-args", help="Extra arguments for solc")
    options.add_argument("--phrack",
                         action="store_true",
                         help="Phrack-style call graph")
    options.add_argument("--enable-physics",
                         action="store_true",
                         help="enable graph physics simulation")
    options.add_argument("-v",
                         type=int,
                         help="log level (0-2)",
                         metavar="LOG_LEVEL")
    options.add_argument(
        "-q",
        "--query-signature",
        action="store_true",
        help="Lookup function signatures through www.4byte.directory",
    )

    rpc = parser.add_argument_group("RPC options")
    rpc.add_argument("-i",
                     action="store_true",
                     help="Preset: Infura Node service (Mainnet)")
    rpc.add_argument(
        "--rpc",
        help="custom RPC settings",
        metavar="HOST:PORT / ganache / infura-[network_name]",
    )
    rpc.add_argument("--rpctls",
                     type=bool,
                     default=False,
                     help="RPC connection over TLS")

    # Get config values

    args = parser.parse_args()

    if args.version:
        if args.outform == "json":
            print(json.dumps({"version_str": VERSION}))
        else:
            print("Mythril version {}".format(VERSION))
        sys.exit()

    # Parse cmdline args

    if not (args.search or args.hash or args.disassemble or args.graph
            or args.fire_lasers or args.storage or args.truffle
            or args.statespace_json or args.contract_hash_to_address):
        parser.print_help()
        sys.exit()

    if args.v:
        if 0 <= args.v < 3:
            coloredlogs.install(
                fmt="%(name)s[%(process)d] %(levelname)s %(message)s",
                level=[logging.NOTSET, logging.INFO, logging.DEBUG][args.v],
            )
        else:
            exit_with_error(
                args.outform,
                "Invalid -v value, you can find valid values in usage")

    if args.query_signature:
        if sigs.ethereum_input_decoder is None:
            exit_with_error(
                args.outform,
                "The --query-signature function requires the python package ethereum-input-decoder",
            )

    # -- commands --
    if args.hash:
        print(Mythril.hash_for_function_signature(args.hash))
        sys.exit()

    try:
        # the mythril object should be our main interface
        # infura = None, rpc = None, rpctls = None
        # solc_args = None, dynld = None, max_recursion_depth = 12):

        mythril = Mythril(
            solv=args.solv,
            dynld=args.dynld,
            solc_args=args.solc_args,
            enable_online_lookup=args.query_signature,
        )
        if args.dynld and not (args.rpc or args.i):
            mythril.set_api_from_config_path()

        if args.address:
            # Establish RPC connection if necessary
            if args.i:
                mythril.set_api_rpc_infura()
            elif args.rpc:
                mythril.set_api_rpc(rpc=args.rpc, rpctls=args.rpctls)
            elif not args.dynld:
                mythril.set_api_rpc_localhost()
        elif args.search or args.contract_hash_to_address:
            # Open LevelDB if necessary
            mythril.set_api_leveldb(mythril.leveldb_dir if not args.leveldb_dir
                                    else args.leveldb_dir)

        if args.search:
            # Database search ops
            mythril.search_db(args.search)
            sys.exit()

        if args.contract_hash_to_address:
            # search corresponding address
            try:
                mythril.contract_hash_to_address(args.contract_hash_to_address)
            except AddressNotFoundError:
                print("Address not found.")

            sys.exit()

        if args.truffle:
            try:
                # not really pythonic atm. needs refactoring
                mythril.analyze_truffle_project(args)
            except FileNotFoundError:
                print(
                    "Build directory not found. Make sure that you start the analysis from the project root, and that 'truffle compile' has executed successfully."
                )
            sys.exit()

        # Load / compile input contracts
        address = None

        if args.code:
            # Load from bytecode
            address, _ = mythril.load_from_bytecode(args.code)
        elif args.codefile:
            bytecode = "".join(
                [l.strip() for l in args.codefile if len(l.strip()) > 0])
            address, _ = mythril.load_from_bytecode(bytecode)
        elif args.address:
            # Get bytecode from a contract address
            address, _ = mythril.load_from_address(args.address)
        elif args.solidity_file:
            # Compile Solidity source file(s)
            if args.graph and len(args.solidity_file) > 1:
                exit_with_error(
                    args.outform,
                    "Cannot generate call graphs from multiple input files. Please do it one at a time.",
                )
            address, _ = mythril.load_from_solidity(
                args.solidity_file)  # list of files
        else:
            exit_with_error(
                args.outform,
                "No input bytecode. Please provide EVM code via -c BYTECODE, -a ADDRESS, or -i SOLIDITY_FILES",
            )

        # Commands

        if args.storage:
            if not args.address:
                exit_with_error(
                    args.outform,
                    "To read storage, provide the address of a deployed contract with the -a option.",
                )

            storage = mythril.get_state_variable_from_storage(
                address=address,
                params=[a.strip() for a in args.storage.strip().split(",")],
            )
            print(storage)

        elif args.disassemble:
            easm_text = mythril.contracts[0].get_easm(
            )  # or mythril.disassemble(mythril.contracts[0])
            sys.stdout.write(easm_text)

        elif args.graph or args.fire_lasers:
            if not mythril.contracts:
                exit_with_error(
                    args.outform,
                    "input files do not contain any valid contracts")

            if args.graph:
                html = mythril.graph_html(
                    strategy=args.strategy,
                    contract=mythril.contracts[0],
                    address=address,
                    enable_physics=args.enable_physics,
                    phrackify=args.phrack,
                    max_depth=args.max_depth,
                    execution_timeout=args.execution_timeout,
                    create_timeout=args.create_timeout,
                )

                try:
                    with open(args.graph, "w") as f:
                        f.write(html)
                except Exception as e:
                    exit_with_error(args.outform,
                                    "Error saving graph: " + str(e))

            else:
                report = mythril.fire_lasers(
                    strategy=args.strategy,
                    address=address,
                    modules=[
                        m.strip() for m in args.modules.strip().split(",")
                    ] if args.modules else [],
                    verbose_report=args.verbose_report,
                    max_depth=args.max_depth,
                    execution_timeout=args.execution_timeout,
                    create_timeout=args.create_timeout,
                    max_transaction_count=args.max_transaction_count,
                )
                outputs = {
                    "json": report.as_json(),
                    "text": report.as_text(),
                    "markdown": report.as_markdown(),
                }
                print(outputs[args.outform])

        elif args.statespace_json:

            if not mythril.contracts:
                exit_with_error(
                    args.outform,
                    "input files do not contain any valid contracts")

            statespace = mythril.dump_statespace(
                strategy=args.strategy,
                contract=mythril.contracts[0],
                address=address,
                max_depth=args.max_depth,
                execution_timeout=args.execution_timeout,
                create_timeout=args.create_timeout,
            )

            try:
                with open(args.statespace_json, "w") as f:
                    json.dump(statespace, f)
            except Exception as e:
                exit_with_error(args.outform, "Error saving json: " + str(e))

        else:
            parser.print_help()

    except CriticalError as ce:
        exit_with_error(args.outform, str(ce))
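
The -v handling above maps a small integer onto a logging level by indexing a fixed list; a standalone sketch of the same idiom (the helper name is an assumption, not part of Mythril):

import logging
import coloredlogs

def set_verbosity(v: int) -> None:
    # same level list as used in the argument handling above
    levels = [logging.NOTSET, logging.INFO, logging.DEBUG]
    if not 0 <= v < len(levels):
        raise ValueError("verbosity must be 0, 1 or 2")
    coloredlogs.install(
        fmt="%(name)s[%(process)d] %(levelname)s %(message)s",
        level=levels[v],
    )
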
Ejemplo n.º 38
0
from app import app_controller
import filestore
from magic_defines import *
import sd_material_ui
from sd_material_ui import Snackbar
from utils import *
from autolink import Redirect
from localstorage_writer import LocalStorageWriter
from localstorage_reader import LocalStorageReader

import coloredlogs, logging
logger = logging.getLogger(__name__)
coloredlogs.install(level='DEBUG', logger=logger)

import gettext
zh = gettext.translation("user_profile", locale_d(), languages=["zh_CN"])
zh.install(True)
_ = zh.gettext


def gen_id(name):
    # user module as name prefix
    s_id = g_id(__name__, name)
    return s_id


def generate_field_user_id(user):
    field_row = html.Div(
Ejemplo n.º 39
0
def main():
    parser = argparse.ArgumentParser(
        prog='nrfsec',
        description=
        'perform security related tasks on nRF51 targets through SWD interface',
        usage='nrfsec <command> [<args>]')
    parser.add_argument(
        '-f',
        '--frequency',
        help='frequency to run the SWD interface (default 4 MHz)',
        type=int,
        default=4000000)

    parser.add_argument(
        '-v',
        '--verbose',
        action='store_true',
        help='increase output verbosity',
    )

    subparsers = parser.add_subparsers(title='supported subcommands',
                                       dest='command')

    # parser - info
    parser_info = subparsers.add_parser('info',
                                        help='display chip information')
    parser_info.add_argument(
        '-d',
        '--delay',
        type=int,
        help=
        'delay after reset before reading info (to initialize RAM and peripherals)'
    )
    parser_info.add_argument(
        '-m',
        '--maxsearch',
        help=
        'Maximum space to search for gadgets, starting at the reset vector',
        default='0x1000')

    # parser - erase
    parser_erase = subparsers.add_parser('erase',
                                         help='perform a complete erase')
    parser_erase.add_argument(
        '-m',
        '--maxsearch',
        help=
        'Maximum space to search for gadgets, starting at the reset vector',
        default='0x1000')

    # parser - restore
    parser_restore = subparsers.add_parser(
        'restore', help='restore specific images to an unlocked chip')
    parser_restore.add_argument('-u',
                                '--uicr',
                                help='UICR image file to restore',
                                default=os.getcwd() +
                                '/fw/0x10001000_0x10001400_UICR.bin')
    parser_restore.add_argument('-r',
                                '--rom',
                                help='ROM image file to restore',
                                default=os.getcwd() +
                                '/fw/0x00000000_0x00040000_ROM.bin')
    parser_restore.add_argument('-s',
                                '--skipverify',
                                action='store_true',
                                help='skip image verification steps')

    # parser - read
    parser_read = subparsers.add_parser('read',
                                        help='read memory contents to outfile')
    parser_read.add_argument('-o',
                             '--outfile',
                             help='File to store memory contents',
                             default='dump.bin')
    parser_read.add_argument(
        '-m',
        '--maxsearch',
        help=
        'Maximum space to search for gadgets, starting at the reset vector',
        default='0x1000')
    parser_read.add_argument('--skipverify',
                             action='store_true',
                             help='skip image verification steps')
    parser_read.add_argument(
        '-d',
        '--delay',
        type=int,
        help='Delay time after boot before reading memory')
    address_group = parser_read.add_mutually_exclusive_group()
    address_group.add_argument('-s',
                               '--start',
                               help='address to begin reading memory')
    parser_read.add_argument('-e',
                             '--end',
                             help='address to stop reading memory')
    address_group.add_argument(
        '-a',
        '--all',
        action='store_true',
        help=
        'Dump all memory as discovered in FICR register starting @ 0x00000000')

    # parser - unlock
    parser_unlock = subparsers.add_parser('unlock',
                                          help='unlock the device if locked')
    parser_unlock.add_argument(
        '-m',
        '--maxsearch',
        help=
        'Maximum space to search for gadgets, starting at the reset vector',
        default='0x1000')
    parser_unlock.add_argument(
        '-d',
        '--directory',
        help='Directory to store the recovered firmware',
        default=os.getcwd() + '/fw')
    parser_unlock.add_argument('--skipverify',
                               action='store_true',
                               help='skip image verification steps')
    # parser - lock
    subparsers.add_parser('lock', help='lock the device if unlocked')

    args = parser.parse_args()

    # logging setup
    if args.verbose:
        coloredlogs.install(level='DEBUG',
                            datefmt='%H:%M:%S',
                            fmt='%(asctime)s: %(levelname)s - %(message)s')
    else:
        coloredlogs.install(level='INFO',
                            datefmt='%H:%M:%S',
                            fmt='%(asctime)s: %(levelname)s - %(message)s')

    sub_commands = ['info', 'erase', 'restore', 'read', 'unlock', 'lock']
    if args.command not in sub_commands:
        logger.info('please specify a subcommand: {}'.format(sub_commands))
        exit()

    # connect to ST-Link, required for all commands
    logger.info('Connecting to debug interface @ {}Hz'.format(args.frequency))
    dev = dbg.DeviceInterface(args.frequency)

    # commands unaffected by lock state
    if args.command == 'erase':
        if dev.EraseAll():
            logger.info('target erased')
            exit()
        else:
            logger.info('unable to erase target')

    if dev.isLocked():
        logger.debug('target memory is locked')
        if args.command == 'lock':
            logger.info('target is already locked')
            exit()

        if args.command == 'restore':
            logger.info('target is locked, perform unlock before restoring')
            exit()

        r_gadget = dev.SearchReadGadget(int(args.maxsearch, 0))

        if r_gadget:
            if args.command == 'info':
                if args.delay:
                    dev.Delay(args.delay)
                dev.Dumpinfo(r_gadget)
                utils.printChipInfo(dev.chip_info)
                if args.verbose:
                    print('Factory Information Configuration Registers (FICR)')
                    utils.printHexDump(dev.ficr, nordic.NRF_FICR_BASE)
                    print('User Information Configuration Registers (UICR)')
                    utils.printHexDump(dev.uicr, nordic.NRF_UICR_BASE)
                if 0x3FFFFFFF > int(dev.chip_info['ECB -> ECBDATAPTR'],
                                    0) > 0x20000000:
                    utils.printECB(dev.GetECB(r_gadget))
                if 0x20000000 < int(dev.chip_info['CCM -> CNFPTR'],
                                    0) < 0x3FFFFFFF:
                    pass

            if args.command == 'read':
                if args.delay:
                    dev.Delay(args.delay)
                if args.all:
                    dev.Dumpinfo(r_gadget)
                    dev.ReadAll(os.getcwd() + '/fw', args.skipverify, r_gadget)
                elif args.start is not None and args.end is not None:
                    if int(args.start, 0) < int(args.end, 0):
                        dev.DumpToFile(args.outfile, int(args.start, 0),
                                       int(args.end, 0), r_gadget)
                    else:
                        logger.error(
                            'End address cannot precede the start address')
                        exit()
                else:
                    logger.error(
                        'You must supply a start and end address to read')
                    exit()

            if args.command == 'unlock':
                dev.Dumpinfo(r_gadget)
                if dev.VolatileUnlock(r_gadget, args.directory,
                                      args.skipverify):
                    logger.info('Unlocking procedure was successful')
                    dev.cortex_m.reset()
                    exit()
                else:
                    logger.info('Unlocking procedure was unsuccessful')
                    exit()
        else:
            logger.info('no read gadget found, exiting')
            exit()
    # unlocked
    else:
        logger.debug('target memory is unlocked')
        if args.command == 'lock':
            dev.LockTarget()

        if args.command == 'unlock':
            logger.info(
                'target is already unlocked, proceed with other commands')
            exit()

        if args.command == 'restore':
            if not os.path.isfile(args.rom):
                logger.error(
                    'ROM file {} not found\n'
                    'specify the -u and -r filenames for the UICR and ROM files respectively'
                    .format(args.rom))
                exit()
            if not os.path.isfile(args.uicr):
                logger.error(
                    'UICR file {} not found:\n'
                    'specify the -u and -r filenames for the UICR and ROM files respectively'
                    .format(args.uicr))
                exit()

            utils.UnlockUICRImage(args.uicr)
            dev.RestoreImage(args.rom, nordic.ROM_START)

            if not args.skipverify:
                if not dev.VerifyImage(args.rom, nordic.ROM_START,
                                       os.path.getsize(args.rom)):
                    logger.info(
                        'failed to verify writing {} to the target'.format(
                            args.rom))
                    exit()
            dev.RestoreImage(args.uicr, nordic.NRF_UICR_BASE)
            if not args.skipverify:
                if not dev.VerifyImage(args.uicr, nordic.NRF_UICR_BASE,
                                       os.path.getsize(args.uicr)):
                    logger.info(
                        'failed to verify writing {} to the target'.format(
                            args.uicr))
                    exit()
            logger.info('Target memory has been restored')
            dev.cortex_m.nodebug()
            exit()

        if args.command == 'info':
            dev.Dumpinfo(args.delay)
            utils.printChipInfo(dev.chip_info)
            if args.verbose:
                print('Factory Information Configuration Registers (FICR)')
                utils.printHexDump(dev.ficr, nordic.NRF_FICR_BASE)
                print('User Information Configuration Registers (UICR)')
                utils.printHexDump(dev.uicr, nordic.NRF_UICR_BASE)
            # these values must point to RAM or they are uninitialized
            if 0x20000000 < int(dev.chip_info['ECB -> ECBDATAPTR'],
                                0) < 0x3FFFFFFF:
                (dev.GetECB())
            if 0x20000000 < int(dev.chip_info['CCM -> CNFPTR'],
                                0) < 0x3FFFFFFF:
                pass

        if args.command == 'read':
            if args.delay:
                dev.Delay(args.delay)
            if args.all:
                dev.Dumpinfo()
                dev.ReadAll(os.getcwd() + '/fw', args.skipverify)
            elif args.start is not None and args.end is not None:
                if int(args.start, 0) < int(args.end, 0):
                    dev.DumpToFile(args.outfile, int(args.start, 0),
                                   int(args.end, 0))
                else:
                    logger.error(
                        'End address cannot precede the start address')
                    exit()
            else:
                logger.error('You must supply a start and end address to read')
                exit()
Ejemplo n.º 40
0
import os
import sys
from etree_patch import ET, patch_et
import logging
level = logging.DEBUG
level = logging.ERROR
try:
  import coloredlogs
  coloredlogs.DEFAULT_LOG_FORMAT = '%(levelname)-8s %(name)-15s %(message)s'
  coloredlogs.install()
  coloredlogs.set_level(level)
except Exception:
  logging.basicConfig(level=level)


gir_nsmap = {'glib': 'http://www.gtk.org/introspection/glib/1.0', 'c': 'http://www.gtk.org/introspection/c/1.0', None: 'http://www.gtk.org/introspection/core/1.0'}
ns_c = "{%s}" % (gir_nsmap['c'],)
ns_gir = "{%s}" % (gir_nsmap[None],)
ns_glib = "{%s}" % (gir_nsmap['glib'],)
patch_et(gir_nsmap)

class NO_DEFAULT (object):
  pass

_squelched = set()
def squelch (f, msg, *args):
  if args: msg = msg % args
  if msg in _squelched: return
  _squelched.add(msg)
  if len(_squelched) > 2000: _squelched.clear()
  f(msg)
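
A brief usage sketch of the squelch helper defined above (the logger name is illustrative): each distinct formatted message is emitted only once, so repeated warnings do not flood the output.

import logging
log = logging.getLogger("gir-parse")  # assumes the squelch() helper above is in scope

for element in ["Foo", "Foo", "Bar"]:
    squelch(log.warning, "unhandled element %s", element)
# "unhandled element Foo" and "unhandled element Bar" are each logged only once
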
Ejemplo n.º 41
0
 def _init_logger(self):
     coloredlogs.install()
     logging.getLogger("requests").setLevel(logging.WARNING)
     logger = logging.getLogger('passme')
     logger.setLevel(logging.INFO)
     self.logger = logger
Ejemplo n.º 42
0
from collections import OrderedDict

if util.find_spec("coloredlogs"):
    import coloredlogs
if util.find_spec("humanize"):
    import humanize

if sys.version_info[0] < 3:
    raise Exception("run_fpga_task script must be using Python 3")

# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
# Configure logging system
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
LOG_FORMAT = "%(levelname)5s (%(threadName)15s) - %(message)s"
if util.find_spec("coloredlogs"):
    coloredlogs.install(level='INFO', stream=sys.stdout, fmt=LOG_FORMAT)
else:
    logging.basicConfig(level=logging.INFO,
                        stream=sys.stdout,
                        format=LOG_FORMAT)
logger = logging.getLogger('OpenFPGA_Task_logs')

# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
# Read commandline arguments
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
parser = argparse.ArgumentParser()
parser.add_argument('tasks', nargs='+')
parser.add_argument('--maxthreads',
                    type=int,
                    default=2,
                    help="Number of fpga_flow threads to run default = 2," +
Ejemplo n.º 43
0
import csv

# Locals
dirname = os.path.dirname(
    os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
sys.path.append(dirname)
from src.models.methods.metadata import get_parameter_metadata
from src.models.icestupaClass import Icestupa
from src.utils.settings import config
from src.utils import setup_logger

if __name__ == "__main__":
    logger = logging.getLogger(__name__)
    coloredlogs.install(
        fmt="%(funcName)s %(levelname)s %(message)s",
        level=logging.INFO,
        logger=logger,
    )

    # answers = dict(
    #     # location="Schwarzsee 2019",
    #     location="Guttannen 2020",
    #     # location="Gangles 2021",
    # )
    locations = [
        "Guttannen 2021", "Guttannen 2020", "Schwarzsee 2019", "Gangles 2021"
    ]
    filenames = []

    for location in locations:
        # Initialise icestupa object
Ejemplo n.º 44
0
import coloredlogs
import numpy as np
import torch
import torch.nn as nn
import torchtext.vocab
from tqdm.auto import tqdm

import table
import table.Models
import table.modules
from table.Models import CopyGenerator, LayCoAttention, ParserModel, QCoAttention, RNNEncoder, SeqDecoder
from table.modules.Embeddings import PartUpdateEmbedding

logger = logging.getLogger(__name__)
coloredlogs.install(level='DEBUG')


def load_orig_glove(emb_file: str) -> dict:
    def get_coefs(word, *arr):
        return word, np.asarray(arr, dtype='float32')

    if os.path.isfile(emb_file + ".pickle"):
        logger.info(" * load glove from pickle")
        emb = pickle.load(open(emb_file + ".pickle", "rb"))
    else:
        logger.info(" * load glove from txt, dumping to pickle")
        emb = dict(
            get_coefs(*o.split(" ")) for o in open(emb_file, encoding='latin'))
        pickle.dump(emb, open(emb_file + ".pickle", "wb"))
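
An illustrative call to load_orig_glove, assuming the truncated function goes on to return the emb dict promised by its -> dict annotation; the embedding file path below is a placeholder.

# hypothetical path; the first call parses the text file and caches a .pickle next to it
emb = load_orig_glove("glove.840B.300d.txt")
vec = emb.get("coffee")      # numpy float32 vector, or None if out of vocabulary
if vec is not None:
    print(vec.shape)         # e.g. (300,)
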
Ejemplo n.º 45
0
    # make condor scratch directory
    scratch = make_condor_scratch_dir(args.traverse_root)

    # make condor file
    condorpath = make_condor_file(
        scratch,
        args.previous_traverse,
        args.traverse_root,
        args.cpus,
        args.memory,
        args.chunk_size,
        args.exclude,
        args.fast_forward,
        args.accounting_group,
        args.path_to_virtualenv,
    )

    # Execute
    if args.dryrun:
        logging.error(
            f"Script Aborted: Condor job not submitted ({condorpath}).")
    else:
        cmd = f"condor_submit {condorpath}"
        logging.info(cmd)
        subprocess.check_call(cmd.split(), cwd=scratch)


if __name__ == "__main__":
    coloredlogs.install(level="DEBUG")
    main()
Ejemplo n.º 46
0
    def initLogging(self):

        self.logger = logging.getLogger(self.__class__.__name__)
        if 'logLevel' in self.params:
            coloredlogs.install(level=self.params['logLevel'])
Ejemplo n.º 47
0
# -*- coding: utf-8 -*-

import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import random
import logging
import coloredlogs
coloredlogs.install(level=logging.INFO)

import bson
from tqdm import tqdm

from data.utils import encode_dict_list


def products_iter(input_bson_path, product_ids):
    with open(input_bson_path, 'rb') as reader:
        data = bson.decode_file_iter(reader)
        for i, prod in tqdm(enumerate(data), unit='products', total=args.num_products, disable=True):
            prod_id = prod.get('_id')
            if prod_id in product_ids:
                yield prod


def main(args):
    if os.path.exists(args.save_train_bson):
        raise FileExistsError(args.save_train_bson)
    if os.path.exists(args.save_val_bson):
        raise FileExistsError(args.save_val_bson)
Ejemplo n.º 48
0
from botaclan.constants import (
    LOG_LEVEL,
    LOG_ASYNCIO_LEVEL,
    LOG_DISCORD_LEVEL,
    LOG_WEBSOCKETS_LEVEL,
)
from importlib_metadata import version
import coloredlogs
import logging
import sys

__version__ = version(__package__)

root_log = logging.getLogger()
root_log.setLevel(LOG_LEVEL)
coloredlogs.install(logger=root_log, stream=sys.stdout, level=LOG_LEVEL)
logging.Formatter("%(asctime)s | %(name)s | %(levelname)s | %(message)s")
logging.getLogger("asyncio").setLevel(LOG_ASYNCIO_LEVEL)
logging.getLogger("discord").setLevel(LOG_DISCORD_LEVEL)
logging.getLogger("websockets").setLevel(LOG_WEBSOCKETS_LEVEL)
logging.getLogger(__name__)
Ejemplo n.º 49
0
def main():
    coloredlogs.install()
    coloredlogs.install(fmt='%(asctime)s %(message)s')
    logging.info('Price-notifier bot just started')
    report()
Ejemplo n.º 50
0
def main(
    argsl: Optional[List[str]] = None,
    args: Optional[argparse.Namespace] = None,
    job_order_object: Optional[CWLObjectType] = None,
    stdin: IO[Any] = sys.stdin,
    stdout: Optional[Union[TextIO, StreamWriter]] = None,
    stderr: IO[Any] = sys.stderr,
    versionfunc: Callable[[], str] = versionstring,
    logger_handler: Optional[logging.Handler] = None,
    custom_schema_callback: Optional[Callable[[], None]] = None,
    executor: Optional[JobExecutor] = None,
    loadingContext: Optional[LoadingContext] = None,
    runtimeContext: Optional[RuntimeContext] = None,
    input_required: bool = True,
) -> int:
    if not stdout:  # force UTF-8 even if the console is configured differently
        if hasattr(sys.stdout,
                   "encoding") and sys.stdout.encoding.upper() not in (
                       "UTF-8",
                       "UTF8",
                   ):
            if hasattr(sys.stdout, "detach"):
                stdout = io.TextIOWrapper(sys.stdout.buffer, encoding="utf-8")
            else:
                stdout = getwriter("utf-8")(sys.stdout)  # type: ignore
        else:
            stdout = sys.stdout

    _logger.removeHandler(defaultStreamHandler)
    stderr_handler = logger_handler
    if stderr_handler is not None:
        _logger.addHandler(stderr_handler)
    else:
        coloredlogs.install(logger=_logger, stream=stderr)
        stderr_handler = _logger.handlers[-1]
    workflowobj = None
    prov_log_handler = None  # type: Optional[logging.StreamHandler]
    try:
        if args is None:
            if argsl is None:
                argsl = sys.argv[1:]
            addl = []  # type: List[str]
            if "CWLTOOL_OPTIONS" in os.environ:
                addl = os.environ["CWLTOOL_OPTIONS"].split(" ")
            parser = arg_parser()
            argcomplete.autocomplete(parser)
            args = parser.parse_args(addl + argsl)
            if args.record_container_id:
                if not args.cidfile_dir:
                    args.cidfile_dir = os.getcwd()
                del args.record_container_id

        if runtimeContext is None:
            runtimeContext = RuntimeContext(vars(args))
        else:
            runtimeContext = runtimeContext.copy()

        # If caller parsed its own arguments, it may not include every
        # cwltool option, so fill in defaults to avoid crashing when
        # dereferencing them in args.
        for key, val in get_default_args().items():
            if not hasattr(args, key):
                setattr(args, key, val)

        configure_logging(args, stderr_handler, runtimeContext)

        if args.version:
            print(versionfunc())
            return 0
        _logger.info(versionfunc())

        if args.print_supported_versions:
            print("\n".join(supported_cwl_versions(args.enable_dev)))
            return 0

        if not args.workflow:
            if os.path.isfile("CWLFile"):
                args.workflow = "CWLFile"
            else:
                _logger.error(
                    "CWL document required, no input file was provided")
                parser.print_help()
                return 1

        if args.ga4gh_tool_registries:
            ga4gh_tool_registries[:] = args.ga4gh_tool_registries
        if not args.enable_ga4gh_tool_registry:
            del ga4gh_tool_registries[:]

        if args.mpi_config_file is not None:
            runtimeContext.mpi_config = MpiConfig.load(args.mpi_config_file)

        setup_schema(args, custom_schema_callback)

        if args.provenance:
            if argsl is None:
                raise Exception("argsl cannot be None")
            if setup_provenance(args, argsl, runtimeContext) is not None:
                return 1

        loadingContext = setup_loadingContext(loadingContext, runtimeContext,
                                              args)

        uri, tool_file_uri = resolve_tool_uri(
            args.workflow,
            resolver=loadingContext.resolver,
            fetcher_constructor=loadingContext.fetcher_constructor,
        )

        try_again_msg = ("" if args.debug else
                         ", try again with --debug for more information")

        try:
            job_order_object, input_basedir, jobloader = load_job_order(
                args,
                stdin,
                loadingContext.fetcher_constructor,
                loadingContext.overrides_list,
                tool_file_uri,
            )

            if args.overrides:
                loadingContext.overrides_list.extend(
                    load_overrides(file_uri(os.path.abspath(args.overrides)),
                                   tool_file_uri))

            loadingContext, workflowobj, uri = fetch_document(
                uri, loadingContext)

            if args.print_deps and loadingContext.loader:
                printdeps(workflowobj, loadingContext.loader, stdout,
                          args.relative_deps, uri)
                return 0

            loadingContext, uri = resolve_and_validate_document(
                loadingContext,
                workflowobj,
                uri,
                preprocess_only=(args.print_pre or args.pack),
                skip_schemas=args.skip_schemas,
            )

            if loadingContext.loader is None:
                raise Exception("Impossible code path.")
            processobj, metadata = loadingContext.loader.resolve_ref(uri)
            processobj = cast(CommentedMap, processobj)
            if args.pack:
                stdout.write(print_pack(loadingContext, uri))
                return 0

            if args.provenance and runtimeContext.research_obj:
                # Can't really be combined with args.pack at the same time
                runtimeContext.research_obj.packed_workflow(
                    print_pack(loadingContext, uri))

            if args.print_pre:
                stdout.write(
                    json_dumps(processobj,
                               indent=4,
                               sort_keys=True,
                               separators=(",", ": ")))
                return 0

            tool = make_tool(uri, loadingContext)
            if args.make_template:
                make_template(tool)
                return 0

            if args.validate:
                print(f"{args.workflow} is valid CWL.")
                return 0

            if args.print_rdf:
                stdout.write(
                    printrdf(tool, loadingContext.loader.ctx,
                             args.rdf_serializer))
                return 0

            if args.print_dot:
                printdot(tool, loadingContext.loader.ctx, stdout)
                return 0

            if args.print_targets:
                for f in ("outputs", "steps", "inputs"):
                    if tool.tool[f]:
                        _logger.info("%s%s targets:", f[0].upper(), f[1:-1])
                        stdout.write("  " + "\n  ".join(
                            [shortname(t["id"]) for t in tool.tool[f]]) + "\n")
                return 0

            if args.target:
                ctool = choose_target(args, tool, loadingContext)
                if ctool is None:
                    return 1
                else:
                    tool = ctool

            elif args.single_step:
                ctool = choose_step(args, tool, loadingContext)
                if ctool is None:
                    return 1
                else:
                    tool = ctool

            if args.print_subgraph:
                if "name" in tool.tool:
                    del tool.tool["name"]
                stdout.write(
                    json_dumps(tool.tool,
                               indent=4,
                               sort_keys=True,
                               separators=(",", ": ")))
                return 0

        except (ValidationException) as exc:
            _logger.error("Tool definition failed validation:\n%s",
                          str(exc),
                          exc_info=args.debug)
            return 1
        except (RuntimeError, WorkflowException) as exc:
            _logger.error(
                "Tool definition failed initialization:\n%s",
                str(exc),
                exc_info=args.debug,
            )
            return 1
        except Exception as exc:
            _logger.error(
                "I'm sorry, I couldn't load this CWL file%s.\nThe error was: %s",
                try_again_msg,
                str(exc) if not args.debug else "",
                exc_info=args.debug,
            )
            return 1

        if isinstance(tool, int):
            return tool

        # If on MacOS platform, TMPDIR must be set to be under one of the
        # shared volumes in Docker for Mac
        # More info: https://dockstore.org/docs/faq
        if sys.platform == "darwin":
            default_mac_path = "/private/tmp/docker_tmp"
            if runtimeContext.tmp_outdir_prefix == DEFAULT_TMP_PREFIX:
                runtimeContext.tmp_outdir_prefix = default_mac_path
            if runtimeContext.tmpdir_prefix == DEFAULT_TMP_PREFIX:
                runtimeContext.tmpdir_prefix = default_mac_path

        if check_working_directories(runtimeContext) is not None:
            return 1

        if args.cachedir:
            if args.move_outputs == "move":
                runtimeContext.move_outputs = "copy"
            runtimeContext.tmp_outdir_prefix = args.cachedir

        runtimeContext.secret_store = getdefault(runtimeContext.secret_store,
                                                 SecretStore())
        runtimeContext.make_fs_access = getdefault(
            runtimeContext.make_fs_access, StdFsAccess)

        if not executor:
            if args.parallel:
                temp_executor = MultithreadedJobExecutor()
                runtimeContext.select_resources = temp_executor.select_resources
                real_executor = temp_executor  # type: JobExecutor
            else:
                real_executor = SingleJobExecutor()
        else:
            real_executor = executor

        try:
            runtimeContext.basedir = input_basedir

            if isinstance(tool, ProcessGenerator):
                tfjob_order = {}  # type: CWLObjectType
                if loadingContext.jobdefaults:
                    tfjob_order.update(loadingContext.jobdefaults)
                if job_order_object:
                    tfjob_order.update(job_order_object)
                tfout, tfstatus = real_executor(tool.embedded_tool,
                                                tfjob_order, runtimeContext)
                if not tfout or tfstatus != "success":
                    raise WorkflowException(
                        "ProcessGenerator failed to generate workflow")
                tool, job_order_object = tool.result(tfjob_order, tfout,
                                                     runtimeContext)
                if not job_order_object:
                    job_order_object = None

            try:
                initialized_job_order_object = init_job_order(
                    job_order_object,
                    args,
                    tool,
                    jobloader,
                    stdout,
                    print_input_deps=args.print_input_deps,
                    relative_deps=args.relative_deps,
                    make_fs_access=runtimeContext.make_fs_access,
                    input_basedir=input_basedir,
                    secret_store=runtimeContext.secret_store,
                    input_required=input_required,
                )
            except SystemExit as err:
                return err.code

            del args.workflow
            del args.job_order

            conf_file = getattr(args,
                                "beta_dependency_resolvers_configuration",
                                None)  # str
            use_conda_dependencies = getattr(args, "beta_conda_dependencies",
                                             None)  # str

            if conf_file or use_conda_dependencies:
                runtimeContext.job_script_provider = DependenciesConfiguration(
                    args)
            else:
                runtimeContext.find_default_container = functools.partial(
                    find_default_container,
                    default_container=runtimeContext.default_container,
                    use_biocontainers=args.beta_use_biocontainers,
                )

            (out, status) = real_executor(tool,
                                          initialized_job_order_object,
                                          runtimeContext,
                                          logger=_logger)

            if out is not None:
                if runtimeContext.research_obj is not None:
                    runtimeContext.research_obj.create_job(out, True)

                    def remove_at_id(doc: CWLObjectType) -> None:
                        for key in list(doc.keys()):
                            if key == "@id":
                                del doc[key]
                            else:
                                value = doc[key]
                                if isinstance(value, MutableMapping):
                                    remove_at_id(value)
                                elif isinstance(value, MutableSequence):
                                    for entry in value:
                                        if isinstance(entry, MutableMapping):
                                            remove_at_id(entry)

                    remove_at_id(out)
                    visit_class(
                        out,
                        ("File", ),
                        functools.partial(add_sizes,
                                          runtimeContext.make_fs_access("")),
                    )

                def loc_to_path(obj: CWLObjectType) -> None:
                    for field in ("path", "nameext", "nameroot", "dirname"):
                        if field in obj:
                            del obj[field]
                    if cast(str, obj["location"]).startswith("file://"):
                        obj["path"] = uri_file_path(cast(str, obj["location"]))

                visit_class(out, ("File", "Directory"), loc_to_path)

                # Unsetting the Generation from final output object
                visit_class(out, ("File", ),
                            MutationManager().unset_generation)

                if isinstance(out, str):
                    stdout.write(out)
                else:
                    stdout.write(json_dumps(out, indent=4, ensure_ascii=False))
                stdout.write("\n")
                if hasattr(stdout, "flush"):
                    stdout.flush()

            if status != "success":
                _logger.warning("Final process status is %s", status)
                return 1
            _logger.info("Final process status is %s", status)
            return 0

        except (ValidationException) as exc:
            _logger.error("Input object failed validation:\n%s",
                          str(exc),
                          exc_info=args.debug)
            return 1
        except UnsupportedRequirement as exc:
            _logger.error(
                "Workflow or tool uses unsupported feature:\n%s",
                str(exc),
                exc_info=args.debug,
            )
            return 33
        except WorkflowException as exc:
            _logger.error(
                "Workflow error%s:\n%s",
                try_again_msg,
                strip_dup_lineno(str(exc)),
                exc_info=args.debug,
            )
            return 1
        except Exception as exc:  # pylint: disable=broad-except
            _logger.error(
                "Unhandled error%s:\n  %s",
                try_again_msg,
                str(exc),
                exc_info=args.debug,
            )
            return 1

    finally:
        if (args and runtimeContext and runtimeContext.research_obj
                and workflowobj and loadingContext):
            research_obj = runtimeContext.research_obj
            if loadingContext.loader is not None:
                research_obj.generate_snapshot(
                    prov_deps(workflowobj, loadingContext.loader, uri))
            else:
                _logger.warning("Unable to generate provenance snapshot "
                                " due to missing loadingContext.loader.")
            if prov_log_handler is not None:
                # Stop logging so we won't half-log adding ourself to RO
                _logger.debug("[provenance] Closing provenance log file %s",
                              prov_log_handler)
                _logger.removeHandler(prov_log_handler)
                # Ensure last log lines are written out
                prov_log_handler.flush()
                # Underlying WritableBagFile will add the tagfile to the manifest
                prov_log_handler.stream.close()
                prov_log_handler.close()
            research_obj.close(args.provenance)

        _logger.removeHandler(stderr_handler)
        _logger.addHandler(defaultStreamHandler)
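
A minimal sketch of how a console entry point might wrap a main() like the one above, given that it returns an integer exit code (0 on success, 1 or 33 on the error paths shown):

import sys

if __name__ == "__main__":
    sys.exit(main())  # propagate the integer exit code to the shell
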
Ejemplo n.º 51
0
from datetime import datetime
from tests.settings import SUMMARIES_BASE_DIR, TESTS_BASE_DIR
from src.orbitaz.utils.filesystem import (
    find_or_create_target_directory, )

import pytest
from orbitaz.utils.fixtures_handler import FixtureHandler
import shutil
from pathlib import Path

# ______________________________________________________________________________
# /////////////////////////////////////////////////////////////////////// LOGGER
import coloredlogs, logging

logger = logging.getLogger(__name__)
coloredlogs.install(level="DEBUG", logger=logger)


# ______________________________________________________________________________
# ///////////////////////////////////////////////////////////////////////// CONF
def pytest_configure(config) -> None:
    """
    This pytest built-in hook allows pytest terminal commands to be set programmatically at runtime.

    This specific implementation configures timestamps for the Excel summaries and takes care of storing the Excel test summaries in the right folder.

    In the course of that, it applies a recursive search function called 'find_or_create_target_directory' to find or create a path for the Excel reports.
    """

    # We try to find a summary directory for the current year and month
    target_directory_path = (SUMMARIES_BASE_DIR /
Ejemplo n.º 52
0
def run(init: bool, server: str, country_code: str, country: str, area: str,
        tcp: bool, daemon: bool, max_load: int, top_servers: int, pings: str,
        kill: bool, kill_flush: bool, update: bool, list_servers: bool,
        force_fw_rules: bool, p2p: bool, dedicated: bool, double_vpn: bool,
        tor_over_vpn: bool, anti_ddos: bool, netflix: bool, test: bool,
        internally_allowed: List, skip_dns_patch: bool, silent: bool,
        nvram: str, openvpn_options: str, location: float) -> bool:

    if init:
        initialise(log_folder)

    fieldstyles = {
        'asctime': {
            'color': 'green'
        },
        'hostname': {
            'color': 'magenta'
        },
        'levelname': {
            'color': 'black',
            'bold': True
        },
        'name': {
            'color': 'blue'
        },
        'programname': {
            'color': 'cyan'
        },
    }
    levelstyles = {
        'spam': {
            'color': 'green',
            'faint': True
        },
        'debug': {
            'color': 'green',
            'bold': True
        },
        'verbose': {
            'color': 'blue',
            'bold': True
        },
        'info': {},
        'notice': {
            'color': 'magenta',
            'bold': True
        },
        'warning': {
            'color': 'yellow',
            'bold': True
        },
        'success': {
            'color': 'green',
            'bold': True
        },
        'error': {
            'color': 'red',
            'bold': True
        },
        'critical': {
            'color': 'white',
            'background': 'red',
            'bold': True
        }
    }

    logger.addHandler(logging.StreamHandler())

    # if the log folder doesn't exist, exit; "--init" creates it
    if not os.path.exists(log_folder):
        logger.error(
            "Please initialise first by running 'sudo openpyn --init', then start using 'openpyn' without sudo"
        )
        return 1

    # Add another rotating handler to log to .log files
    # fix permissions if needed
    for attempt in range(2):
        try:
            file_handler = logging.handlers.TimedRotatingFileHandler(
                log_folder + '/openpyn.log', when='W0', interval=4)
            file_handler_formatter = logging.Formatter(log_format)
            file_handler.setFormatter(file_handler_formatter)
            logger.addHandler(file_handler)
        except PermissionError:
            root.verify_root_access(
                "Root access needed to set permissions of {}/openpyn.log".
                format(log_folder))
            subprocess.run(
                "sudo chmod 777 {}/openpyn.log".format(log_folder).split())
            subprocess.run(
                "sudo chmod 777 {}/openpyn-notifications.log".format(
                    log_folder).split())
        else:
            break

    # In this case only log messages originating from this logger will show up on the terminal.
    coloredlogs.install(level="verbose",
                        logger=logger,
                        fmt=log_format,
                        level_styles=levelstyles,
                        field_styles=fieldstyles)

    stats = True
    if sys.__stdin__.isatty():
        logger.debug("Interactive")
    else:
        logger.addHandler(logging.StreamHandler(sys.stdout))
        logger.setLevel(logging.WARNING)
        logger.debug("Non-Interactive")
        stats = False

    port = "udp"
    if tcp:
        port = "tcp"

    detected_os = sys.platform
    if detected_os == "linux":
        if subprocess.check_output(["/bin/uname", "-o"]).decode(
                sys.stdout.encoding).strip() == "ASUSWRT-Merlin":
            force_fw_rules = False
            silent = True
            skip_dns_patch = True
            if openvpn_options:
                openvpn_options += " " + "--syslog openpyn"
            else:
                openvpn_options = "--syslog openpyn"
            logger.debug(openvpn_options)
        elif os.path.exists("/etc/openwrt_release"):
            force_fw_rules = False
            silent = True
            skip_dns_patch = True
            nvram = None
        else:
            nvram = None
    elif detected_os == "win32":
        logger.error("Are you even a l33t mate? Try GNU/Linux")
        return 1

    # check if dependencies are installed
    if shutil.which("openvpn") is None or shutil.which(
            "wget") is None or shutil.which("unzip") is None:
        # In case of Debian Sid where "openvpn" is only in root's PATH, don't error out
        try:
            root_access = root.verify_root_access(
                "Sudo credentials required to check if 'openvpn' is available in root's PATH"
            )
            if root_access is False:
                root.obtain_root_access()
            subprocess.check_output(["sudo", "which", "wget"])
            subprocess.check_output(["sudo", "which", "unzip"])
            # subprocess.check_output(["sudo", "which", "openvpn"])
        except subprocess.CalledProcessError:
            logger.error("Please Install 'openvpn' 'wget' 'unzip' first")
            return 1

    elif daemon:
        if detected_os != "linux":
            logger.error("Daemon mode is only available in GNU/Linux distros")
            return 1

        if not root.verify_running_as_root():
            logger.error("Please run '--daemon' or '-d' mode with sudo")
            return 1
        openpyn_options = ""

        # if only the positional argument was used
        if country_code is None and server is None:
            country_code = country  # treat the positional arg, e.g. "us", the same as "-c us"
        # if either "-c" or the positional arg, e.g. "au", is present

        if country_code:
            if len(country_code) > 2:  # full country name
                # get the country_code from the full name
                country_code = api.get_country_code(full_name=country_code)
            country_code = country_code.lower()
            openpyn_options += country_code

        elif server:
            openpyn_options += " --server " + server

        if area:
            openpyn_options += " --area " + area
        if tcp:
            openpyn_options += " --tcp"
        if max_load:
            openpyn_options += " --max-load " + str(max_load)
        if top_servers:
            openpyn_options += " --top-servers " + str(top_servers)
        if pings:
            openpyn_options += " --pings " + str(pings)
        if force_fw_rules:
            openpyn_options += " --force-fw-rules"
        if p2p:
            openpyn_options += " --p2p"
        if dedicated:
            openpyn_options += " --dedicated"
        if double_vpn:
            openpyn_options += " --double"
        if tor_over_vpn:
            openpyn_options += " --tor"
        if anti_ddos:
            openpyn_options += " --anti-ddos"
        if netflix:
            openpyn_options += " --netflix"
        if test:
            openpyn_options += " --test"
        if internally_allowed:
            open_ports = ""
            for port_number in internally_allowed:
                open_ports += " " + port_number
            openpyn_options += " --allow" + open_ports
        if skip_dns_patch:
            openpyn_options += " --skip-dns-patch"
        if nvram:
            openpyn_options += " --nvram " + str(nvram)
        if openvpn_options:
            openpyn_options += " --openvpn-options '" + openvpn_options + "'"
        # logger.debug(openpyn_options)
        if subprocess.check_output(["/bin/uname", "-o"]).decode(
                sys.stdout.encoding).strip() == "ASUSWRT-Merlin":
            initd.update_service(openpyn_options, run=True)
        elif os.path.exists("/etc/openwrt_release"):
            initd.update_service(openpyn_options, run=True)
        else:
            systemd.update_service(openpyn_options, run=True)
        return 0

    elif kill:
        logger.warning("Killing the running processes")
        kill_management_client()
        kill_vpn_processes()  # don't touch iptable rules
        kill_openpyn_process()

    elif kill_flush:
        firewall.clear_fw_rules()  # also clear iptable rules
        # if --allow present, allow those ports internally
        logger.info("Re-enabling ipv6")
        firewall.manage_ipv6(disable=False)
        if internally_allowed:
            network_interfaces = get_network_interfaces()
            firewall.internally_allow_ports(network_interfaces,
                                            internally_allowed)
        kill_management_client()
        kill_vpn_processes()
        kill_openpyn_process()

    elif update:
        update_config_files()

    # a hack to list all countries and their codes when no arg supplied with "-l"
    elif list_servers != 'nope':  # means "-l" supplied
        if list_servers is None:  # no arg given with "-l"
            if p2p or dedicated or double_vpn or tor_over_vpn or anti_ddos or netflix:
                # show the special servers in all countries
                display_servers(list_servers="all",
                                port=port,
                                area=area,
                                p2p=p2p,
                                dedicated=dedicated,
                                double_vpn=double_vpn,
                                tor_over_vpn=tor_over_vpn,
                                anti_ddos=anti_ddos,
                                netflix=netflix,
                                location=location)
            else:
                api.list_all_countries()
        # if a country code is supplied give details about that country only.
        else:
            # if full name of the country supplied get country_code
            if len(list_servers) > 2:
                list_servers = api.get_country_code(full_name=list_servers)
            display_servers(list_servers=list_servers,
                            port=port,
                            area=area,
                            p2p=p2p,
                            dedicated=dedicated,
                            double_vpn=double_vpn,
                            tor_over_vpn=tor_over_vpn,
                            anti_ddos=anti_ddos,
                            netflix=netflix,
                            location=location)

    # only clear/touch FW Rules if "-f" used
    elif force_fw_rules:
        firewall.clear_fw_rules()

    # check if OpenVPN config files exist if not download them.
    check_config_files()

    # if only the positional argument was used
    if country_code is None and server is None:
        country_code = country  # treat the positional arg, e.g. "us", the same as "-c us"
    # if either "-c" or the positional arg, e.g. "au", is present
    if country_code:
        # ask for and store credentials if not present, skip if "--test"
        if not test:
            if credentials.check_credentials() is False:
                credentials.save_credentials()

        if len(country_code) > 2:  # full country name
            # get the country_code from the full name
            country_code = api.get_country_code(full_name=country_code)
        country_code = country_code.lower()

        # keep trying to connect to new servers
        for tries in range(3):  # pylint: disable=W0612
            better_servers_list = find_better_servers(
                country_code, area, max_load, top_servers, tcp, p2p, dedicated,
                double_vpn, tor_over_vpn, anti_ddos, netflix, location, stats)
            pinged_servers_list = ping_servers(better_servers_list, pings,
                                               stats)
            chosen_servers = choose_best_servers(pinged_servers_list, stats)

            # connect to chosen_servers, if one fails go to next
            for aserver in chosen_servers:
                if stats:
                    print(
                        Style.BRIGHT + Fore.BLUE +
                        "Out of the Best Available Servers, Chose",
                        (Fore.GREEN + aserver + Fore.BLUE) + "\n")
                # if "-f" used apply firewall rules
                if force_fw_rules:
                    network_interfaces = get_network_interfaces()
                    vpn_server_ip = get_vpn_server_ip(aserver, port)
                    firewall.apply_fw_rules(network_interfaces, vpn_server_ip,
                                            skip_dns_patch)
                    if internally_allowed:
                        firewall.internally_allow_ports(
                            network_interfaces, internally_allowed)
                if nvram:
                    # TODO return 0 on success else 1 in asus.run()
                    asus.run(aserver, country_code, nvram, "All", "adaptive",
                             "Strict", tcp, test)
                    logger.success("SAVED SERVER " + aserver + " ON PORT " +
                                   port + " TO NVRAM")
                return (connect(aserver, port, silent, test, skip_dns_patch,
                                openvpn_options))
    elif server:
        # ask for and store credentials if not present, skip if "--test"
        if not test:
            if credentials.check_credentials() is False:
                credentials.save_credentials()

        server = server.lower()
        # if "-f" used apply firewall rules
        if force_fw_rules:
            network_interfaces = get_network_interfaces()
            vpn_server_ip = get_vpn_server_ip(server, port)
            firewall.apply_fw_rules(network_interfaces, vpn_server_ip,
                                    skip_dns_patch)
            if internally_allowed:
                firewall.internally_allow_ports(network_interfaces,
                                                internally_allowed)
        if nvram:
            asus.run(server, country_code, nvram, "All", "adaptive", "Strict",
                     tcp, test)
            logger.success("SAVED SERVER " + server + " ON PORT " + port +
                           " TO NVRAM")
            return 0
        for i in range(20):  # pylint: disable=W0612
            return (connect(server, port, silent, test, skip_dns_patch,
                            openvpn_options))
    else:
        logger.info(
            'To see usage options type: "openpyn -h" or "openpyn --help"')
    return 0  # if everything went ok
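A minimal standalone sketch of the coloredlogs pattern used above: install a colorized handler on a single named logger with custom per-level styles so that only that logger's records are colorized. All names below are illustrative and are not part of openpyn.

import logging

import coloredlogs

demo_logger = logging.getLogger("demo")
demo_level_styles = {
    'warning': {'color': 'yellow', 'bold': True},
    'error': {'color': 'red', 'bold': True},
}
coloredlogs.install(level='DEBUG',
                    logger=demo_logger,
                    fmt='%(asctime)s %(levelname)s %(message)s',
                    level_styles=demo_level_styles)
demo_logger.error("only records from the 'demo' logger are colorized")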
Ejemplo n.º 53
0
import logging
import re
import signal
import sys
import tempfile
import threading
import time
import traceback
from typing import List, Optional

import coloredlogs
import yaml

from dnsrobocert.core import background, certbot, config, legacy, utils

LOGGER = logging.getLogger(__name__)
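# Passing logger=LOGGER scopes coloredlogs' colorized handler to this module's
# logger rather than to the root logger.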
coloredlogs.install(logger=LOGGER)


def _process_config(
    config_path: str,
    directory_path: str,
    runtime_config_path: str,
    lock: threading.Lock,
):
    dnsrobocert_config = config.load(config_path)

    if not dnsrobocert_config:
        return

    if dnsrobocert_config.get("draft"):
        LOGGER.info(
Ejemplo n.º 54
0
import logging
import os
import random
import time

import coloredlogs
import pandas as pd
from matplotlib import gridspec, pyplot as plt
from pmdarima import auto_arima

from data.convertData import convert, interpolate_cgm
from data.readData import read_data

coloredlogs.install(
    level='INFO',
    fmt='%(asctime)s %(filename)s[%(lineno)d]:%(funcName)s %(levelname)s %(message)s')

path = os.getenv('T1DPATH', '../')
filename = path + "data/csv/data_17_1-6.csv"

test = 60 * 3

time_periods = [600, 2400, 3600, 6000]


def prep_csv():
    data = read_data(filename)
    data = convert(data)
    data = interpolate_cgm(data)
    cgm = data['cgmValue']
    cgm.to_csv(path + 'data/csv/cgm_17_1-6.csv')
Ejemplo n.º 55
0
import argparse
import logging

import coloredlogs
import yaml

# project-local modules referenced below (import style is an assumption)
import constants
import k8s
import utils

parser = argparse.ArgumentParser()
parser.add_argument('-i', dest='inventory', required=True,
                    help='Path to inventory file')
parser.add_argument('-v', dest='verbose', action='store_true', help='Verbose output')

args = parser.parse_args()

log = logging.getLogger(__name__)
if args.verbose:
    log.setLevel(logging.DEBUG)
    level = 'DEBUG'
else:
    log.setLevel(logging.INFO)
    level = 'INFO'
coloredlogs.install(level=level)

if __name__ == '__main__':
    with open(args.inventory, 'r') as inventory_file:
        inventory = yaml.safe_load(inventory_file)
        log.info('Parse K8S config')
        k8s_config = k8s.K8SConfig(**inventory['k8s'])
        log.info('Parse docker config')
        docker_config = k8s.DockerConfig(**inventory['docker'])
        log.info('Parse nodes')
        nodes = utils.parse_nodes(inventory['nodes'])
        utils.parse_roles(**inventory['nodes'])
        log.info('Install {} for nodes with label {}'.format(' '.join(constants.PACKAGES), 'all'))
        utils.install_pkgs(constants.PACKAGES, 'all')
        utils.create_repo('docker.list', constants.DOCKER_REPO)
        log.info('Install {} for nodes with label {}'.format(' '.join(constants.DOCKER_PACKAGES), 'docker'))
Ejemplo n.º 56
0
"""Tag NEMO code and model configuration repos for a production release.
"""
import argparse
import logging
from pathlib import Path

import coloredlogs
import hglib
import verboselogs
import yaml

NAME = "tag_release"
COMMIT_AUTHOR = f"SalishSeaNowcast.release_mgmt.{NAME}"
verboselogs.install()
logger = logging.getLogger(NAME)
coloredlogs.install(
    fmt="%(asctime)s %(hostname)s %(name)s %(levelname)s %(message)s")


def main():
    parsed_args = _command_line_interface()
    repos = _load_repos_list(parsed_args.repos_file)
    for repo in repos:
        _tag_repo(repo, parsed_args.tag)


def _command_line_interface():
    parser = argparse.ArgumentParser(description="""
        Tag NEMO code and model configuration repos for a production release.
        """)
    parser.prog = f"python -m release_mgmt.{NAME}"
    parser.add_argument(
Ejemplo n.º 57
0
    def __init__(self, config, external_logger=None):
        """
        Initialize the model, read config file, start and end date, and logging
        """
        # read the config file and store
        if isinstance(config, str):
            if not os.path.isfile(config):
                raise Exception(
                    'Configuration file does not exist --> {}'.format(config))
            configFile = config

            # Read in the original users config
            ucfg = get_user_config(config, modules='smrf')

        elif isinstance(config, UserConfig):
            ucfg = config
            configFile = config.filename

        else:
            raise Exception('Config passed to SMRF is neither a file name '
                            'nor a UserConfig instance')
        # start logging
        if external_logger is None:

            if 'log_level' in ucfg.cfg['system']:
                loglevel = ucfg.cfg['system']['log_level'].upper()
            else:
                loglevel = 'INFO'

            numeric_level = getattr(logging, loglevel, None)
            if not isinstance(numeric_level, int):
                raise ValueError('Invalid log level: %s' % loglevel)

            # setup the logging
            logfile = None
            if ucfg.cfg['system']['log_file'] is not None:
                logfile = ucfg.cfg['system']['log_file']
                if not os.path.isabs(logfile):
                    logfile = abspath(
                        join(dirname(configFile),
                             ucfg.cfg['system']['log_file']))

                if not os.path.isdir(dirname(logfile)):
                    os.makedirs(dirname(logfile))

                if not os.path.isfile(logfile):
                    with open(logfile, 'w+'):
                        pass

            fmt = '%(levelname)s:%(name)s:%(message)s'
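            # Write plain records to the configured log file when one is set;
            # otherwise log to the console and let coloredlogs colorize the output.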
            if logfile is not None:
                logging.basicConfig(filename=logfile,
                                    level=numeric_level,
                                    filemode='w+',
                                    format=fmt)
            else:
                logging.basicConfig(level=numeric_level)
                coloredlogs.install(level=numeric_level, fmt=fmt)

            self._loglevel = numeric_level

            self._logger = logging.getLogger(__name__)
        else:
            self._logger = external_logger

        # add the title
        title = self.title(2)
        for line in title:
            self._logger.info(line)

        out = ucfg.cfg['output']['out_location']

        # Make the tmp and output directories if they do not exist
        makeable_dirs = [out, join(out, 'tmp')]
        for path in makeable_dirs:
            if not os.path.isdir(path):
                try:
                    self._logger.info("Directory does not exist, Creating:\n{}"
                                      "".format(path))
                    os.makedirs(path)

                except OSError as e:
                    raise e

        self.temp_dir = path

        # Check the user config file for errors and report issues if any
        self._logger.info("Checking config file for issues...")
        warnings, errors = check_config(ucfg)
        print_config_report(warnings, errors, logger=self._logger)
        self.ucfg = ucfg
        self.config = self.ucfg.cfg

        # Exit SMRF if config file has errors
        if len(errors) > 0:
            self._logger.error("Errors in the config file. See configuration"
                               " status report above.")
            sys.exit()

        # Write the config file to the output dir no matter where the project is
        full_config_out = abspath(join(out, 'config.ini'))

        self._logger.info("Writing config file with full options.")
        generate_config(self.ucfg, full_config_out)

        # Process the system variables
        for k, v in self.config['system'].items():
            setattr(self, k, v)

        os.environ['WORKDIR'] = self.temp_dir

        # Get the time section utils
        self.start_date = pd.to_datetime(self.config['time']['start_date'])
        self.end_date = pd.to_datetime(self.config['time']['end_date'])

        # Get the timesteps correctly in the time zone
        d = data.mysql_data.date_range(
            self.start_date, self.end_date,
            timedelta(minutes=int(self.config['time']['time_step'])))

        tzinfo = pytz.timezone(self.config['time']['time_zone'])
        self.date_time = [di.replace(tzinfo=tzinfo) for di in d]
        self.time_steps = len(self.date_time)

        # need to align date time
        if 'date_method_start_decay' in self.config['albedo']:
            self.config['albedo']['date_method_start_decay'] = \
                self.config['albedo']['date_method_start_decay'].replace(tzinfo=tzinfo)
            self.config['albedo']['date_method_end_decay'] = \
                self.config['albedo']['date_method_end_decay'].replace(tzinfo=tzinfo)

        # if a gridded dataset will be used
        self.gridded = False
        self.forecast_flag = False
        if 'gridded' in self.config:
            self.gridded = True
            if self.config['gridded']['data_type'] in [
                    'hrrr_netcdf', 'hrrr_grib'
            ]:
                self.forecast_flag = self.config['gridded'][
                    'hrrr_forecast_flag']

            # hours from start of day
            self.day_hour = self.start_date - pd.to_datetime(
                d[0].strftime("%Y%m%d"))
            self.day_hour = int(self.day_hour / np.timedelta64(1, 'h'))

        if ((self.start_date > datetime.now() and not self.gridded)
                or (self.end_date > datetime.now() and not self.gridded)):
            raise ValueError("A date set in the future can only be used with"
                             " WRF generated data!")

        self.distribute = {}

        if self.config['system']['qotw']:
            self._logger.info(getqotw())

        # Initialize the distribute dict
        self._logger.info('Started SMRF --> %s' % datetime.now())
        self._logger.info('Model start --> %s' % self.start_date)
        self._logger.info('Model end --> %s' % self.end_date)
        self._logger.info('Number of time steps --> %i' % self.time_steps)
Ejemplo n.º 58
0
from bs4 import BeautifulSoup
import argparse
import yaml
import coloredlogs
import logging
import os
import datetime
from pushbullet import Pushbullet
import shelve
import schedule
import time
import re
import sys

# Setup logging
coloredlogs.install(level="INFO")
log = logging.getLogger("main")


class LuxMedSniper:
    LUXMED_LOGIN_URL = 'https://portalpacjenta.luxmed.pl/PatientPortal/Account/LogIn'
    LUXMED_LOGOUT_URL = 'https://portalpacjenta.luxmed.pl/PatientPortal/Account/LogOn'
    MAIN_PAGE_URL = 'https://portalpacjenta.luxmed.pl/PatientPortal'
    REQUEST_RESERVATION_URL = 'https://portalpacjenta.luxmed.pl/PatientPortal/Reservations/Reservation/PartialSearch'
    VISITS_PAGE_URL = 'https://portalpacjenta.luxmed.pl/PatientPortal/Reservations/Visits/GetVisitsAjax?source=0&fromDate={FromDate}&toDate={ToDate}'  # &toDate=14-03-2020&fromDate=14-01-2020

    def __init__(self, configuration_file="luxmedSniper.yaml"):
        self.log = logging.getLogger("LuxMedSniper")
        self.log.info("LuxMedSniper logger initialized")
        # Open configuration file
Ejemplo n.º 59
0
import logging
import os
from shutil import copyfile

import cairosvg
import coloredlogs

logging.basicConfig(
    format='%(asctime)s %(levelname)-8s %(name)s   : %(message)s',
    level=logging.INFO,
    datefmt='%Y-%m-%d %H:%M:%S')

log = logging.getLogger("convert")
coloredlogs.install(level=os.getenv("LOG_LEVEL", "INFO"))

source_dir = "picons-source/build-source/logos/"
output_dir = "picons"

for subdir, dirs, files in os.walk(source_dir):
    log.info("Found {} files in {}. Converting...".format(
        len(files), source_dir))

    for file in files:
        raw_name = file.replace(".default.svg", "")
        raw_name = raw_name.replace(".default.png", "")
        raw_name = raw_name.replace(".default.png", "")
        raw_name = raw_name.split("-")[0]
        output_file = "{}/{}.png".format(output_dir, raw_name)
        if file.endswith("png"):
            log.debug("Already a png {}. saving to {}...".format(
                file, output_file))
            copyfile("picons-source/build-source/logos/{}".format(file),
                     output_file)
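        # Hypothetical continuation (the snippet is truncated here): the SVG branch
        # would presumably rasterize with cairosvg, roughly along these lines.
        elif file.endswith("svg"):
            log.debug("Converting {} to {}...".format(file, output_file))
            cairosvg.svg2png(url=os.path.join(subdir, file), write_to=output_file)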
Ejemplo n.º 60
0
import openpyxl
import argparse
import os
import logging
import coloredlogs

argparser = argparse.ArgumentParser()
argparser.add_argument('dir', help='Directory containing the Excel files')
argparser.add_argument('col', help='Column letter (e.g. `E`)')
argparser.add_argument('-s', '--skip-rows', type=int, default=0, help='Number of rows to skip')
argparser.add_argument('-v', '--verbose', action='store_true', help='Print debug logging')

args = argparser.parse_args()

coloredlogs.install(level=logging.DEBUG if args.verbose else logging.INFO)

col_index = ord(args.col.lower()) - ord('a')
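# Maps a single column letter (A-Z) to a zero-based index; multi-letter columns such as 'AA' are not handled.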

total_char_count = 0

for filename in os.listdir(args.dir):
    if not filename.lower().endswith('.xlsx') or filename.startswith('~$'):
        logging.info(f'Skipping file: {filename}')
        continue
    logging.debug(f'Opening: {filename}')
    wb = openpyxl.open(os.path.join(args.dir, filename))
    for ws in wb:
        logging.debug(f'Worksheet: {ws.title}')
        rows = list(ws.iter_rows())
        if len(rows) <= args.skip_rows: