Example #1
def read_config(albumNum, trackNum):
	with open(CONFIG, 'r') as file:
		cfgfile = file.read()
	config = ConfigParser.RawConfigParser(allow_no_value=True)
	config.readfp(io.BytesIO(cfgfile))

	# list all contents
	#logger.info('List all contents')
	#logger.info('Sections: {}'.format(config.sections()))
	#for section in config.sections():
	#	logger.info('Section: {}'.format(section))
	#	logger.info('Options: {}'.format(config.options(section)))
	#	for option in config.options(section):
	#		val = config.get(section, option)
	#		if val == -1:
	#			logger.warning('skip: {}'.format(option))
	#		logger.info('read config: {} {} {}'.format(section, option, val))

	try:
		albumNum = config.getint('cdc', 'album')
		trackNum = config.getint('cdc', 'track')
		logger.info('read config album: {}, track: {}'.format(albumNum, trackNum))
	except (ConfigParser.Error, ValueError):
		logger.warning("can't read config file")
	return [albumNum, trackNum]
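A minimal sketch of the [cdc] section this function expects, shown with the
Python 3 configparser for brevity; the option values are invented:

import configparser

config = configparser.RawConfigParser(allow_no_value=True)
config.read_string("[cdc]\nalbum = 3\ntrack = 12\n")
print(config.getint('cdc', 'album'))  # -> 3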
Example #2
def get_cec_controller(config, send_errors, mail_sender):
    cec_mode_index = None
    try:
        cec_mode_index = config.getint("Alarmmonitor", "cec_mode")
        cec_mode = CecMode(cec_mode_index)
    except ValueError:
        logger.warning("Invalid CEC mode: " + cec_mode_index)
        cec_mode = CecMode.LIB_CEC
    logger.info("Using CEC mode: " + cec_mode.name)

    cec_logging_index = None
    try:
        cec_logging_index = config.getint(
            "Alarmmonitor",
            "cec_logging",
            fallback=CecLogging.CEC_LOG_ERROR.value)
        cec_logging = CecLogging(cec_logging_index)
    except ValueError:
        logger.warning("Invalid CEC logging level: " + cec_logging_index)
        cec_logging = CecLogging.CEC_LOG_ERROR

    try:
        device_id = config.get("Alarmmonitor", "cec_device_id", fallback="1")
    except ValueError:
        logger.warning("Invalid cec_device_id")
        device_id = "1"

    if cec_mode == CecMode.PYTHON_CEC:
        return PythonCecController(send_errors, mail_sender)
    else:
        return LibCecController(send_errors,
                                mail_sender,
                                debug_level=cec_logging,
                                device_id=device_id)
Example #3
def exec_with_backoff(fn, config, *args, **kwargs):
    """
    Executes fn with exponential backoff.

    Arguments:
        fn: A callable to execute.
        config: A ConfigParser instance, used to get backoff settings.
        args: Positional arguments to pass to the callable.
        kwargs: Keyword arguments to pass to the callable.

    Raises:
        MaxTriesReachedError: After all retries failed.
    """
    backoff = config.getint('mailman2twitter', 'backoff')
    max_tries = config.getint('mailman2twitter', 'max_tries')

    last_exception = None
    for _ in range(max_tries):
        try:
            return fn(config, *args, **kwargs)
        except Exception as exception:
            last_exception = exception
            logger.exception("Push failed, waiting for %s seconds" % backoff)
            time.sleep(backoff)
            backoff *= 2  # exponential backoff: double the delay each retry
    error = MaxTriesReachedError('Stopped trying after %s tries' % max_tries)
    error.args += (last_exception,)
    raise error
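A hedged usage sketch: the [mailman2twitter] options mirror the two getint
calls above, and flaky_push is a hypothetical callable for illustration:

import configparser

config = configparser.ConfigParser()
config.read_string("[mailman2twitter]\nbackoff = 2\nmax_tries = 5\n")

def flaky_push(config):
    raise RuntimeError("network down")  # always fails, to exercise the retries

# exec_with_backoff(flaky_push, config)  # raises MaxTriesReachedError after 5 tries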
Example #4
def main():
    logging_config = get_logging_config("logging_config.yaml")
    set_up_logging(logging_config)

    config = configparser.ConfigParser()
    config.read("config.ini")

    alarm_duration = config.getint("Alarmmonitor", "hdmi_cec_device_on_time")
    polling_interval = config.getint("Alarmmonitor", "polling_interval")
    send_errors = config.getboolean("Alarmmonitor", "send_errors")
    send_starts = config.getboolean("Alarmmonitor", "send_starts")
    show_infos = config.getboolean("blaulichtSMS Einsatzmonitor", "show_infos")

    blaulichtsms_controller = BlaulichtSmsController(
        config["blaulichtSMS Einsatzmonitor"]["customer_id"],
        config["blaulichtSMS Einsatzmonitor"]["username"],
        config["blaulichtSMS Einsatzmonitor"]["password"],
        alarm_duration=alarm_duration,
        show_infos=show_infos)
    mail_sender = AlarmMonitorMailSender()
    hdmi_cec_controller = get_cec_controller(config, send_errors, mail_sender)
    browser_controller = ChromiumBrowserController(
        blaulichtsms_controller.get_session())
    alarm_monitor = AlarmMonitor(polling_interval, send_errors, send_starts,
                                 blaulichtsms_controller, hdmi_cec_controller,
                                 browser_controller, mail_sender)
    alarm_monitor.run()
Example #5
def save_map_anchor(map_name, on_map, cropped, offset):
    h, w = cropped.shape[:2]
    section = const["section"]
    name = map_name + "-" + on_map
    anchor = {
        "Name": name,
        "MainSize": [
            config.getint("Device", "MainWidth"),
            config.getint("Device", "MainHeight"),
        ],
        "Offset": offset,
        "Size": (w, h),
        "Type": "Anchor",
        "OnMap": on_map,
        "Image": name + ".png",
    }
    # set_clip('{}: {},'.format(tojson(name), tojson(anchor)))
    set_clip(toyaml({name: anchor}))

    path = "%s/resources/%s.png" % (section, name)
    cv_save(path, cropped)
    logger.info("%s Saved.", os.path.realpath(path))
Example #6
def main():
    """helper"""
    global QUIT_EVENT
    #read command params
    config_file = toolbar_lib.check_para(sys.argv, "f", "etc/helper.ini")
    #read config
    config = ConfigParser.ConfigParser()
    config.read(config_file)

    default_region = config.get("helper", "default_region")
    git_url = config.get("git", "url")
    git_branch = config.get("git", "branch")
    git_cache = config.get("git", "cache")
    git_dest = config.get("git", "dest")
    git_check_interval = config.getint("git", "check_interval")
    git_checker = GitChecker(git_url, git_branch, git_cache, git_dest,
                             git_check_interval)
    git_checker.run()

    group_update_interval = config.getint("group", "update_interval")
    group_updater = GroupUpdater(default_region, group_update_interval)
    group_updater.run()

    signal.signal(signal.SIGINT, signal_handler)

    while True:
        logging.info("helper_main running......")
        QUIT_EVENT.wait(300)
        if QUIT_EVENT.isSet():
            return
Example #7
def main():
    if len(sys.argv) < 2:
        usage()
        exit(1)
        
    config = loadConfig(sys.argv[1])
    initLogging(config)
    logger = logging.getLogger(__name__)
    logger.info("Execution du script avec le fichier de configuration %s",sys.argv[1])
    
    twit = TwitterManager(
                          config.get(SECTION_TWITTER, OPT_CONSUMER_KEY),
                          config.get(SECTION_TWITTER, OPT_CONSUMER_SECRET),
                          config.get(SECTION_TWITTER, OPT_OAUTH_TOKEN),
                          config.get(SECTION_TWITTER, OPT_OAUTH_TOKEN_SECRET))

    tweets = twit.getTweets(config.getint(SECTION_QUERY, OPT_MAX_COUNT),
                            config.getint(SECTION_QUERY, OPT_MAX_RANGE))
    
    logger.debug(json.dumps(tweets, indent=1))
    rdb = RethinkdbManager(config.get(SECTION_RETHINKDB, OPT_HOST),
                           config.getint(SECTION_RETHINKDB, OPT_PORT),
                           config.get(SECTION_RETHINKDB, OPT_DB),
                           config.get(SECTION_RETHINKDB, OPT_PWD),
                           config.getint(SECTION_RETHINKDB, OPT_TIMEOUT))
    rdb.insertTweets(tweets, config.get(SECTION_RETHINKDB, OPT_TABLE))
    rdb.disconnect()

    logger.info("Fin du script.")
Example #8
def save_crop(name, cropped, offset=None):
    section = const["section"]
    path = "%s/resources/%s.png" % (section, name)
    cv_save(path, cropped)
    logger.info("%s Saved.", os.path.realpath(path))
    if offset is None:
        return
    h, w = cropped.shape[:2]
    info = {
        "Name": name,
        "MainSize": [
            config.getint("Device", "MainWidth"),
            config.getint("Device", "MainHeight"),
        ],
        "Offset": list(offset),
        "Size": [w, h],
        "Type": "Static",
        "Image": name + ".png",
    }

    # set_clip('{}: {},'.format(tojson(name), tojson(info)))
    set_clip(hocon.dump({name: info}))
Example #9
def _main(config):
    setup_logging(config)

    fp = config.get('ircbot', 'channel_config')
    if fp:
        fp = os.path.expanduser(fp)
        if not os.path.exists(fp):
            raise Exception("Unable to read layout config file at %s" % fp)
    else:
        raise Exception("Channel Config must be specified in config file.")

    channel_config = ChannelConfig(yaml.load(open(fp)))

    bot = GerritBot(channel_config.channels,
                    config.get('ircbot', 'nick'),
                    config.get('ircbot', 'pass'),
                    config.get('ircbot', 'server'),
                    config.getint('ircbot', 'port'),
                    config.getboolean('ircbot', 'force_ssl'),
                    config.get('ircbot', 'server_password'))
    g = Gerrit(bot,
               channel_config,
               config.get('gerrit', 'host'),
               config.get('gerrit', 'user'),
               config.getint('gerrit', 'port'),
               config.get('gerrit', 'key'))
    g.start()
    bot.start()
Example #10
def read_config(albumNum, trackNum):
    with open(CONFIG, 'r') as file:
        cfgfile = file.read()
    config = ConfigParser.RawConfigParser(allow_no_value=True)
    config.readfp(io.BytesIO(cfgfile))

    # list all contents
    #logger.info('List all contents')
    #logger.info('Sections: {}'.format(config.sections()))
    #for section in config.sections():
    #	logger.info('Section: {}'.format(section))
    #	logger.info('Options: {}'.format(config.options(section)))
    #	for option in config.options(section):
    #		val = config.get(section, option)
    #		if val == -1:
    #			logger.warning('skip: {}'.format(option))
    #		logger.info('read config: {} {} {}'.format(section, option, val))

    try:
        albumNum = config.getint('cdc', 'album')
        trackNum = config.getint('cdc', 'track')
        logger.info('read config album: {}, track: {}'.format(
            albumNum, trackNum))
    except (ConfigParser.Error, ValueError):
        logger.warning("can't read config file")
    return [albumNum, trackNum]
Example #11
def _main():
    config = ConfigParser.ConfigParser({'server_password': None})
    config.read(sys.argv[1])
    setup_logging(config)

    fp = config.get('ircbot', 'channel_config')
    if fp:
        fp = os.path.expanduser(fp)
        if not os.path.exists(fp):
            raise Exception("Unable to read layout config file at %s" % fp)
    else:
        raise Exception("Channel Config must be specified in config file.")

    channel_config = ChannelConfig(yaml.load(open(fp)))

    doge = DogeMessage(config.get('doge', 'prefix').upper().split(','),
                       config.get('doge', 'positive').upper().split(','),
                       config.get('doge', 'neutral').upper().split(','),
                       config.get('doge', 'negative').upper().split(','))

    bot = GerritBot(channel_config.channels,
                    config.get('ircbot', 'nick'),
                    config.get('ircbot', 'pass'),
                    config.get('ircbot', 'server'),
                    config.getint('ircbot', 'port'),
                    config.get('ircbot', 'server_password'))
    g = Gerrit(bot,
               doge,
               channel_config,
               config.get('gerrit', 'host'),
               config.get('gerrit', 'user'),
               config.getint('gerrit', 'port'),
               config.get('gerrit', 'key'))
    g.start()
    bot.start()
Example #12
def get_chassis_settings(config):
    """ Initialise chassis fan settings. """

    chassis = FanControl()
    chassis.pwm_min = config.getint("Chassis", "pwm_min")
    chassis.pwm_max = config.getint("Chassis", "pwm_max")
    chassis.pwm_safety = config.getint("Chassis", "pwm_safety")
    return chassis
Example #13
    def create(cls, name, config):
        try:
            host = config.get('host', fallback='localhost')
            port = config.getint('port', fallback=6600)
            timeout = config.getint('timeout', fallback=5)
            return cls(name, host, port, timeout)
        except ValueError as error:
            raise ConfigurationError(
                'Host, port or timeout configuration wrong: {}'.format(error))
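Here `config` is a section view rather than a whole parser: section-level
get() and getint() accept a fallback for missing options. A small sketch:

import configparser

parser = configparser.ConfigParser()
parser.read_string("[player]\nport = 6600\n")
section = parser["player"]
print(section.getint("timeout", fallback=5))  # -> 5 (option absent)
print(section.getint("port", fallback=0))     # -> 6600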
Example #15
def mirror(config):
    # Load the filter plugins so the loading doesn't happen in the fast path
    filter_project_plugins()
    filter_release_plugins()

    # Always reference those classes here with the fully qualified name to
    # allow them being patched by mock libraries!
    master = bandersnatch.master.Master(
        config.get("mirror", "master"), config.getfloat("mirror", "timeout")
    )

    # `json` boolean is a new optional option in 2.1.2 - want to support it
    # not existing in old configs and display an error saying that this will
    # error in the not too distant release
    try:
        json_save = config.getboolean("mirror", "json")
    except configparser.NoOptionError:
        logger.error(
            "Please update your config to include a json "
            + "boolean in the [mirror] section. Setting to False"
        )
        json_save = False

    try:
        root_uri = config.get("mirror", "root_uri")
    except configparser.NoOptionError:
        root_uri = None

    try:
        digest_name = config.get("mirror", "digest_name")
    except configparser.NoOptionError:
        digest_name = "sha256"
    if digest_name not in ("md5", "sha256"):
        raise ValueError(
            f"Supplied digest_name {digest_name} is not supported! Please "
            + "update digest_name to one of ('sha256', 'md5') in the [mirror] "
            + "section."
        )

    mirror = bandersnatch.mirror.Mirror(
        config.get("mirror", "directory"),
        master,
        stop_on_error=config.getboolean("mirror", "stop-on-error"),
        workers=config.getint("mirror", "workers"),
        hash_index=config.getboolean("mirror", "hash-index"),
        json_save=json_save,
        root_uri=root_uri,
        digest_name=digest_name,
        keep_index_versions=config.getint("mirror", "keep_index_versions", fallback=0),
    )

    changed_packages = mirror.synchronize()
    logger.info("{} packages had changes".format(len(changed_packages)))
    for package_name, changes in changed_packages.items():
        logger.debug(f"{package_name} added: {changes}")
Example #16
def main():
    config = create_config()

    # configure logging
    configure_logging(config.get('misc', 'logging') or '',
                      verbosity=config.getint('misc', 'verbosity'))
    logger.info('----------- Starting pyethereum %s --------------', __version__)

    logger.debug("Config Ready:%s", konfig.dump_config(config))
    config_ready.send(sender=None, config=config)

    # initialize chain
    check_chain_version(config)
    from pyethereum.chainmanager import chain_manager

    # P2P TCP SERVER
    try:
        tcp_server.start()
    except IOError as e:
        logger.error("Could not start TCP server: \"{0}\"".format(str(e)))
        sys.exit(1)

    # PEER MANAGER THREAD
    peer_manager.start()

    # CHAIN MANAGER THREAD
    chain_manager.start()

    # API SERVER THREAD
    api_server.start()

    # handle termination signals
    def signal_handler(signum=None, frame=None):
        logger.info('Signal handler called with signal {0}'.format(signum))
        peer_manager.stop()
        chain_manager.stop()
        tcp_server.stop()

    for sig in [signal.SIGTERM, signal.SIGHUP, signal.SIGQUIT, signal.SIGINT]:
        signal.signal(sig, signal_handler)

    # connect peer
    if config.get('network', 'remote_host'):
        peer_manager.connect_peer(
            config.get('network', 'remote_host'),
            config.getint('network', 'remote_port'))

    # loop
    while not peer_manager.stopped():
        time.sleep(0.001)

    logger.info('exiting')
    peer_manager.join()
    logger.debug('main thread finished')
Example #17
def main():
    config = create_config()

    # configure logging
    configure_logging(config.get('misc', 'logging') or '',
                      verbosity=config.getint('misc', 'verbosity'))
    logger.info('----------- Starting pyethereum %s --------------',
                __version__)

    logger.debug("Config Ready:%s", konfig.dump_config(config))
    config_ready.send(sender=None, config=config)

    # initialize chain
    check_chain_version(config)
    from pyethereum.chainmanager import chain_manager

    # P2P TCP SERVER
    try:
        tcp_server.start()
    except IOError as e:
        logger.error("Could not start TCP server: \"{0}\"".format(str(e)))
        sys.exit(1)

    # PEER MANAGER THREAD
    peer_manager.start()

    # CHAIN MANAGER THREAD
    chain_manager.start()

    # API SERVER THREAD
    api_server.start()

    # handle termination signals
    def signal_handler(signum=None, frame=None):
        logger.info('Signal handler called with signal {0}'.format(signum))
        peer_manager.stop()
        chain_manager.stop()
        tcp_server.stop()

    for sig in [signal.SIGTERM, signal.SIGHUP, signal.SIGQUIT, signal.SIGINT]:
        signal.signal(sig, signal_handler)

    # connect peer
    if config.get('network', 'remote_host'):
        peer_manager.connect_peer(config.get('network', 'remote_host'),
                                  config.getint('network', 'remote_port'))

    # loop
    while not peer_manager.stopped():
        time.sleep(0.001)

    logger.info('exiting')
    peer_manager.join()
    logger.debug('main thread finished')
Example #18
    def config_read(self):
        """reading config file
        """
        current = time.time()
        if current - self.last_read_config < self.config_read_interval:
            return
        else:
            self.last_read_config = current

        config = ConfigParser.ConfigParser()
        config.read(self.config_file)
        self.dynamo_jobschedule = config.get("worker", "dynamo_jobschedule")
        self.dynamo_jobworker = config.get("worker", "dynamo_jobworker")
        self.sqs_name = config.get("worker", "sqs_name")
        self.worker_heartbeat_interval = config.getint("worker",
                                                       "heartbeat_interval")
        self.sqs_visibility_interval = config.getint(
            "worker", "sqs_visibility_interval")
        self.config_read_interval = config.getint("worker",
                                                  "config_read_interval")
        self.default_region = config.get("worker", "default_region")

        self.dynamo_client = boto3.client("dynamodb",
                                          region_name=self.default_region)
        self.sqs_client = boto3.client("sqs", region_name=self.default_region)
        self.sts_client = boto3.client("sts", region_name=self.default_region)

        self.sqs_visibility_timeout = self.sqs_visibility_interval + 30

        self.aws_account_id = sts.get_account_id(self.sts_client)
        self.queue_url = sqs.get_queue_url(self.sqs_client, self.sqs_name,
                                           self.aws_account_id)

        new_module_path = config.get("worker", "module_path")
        if not new_module_path.startswith("/"):
            current_dir = os.getcwd()
            new_module_path = os.path.normpath(
                os.path.join(current_dir, new_module_path))

        if not os.path.exists(new_module_path):
            os.makedirs(new_module_path)

        if new_module_path not in sys.path:
            sys.path.append(new_module_path)

        # if module_path didn't change, we don't need to schedule it again
        if self.module_path != new_module_path:
            self.observer.unschedule_all()
            self.observer.schedule(self.code_update_handler,
                                   new_module_path,
                                   recursive=True)

        self.module_path = new_module_path
Example #19
        def __init__(self, configfname, problem, codefun, agentclass):
                config = ConfigParser.ConfigParser()
                config.readfp(open(configfname))

                logging.config.fileConfig(config.get('default','logconf'))
                log.info('Setting up evolutionary workbench...')
                self.evolog.critical(evologhead)

                self.problem = problem
                self.codefun = codefun
                self.popsize = config.getint('default','popsize')
                self.parentpsize = config.getint('default','parentpopsize')
                self.maxiters = config.getint('default','numiters')
                self.popratio = self.popsize / self.parentpsize

                opnames = config.get('default','operators')
                oprates = config.get('default','oprates')
                self.opargs = config.get('default','opargs').split(',')
                self.ops_, self.oprates = _initialize_ops(opnames,oprates)
                log.debug(self.ops_)
                log.debug(self.oprates)
                arncfg = config.get('default','arnconf')
                self.arnconfig = ConfigParser.ConfigParser()
                self.arnconfig.readfp(open(arncfg))
                self.agentclass = partial(agentclass, config = self.arnconfig)
                self.mutrate = config.getfloat('default','mutrate')
                self.orig_mutrate = self.mutrate
                self.mutate_ = partial(bitflipmutation,
                                       mutrate = self.mutrate)
                self.improves = 0
                self.tempevals = 0
                self.adfcount = 0

                self.localsearch = config.get('default','localsearch')
                if self.localsearch:
                        log.info('Initializing local search holder')
                        mainmod = __import__('__main__')
                        self.localsearch = getattr(mainmod,
                                                   self.localsearch)(5,codefun)

                self.basicadf = config.get('default','adf')
                if self.basicadf:
                        log.info('Initializing multiplex adf skeleton')
                        mainmod = __import__('__main__')
                        self.basicadf = getattr(mainmod,
                                                   self.basicadf)

                self.numevals = None
                self.population = None
                self.parents = None
                self.best = None
                self.itercount = None
Example #20
def get_train_callbacks(config):
    '''Returns a list of keras callbacks
    :param config: config parser
    :return: list of keras callbacks
    :rtype: list(tf.keras.Callback)
    '''
    callbacks = []

    callbacks.append(tf.keras.callbacks.CSVLogger(
        config.get("default", "csv_log"),
        separator=',',
        append=True
    ))

    callbacks.append(tf.keras.callbacks.ReduceLROnPlateau(
        monitor='val_loss',
        factor=0.1,
        patience=config.getint("training", "reduce_lr_patience"),
        verbose=1,
        mode='min',
        min_lr=config.getfloat("training", "min_lr"),
    ))

    callbacks.append(tf.keras.callbacks.EarlyStopping(
        monitor='val_loss',
        min_delta=0,
        patience=config.getint("training", "early_stopping_patience"),
        verbose=1,
        mode='min',
        baseline=None,
        restore_best_weights=True
    ))

    callbacks.append(tf.keras.callbacks.ModelCheckpoint(
        config.get("default", "checkpoint_path"),
        monitor='val_loss',
        verbose=1,
        save_best_only=True,
        save_weights_only=True,
        mode="min",
        period=config.getint("training", "checkpoint_period"),
    ))
    
    def epoch_begin(epoch, logs):
        print("Resetting seed")
        np.random.seed(1)

    callbacks.append(tf.keras.callbacks.LambdaCallback(on_epoch_begin=epoch_begin))

    return callbacks
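A sketch of the [default] and [training] options these callbacks read; the
values are invented:

import configparser

config = configparser.ConfigParser()
config.read_string("""
[default]
csv_log = train_log.csv
checkpoint_path = ckpt/weights.h5
[training]
reduce_lr_patience = 5
early_stopping_patience = 10
min_lr = 1e-6
checkpoint_period = 1
""")
print(config.getfloat("training", "min_lr"))  # -> 1e-06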
Example #21
def load_config(f: TextIO) -> argparse.Namespace:
    """Load the configuration file with correct parameter data types.

    Args:
        f: a config file opened in text mode

    Returns:
        conf: a Namespace object with the loaded settings
    """
    # Interpolation is used e.g. for expanding the log file name
    config = configparser.ConfigParser(
        interpolation=configparser.ExtendedInterpolation()
    )
    config.read_file(f)

    # Read regex as a byte-string
    regex = literal_eval("b'{}'".format(config.get("parser", "regex", raw=True)))
    variables = validate_regex(regex)

    # Load group_by related options
    group = Group.from_config(config.get("parser", "group_by", fallback=None))
    group.validate(variables)

    # Hardcode the filename template, with {group} and {date} to be substituted when
    # writing to disk.
    config["DEFAULT"][
        "filename"
    ] = "${device:station}_${device:name}{group}_{date:%Y-%m-%d_%H-%M-%S}.npz"

    # Flatten the structure and convert the types of the parameters
    conf = dict(
        station=config.get("device", "station"),
        device=config.get("device", "name"),
        host=config.get("device", "host"),
        port=config.getint("device", "port"),
        timeout=config.getint("device", "timeout", fallback=None),
        regex=regex,
        group=group,
        pack_length=config.getint("parser", "pack_length"),
        dest_dir=config.get("parser", "destination"),
        filename=config.get("DEFAULT", "filename"),
        log_level=config.get("logging", "level"),
        log_file=config.get("logging", "file"),
    )

    # Convert the dictionary to a Namespace object, to enable .attribute access
    conf = argparse.Namespace(**conf)

    return conf
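The ${device:station} syntax in the filename template works because of
ExtendedInterpolation, which lets an option reference another section; a
self-contained sketch:

import configparser

cfg = configparser.ConfigParser(
    interpolation=configparser.ExtendedInterpolation())
cfg.read_string(
    "[device]\nstation = st01\n[parser]\nprefix = ${device:station}_data\n")
print(cfg.get("parser", "prefix"))  # -> st01_data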
Example #22
def main():
    args = make_args()
    config = configparser.ConfigParser()
    utils.load_config(config, args.config)
    for cmd in args.modify:
        utils.modify_config(config, cmd)
    with open(os.path.expanduser(os.path.expandvars(args.logging)), 'r') as f:
        logging.config.dictConfig(yaml.load(f))
    cache_dir = utils.get_cache_dir(config)
    category = utils.get_category(config, cache_dir)
    draw_bbox = utils.visualize.DrawBBox(config, category)
    batch_size = args.rows * args.cols
    paths = [os.path.join(cache_dir, phase + '.pkl') for phase in args.phase]
    dataset = utils.data.Dataset(
        utils.data.load_pickles(paths),
        transform=transform.augmentation.get_transform(config, config.get('transform', 'augmentation').split()),
        shuffle=config.getboolean('data', 'shuffle'),
    )
    logging.info('num_examples=%d' % len(dataset))
    try:
        workers = config.getint('data', 'workers')
    except configparser.NoOptionError:
        workers = multiprocessing.cpu_count()
    collate_fn = utils.data.Collate(
        utils.train.load_sizes(config),
        config.getint('data', 'maintain'),
        resize=transform.parse_transform(config, config.get('transform', 'resize_train')),
        transform_image=transform.get_transform(config, config.get('transform', 'image_train').split()),
    )
    loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=workers, collate_fn=collate_fn)
    for data in loader:
        path, size, image, yx_min, yx_max, cls = (t.numpy() if hasattr(t, 'numpy') else t for t in (data[key] for key in 'path, size, image, yx_min, yx_max, cls'.split(', ')))
        fig, axes = plt.subplots(args.rows, args.cols)
        axes = axes.flat if batch_size > 1 else [axes]
        for ax, path, size, image, yx_min, yx_max, cls in zip(*[axes, path, size, image, yx_min, yx_max, cls]):
            logging.info(path + ': ' + 'x'.join(map(str, size)))
            size = yx_max - yx_min
            target = np.logical_and(*[np.squeeze(a, -1) > 0 for a in np.split(size, size.shape[-1], -1)])
            yx_min, yx_max, cls = (a[target] for a in (yx_min, yx_max, cls))
            image = draw_bbox(image, yx_min.astype(np.int), yx_max.astype(np.int), cls)
            ax.imshow(image)
            ax.set_title('%d objects' % np.sum(target))
            ax.set_xticks([])
            ax.set_yticks([])
        fig.tight_layout()
        mng = plt.get_current_fig_manager()
        mng.resize(*mng.window.maxsize())
        plt.show()
Example #23
def main():
    args = make_args()
    config = configparser.ConfigParser()
    utils.load_config(config, args.config)
    for cmd in args.modify:
        utils.modify_config(config, cmd)
    with open(os.path.expanduser(os.path.expandvars(args.logging)), 'r') as f:
        logging.config.dictConfig(yaml.load(f))
    cache_dir = utils.get_cache_dir(config)
    category = utils.get_category(config, cache_dir)
    draw_bbox = utils.visualize.DrawBBox(category)
    batch_size = args.rows * args.cols
    paths = [os.path.join(cache_dir, phase + '.pkl') for phase in args.phase]
    dataset = utils.data.Dataset(
        utils.data.load_pickles(paths),
        transform=transform.augmentation.get_transform(config, config.get('transform', 'augmentation').split()),
        shuffle=config.getboolean('data', 'shuffle'),
    )
    logging.info('num_examples=%d' % len(dataset))
    try:
        workers = config.getint('data', 'workers')
    except configparser.NoOptionError:
        workers = multiprocessing.cpu_count()
    collate_fn = utils.data.Collate(
        transform.parse_transform(config, config.get('transform', 'resize_train')),
        utils.train.load_sizes(config),
        maintain=config.getint('data', 'maintain'),
        transform_image=transform.get_transform(config, config.get('transform', 'image_train').split()),
    )
    loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=workers, collate_fn=collate_fn)
    for data in loader:
        path, size, image, yx_min, yx_max, cls = (t.numpy() if hasattr(t, 'numpy') else t for t in (data[key] for key in 'path, size, image, yx_min, yx_max, cls'.split(', ')))
        fig, axes = plt.subplots(args.rows, args.cols)
        axes = axes.flat if batch_size > 1 else [axes]
        for ax, path, size, image, yx_min, yx_max, cls in zip(*[axes, path, size, image, yx_min, yx_max, cls]):
            logging.info(path + ': ' + 'x'.join(map(str, size)))
            size = yx_max - yx_min
            target = np.logical_and(*[np.squeeze(a, -1) > 0 for a in np.split(size, size.shape[-1], -1)])
            yx_min, yx_max, cls = (a[target] for a in (yx_min, yx_max, cls))
            image = draw_bbox(image, yx_min.astype(np.int), yx_max.astype(np.int), cls)
            ax.imshow(image)
            ax.set_title('%d objects' % np.sum(target))
            ax.set_xticks([])
            ax.set_yticks([])
        fig.tight_layout()
        mng = plt.get_current_fig_manager()
        mng.resize(*mng.window.maxsize())
        plt.show()
Example #24
    def drawTime(self, img, mtime, config, section):
        assert len(mtime) == 5, 'Do not support this time format'
        time_font = self.cf.get('header', 'time_font')
        time_size = config.getint(section, 'time_size')
        time_color = self.parseArrStr(config.get(section, 'time_color'), ',')
        time_pos = self.parseArrStr(config.get(section, 'time_pos'), ',')
        time_pos_1 = self.parseArrStr(config.get(section, 'time_pos_1'), ',')
        time_pos_2 = self.parseArrStr(config.get(section, 'time_pos_2'), ',')

        im = Image.open(img)
        draw = ImageDraw.Draw(im)
        ttfont = ImageFont.truetype(time_font, time_size)
        draw.text(time_pos,
                  mtime[0:2],
                  fill=(time_color[0], time_color[1], time_color[2]),
                  font=ttfont)
        draw.text(time_pos_1,
                  mtime[2],
                  fill=(time_color[0], time_color[1], time_color[2]),
                  font=ttfont)
        draw.text(time_pos_2,
                  mtime[3:],
                  fill=(time_color[0], time_color[1], time_color[2]),
                  font=ttfont)

        im.save('tmp_img/tmp.png')
        return cv2.imread('tmp_img/tmp.png')
Example #25
def aprs_worker(config, sock):
    """
    Obtains telemetry with infinite loop, forwards to APRS-IS server

    :param config: Configuration file descriptor from aprs.INI
    :param sock: Internet socket
    :return: None
    """
    logger.debug('Starting aprs_worker thread')
    rate = config.getint("APRSIS", "RATE")

    # Local variable initialization
    telemSequence = 0

    # Start infinite loop to send station data to APRS-IS
    while True:
        # Query telemetry database for station data
        stations = getStations()
        stationData = getStationData(stations)

        # Indicate number of stations tracking
        str = "Tracking {0} Faraday stations..."
        logger.info(str.format(len(stations)))

        # Iterate through all stations sending telemetry and position data
        sendPositions(stationData, sock)
        telemSequence = sendtelemetry(stationData, telemSequence, sock)
        sendTelemLabels(stationData, sock)
        sendParameters(stationData, sock)
        sendEquations(stationData, sock)

        # Sleep for intended update rate (seconds)
        sleep(rate)
Example #26
def configGetInt(optName):
    try:
        value = config.getint("solo", optName)
        return value
    except ConfigParser.NoOptionError:
        logger.error("%s not found in %s", optName, configFileName)
        raise # pass it on up
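The failure path configGetInt guards against, sketched with the Python 3
module name (the example itself uses the Python 2 ConfigParser):

import configparser

config = configparser.ConfigParser()
config.read_string("[solo]\n")
try:
    config.getint("solo", "missing_opt")
except configparser.NoOptionError as e:
    print(e)  # No option 'missing_opt' in section: 'solo'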
Example #27
def main():
    # Environment setup (point the display output at localhost)
    os.environ['DISPLAY'] = ':0'

    # Check the command-line arguments
    if len(sys.argv) < 2:
        print('Usage: python deep_regression_train.py output_file.h5')
        sys.exit()

    # Set the number of search trials
    n_trials = config.getint('Trials', 'trials')

    # Optimization search (define the optuna study object)
    study = optuna.create_study(sampler=optuna.samplers.TPESampler())
    # Pass the objective function to optimize(); it is run n_trials times,
    # looking for the trial that minimizes the objective value.
    study.optimize(outer_objective(), n_trials)

    # Show the best trial number
    logger.info('best_trial.number: ' + 'trial#' +
                str(study.best_trial.number))
    # Show the best (minimum) objective value
    logger.info('best_vmae: ' + str(study.best_value))

    # Sort and display the hyperparameters
    logger.info('--- best hyperparameter ---')
    sorted_best_params = sorted(study.best_params.items(), key=lambda x: x[0])
    for i, k in sorted_best_params:
        logger.info(i + ' : ' + str(k))
    logger.info('------------')
Example #28
def parse_config_file(filename):
    config = ConfigParser.ConfigParser()
    config.read(filename)

    try:
        config.items('loggers')
        # We have at least the loggers section so we can set logging config
        logging.config.fileConfig(filename)
    except ConfigParser.NoSectionError:
        log.info('No section loggers in %s' % filename)

    try:
        items = dict(config.items('sqla_taskq')).keys()
    except ConfigParser.NoSectionError:
        log.info('No section sqla_taskq in %s' % filename)
        return None

    dic = {}
    if 'sqla_url' in items:
        dic['sqla_url'] = config.get('sqla_taskq', 'sqla_url')

    if 'kill' in items:
        dic['kill'] = config.getboolean('sqla_taskq', 'kill')
    else:
        dic['kill'] = False

    if 'timeout' in items:
        dic['timeout'] = config.getint('sqla_taskq', 'timeout')
    else:
        dic['timeout'] = 60

    return dic
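An illustrative config accepted by this parser, fed in as a string; the
option values are invented (Python 2, matching the example above):

import io
import ConfigParser

SAMPLE = b"[sqla_taskq]\nsqla_url = sqlite:///tasks.db\nkill = false\ntimeout = 30\n"
config = ConfigParser.ConfigParser()
config.readfp(io.BytesIO(SAMPLE))
print(config.getint('sqla_taskq', 'timeout'))  # -> 30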
Example #30
    def test_journal_001(self):

        test_domain = self.test_domain

        test_path = config.get(test_domain, 'check_path1')
        test_ts = config.getint(test_domain, 'check_ts11')
        test_wlid = config.get(test_domain, 'check_uid11')

        workdir = get_working_dir()
        maschine = MaschineLSH(config, self.journal, outdir=workdir)

        self.assertEqual(test_domain, self.journal.domain)
        journal_entry = maschine.journal.get_entry(test_path, test_ts)
        self.assertEqual(test_wlid, journal_entry['warc_entry'].wlid)

        cross = maschine.get_cross(journal_entry)
        self.assertEqual(
            config.get(test_domain, 'check_cross11'),
            cross.get_unique_id()
        )

        self.assertEqual((1, 1, 1, 1, 1), cross.to_bits())
        self.assertEqual(test_wlid, cross.cur['warc_entry'].wlid)

        diffmethod = DBCEMethodLSH(config, cross)
Example #31
    def run(self):
        logger.debug("initializing spool")
        config = self.config_parser

        self.spool = spool = Spooler(config)

        num_gather_workers = None
        if config.has_option('core', 'gather_workers'):
            num_gather_workers = abs(config.getint('core', 'gather_workers'))

        if not num_gather_workers:
            num_gather_workers = GATHER_POOL_WORKERS

        self._gather_pool = pool = Pool(num_gather_workers)

        persist_queue = self.persist_queue

        self.collect_manager = CollectPlugins(
            base_class=Collect,
            config=config,
            init_args=(config, persist_queue, spool, pool),
            entry_points='arke_plugins',
        )

        self.collect_manager.load(pool=self._gather_pool)
        try:
            self.persist_runner()
        except KeyboardInterrupt:
            pass

        self.shutdown()
Example #32
def watchDirectories():
    """Start watching the watched directories defined in the WatchedDirectories table in the database."""
    watched_dir_path = config.get('MCPServer', "watchDirectoryPath")
    interval = config.getint('MCPServer', "watchDirectoriesPollInterval")

    watched_directories = WatchedDirectory.objects.all()

    for watched_directory in watched_directories:
        directory = watched_directory.watched_directory_path.replace("%watchDirectoryPath%", watched_dir_path, 1)

        # Tuple of variables that may be used by a callback
        row = (watched_directory.watched_directory_path, watched_directory.chain_id, watched_directory.only_act_on_directories, watched_directory.expected_type.description)

        if not os.path.isdir(directory):
            os.makedirs(directory)
        for item in os.listdir(directory):
            if item == ".gitignore":
                continue
            item = item.decode("utf-8")
            path = os.path.join(unicode(directory), item)
            while limitTaskThreads <= threading.activeCount() + reservedAsTaskProcessingThreads:
                time.sleep(1)
            createUnitAndJobChainThreaded(path, row, terminate=False)
        actOnFiles = True
        if watched_directory.only_act_on_directories:
            actOnFiles = False
        watchDirectory.archivematicaWatchDirectory(
            directory,
            variablesAdded=row,
            callBackFunctionAdded=createUnitAndJobChainThreaded,
            alertOnFiles=actOnFiles,
            interval=interval,
        )
Example #33
    def register_schedule(self):
        channel = config.get('irc', 'channel')

        for schedule in config.getlist('irc', 'schedules'):
            sect = ':'.join(('schedule', schedule))

            # do not evaluate isenabled() here.
            # if it does, the disabled action will never be scheduled.
            if not config.has_section(sect):
                logging.error('[schedule] [%s] no such schedule', sect)
                continue

            if not config.has_option(sect, 'action'):
                logging.error('[schedule] [%s] no action specified', sect)
                continue

            action = ':'.join(('action', config.get(sect, 'action')))
            if not config.has_section(action):
                logging.error('[schedule] [%s] invalid action specified', sect)
                continue

            interval = config.getint(sect, 'interval')
            if interval < 60:
                logging.error('[schedule] [%s] interval too short', sect)
                continue

            self.reactor.register_schedule(interval, self.do_action,
                                           action, self.connection,
                                           None, {'target': channel}, sect)
            logging.info('[schedule] [%s] registered', sect)
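config.getlist is not part of the standard library; one hedged way to provide
it in Python 3 is the converters hook, assuming whitespace-separated values:

import configparser

config = configparser.ConfigParser(
    converters={"list": lambda value: value.split()})
config.read_string("[irc]\nchannel = #ops\nschedules = daily weekly\n")
print(config.getlist("irc", "schedules"))  # -> ['daily', 'weekly']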
Example #34
def main():
    config = ConfigParser.ConfigParser()
    config.read(os.path.join(path, 'config.ini'))
    userIDs = config.get('CTP', 'userIDs').split(',')

    client = MongoClient(
        config.get('mongodb', 'host'),
        config.getint('mongodb', 'port'),
    )

    gateWays = []
    for userID in userIDs:
        ctpGateway = CtpGateway(config, 'CTP', userID, client)
        gateWays.append(ctpGateway)
        ctpGateway.run()

    stoped = Event()

    def shutdownFunction(signalnum, frame):
        logging.info(u'System is about to shut down')
        for g in gateWays:
            g.close()

        if not stoped.isSet():
            stoped.set()

    for sig in [signal.SIGINT, signal.SIGHUP, signal.SIGTERM]:
        signal.signal(sig, shutdownFunction)

    while not stoped.wait(1):
        pass

    logging.info(u'System fully shut down')
Example #35
def examples(config, concept, positives, vocab, neg_count=config.getint('sample','neg_count')):
    """
    Builds positive and negative examples.
    """
    while True:
        for (chosen_idx, idces), e_token_indices in positives:          
            if len(chosen_idx) ==1:
                # FIXME: only taking into account those that have exactly one gold concept
                c_token_indices = concept.vectorize[chosen_idx[0]]
            
                negative_token_indices = [concept.vectorize[i] for i in random.sample(list(set([*range(len(concept.names))])-set(idces)),neg_count)]

                entity_inputs = np.tile(pad_sequences([e_token_indices], padding='post', maxlen=config.getint('embedding','length')), (len(negative_token_indices)+1, 1)) # Repeat the same entity for all concepts
                concept_inputs = pad_sequences([c_token_indices]+negative_token_indices, padding='post', maxlen=config.getint('embedding','length'))
                # concept_inputs = np.asarray([[concept_dict[cid]] for cid in [concept_id]+negative_concepts])
                # import pdb; pdb.set_trace()
                distances = [1] + [0]*len(negative_token_indices)

                data = {
                    'inp_mentions': entity_inputs,
                    'inp_candidates': concept_inputs,
                    'prediction_layer': np.asarray(distances),
                }
            
                yield data, data
Example #36
    def __init__(self, config):
        self.config = config
        self.workStatus = WorkStatus()
        try:
            self.id = config.getint('workder', 'id')
        except Exception:
            self.id = random.randint(0, 10000000)
Example #37
def __setup_advanced_logging() -> None:
    """Sets up advanced logging over mail and Discord
    """
    if config.getboolean("logging", "enable_mail_logging"):
        mailcfg = dict(config.items("mail_logging"))
        mailhost = (mailcfg["mailhost"], mailcfg["mailport"])
        toaddrs = mailcfg["toaddrs"].split(",")
        credentials = (mailcfg["username"], mailcfg["password"])
        eh = SMTPHandler(mailhost=mailhost,
                         fromaddr=mailcfg["fromaddr"],
                         toaddrs=toaddrs,
                         subject=mailcfg["subject"],
                         credentials=credentials,
                         secure=(),
                         timeout=config.getint("mail_logging",
                                               "timeout"))
        eh.setFormatter(formatter)
        eh.setLevel(logging.WARNING)
        logger.addHandler(eh)

    if config.getboolean("logging", "enable_discord_logging"):
        avatar_url = config["discord_logging"]["avatar_url"]
        avatar_url = avatar_url if avatar_url else None
        dh = DiscordHandler(config["discord_logging"]["username"],
                            config["discord_logging"]["webhook_url"],
                            avatar_url)
        dh.setFormatter(formatter)
        dh.setLevel(logging.WARNING)
        logger.addHandler(dh)
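dict(config.items("mail_logging")) flattens a section into a plain dict of
strings (it also pulls in any [DEFAULT] values); a small sketch:

import configparser

config = configparser.ConfigParser()
config.read_string(
    "[mail_logging]\nmailhost = smtp.example.org\nmailport = 587\n")
mailcfg = dict(config.items("mail_logging"))
print(mailcfg["mailport"])  # -> '587': items() yields strings, hence getint elsewhere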
Example #38
    def __init__(self, args, config):
        self.args = args
        self.config = config
        self.model_dir = utils.get_model_dir(config)
        self.cache_dir = utils.get_cache_dir(config)
        self.category = utils.get_category(config, self.cache_dir)
        self.anchors = torch.from_numpy(utils.get_anchors(config)).contiguous()
        logging.info('use cache directory ' + self.cache_dir)
        logging.info('tensorboard --logdir ' + self.model_dir)
        if args.delete:
            logging.warning('delete model directory: ' + self.model_dir)
            shutil.rmtree(self.model_dir, ignore_errors=True)
        os.makedirs(self.model_dir, exist_ok=True)
        with open(self.model_dir + '.ini', 'w') as f:
            config.write(f)
        self.saver = utils.train.Saver(self.model_dir,
                                       config.getint('save', 'keep'))
        self.timer_save = utils.train.Timer(config.getfloat('save', 'secs'),
                                            False)
        try:
            self.timer_eval = utils.train.Timer(
                eval(config.get('eval', 'secs')),
                config.getboolean('eval', 'first'))
        except configparser.NoOptionError:
            self.timer_eval = lambda: False
        self.summary_worker = SummaryWorker(self)
        self.summary_worker.start()
Example #39
def launch(hardware):
    from hedgehog.server.hardware.simulated import SimulatedHardwareAdapter
    simulator = hardware == SimulatedHardwareAdapter

    args = parse_args(simulator)

    if args.logging_conf:
        logging.config.fileConfig(args.logging_conf)

    if simulator and args.simulate_sensors:
        _hardware = hardware

        def hardware(*args, **kwargs):
            return _hardware(*args, simulate_sensors=True, **kwargs)

    config = configparser.ConfigParser()
    config.read(args.config_file)

    if args.scan_config and os.path.isfile(args.scan_config_file):
        scan_config = configparser.ConfigParser()
        scan_config.read(args.scan_config_file)

        apply_scan_config(config, scan_config)

        with open(args.config_file, mode='w') as f:
            config.write(f)

    port = args.port or config.getint('default', 'port', fallback=0)

    with suppress(KeyboardInterrupt):
        start(hardware, port)
Example #40
def ConnectMysql():
    # Read the configuration
    config = configparser.ConfigParser()
    curPath = os.getcwd()
    parent_path = os.path.abspath(os.path.dirname(curPath) + os.path.sep + ".")
    #print(parent_path)

    config.read(parent_path + '/scripts/param.ini')
    host_id = config.get("wlblazers", "host")
    port = config.getint("wlblazers", "port")
    username = config.get("wlblazers", "user")
    password = config.get("wlblazers", "passwd")
    dbname = config.get("wlblazers", "db")

    # Open the database connection
    try:
        return pymysql.connect(host=host_id,
                               user=username,
                               passwd=password,
                               port=port,
                               db=dbname,
                               charset='utf8')
    except Exception as e:
        logger.error("Connect to wlblazers error: " + str(e))
        sys.exit(2)
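The [wlblazers] section of param.ini as the reads above imply it, with
placeholder connection values:

import configparser

config = configparser.ConfigParser()
config.read_string(
    "[wlblazers]\n"
    "host = 127.0.0.1\n"
    "port = 3306\n"
    "user = monitor\n"
    "passwd = secret\n"
    "db = wlblazers\n"
)
print(config.getint("wlblazers", "port"))  # -> 3306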
Example #41
    def persist_runner(self):
        config = self.config_parser
        logger.debug("initializing backend %s" % config.get('core', 'persist_backend'))
        persist_backend = getattr(persist, '%s_backend' %
                config.get('core', 'persist_backend'))

        persist_backend = persist_backend(config)

        spool = self.spool

        num_persist_workers = None
        if config.has_option('core', 'persist_workers'):
            num_persist_workers = abs(config.getint('core', 'persist_workers'))

        if not num_persist_workers:
            num_persist_workers = PERSIST_POOL_WORKERS

        self.persist_pool = pool = Pool(num_persist_workers)

        while 1:
            spool_file = None
            if self.stop_now:
                break
            pool.wait_available()
            try:
                spool_file = spool.get(5)
            except Empty:
                sleep(1)
                continue

            pool.spawn(self.persist_data, spool_file, persist_backend)

        pool.join()
Example #42
    def run(self):
        logger.debug("initializing spool")
        config = self.config_parser

        self.spool = spool = Spooler(config)

        num_gather_workers = None
        if config.has_option('core', 'gather_workers'):
            num_gather_workers = abs(config.getint('core', 'gather_workers'))

        if not num_gather_workers:
            num_gather_workers = GATHER_POOL_WORKERS

        self._gather_pool = pool = Pool(num_gather_workers)

        persist_queue = self.persist_queue

        self.collect_manager = CollectPlugins(base_class=Collect,
                                              config=config,
                                              init_args=(config, persist_queue, spool, pool),
                                             entry_points='arke_plugins',
                                            )

        self.collect_manager.load(pool=self._gather_pool)
        try:
            self.persist_runner()
        except KeyboardInterrupt:
            pass

        self.shutdown()
Example #43
    def persist_runner(self):
        config = self.config_parser
        logger.debug("initializing backend %s" %
                     config.get('core', 'persist_backend'))
        persist_backend = getattr(
            persist, '%s_backend' % config.get('core', 'persist_backend'))

        persist_backend = persist_backend(config)

        spool = self.spool

        num_persist_workers = None
        if config.has_option('core', 'persist_workers'):
            num_persist_workers = abs(config.getint('core', 'persist_workers'))

        if not num_persist_workers:
            num_persist_workers = PERSIST_POOL_WORKERS

        self.persist_pool = pool = Pool(num_persist_workers)

        while 1:
            spool_file = None
            if self.stop_now:
                break
            pool.wait_available()
            try:
                spool_file = spool.get(5)
            except Empty:
                sleep(1)
                continue

            pool.spawn(self.persist_data, spool_file, persist_backend)

        pool.join()
Example #44
def main():
    config = create_config()

    # peer manager
    peer_manager = PeerManager(config=config)

    # chain manager
    chain_manager = ChainManager(config=config)

    # start tcp server
    try:
        tcp_server = TcpServer(peer_manager,
                               config.get('network', 'listen_host'),
                               config.getint('network', 'listen_port'))
    except IOError as e:
        logger.error("Could not start TCP server: \"{0}\"".format(str(e)))
        sys.exit(1)

    peer_manager.local_address = (tcp_server.ip, tcp_server.port)
    tcp_server.start()
    peer_manager.start()
    chain_manager.start()

    # handle termination signals
    def signal_handler(signum=None, frame=None):
        logger.info('Signal handler called with signal {0}'.format(signum))
        peer_manager.stop()
        chain_manager.stop()
        # tcp_server checks for peer_manager.stopped()
    for sig in [signal.SIGTERM, signal.SIGHUP, signal.SIGQUIT, signal.SIGINT]:
        signal.signal(sig, signal_handler)

    # connect peer
    if config.get('network', 'remote_host'):
        peer_manager.connect_peer(
            config.get('network', 'remote_host'),
            config.getint('network', 'remote_port'))

    # loop
    while not peer_manager.stopped():
        time.sleep(0.1)
        if len(peer_manager.get_connected_peer_addresses()) > 2:
            chain_manager.bootstrap_blockchain()

    logger.info('exiting')
    # tcp_server.join() # does not work!
    peer_manager.join()
Example #45
def main():
    config = create_config()

    # chain manager
    chain_manager = ChainManager(config=config)

    # peer manager
    peer_manager = PeerManager(config=config)

    # start tcp server
    try:
        tcp_server = TcpServer(peer_manager,
                               config.get('network', 'listen_host'),
                               config.getint('network', 'listen_port'))
    except IOError as e:
        logger.error("Could not start TCP server: \"{0}\"".format(str(e)))
        sys.exit(1)

    peer_manager.local_address = (tcp_server.ip, tcp_server.port)
    tcp_server.start()
    peer_manager.start()
    chain_manager.start()

    # handle termination signals
    def signal_handler(signum=None, frame=None):
        logger.info('Signal handler called with signal {0}'.format(signum))
        peer_manager.stop()
        chain_manager.stop()
        # tcp_server checks for peer_manager.stopped()

    for sig in [signal.SIGTERM, signal.SIGHUP, signal.SIGQUIT, signal.SIGINT]:
        signal.signal(sig, signal_handler)

    # connect peer
    if config.get('network', 'remote_host'):
        peer_manager.connect_peer(config.get('network', 'remote_host'),
                                  config.getint('network', 'remote_port'))

    # loop
    while not peer_manager.stopped():
        time.sleep(0.1)
        if len(peer_manager.get_connected_peer_addresses()) > 2:
            chain_manager.bootstrap_blockchain()

    logger.info('exiting')
    # tcp_server.join() # does not work!
    peer_manager.join()
Example #46
def _main(config):
    setup_logging(config)

    fp = config.get('ircbot', 'channel_config')
    if fp:
        fp = os.path.expanduser(fp)
        if not os.path.exists(fp):
            raise Exception("Unable to read layout config file at %s" % fp)
    else:
        raise Exception("Channel Config must be specified in config file.")

    try:
        channel_config = ChannelConfig(yaml.load(open(fp)))
    except Exception:
        log = logging.getLogger('gerritbot')
        log.exception("Syntax error in chanel config file")
        raise

    bot = GerritBot(channel_config.channels,
                    config.get('ircbot', 'nick'),
                    config.get('ircbot', 'pass'),
                    config.get('ircbot', 'server'),
                    config.getint('ircbot', 'port'),
                    config.getboolean('ircbot', 'force_ssl'),
                    config.get('ircbot', 'server_password'))
    if config.has_option('ircbot', 'use_mqtt'):
        use_mqtt = config.getboolean('ircbot', 'use_mqtt')
    else:
        use_mqtt = False

    if use_mqtt:
        g = GerritMQTT(bot,
                       channel_config,
                       config.get('mqtt', 'host'),
                       config.get('mqtt', 'base_topic'),
                       config.getint('mqtt', 'port'),
                       config.getboolean('mqtt', 'websocket'))
    else:
        g = Gerrit(bot,
                   channel_config,
                   config.get('gerrit', 'host'),
                   config.get('gerrit', 'user'),
                   config.getint('gerrit', 'port'),
                   config.get('gerrit', 'key'))
    g.start()
    bot.start()
Example #47
    def create(cls, name, config):
        try:
            url = config.get('url', fallback='http://localhost:8080/jsonrpc')
            timeout = config.getint('timeout', fallback=5)
            return cls(name, url, timeout)
        except ValueError as error:
            raise ConfigurationError(
                'Url or timeout configuration wrong: {}'.format(error))
Example #48
0
def main():
    if len(sys.argv) < 2:
        usage()
        sys.exit(1)
    config = nfe204Projet.loadConfig(sys.argv[1])
    nfe204Projet.initLogging(config)

    logger = logging.getLogger(__name__)
    logger.info("Running the script with configuration file %s", sys.argv[1])

    conn = r.connect(host=config.get(nfe204Projet.SECTION_RETHINKDB, nfe204Projet.OPT_HOST),
                     port=config.getint(nfe204Projet.SECTION_RETHINKDB, nfe204Projet.OPT_PORT),
                     db=config.get(nfe204Projet.SECTION_RETHINKDB, nfe204Projet.OPT_DB),
                     auth_key=config.get(nfe204Projet.SECTION_RETHINKDB, nfe204Projet.OPT_PWD),
                     timeout=config.getint(nfe204Projet.SECTION_RETHINKDB, nfe204Projet.OPT_TIMEOUT),
                     ).repl()
    query(logger)
    conn.close(noreply_wait=False)
Example #49
0
def read_model_configs(config):
    """[MODEL]."""
    CONFIGS['model_dir'] = config.get(
        'model_dir',
        fallback=op.join(CONFIGS['unique_temp_dir'], 'model')
    )
    CONFIGS['tcoffee_dir'] = op.join(CONFIGS['model_dir'], 'tcoffee')

    # Modeller
    CONFIGS['modeller_dir'] = op.join(CONFIGS['model_dir'], 'modeller')
    CONFIGS['modeller_runs'] = config.getint('modeller_runs', fallback=1)

    # FoldX
    CONFIGS['foldx_water'] = config.get('foldx_water', fallback='-IGNORE')
    CONFIGS['foldx_num_of_runs'] = config.getint('foldx_num_of_runs', fallback=1)
    CONFIGS['matrix_type'] = config.get('matrix_type', fallback='blosum80')
    CONFIGS['gap_start'] = config.getint('gap_start', fallback=-16)
    CONFIGS['gap_extend'] = config.getint('gap_extend', fallback=-4)
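
The option names above imply a [MODEL] section like the following sketch; the values simply restate the fallbacks, and the directory path is illustrative (model_dir otherwise defaults to a subdirectory of unique_temp_dir):

[MODEL]
model_dir = /tmp/myapp/model
modeller_runs = 1
foldx_water = -IGNORE
foldx_num_of_runs = 1
matrix_type = blosum80
gap_start = -16
gap_extend = -4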
Example #50
0
def main():
    args = make_args()
    config = configparser.ConfigParser()
    utils.load_config(config, args.config)
    for cmd in args.modify:
        utils.modify_config(config, cmd)
    with open(os.path.expanduser(os.path.expandvars(args.logging)), 'r') as f:
        logging.config.dictConfig(yaml.safe_load(f))
    cache_dir = utils.get_cache_dir(config)
    model_dir = utils.get_model_dir(config)
    category = utils.get_category(config, cache_dir if os.path.exists(cache_dir) else None)
    anchors = utils.get_anchors(config)
    anchors = torch.from_numpy(anchors).contiguous()
    dnn = utils.parse_attr(config.get('model', 'dnn'))(model.ConfigChannels(config), anchors, len(category))
    dnn.eval()
    logging.info(humanize.naturalsize(sum(var.cpu().numpy().nbytes for var in dnn.state_dict().values())))
    state_dict = dnn.state_dict()
    grouped_dict = group_state(state_dict)
    remaining = 0  # bytes left unread; referenced in the finally block below
    try:
        layers = []
        with open(os.path.expanduser(os.path.expandvars(args.file)), 'rb') as f:
            major, minor, revision, seen = struct.unpack('4i', f.read(16))
            logging.info('major=%d, minor=%d, revision=%d, seen=%d' % (major, minor, revision, seen))
            total = 0
            filesize = os.fstat(f.fileno()).st_size
            for layer in grouped_dict:
                group = grouped_dict[layer]
                for suffix in ['conv.bias', 'bn.bias', 'bn.weight', 'bn.running_mean', 'bn.running_var', 'conv.weight']:
                    if suffix in group:
                        var = group[suffix]
                        size = var.size()
                        cnt = np.multiply.reduce(size)
                        total += cnt
                        key = layer + '.' + suffix
                        val = np.array(struct.unpack('%df' % cnt, f.read(cnt * 4)), np.float32)
                        val = np.reshape(val, size)
                        remaining = filesize - f.tell()
                        logging.info('%s.%s: %s=%f (%s), remaining=%d' % (layer, suffix, 'x'.join(list(map(str, size))), utils.abs_mean(val), hashlib.md5(val.tobytes()).hexdigest(), remaining))
                        layers.append([key, torch.from_numpy(val)])
                logging.info('%d parameters assigned' % total)
        layers[-1][1] = transpose_weight(layers[-1][1], len(anchors))
        layers[-2][1] = transpose_bias(layers[-2][1], len(anchors))
    finally:
        if remaining > 0:
            logging.warning('%d bytes remaining' % remaining)
        state_dict = collections.OrderedDict(layers)
        if args.delete:
            logging.warning('delete model directory: ' + model_dir)
            shutil.rmtree(model_dir, ignore_errors=True)
        saver = utils.train.Saver(model_dir, config.getint('save', 'keep'), logger=None)
        path = saver(state_dict, 0, 0) + saver.ext
        if args.copy is not None:
            _path = os.path.expandvars(os.path.expanduser(args.copy))
            logging.info('copy %s to %s' % (path, _path))
            shutil.copy(path, _path)
Example #51
0
def main():
    def setlogger(conf_file=None):
        if conf_file:
            return logging.config.fileConfig(conf_file)

        logging.basicConfig(level=logging.INFO,
                            format='%(asctime)s %(levelname)s %(message)s',
                            datefmt='%Y/%m/%d %H:%M:%S')

    def loadmodules(path=None):
        from . import mmhandler    # load default modules

        if path:
            mmplugin.load(path)

        for name, action in mmplugin.iteractions():
            logging.info('[plugin] [action] [%s] <%s.%s> loaded',
                         name, action.__module__, action.__name__)

    def getsslctx():
        crt = config.get('outgoing', 'ssl_crt')
        key = config.get('outgoing', 'ssl_key')
        return (util.abspath(crt), util.abspath(key)) if crt and key else None

    def parseargs():
        ap = argparse.ArgumentParser()
        ap.add_argument('-l', '--logging-config', type=util.abspath,
                        help='configuration file for the logging')
        ap.add_argument('config', type=util.abspath,
                        help='configuration file for the Mattermost client')
        return ap.parse_args()

    args = parseargs()

    config.read(args.config)
    setlogger(args.logging_config)
    loadmodules(config.get('plugin', 'path'))

    sslctx = getsslctx()

    app = flask.Flask(__name__)
    app.config['JSON_AS_ASCII'] = False
    app.register_blueprint(bp, url_prefix=config.get('outgoing', 'path'))

    if config.getboolean('mm', 'daemon'):
        util.daemonize()

    sched = Scheduler()
    sched.start()

    app.run(host=config.get('outgoing', 'host'),
            port=config.getint('outgoing', 'port'),
            debug=config.getboolean('mm', 'debug'),
            ssl_context=sslctx, use_reloader=False, threaded=True)
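
A sketch of the client configuration this main reads; all section and option names come from the code above, and every value is illustrative:

[mm]
daemon = false
debug = false

[plugin]
path =

[outgoing]
host = 0.0.0.0
port = 8065
path = /hooks
ssl_crt =
ssl_key =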
Example #52
0
def mirror(config):
    # Always reference those classes here with the fully qualified name to
    # allow them being patched by mock libraries!
    master = bandersnatch.master.Master(
        config.get('mirror', 'master'),
        config.getfloat('mirror', 'timeout'))
    mirror = bandersnatch.mirror.Mirror(
        config.get('mirror', 'directory'), master,
        stop_on_error=config.getboolean('mirror', 'stop-on-error'),
        workers=config.getint('mirror', 'workers'),
        delete_packages=config.getboolean('mirror', 'delete-packages'))
    mirror.synchronize()
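
A matching [mirror] section sketch; the option names come from the code above, while the URL, path, and counts are illustrative:

[mirror]
master = https://pypi.org
directory = /srv/pypi
timeout = 10
workers = 3
stop-on-error = false
delete-packages = false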
Example #53
0
    def guess_galaxy_port(self):
        # Code derived from IPython work ie.mako
        config = ConfigParser.SafeConfigParser({'port': '8080'})
        if self.config_file:
            config.read(self.config_file)

        try:
            port = config.getint('server:%s' % self.server_name, 'port')
        except (ConfigParser.Error, ValueError):
            # uWSGI galaxy installations don't use paster and only speak uWSGI, not http
            port = None
        return port
Example #54
0
    def create(cls, name, config):
        try:
            return cls(name, config.getint('timeout', fallback=600),
                       re.compile(config.get('ignore_if_process',
                                             fallback=r'a^')),
                       re.compile(config.get('ignore_users',
                                             fallback=r'^a')))
        except re.error as error:
            raise ConfigurationError(
                'Regular expression is invalid: {}'.format(error))
        except ValueError as error:
            raise ConfigurationError(
                'Unable to parse timeout as int: {}'.format(error))
Example #55
0
    def __init__(self, config_path):
        # Setup config with defaults.
        config = configparser.ConfigParser()
        config['myapp'] = {}
        config['myapp']['fullscreen'] = 'false'
        config['myapp']['timeout'] = '60'

        # Update config from file.
        with open(config_path, 'r', encoding='utf-8') as config_file:
            config.read_file(config_file)

        self.connection = config.get('myapp', 'connection')
        self.is_fullscreen = config.getboolean('myapp', 'fullscreen')
        self.timeout = config.getint('myapp', 'timeout')
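
A config file for this class might look like the sketch below. Only connection must appear, since it has no preset default; fullscreen and timeout fall back to the values seeded above (all values here are illustrative):

[myapp]
connection = localhost:5432
fullscreen = true
timeout = 120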
Example #56
0
def main():
	parser = argparse.ArgumentParser(description="Bank webapp init")
	parser.add_argument('--config', dest='config', required=True)
	args = parser.parse_args()
	if not args.config:
		parser.print_help()
		sys.exit(1)

	config = parse_config(args.config)
	if not config.has_option('bankwebapp', 'port'):
		print('port is required')
		sys.exit(1)

	#logging.basicConfig(filename=config.get('bankwebapp', 'logpath'), level=logging.DEBUG)
	logging.config.fileConfig(args.config, disable_existing_loggers=False)
	logging.getLogger('tornado').setLevel(config.getint('bankwebapp', 'logging'))
	logging.getLogger('webserver').info('<!> bank webapp being initialized...')

	# init server
	main_loop = tornado.ioloop.IOLoop.instance()
	application = WebApplication(config=config, main_loop=main_loop)
	application.listen(config.getint('bankwebapp', 'port'))
	logging.getLogger('webserver').info('<!> bank webapp initialized (version = %s)' % '0.0.1')
	logging.info("hello")
	main_loop.start()
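
A sketch of the [bankwebapp] section read above. Note that the same file is also passed to logging.config.fileConfig, so it must additionally contain the [loggers], [handlers], and [formatters] sections that fileConfig requires (values below are illustrative; logging = 20 is the numeric INFO level):

[bankwebapp]
port = 8888
logging = 20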
Example #57
0
    def __init__(self, srcf, destf, config):
        """Get an NNTPSucka with two NNTPClient objects representing the
        source and destination."""
        self.log = logging.getLogger("NNTPSucka")
        self.src = srcf()
        self.dest = destf()

        self.reqQueue = Queue.Queue(1000)
        self.doneQueue = Queue.Queue(1000)

        # Figure out the maximum number of articles per group
        self.maxArticles = config.getint("misc", "maxArticles")
        self.log.debug("Max articles is configured as %d" % self.maxArticles)

        # NewsDB setup
        self.db = NewsDB(config.get("misc", "newsdb"))
        self.db.setShouldMarkArticles(
            config.getboolean("misc", "shouldMarkArticles"))

        # Initialize stats
        self.stats = Stats()

        self.workers = [Worker(srcf, destf, self.reqQueue, self.doneQueue)
                        for x in range(config.getint("misc", "workers"))]
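
A [misc] section sketch matching the lookups above (option names from the code, values illustrative):

[misc]
maxArticles = 500
newsdb = news.db
shouldMarkArticles = true
workers = 4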
Example #58
0
def hueSendSeries(light, config):
	steps = config.getint('steps')
	for i in range(steps):
		logger.debug('Sending step %d', i)
		# Read each per-step option once; option names are suffixed with the step index.
		transition_time = config.getint('transitionTime' + str(i))
		light.hue = config.getint('hue' + str(i))
		light.sat = config.getint('sat' + str(i))
		light.transitiontime = transition_time
		light.brightness = config.getint('brightness' + str(i))
		light.on = True
		# Hue transition times are in deciseconds, so divide by 10 to sleep in seconds.
		time.sleep(transition_time / 10.0)
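
The indexed option names imply a flat, two-step section like this sketch; config here is a single section object, so the section name and all values below are illustrative:

[series]
steps = 2
hue0 = 10000
sat0 = 254
brightness0 = 200
transitionTime0 = 10
hue1 = 50000
sat1 = 128
brightness1 = 100
transitionTime1 = 20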
Example #59
0
def parse_options(config, section_name, config_class):
	options = config.options(section_name)
	for option in options:
		option = option.upper()
		if option in config_class.__dict__ and not option.startswith("__"):
			# Check bool before int: bool is a subclass of int in Python.
			if isinstance(config_class.__dict__[option], bool):
				setattr(config_class, option, config.getboolean(section_name, option))
			elif isinstance(config_class.__dict__[option], float):
				setattr(config_class, option, config.getfloat(section_name, option))
			elif isinstance(config_class.__dict__[option], int):
				setattr(config_class, option, config.getint(section_name, option))
			else:
				setattr(config_class, option, config.get(section_name, option))
		else:
			logger = logging.getLogger('InfrastructureManager')
			logger.warning("Unknown option in the IM config file. Ignoring it: " + option)
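
A hedged usage sketch: the class name, option names, and file name below are illustrative, not from the source. The attribute types on the class drive which getter parse_options uses, and INI keys (lower-cased by configparser) are upper-cased before matching:

import configparser
import logging

class ConfigIM:
    # Illustrative defaults; each attribute's type selects getboolean/getfloat/getint/get.
    VERBOSE = False
    TIMEOUT = 30
    RETRY_DELAY = 2.5
    LOG_FILE = '/var/log/im.log'

config = configparser.ConfigParser()
config.read('im.cfg')  # hypothetical file with an [im] section
parse_options(config, 'im', ConfigIM)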