Example #1
async def async_main(args: argparse.Namespace, config: ConfigParser) -> int:
    if args.op.lower() == "delete":
        async with bandersnatch.master.Master(
                config.get("mirror", "master"),
                config.getfloat("mirror", "timeout"),
                config.getfloat("mirror", "global-timeout", fallback=None),
        ) as master:
            return await bandersnatch.delete.delete_packages(
                config, args, master)
    elif args.op.lower() == "verify":
        return await bandersnatch.verify.metadata_verify(config, args)
    elif args.op.lower() == "sync":
        return await bandersnatch.mirror.mirror(config, args.packages)

    if args.force_check:
        storage_plugin = next(iter(storage_backend_plugins()))
        status_file = (
            storage_plugin.PATH_BACKEND(config.get("mirror", "directory")) /
            "status")
        if status_file.exists():
            tmp_status_file = Path(gettempdir()) / "status"
            try:
                shutil.move(str(status_file), tmp_status_file)
                logger.debug(
                    "Force bandersnatch to check everything against the master PyPI"
                    + f" - status file moved to {tmp_status_file}")
            except OSError as e:
                logger.error(f"Could not move status file ({status_file} to " +
                             f" {tmp_status_file}): {e}")
        else:
            logger.info(
                f"No status file to move ({status_file}) - Full sync will occur"
            )

    return await bandersnatch.mirror.mirror(config)
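A minimal sketch of the [mirror] section these calls assume; the keys are inferred from the get/getfloat calls above, and the values are illustrative only:

import configparser

config = configparser.ConfigParser()
config.read_string("""
[mirror]
master = https://pypi.org
timeout = 10.0
directory = /srv/pypi
""")
# global-timeout is optional: getfloat(..., fallback=None) above simply
# returns None when the key is absent.
print(config.getfloat("mirror", "global-timeout", fallback=None))  # None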
Example #2
def main():
    args = parser_arguments()
    configure_logging(args.logging)
    config = parse_config(args.config_file)
    set_up_checks(config)
    loop(config.getfloat('general', 'interval', fallback=60),
         config.getfloat('general', 'idle_time', fallback=300),
         functools.partial(execute_suspend, config.get('general',
                                                       'suspend_cmd')),
         all_checks=args.all_checks)
Example #3
def main():
    args = parser_arguments()
    configure_logging(args.logging)
    config = parse_config(args.config_file)
    set_up_checks(config)
    loop(config.getfloat('general', 'interval', fallback=60),
         config.getfloat('general', 'idle_time', fallback=300),
         functools.partial(execute_suspend,
                           config.get('general', 'suspend_cmd')),
         all_checks=args.all_checks)
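Examples 2 and 3 are the same function with different line wrapping; both lean on getfloat's fallback keyword, which is returned only when the option is missing (a present-but-malformed value still raises ValueError). A small demonstration with illustrative values:

import configparser

config = configparser.ConfigParser()
config.read_string("[general]\ninterval = 45\n")
print(config.getfloat("general", "interval", fallback=60))    # 45.0
print(config.getfloat("general", "idle_time", fallback=300))  # 300 (missing key)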
Example #4
    def updateHeader(self, img, img_header_path, time, battery, network,
                     config, section):
        """set time and network. Time looks like 14:01. network is 3G, 4G and wifi"""
        if len(time) < 5:
            return False, None
        if battery > config.getfloat(
                section, 'capacity_max') or battery < config.getfloat(
                    section, 'capacity_min'):
            return False, None

        # Set the default header first
        if "" != img_header_path:
            header_width, header_height = self.getImgWH(img_header_path)
            img[0:header_height, 0:header_width] = cv2.imread(img_header_path)

        # Set battery
        capacity_pos = self.parseArrStr(config.get(section, 'capacity_pos'),
                                        ',')
        battery_pos = self.parseArrStr(config.get(section, 'battery_pos'), ',')
        b_w, b_h = self.getImgWH(config.get(section, 'img_battery_full'))
        if battery > 0.9:
            img[battery_pos[1]:battery_pos[1]+b_h, battery_pos[0]:battery_pos[0]+b_w] = \
                cv2.imread(config.get(section, 'img_battery_full'))
        else:
            img[battery_pos[1]:battery_pos[1] + b_h, battery_pos[0]:battery_pos[0] + b_w] = \
                cv2.imread(config.get(section, 'img_battery'))
            capacity_width, capacity_height = self.getImgWH(
                config.get(section, 'img_capacity'))
            capacity_setting_width = int(capacity_width * battery)
            img_capacity = cv2.imread(config.get(section, 'img_capacity'))
            img_bc = cv2.resize(img_capacity,
                                (capacity_setting_width, capacity_height))
            img[capacity_pos[1]:capacity_pos[1] + capacity_height,
                capacity_pos[0]:capacity_pos[0] +
                capacity_setting_width] = img_bc

        # Set time
        num_size = self.getImgWH(config.get(section, 'img_0'))
        num_pos = self.parseArrStr(config.get(section, 'num_pos'), ',')
        time_x = num_pos[0]
        for i in range(0, len(time)):
            if ':' == time[i]:
                colon_width, _ = self.getImgWH(config.get(
                    section, 'img_colon'))
                img[num_pos[1]:num_pos[1]+num_size[1], time_x:time_x+colon_width] = \
                    cv2.imread(config.get(section, 'img_colon'))
                time_x += colon_width
            else:
                img[num_pos[1]:num_pos[1]+num_size[1], time_x:time_x+num_size[0]] = \
                    cv2.imread(config.get(section, 'img_'+time[i]))
                time_x += num_size[0]

        return True, img
Example #5
def filter_visible(config, iou, yx_min, yx_max, prob):
    prob_cls, cls = torch.max(prob, -1)
    if config.getboolean('detect', 'fix'):
        mask = (iou * prob_cls) > config.getfloat('detect', 'threshold_cls')
    else:
        mask = iou > config.getfloat('detect', 'threshold')
    iou, prob_cls, cls = (t[mask].view(-1) for t in (iou, prob_cls, cls))
    _mask = torch.unsqueeze(mask, -1).repeat(1, 2)  # PyTorch's bug
    yx_min, yx_max = (t[_mask].view(-1, 2) for t in (yx_min, yx_max))
    num = prob.size(-1)
    _mask = torch.unsqueeze(mask, -1).repeat(1, num)  # PyTorch's bug
    prob = prob[_mask].view(-1, num)
    return iou, yx_min, yx_max, prob, prob_cls, cls
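The unsqueeze/repeat workaround above dates from early PyTorch releases, where a boolean mask had to match the shape of the tensor it indexed. On current PyTorch a 1-D mask indexes the leading dimension directly, so the same filtering needs no workaround (a sketch with illustrative shapes):

import torch

N, num = 8, 3
iou = torch.rand(N)
yx_min, yx_max = torch.rand(N, 2), torch.rand(N, 2)
prob = torch.rand(N, num)

mask = iou > 0.5
iou = iou[mask]                              # shape (K,)
yx_min, yx_max = yx_min[mask], yx_max[mask]  # shape (K, 2)
prob = prob[mask]                            # shape (K, num)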
Example #7
def postprocess(config, iou, yx_min, yx_max, prob):
    iou, yx_min, yx_max, prob, prob_cls, cls = filter_visible(config, iou, yx_min, yx_max, prob)
    keep = pybenchmark.profile('nms')(utils.postprocess.nms)(iou, yx_min, yx_max, config.getfloat('detect', 'overlap'))
    if keep:
        keep = utils.ensure_device(torch.LongTensor(keep))
        iou, yx_min, yx_max, prob, prob_cls, cls = (t[keep] for t in (iou, yx_min, yx_max, prob, prob_cls, cls))
        if config.getboolean('detect', 'fix'):
            score = torch.unsqueeze(iou, -1) * prob
            mask = score > config.getfloat('detect', 'threshold_cls')
            indices, cls = torch.unbind(mask.nonzero(), -1)
            yx_min, yx_max = (t[indices] for t in (yx_min, yx_max))
            score = score[mask]
        else:
            score = iou
        return iou, yx_min, yx_max, cls, score
Example #9
    def __init__(self):
        if config.getboolean('MAIN', 'DownloadNLTKResources'):
            download_nltk_resources()
        colorama.init()

        self.bearer = config.get('CONNECTION', 'Bearer')
        self.timeout = config.getfloat('CONNECTION', 'Timeout')
        self.show_next_info = config.getboolean('MAIN', 'ShowNextShowInfo')
        self.exit_if_offline = config.getboolean('MAIN', 'ExitIfShowOffline')
        self.show_bearer_info = config.getboolean('MAIN', 'ShowBearerInfo')
        self.headers = {
            'User-Agent': 'Android/1.40.0',
            'x-hq-client': 'Android/1.40.0',
            'x-hq-country': 'US',
            'x-hq-lang': 'en',
            'x-hq-timezone': 'America/New_York',
            'Authorization': f'Bearer {self.bearer}'
        }

        self.session = requests.Session()
        self.session.headers.update(self.headers)

        init_root_logger()
        self.logger = logging.getLogger(__name__)

        # Find local UTC offset
        now = time.time()
        self.local_utc_offset = datetime.fromtimestamp(
            now) - datetime.utcfromtimestamp(now)

        self.validate_bearer()
        self.logger.info('HackQ-Trivia initialized.\n',
                         extra={'pre': colorama.Fore.GREEN})
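A note on the UTC-offset computation above: datetime.utcfromtimestamp is deprecated since Python 3.12. A timezone-aware equivalent of the same offset:

from datetime import datetime

# The local UTC offset as a timedelta, without naive UTC arithmetic.
local_utc_offset = datetime.now().astimezone().utcoffset()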
Example #10
def makeArraysFromCfg(config, value):

    x, y = array('f'), array('f')

    axislabels = []

    #TODO: Test for values not in config

    for isection, section in enumerate(config.sections()):
        logging.debug("Making x,y axis for {0} - {1}".format(
            section, config.getfloat(section, value)))
        axislabels.append(section)
        x.append(isection)
        y.append(config.getfloat(section, value))

    return axislabels, x, y
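One way to address the TODO above is to guard each lookup so that sections lacking the requested option are skipped instead of letting getfloat raise NoOptionError (a sketch of the loop body only):

for isection, section in enumerate(config.sections()):
    if not config.has_option(section, value):
        logging.warning("section %s has no option %s; skipping", section, value)
        continue
    axislabels.append(section)
    x.append(isection)
    y.append(config.getfloat(section, value))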
Example #11
def main():
    """
    Main function. Contains variables that can be tweaked to your needs.
    Please look at the class object to see which attributes you can set.
    The pid values are tuned to my particular system and may require
    adjustment for your system(s).
    """
    config = read_config()
    polling_interval = config.getfloat("General", "polling_interval")

    chassis = get_chassis_settings(config)
    pid = get_pid_settings(config)
    temp_source = get_temp_source(config)

    # Set the fan to the chassis min on startup.
    chassis.set_pwm(chassis.pwm_min)

    try:
        while True:
            highest_temperature = temp_source.get_highest_temperature()
            fan_speed = pid.update(highest_temperature)
            chassis.set_fan_speed(fan_speed)
            log(highest_temperature, chassis, pid)
            time.sleep(polling_interval)

    except (KeyboardInterrupt, SystemExit):
        chassis.set_pwm(chassis.pwm_safety)
        sys.exit(1)
Example #12
    def create(cls, name, config):
        try:
            return cls(name,
                       config.getfloat('threshold', fallback=2.5))
        except ValueError as error:
            raise ConfigurationError(
                'Unable to parse threshold as float: {}'.format(error))
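The single-argument getfloat('threshold', fallback=2.5) above implies that config is a configparser.SectionProxy rather than the full parser, whose getfloat takes (section, option). A quick illustration:

import configparser

parser = configparser.ConfigParser()
parser.read_string("[check]\nthreshold = 3.0\n")
section = parser["check"]                           # SectionProxy
print(section.getfloat("threshold", fallback=2.5))  # 3.0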
Example #13
def configure_processor(
    args: argparse.Namespace,
    config: configparser.ConfigParser,
    checks: Iterable[Activity],
    wakeups: Iterable[Wakeup],
) -> Processor:
    return Processor(
        checks,
        wakeups,
        config.getfloat("general", "idle_time", fallback=300),
        config.getfloat("general", "min_sleep_time", fallback=1200),
        get_wakeup_delta(config),
        get_notify_and_suspend_func(config),
        get_schedule_wakeup_func(config),
        all_activities=args.all_checks,
    )
Example #14
def main_daemon(args: argparse.Namespace,
                config: configparser.ConfigParser) -> None:
    """Run the daemon."""

    checks = set_up_checks(
        config,
        "check",
        "activity",
        Activity,  # type: ignore
        error_none=True,
    )
    wakeups = set_up_checks(
        config,
        "wakeup",
        "wakeup",
        Wakeup,  # type: ignore
    )

    processor = configure_processor(args, config, checks, wakeups)
    loop(
        processor,
        config.getfloat("general", "interval", fallback=60),
        run_for=args.run_for,
        woke_up_file=get_woke_up_file(config),
        lock_file=get_lock_file(config),
        lock_timeout=get_lock_timeout(config),
    )
Example #15
    def __init__(self, args, config):
        self.args = args
        self.config = config
        self.model_dir = utils.get_model_dir(config)
        self.cache_dir = utils.get_cache_dir(config)
        self.category = utils.get_category(config, self.cache_dir)
        self.anchors = torch.from_numpy(utils.get_anchors(config)).contiguous()
        logging.info('use cache directory ' + self.cache_dir)
        logging.info('tensorboard --logdir ' + self.model_dir)
        if args.delete:
            logging.warning('delete model directory: ' + self.model_dir)
            shutil.rmtree(self.model_dir, ignore_errors=True)
        os.makedirs(self.model_dir, exist_ok=True)
        with open(self.model_dir + '.ini', 'w') as f:
            config.write(f)
        self.saver = utils.train.Saver(self.model_dir,
                                       config.getint('save', 'keep'))
        self.timer_save = utils.train.Timer(config.getfloat('save', 'secs'),
                                            False)
        try:
            self.timer_eval = utils.train.Timer(
                eval(config.get('eval', 'secs')),
                config.getboolean('eval', 'first'))
        except configparser.NoOptionError:
            self.timer_eval = lambda: False
        self.summary_worker = SummaryWorker(self)
        self.summary_worker.start()
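A side note on eval(config.get('eval', 'secs')) above: eval executes arbitrary code from the config file, presumably so the option can hold an expression such as 60 * 10. When the value is a plain number, getfloat is the safe equivalent:

secs = config.getfloat('eval', 'secs')  # safe when the value is just a number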
Example #16
    def __init__(self):
        HackQ.download_nltk_resources()
        colorama.init()

        self.bearer = config.get("CONNECTION", "BEARER")
        self.timeout = config.getfloat("CONNECTION", "Timeout")
        self.show_next_info = config.getboolean("MAIN", "ShowNextShowInfo")
        self.exit_if_offline = config.getboolean("MAIN", "ExitIfShowOffline")
        self.show_bearer_info = config.getboolean("MAIN", "ShowBearerInfo")
        self.headers = {
            "User-Agent": "Android/1.40.0",
            "x-hq-client": "Android/1.40.0",
            "x-hq-country": "US",
            "x-hq-lang": "en",
            "x-hq-timezone": "America/New_York",
            "Authorization": f"Bearer {self.bearer}",
            "Connection": "close"
        }

        self.session = requests.Session()
        self.session.headers.update(self.headers)

        self.init_root_logger()
        self.logger = logging.getLogger(__name__)

        # Find local UTC offset
        now = time.time()
        self.local_utc_offset = datetime.fromtimestamp(
            now) - datetime.utcfromtimestamp(now)

        self.validate_bearer()
        self.logger.info("HackQ-Trivia initialized.\n",
                         extra={"pre": colorama.Fore.GREEN})
Example #17
def main(args=None):
    """Run the daemon."""
    args = parse_arguments(args)

    configure_logging(args.logging)

    config = parse_config(args.config_file)

    checks = set_up_checks(config,
                           'check',
                           'activity',
                           Activity,
                           error_none=True)
    wakeups = set_up_checks(config, 'wakeup', 'wakeup', Wakeup)

    processor = Processor(checks,
                          wakeups,
                          config.getfloat('general', 'idle_time',
                                          fallback=300),
                          config.getfloat('general',
                                          'min_sleep_time',
                                          fallback=1200),
                          config.getfloat('general',
                                          'wakeup_delta',
                                          fallback=30),
                          functools.partial(
                              notify_and_suspend,
                              config.get('general', 'suspend_cmd'),
                              config.get('general',
                                         'notify_cmd_wakeup',
                                         fallback=None),
                              config.get('general',
                                         'notify_cmd_no_wakeup',
                                         fallback=None)),
                          functools.partial(
                              schedule_wakeup,
                              config.get('general', 'wakeup_cmd')),
                          all_activities=args.all_checks)
    loop(processor,
         config.getfloat('general', 'interval', fallback=60),
         run_for=args.run_for,
         woke_up_file=config.get('general',
                                 'woke_up_file',
                                 fallback='/var/run/autosuspend-just-woke-up'))
Example #18
def mirror(config):
    # Load the filter plugins so the loading doesn't happen in the fast path
    filter_project_plugins()
    filter_release_plugins()

    # Always reference those classes here with the fully qualified name to
    # allow them to be patched by mock libraries!
    master = bandersnatch.master.Master(
        config.get("mirror", "master"), config.getfloat("mirror", "timeout")
    )

    # `json` boolean is a new optional option in 2.1.2 - we want to support it
    # not existing in old configs and display an error saying that this will
    # become an error in a not too distant release
    try:
        json_save = config.getboolean("mirror", "json")
    except configparser.NoOptionError:
        logger.error(
            "Please update your config to include a json "
            + "boolean in the [mirror] section. Setting to False"
        )
        json_save = False

    try:
        root_uri = config.get("mirror", "root_uri")
    except configparser.NoOptionError:
        root_uri = None

    try:
        digest_name = config.get("mirror", "digest_name")
    except configparser.NoOptionError:
        digest_name = "sha256"
    if digest_name not in ("md5", "sha256"):
        raise ValueError(
            f"Supplied digest_name {digest_name} is not supported! Please "
            + "update digest_name to one of ('sha256', 'md5') in the [mirror] "
            + "section."
        )

    mirror = bandersnatch.mirror.Mirror(
        config.get("mirror", "directory"),
        master,
        stop_on_error=config.getboolean("mirror", "stop-on-error"),
        workers=config.getint("mirror", "workers"),
        hash_index=config.getboolean("mirror", "hash-index"),
        json_save=json_save,
        root_uri=root_uri,
        digest_name=digest_name,
        keep_index_versions=config.getint("mirror", "keep_index_versions", fallback=0),
    )

    changed_packages = mirror.synchronize()
    logger.info("{} packages had changes".format(len(changed_packages)))
    for package_name, changes in changed_packages.items():
        logger.debug(f"{package_name} added: {changes}")
Example #19
        def __init__(self, configfname, problem, codefun, agentclass):
                config = ConfigParser.ConfigParser()
                config.readfp(open(configfname))

                logging.config.fileConfig(config.get('default','logconf'))
                log.info('Setting up evolutionary workbench...')
                self.evolog.critical(evologhead)

                self.problem = problem
                self.codefun = codefun
                self.popsize = config.getint('default','popsize')
                self.parentpsize = config.getint('default','parentpopsize')
                self.maxiters = config.getint('default','numiters')
                self.popratio = self.popsize / self.parentpsize

                opnames = config.get('default','operators')
                oprates = config.get('default','oprates')
                self.opargs = config.get('default','opargs').split(',')
                self.ops_, self.oprates = _initialize_ops(opnames,oprates)
                log.debug(self.ops_)
                log.debug(self.oprates)
                arncfg = config.get('default','arnconf')
                self.arnconfig = ConfigParser.ConfigParser()
                self.arnconfig.readfp(open(arncfg))
                self.agentclass = partial(agentclass, config = self.arnconfig)
                self.mutrate = config.getfloat('default','mutrate')
                self.orig_mutrate = self.mutrate
                self.mutate_ = partial(bitflipmutation,
                                       mutrate = self.mutrate)
                self.improves = 0
                self.tempevals = 0
                self.adfcount = 0

                self.localsearch = config.get('default','localsearch')
                if self.localsearch:
                        log.info('Initializing local search holder')
                        mainmod = __import__('__main__')
                        self.localsearch = getattr(mainmod,
                                                   self.localsearch)(5,codefun)

                self.basicadf = config.get('default','adf')
                if self.basicadf:
                        log.info('Initializing multiplex adf skeleton')
                        mainmod = __import__('__main__')
                        self.basicadf = getattr(mainmod,
                                                   self.basicadf)

                self.numevals = None
                self.population = None
                self.parents = None
                self.best = None
                self.itercount = None
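This example is Python 2: the ConfigParser module was renamed to configparser in Python 3, and readfp was replaced by read_file. The equivalent setup today would be:

import configparser

config = configparser.ConfigParser()
with open(configfname) as f:  # configfname as in the example
    config.read_file(f)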
Example #20
def get_model(config, input_shape=None):
    '''Creates a model from config
    :param config: config parser
    :return: Keras model
    '''
    # Get config
    model_name = config.get("default", "target_net")
    crop_size = config.getint("training", "patch_crop_size")
    upscale = config.getint("fsrcnn", "upscale")

    # Create model
    logging.getLogger().info("Creating model: {}".format(model_name))

    if model_name == "FSRCNN":

        if not input_shape:
            input_shape = (crop_size // upscale, crop_size // upscale, 1)
        model = FSRCNN(input_shape, upscale)
        loss = tf.keras.losses.MeanSquaredError()
        optimizer = tf.keras.optimizers.Adam(config.getfloat("training", "init_lr"))

    elif model_name == "IRCNN":

        if not input_shape:
            input_shape = (crop_size, crop_size, 3)
        model = IRCNN(input_shape)
        loss = tf.keras.losses.MeanSquaredError()
        optimizer = tf.keras.optimizers.Adam(config.getfloat("training", "init_lr"))

    else:
        raise ValueError("Not supported network {}".format(model_name))
        
    # Compile model
    model.compile(
        loss=loss,
        optimizer=optimizer,
    )

    return model
Example #21
    def _read_config(self, config_path):
        """Read config.

        Exit on invalid config.

        """
        self.log.debug("Reading config...")
        conf = {}
        defaults = {
            'interval': '3600',
            'timeout': '16',
            'redirects': '2',
            'ip_mode': 'random',
            'loglevel': 'WARNING',
        }
        config = SafeConfigParser(defaults=defaults)
        try:
            # Check if config is even readable
            f = open(path.expanduser(config_path), 'r')

            # Read config
            config.readfp(f)
            f.close()

            conf['user'] = config.get('general', 'user')
            conf['token'] = config.get('general', 'token')
            conf['url'] = self._is_url(config.get('general', 'host_url'))
            conf['interval'] = config.getfloat('general', 'interval')
            conf['timeout'] = config.getfloat('general', 'timeout')
            conf['redirects'] = config.getint('general', 'redirects')
            conf['ip_mode'] = self._is_mode(config.get('ip_service', 'mode'))
            conf['ip_url'] = self._is_url(config.get('ip_service', 'ip_urls'))
            conf['loglevel'] = config.get('logging', 'level',
                                          fallback='WARNING')
        except (MissingSectionHeaderError, NoSectionError, NoOptionError,
                ValueError, IOError) as e:
            self.log.critical("Configuration error: %s" % e)
            exit(1)
        return conf
Example #22
def get_train_callbacks(config):
    '''Returns a list of keras callbacks
    :param config: config parser
    :return: list of keras callbacks
    :rtype: list(tf.keras.Callback)
    '''
    callbacks = []

    callbacks.append(tf.keras.callbacks.CSVLogger(
        config.get("default", "csv_log"),
        separator=',',
        append=True
    ))

    callbacks.append(tf.keras.callbacks.ReduceLROnPlateau(
        monitor='val_loss',
        factor=0.1,
        patience=config.getint("training", "reduce_lr_patience"),
        verbose=1,
        mode='min',
        min_lr=config.getfloat("training", "min_lr"),
    ))

    callbacks.append(tf.keras.callbacks.EarlyStopping(
        monitor='val_loss',
        min_delta=0,
        patience=config.getint("training", "early_stopping_patience"),
        verbose=1,
        mode='min',
        baseline=None,
        restore_best_weights=True
    ))

    callbacks.append(tf.keras.callbacks.ModelCheckpoint(
        config.get("default", "checkpoint_path"),
        monitor='val_loss',
        verbose=1,
        save_best_only=True,
        save_weights_only=True,
        mode="min",
        period=config.getint("training", "checkpoint_period"),
    ))
    
    def epoch_begin(epoch, logs):
        print("Resetting seed")
        np.random.seed(1)

    callbacks.append(tf.keras.callbacks.LambdaCallback(on_epoch_begin=epoch_begin))

    return callbacks
Example #23
def __process(config):
    """Consolidates all project logic - from data acquisition to storage.

    Args:
        config (configparser.ConfigParser): project configuration expecting
        aws, request and io sections.
    """
    local_tz = pytz.timezone(config.get("general", "timezone"))
    endpoint = config.get("request", "endpoint")
    max_retries = config.getint("request", "max_retries")
    timeout = config.getfloat("request", "timeout")

    logging.info("Getting GPS points from {endpoint}.")
    gps_data = request.get_gps_points(endpoint, max_retries, timeout)

    temp_dir = Path(config.get("io", "temp_data_dir"))
    date_now_tz = local_tz.localize(datetime.now())
    date_now_str = datetime.strftime(date_now_tz, '%Y%m%d%H%M%S')
    file_name = f"{date_now_str}.csv"
    file_path = temp_dir / file_name

    logging.info(f"Storing GPS points in {file_path}.")
    persistence.write_data_to_csv_file(file_path, gps_data)

    access_key_id = None
    secret_access_key = None
    if config.get("general", "environment") == "dev":
        access_key_id = config.get("aws", "access_key_id")
        secret_access_key = config.get("aws", "secret_access_key")
    s3_bucket = config.get("aws", "s3_bucket")

    year = datetime.strftime(date_now_tz, '%Y')
    month = datetime.strftime(date_now_tz, '%Y%m')
    day = datetime.strftime(date_now_tz, '%Y%m%d')
    object_name = f"year={year}/month={month}/day={day}/{Path(file_path).name}"

    logging.info(f"Uploading raw gps file to s3://{object_name}.")
    persistence.upload_file_to_s3(
        str(file_path),
        s3_bucket,
        object_name=object_name,
        access_key_id=access_key_id,
        secret_access_key=secret_access_key
    )

    # Removes temp file
    if file_path.is_file():
        file_path.unlink()
Example #24
def parse_options(config, section_name, config_class):
    options = config.options(section_name)
    for option in options:
        option = option.upper()
        if option in config_class.__dict__ and not option.startswith("__"):
            if isinstance(config_class.__dict__[option], bool):
                config_class.__dict__[option] = config.getboolean(section_name, option)
            elif isinstance(config_class.__dict__[option], float):
                config_class.__dict__[option] = config.getfloat(section_name, option)
            elif isinstance(config_class.__dict__[option], int):
                config_class.__dict__[option] = config.getint(section_name, option)
            else:
                config_class.__dict__[option] = config.get(section_name, option)
        else:
            logger = logging.getLogger('InfrastructureManager')
            logger.warning("Unknown option in the IM config file. Ignoring it: " + option)
Example #25
    def __init__(self, args, config):
        self.args = args
        self.config = config
        self.model_dir = utils.get_model_dir(config)
        self.cache_dir = utils.get_cache_dir(config)
        self.category = utils.get_category(config, self.cache_dir)
        self.anchors = torch.from_numpy(utils.get_anchors(config)).contiguous()
        logging.info('use cache directory ' + self.cache_dir)
        logging.info('tensorboard --logdir ' + self.model_dir)
        if args.delete:
            logging.warning('delete model directory: ' + self.model_dir)
            shutil.rmtree(self.model_dir, ignore_errors=True)
        os.makedirs(self.model_dir, exist_ok=True)
        with open(self.model_dir + '.ini', 'w') as f:
            config.write(f)

        self.step, self.epoch, self.dnn = self.load()
        self.inference = model.Inference(self.config, self.dnn, self.anchors)
        logging.info(
            humanize.naturalsize(
                sum(var.cpu().numpy().nbytes
                    for var in self.inference.state_dict().values())))
        if self.args.finetune:
            path = os.path.expanduser(os.path.expandvars(self.args.finetune))
            logging.info('finetune from ' + path)
            self.finetune(self.dnn, path)
        self.inference = ensure_model(self.inference)
        self.inference.train()
        self.optimizer = eval(self.config.get('train', 'optimizer'))(filter(
            lambda p: p.requires_grad,
            self.inference.parameters()), self.args.learning_rate)

        self.saver = utils.train.Saver(self.model_dir,
                                       config.getint('save', 'keep'))
        self.timer_save = utils.train.Timer(config.getfloat('save', 'secs'),
                                            False)
        try:
            self.timer_eval = utils.train.Timer(
                eval(config.get('eval', 'secs')),
                config.getboolean('eval', 'first'))
        except configparser.NoOptionError:
            self.timer_eval = lambda: False
        self.summary_worker = SummaryWorker(self)
        self.summary_worker.start()
Example #26
    def _from_config(self, in_dict, section, name, dict_key=None,
                     opt_type='str'):
        config = self._config
        if dict_key is None:
            dict_key = name
        try:
            if opt_type == 'int':
                in_dict[dict_key] = config.getint(section, name)
            elif opt_type == 'float':
                in_dict[dict_key] = config.getfloat(section, name)
            elif opt_type == 'bool':
                in_dict[dict_key] = config.getboolean(section, name)
            elif opt_type == 'list':
                values = config.get(section, name)
                in_dict[dict_key] = re.split(_COMMA_DELIMITER, values)
            else:
                in_dict[dict_key] = config.get(section, name)
        except (NoOptionError, NoSectionError):
            pass
Example #27
def parse_options(config, section_name, config_class):
    options = config.options(section_name)
    for option in options:
        option = option.upper()
        if option in config_class.__dict__ and not option.startswith("__"):
            if isinstance(config_class.__dict__[option], bool):
                config_class.__dict__[option] = config.getboolean(
                    section_name, option)
            elif isinstance(config_class.__dict__[option], float):
                config_class.__dict__[option] = config.getfloat(
                    section_name, option)
            elif isinstance(config_class.__dict__[option], int):
                config_class.__dict__[option] = config.getint(
                    section_name, option)
            else:
                config_class.__dict__[option] = config.get(
                    section_name, option)
        else:
            logger = logging.getLogger('InfrastructureManager')
            logger.warn("Unknown option in the IM config file. Ignoring it: " +
                        option)
Example #28
    def __init__(self, args, config):
        self.args = args
        self.config = config
        self.model_dir = utils.get_model_dir(config)
        self.cache_dir = utils.get_cache_dir(config)
        self.category = utils.get_category(config, self.cache_dir)
        self.anchors = torch.from_numpy(utils.get_anchors(config)).contiguous()
        logging.info('use cache directory ' + self.cache_dir)
        logging.info('tensorboard --logdir ' + self.model_dir)
        if args.delete:
            logging.warning('delete model directory: ' + self.model_dir)
            shutil.rmtree(self.model_dir, ignore_errors=True)
        os.makedirs(self.model_dir, exist_ok=True)
        with open(self.model_dir + '.ini', 'w') as f:
            config.write(f)
        self.saver = utils.train.Saver(self.model_dir, config.getint('save', 'keep'))
        self.timer_save = utils.train.Timer(config.getfloat('save', 'secs'), False)
        try:
            self.timer_eval = utils.train.Timer(eval(config.get('eval', 'secs')), config.getboolean('eval', 'first'))
        except configparser.NoOptionError:
            self.timer_eval = lambda: False
        self.summary_worker = SummaryWorker(self)
        self.summary_worker.start()
Example #29
def get_dict_from_old_falcon_cfg(config):
    job_type = "SGE"
    section = 'General'
    if config.has_option(section, 'job_type'):
        job_type = config.get(section, 'job_type')

    # This was not set in the past, so we must treat it specially.
    if config.has_option(section, 'sge_option'):
        sge_option = config.get(section, 'sge_option')
    else:
        sge_option = config.get(section, 'sge_option_da')

    job_queue = ""
    if config.has_option(section, 'job_queue'):
        job_queue = config.get(section, 'job_queue')

    job_name_style = ""
    if config.has_option(section, 'job_name_style'):
        job_name_style = config.get(section, 'job_name_style')

    pwatcher_type = 'fs_based'
    if config.has_option(section, 'pwatcher_type'):
        pwatcher_type = config.get(section, 'pwatcher_type')

    default_concurrent_jobs = 8
    if config.has_option(section, 'default_concurrent_jobs'):
        default_concurrent_jobs = config.getint(
            section, 'default_concurrent_jobs')

    pwatcher_directory = 'mypwatcher'
    if config.has_option(section, 'pwatcher_directory'):
        pwatcher_directory = config.get(section, 'pwatcher_directory')

    da_concurrent_jobs = default_concurrent_jobs
    la_concurrent_jobs = default_concurrent_jobs
    cns_concurrent_jobs = default_concurrent_jobs
    pda_concurrent_jobs = default_concurrent_jobs
    pla_concurrent_jobs = default_concurrent_jobs
    fc_concurrent_jobs = default_concurrent_jobs

    if config.has_option(section, 'pa_concurrent_jobs'):
        pa_concurrent_jobs = config.getint(section, 'pa_concurrent_jobs')
        warnings.warn(
            "Deprecated setting in config: 'pa_concurrent_jobs' -- Prefer da_concurrent_jobs and la_concurrent_jobs separately")
        da_concurrent_jobs = la_concurrent_jobs = pa_concurrent_jobs
    if config.has_option(section, 'ovlp_concurrent_jobs'):
        ovlp_concurrent_jobs = config.getint(section, 'ovlp_concurrent_jobs')
        warnings.warn(
            "Deprecated setting in config: 'ovlp_concurrent_jobs' -- Prefer pda_concurrent_jobs and pla_concurrent_jobs separately")
        pda_concurrent_jobs = pla_concurrent_jobs = ovlp_concurrent_jobs
    if config.has_option(section, 'da_concurrent_jobs'):
        da_concurrent_jobs = config.getint(section, 'da_concurrent_jobs')
    if config.has_option(section, 'la_concurrent_jobs'):
        la_concurrent_jobs = config.getint(section, 'la_concurrent_jobs')
    if config.has_option(section, 'cns_concurrent_jobs'):
        cns_concurrent_jobs = config.getint(section, 'cns_concurrent_jobs')
    if config.has_option(section, 'pda_concurrent_jobs'):
        pda_concurrent_jobs = config.getint(section, 'pda_concurrent_jobs')
    if config.has_option(section, 'pla_concurrent_jobs'):
        pla_concurrent_jobs = config.getint(section, 'pla_concurrent_jobs')
    if config.has_option(section, 'fc_concurrent_jobs'):
        fc_concurrent_jobs = config.getint(section, 'fc_concurrent_jobs')

    #appending = False
    # if config.has_option(section, 'appending'):
    #    appending = config.get(section, 'appending')
    #    if appending == "True":
    #        appending = True

    #openending = False
    # if config.has_option(section, 'openending'):
    #    openending = config.get(section, 'openending')
    #    if openending == "True":
    #        openending = True

    input_type = "raw"
    if config.has_option(section, 'input_type'):
        input_type = config.get(section, 'input_type')

    overlap_filtering_setting = """--max_diff 1000 --max_cov 1000 --min_cov 2"""
    if config.has_option(section, 'overlap_filtering_setting'):
        overlap_filtering_setting = config.get(
            section, 'overlap_filtering_setting')

    pa_HPCdaligner_option = """-v -D24 -t16 -e.70 -l1000 -s100"""
    if config.has_option(section, 'pa_HPCdaligner_option'):
        pa_HPCdaligner_option = config.get(section, 'pa_HPCdaligner_option')

    ovlp_HPCdaligner_option = """ -v -D24 -t32 -h60 -e.96 -l500 -s1000"""
    if config.has_option(section, 'ovlp_HPCdaligner_option'):
        ovlp_HPCdaligner_option = config.get(
            section, 'ovlp_HPCdaligner_option')

    pa_HPCdaligner_option = update_HPCdaligner_option(pa_HPCdaligner_option)
    ovlp_HPCdaligner_option = update_HPCdaligner_option(
        ovlp_HPCdaligner_option)

    pa_DBsplit_option = """ -x500 -s200"""
    if config.has_option(section, 'pa_DBsplit_option'):
        pa_DBsplit_option = config.get(section, 'pa_DBsplit_option')

    skip_checks = False
    if config.has_option(section, 'skip_checks'):
        skip_checks = config.getboolean(section, 'skip_checks')

    if config.has_option(section, 'dust'):
        warnings.warn(
            "The 'dust' option is deprecated and ignored. We always run DBdust now. Use pa_DBdust_option to override its default arguments.")

    #pa_DBdust_option = "-w128 -t2.5 -m20"
    pa_DBdust_option = ""  # Gene recommends the defaults.
    if config.has_option(section, 'pa_DBdust_option'):
        pa_DBdust_option = config.get(section, 'pa_DBdust_option')

    dazcon = False
    if config.has_option(section, 'dazcon'):
        dazcon = config.getboolean(section, 'dazcon')

    pa_dazcon_option = "-j 4 -x -l 500"
    if config.has_option(section, 'pa_dazcon_option'):
        pa_dazcon_option = config.get(section, 'pa_dazcon_option')

    # DAMASKER options
    """
    Example config usage:
    pa_use_tanmask = true
    pa_use_repmask = true
    pa_HPCtanmask_option =
    pa_repmask_levels = 2
    pa_HPCrepmask_1_option = -g1 -c20 -mtan
    pa_HPCrepmask_2_option = -g10 -c15 -mtan -mrep1
    pa_damasker_HPCdaligner_option = -mtan -mrep1 -mrep10
    """
    pa_use_tanmask = False
    if config.has_option(section, 'pa_use_tanmask'):
        pa_use_tanmask = config.getboolean(section, 'pa_use_tanmask')

    pa_HPCtanmask_option = ""
    if config.has_option(section, 'pa_HPCtanmask_option'):
        pa_HPCtanmask_option = config.get(section, 'pa_HPCtanmask_option')

    pa_use_repmask = False
    if config.has_option(section, 'pa_use_repmask'):
        pa_use_repmask = config.getboolean(section, 'pa_use_repmask')

    pa_repmask_levels = 0   # REPmask tool can be used multiple times.
    if config.has_option(section, 'pa_repmask_levels'):
        pa_repmask_levels = config.getint(section, 'pa_repmask_levels')

    pa_HPCrepmask_1_option = """ -g1 -c20 -mtan"""
    if config.has_option(section, 'pa_HPCrepmask_1_option'):
        pa_HPCrepmask_1_option = config.get(section, 'pa_HPCrepmask_1_option')

    pa_HPCrepmask_2_option = """ -g10 -c15 -mtan -mrep1"""
    if config.has_option(section, 'pa_HPCrepmask_2_option'):
        pa_HPCrepmask_2_option = config.get(section, 'pa_HPCrepmask_2_option')

    pa_damasker_HPCdaligner_option = """ -mtan -mrep1 -mrep10"""    # Repeat masks need to be passed to Daligner.
    if config.has_option(section, 'pa_damasker_HPCdaligner_option'):
        pa_damasker_HPCdaligner_option = config.get(
            section, 'pa_damasker_HPCdaligner_option')
    # End of DAMASKER options.

    ovlp_DBsplit_option = """ -x500 -s200"""
    if config.has_option(section, 'ovlp_DBsplit_option'):
        ovlp_DBsplit_option = config.get(section, 'ovlp_DBsplit_option')

    falcon_sense_option = """ --output_multi --min_idt 0.70 --min_cov 2 --max_n_read 1800 --n_core 6"""
    if config.has_option(section, 'falcon_sense_option'):
        falcon_sense_option = config.get(section, 'falcon_sense_option')
    if 'local_match_count' in falcon_sense_option or 'output_dformat' in falcon_sense_option:
        raise Exception('Please remove obsolete "--local_match_count_*" or "--output_dformat"' +
                        ' from "falcon_sense_option" in your cfg: %s' % repr(falcon_sense_option))

    falcon_sense_skip_contained = False
    if config.has_option(section, 'falcon_sense_skip_contained'):
        falcon_sense_skip_contained = config.getboolean(
            section, 'falcon_sense_skip_contained')

    falcon_sense_greedy = False
    if config.has_option(section, 'falcon_sense_greedy'):
        falcon_sense_greedy = config.getboolean(section, 'falcon_sense_greedy')

    LA4Falcon_preload = ""
    if config.has_option(section, 'la4falcon_preload'):
        LA4Falcon_preload = config.getboolean(section, 'la4falcon_preload')

    genome_size = 0
    if config.has_option(section, 'genome_size'):
        genome_size = config.getint(section, 'genome_size')

    seed_coverage = 20
    if config.has_option(section, 'seed_coverage'):
        seed_coverage = config.getfloat(section, 'seed_coverage')

    length_cutoff = -1
    if config.has_option(section, 'length_cutoff'):
        length_cutoff = config.getint(section, 'length_cutoff')
    if length_cutoff < 0:
        if genome_size < 1:
            raise Exception(
                'Must specify either length_cutoff>0 or genome_size>0')

    length_cutoff_pr = config.getint(section, 'length_cutoff_pr')
    input_fofn_fn = config.get(section, 'input_fofn')

    # This one depends on length_cutoff_pr for its default.
    fc_ovlp_to_graph_option = ''
    if config.has_option(section, 'fc_ovlp_to_graph_option'):
        fc_ovlp_to_graph_option = config.get(
            section, 'fc_ovlp_to_graph_option')
    if '--min_len' not in fc_ovlp_to_graph_option:
        fc_ovlp_to_graph_option += ' --min_len %d' % length_cutoff_pr

    bestn = 12
    if config.has_option(section, 'bestn'):
        bestn = config.getint(section, 'bestn')

    if config.has_option(section, 'target'):
        target = config.get(section, 'target')
        if target not in ["overlapping", "pre-assembly", "assembly"]:
            msg = """ Target has to be "overlapping", "pre-assembly" or "assembly" in this verison. You have an unknown target %s in the configuration file.  """ % target
            raise Exception(msg)
    else:
        logger.info(""" No target specified, assuming "assembly" as target """)
        target = "assembly"

    if config.has_option(section, 'stop_all_jobs_on_failure'):
        stop_all_jobs_on_failure = config.getboolean(
            section, 'stop_all_jobs_on_failure')
    else:
        # Good default. Rarely needed, since we already stop early if *all* tasks fail
        # in a given refresh.
        stop_all_jobs_on_failure = False
    if config.has_option(section, 'use_tmpdir'):
        tmpdir = config.get(section, 'use_tmpdir')
        if '/' in tmpdir:
            tempfile.tempdir = tmpdir
            use_tmpdir = tmpdir
        else:
            use_tmpdir = config.getboolean(section, 'use_tmpdir')
    else:
        use_tmpdir = False

    TEXT_FILE_BUSY = 'avoid_text_file_busy'
    if config.has_option(section, TEXT_FILE_BUSY):
        bash.BUG_avoid_Text_file_busy = config.getboolean(
            section, TEXT_FILE_BUSY)

    hgap_config = {  # "input_fofn_fn" : input_fofn_fn, # deprecated
        "input_fofn": input_fofn_fn,
        "target": target,
        "job_type": job_type,
        "job_queue": job_queue,
        "job_name_style": job_name_style,
        "input_type": input_type,
        #"openending": openending,
        "default_concurrent_jobs": default_concurrent_jobs,
        "da_concurrent_jobs": da_concurrent_jobs,
        "la_concurrent_jobs": la_concurrent_jobs,
        "cns_concurrent_jobs": cns_concurrent_jobs,
        "pda_concurrent_jobs": pda_concurrent_jobs,
        "pla_concurrent_jobs": pla_concurrent_jobs,
        "fc_concurrent_jobs": fc_concurrent_jobs,
        "overlap_filtering_setting": overlap_filtering_setting,
        "genome_size": genome_size,
        "seed_coverage": seed_coverage,
        "length_cutoff": length_cutoff,
        "length_cutoff_pr": length_cutoff_pr,
        "sge_option": sge_option,
        "sge_option_da": config.get(section, 'sge_option_da'),
        "sge_option_la": config.get(section, 'sge_option_la'),
        "sge_option_pda": config.get(section, 'sge_option_pda'),
        "sge_option_pla": config.get(section, 'sge_option_pla'),
        "sge_option_fc": config.get(section, 'sge_option_fc'),
        "sge_option_cns": config.get(section, 'sge_option_cns'),
        "pa_HPCdaligner_option": pa_HPCdaligner_option,
        "pa_use_tanmask": pa_use_tanmask,
        "pa_HPCtanmask_option": pa_HPCtanmask_option,
        "pa_use_repmask": pa_use_repmask,
        "pa_repmask_levels": pa_repmask_levels,
        "pa_HPCrepmask_1_option": pa_HPCrepmask_1_option,
        "pa_HPCrepmask_2_option": pa_HPCrepmask_2_option,
        "pa_damasker_HPCdaligner_option": pa_damasker_HPCdaligner_option,
        "ovlp_HPCdaligner_option": ovlp_HPCdaligner_option,
        "pa_DBsplit_option": pa_DBsplit_option,
        "skip_checks": skip_checks,
        "pa_DBdust_option": pa_DBdust_option,
        "dazcon": dazcon,
        "pa_dazcon_option": pa_dazcon_option,
        "ovlp_DBsplit_option": ovlp_DBsplit_option,
        "fc_ovlp_to_graph_option": fc_ovlp_to_graph_option,
        "falcon_sense_option": falcon_sense_option,
        "falcon_sense_skip_contained": falcon_sense_skip_contained,
        "falcon_sense_greedy": falcon_sense_greedy,
        "LA4Falcon_preload": LA4Falcon_preload,
        "stop_all_jobs_on_failure": stop_all_jobs_on_failure,
        "use_tmpdir": use_tmpdir,
        "pwatcher_type": pwatcher_type,
        "pwatcher_directory": pwatcher_directory,
        TEXT_FILE_BUSY: bash.BUG_avoid_Text_file_busy,
    }
    provided = dict(config.items(section))
    unused = set(provided) - set(k.lower() for k in hgap_config)
    if unused:
        warnings.warn("Unexpected keys in input config: %s" % repr(unused))

    hgap_config["install_prefix"] = sys.prefix

    return hgap_config
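Most of the has_option/get pairs in this function can be collapsed with the fallback keyword, which configparser applies exactly when the option is missing (a sketch of a few of the defaults above):

job_type = config.get(section, 'job_type', fallback='SGE')
job_queue = config.get(section, 'job_queue', fallback='')
bestn = config.getint(section, 'bestn', fallback=12)
seed_coverage = config.getfloat(section, 'seed_coverage', fallback=20)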
Example #30
import serial
import os
import sqlite3
import logging
import logging.config
import ConfigParser
from lcd16x2 import lcd16x2

# Read parameters from configuration file
config = ConfigParser.ConfigParser()
config.read('./livLCD.config')

LCDAddress = config.get('LCD_DISPLAY', 'I2C_address')
LCDPort = config.getint('LCD_DISPLAY', 'port_no')

displayTime = config.getfloat('LCD_CYCLE_TIME', 'display_time')
displayCycles = config.getint('LCD_CYCLE_TIME', 'no_of_cycles')

displayClock = config.getboolean('LCD_DISPLAY', 'display_clock')

logging.config.fileConfig('logging.ini')
logger = logging.getLogger(__name__)

#read software version
config.read('../.livpi')
version = config.get('LIV_SOFTWARE', 'version')
supported_devices = config.get('LIV_SOFTWARE', 'supported_devices')

#read format C/F, hPa/inchHg
config.read('../livDB/livDB.config')
tempFormat = config.get('FORMAT', 'temperature')
Example #31
# Overall model test


import cPickle
import sys
import os
import logging
import logging.config
from utils import process_image_file
from smokeDetection_config import config

from importlib import import_module
feature = import_module(config.get("feature", "feature_file"))

# threshold
threshold = config.getfloat("model", "threshold")

logging.config.fileConfig("logger.conf")
logger = logging.getLogger("smoke_logger")

if __name__ == '__main__':
    if len(sys.argv) != 2:
        image_path = config.get("data", "image_eval_dir")
    else:
        image_path = sys.argv[1]

    # overall model
    overallModel_file = config["model"]["overallModel_file"]
    with open(overallModel_file, 'rb') as fid:
        classifier = cPickle.load(fid)
        logger.info("overall model imported successfully.")
Example #32
        def __init__(self, loadedconfig, problem, logmanager = DefaultRunLog,
                     **kwargs):
                config, self.arnconfig = loadedconfig
                logging.config.fileConfig(config.get('default','logconf'),
                                          disable_existing_loggers=False)
                log.info('Setting up evolutionary workbench...')
                self.runlog = logmanager(log)
                self.trace =  config.getboolean('default',
                                                'ancestortrace')
                mainmod = __import__('__main__')

                self.problem = problem
                self.popsize = config.getint('default','popsize')
                self.parentpsize = config.getint('default','parentpopsize')
                self.maxiters = config.getint('default','numiters')
                self.popratio = self.popsize / self.parentpsize

                opnames = config.get('default','operators')
                oprates = config.get('default','oprates')
                self.opargs = config.get('default','opargs').split(',')
                self.ops_, self.oprates = _initialize_ops(opnames,oprates)
                log.debug(self.ops_)
                log.debug(self.oprates)

                self.xover_ = config.get('default','xover')
                if self.xover_:
                    log.info('Initializing crossover operator...')
                    self.xover_ = getattr(mainmod,self.xover_)
                    self.xrate = config.getfloat('default','xrate')

                aclass = config.get('default','agent').split('.')
                mod = aclass[0] + "." + aclass[1]
                self.device = __import__(mod,fromlist=aclass[2])
                self.problem.eval_ = partial(self.problem.eval_,
                                             device = self.device)
                log.info("CoDe module: %s"%(mod,))
                log.info("Agent class: %s"%(aclass[2],))
                self.agentclass = partial(getattr(self.device, aclass[2]),
                                          config = self.arnconfig,
                                          problem = problem )
                self.mutrate = config.getfloat('default','mutrate')
                self.orig_mutrate = self.mutrate
                self.mutate_ = partial(bitflipmutation,
                                       mutrate = self.mutrate)
                self.improves = 0
                self.tempevals = 0
                self.adfcount = 0

                self.localsearch = config.get('default','localsearch')
                if self.localsearch:
                        log.info('Initializing local search holder')
                        self.localsearch = getattr(mainmod,
                                                   self.localsearch)(5,codefun)

                self.basicadf = config.get('default','adf')
                if self.basicadf:
                        log.info('Initializing multiplex adf skeleton')
                        self.basicadf = getattr(mainmod,
                                                   self.basicadf)

                self.interactive = config.get('default','gui')
                if self.interactive:
                        log.info('Interactive mode enabled. Initializing GUI.')
                        _guiclass = getattr(mainmod,
                                           self.interactive)
                        self.gui = _guiclass(self.popsize,problem=self.problem)

                self.numevals = None
                self.population = None
                self.parents = None
                self.best = None
                self.itercount = None

                self._selectionop = config.get('default','selectionop')
                if self._selectionop:
                        log.info('Initializing selection operator...')
                        self._selectionop = getattr(mainmod, self._selectionop)
                else:
                        self._selectionop = greedyselection
Example #33
    def create(cls, name, config):
        try:
            return cls(name, config.getfloat('threshold', fallback=2.5))
        except ValueError as error:
            raise ConfigurationError(
                'Unable to parse threshold as float: {}'.format(error))
Example #34
def get_parse_args():
    '''
    parse command line input arguments
    '''
    parser = argparse.ArgumentParser(
        description='Calculates: I_{scaled}(Q) = K*I(Q)+b')
    parser.add_argument('-r',
                        '--reference',
                        help='File used as reference to scale all curves',
                        required=True)
    parser.add_argument(
        '-i',
        '--input',
        help='Input files (if the reference file is included here it will be ignored). Use wildcards, e.g., \'xpto_*\'.',
        required=True)
    parser.add_argument(
        '-q',
        '--qmin',
        help='Q min. If not given, gets it from the config file.',
        required=False,
        type=float,
        default=config.getfloat('General', 'qmin'))
    parser.add_argument(
        '-m',
        '--qmax',
        help='Q max. If not given, gets it from the config file.',
        required=False,
        type=float,
        default=config.getfloat('General', 'qmax'))
    parser.add_argument(
        '-g',
        '--discard-begin',
        help='Discard n points from the beginning of every dataset.',
        required=False,
        type=int,
        default=config.getint('General', 'discard_points_begin'))
    parser.add_argument('-e',
                        '--discard-end',
                        help='Discard n points from the end of every dataset.',
                        required=False,
                        type=int,
                        default=config.getint('General',
                                              'discard_points_end'))
    parser.add_argument(
        '-n',
        '--no-save',
        action='store_true',
        help='Do not save the scaled datasets as _scaled.csv.',
        required=False,
        default=(not config.getboolean('General', 'save_scaled_files')))

    groupk = parser.add_mutually_exclusive_group()
    groupk.add_argument('-k',
                        help='Default K value for I_{scaled}(Q) = K*I(Q)+b.',
                        required=False,
                        type=float)
    groupk.add_argument(
        '--k-list',
        nargs='+',
                        help='List of K values. Must be the same length as data - 1',
        type=float,
        required=False)

    groupb = parser.add_mutually_exclusive_group()
    groupb.add_argument('-b',
                        help='Default b value for I_{scaled}(Q) = K*I(Q)+b.',
                        required=False,
                        type=float)
    groupb.add_argument(
        '--b-list',
        nargs='+',
        help='List of B values. Must be the same length as data - 1',
        type=float,
        required=False)

    args = vars(parser.parse_args())
    return args
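A caveat with pulling argparse defaults from the config file, as above: argparse applies type= only to strings parsed from the command line (or string defaults), never to non-string defaults, which is why the discard counts are read with getint rather than getfloat. A minimal demonstration:

import argparse

p = argparse.ArgumentParser()
p.add_argument('--n', type=int, default=2.5)
print(p.parse_args([]).n)            # 2.5 -- the non-string default passes through
print(p.parse_args(['--n', '3']).n)  # 3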
Example #35
def main(field,region,spws=[],stackedspws=[],stackedlabels=[],
         fluxtype='flux',linetype='dirty',weight=False,
         outfile='electron_temps.txt',config_file=None,auto=False):
    """
    Extract spectrum from region in each RRL image, measure continuum
    brightness and fit Gaussian to measure RRL properties. Also fit stacked
    lines. Compute electron temperature.

    Inputs:
      field       = field to analyze
      region = filename of region where to extract spectrum
      spws = spws to analyze
      stackedspws = list of comma separated string of spws to stack
      stackedlabels = labels for stacked lines
      fluxtype = what type of flux to measure ('flux' or 'mean')
      linetype = 'clean' or 'dirty'
      weight = if True, weight the stacked spectra by continuum/rms^2
      outfile = where the results go
      config_file = configuration file

    Returns:
      Nothing
    """
    #
    # start logger
    #
    logger = logging.getLogger("main")
    #
    # Check inputs
    #
    if not os.path.exists(config_file):
        logger.critical('Configuration file not found')
        raise ValueError('Configuration file not found!')
    #
    # load configuration file
    #
    config = ConfigParser.ConfigParser()
    logger.info("Reading configuration file {0}".format(config_file))
    config.read(config_file)
    logger.info("Done.")
    #
    # Check if we supplied spws, if not, use all
    #
    if len(spws) == 0:
        spws = config.get("Spectral Windows","Line")
    #
    # Check spws exist
    #
    goodspws = []
    for spw in spws.split(','):
        if os.path.isdir('{0}.spw{1}.channel.{2}.pbcor.image'.format(field,spw,linetype)):
            goodspws.append(spw)
    spws = ','.join(goodspws)
    #
    # Get lineid, line frequency for each spw
    #
    alllinespws = config.get("Spectral Windows","Line").split(',')
    alllineids = config.get("Clean","lineids").split(',')
    allrestfreqs = config.get("Clean","restfreqs").split(',')
    lineids = [alllineids[alllinespws.index(spw)] for spw in spws.split(',')]
    restfreqs = [float(allrestfreqs[alllinespws.index(spw)].replace('MHz','')) for spw in spws.split(',')]
    #
    # Set-up file
    #
    with open(outfile,'w') as f:
        # 0       1           2      3      4        5        6     7      8        9        10        11          12        13          14
        # lineid  frequency   velo   e_velo line     e_line   fwhm  e_fwhm cont     rms      line2cont e_line2cont elec_temp e_elec_temp linesnr
        # #       MHz         km/s   km/s   mJy/beam mJy/beam km/s  km/s   mJy/beam mJy/beam                       K         K           
        # H122a   9494.152594 -100.0 50.0   1000.0   100.0    100.0 10.0   1000.0   100.0    0.050     0.001       10000.0   1000.0      10000.0
        # stacked 9494.152594 -100.0 50.0   1000.0   100.0    100.0 10.0   1000.0   100.0    0.050     0.001       10000.0   1000.0      10000.0
        # 1234567 12345678902 123456 123456 12345678 12345678 12345 123456 12345678 12345678 123456789 12345678901 123456789 12345678901 1234567
        #
        headerfmt = '{0:12} {1:12} {2:6} {3:6} {4:8} {5:8} {6:5} {7:6} {8:8} {9:8} {10:9} {11:11} {12:9} {13:11} {14:7}\n'
        rowfmt = '{0:12} {1:12.6f} {2:6.1f} {3:6.1f} {4:8.1f} {5:8.1f} {6:5.1f} {7:6.1f} {8:8.1f} {9:8.1f} {10:9.3f} {11:11.3f} {12:9.1f} {13:11.1f} {14:7.1f}\n'
        f.write(headerfmt.format('lineid','frequency','velo','e_velo',
                                 'line','e_line','fwhm','e_fwhm',
                                 'cont','rms','line2cont','e_line2cont',
                                 'elec_temp','e_elec_temp','linesnr'))
        if fluxtype == 'flux':
            fluxunit = 'mJy'
        else:
            fluxunit = 'mJy/beam'
        f.write(headerfmt.format('#','MHz','km/s','km/s',
                                 fluxunit,fluxunit,'km/s','km/s',
                                 fluxunit,fluxunit,'','','K','K',''))
        #
        # Fit RRLs
        #
        goodplots = []
        for spw,lineid,restfreq in zip(spws.split(','),lineids,restfreqs):
            imagename = '{0}.spw{1}.channel.{2}.pbcor.image'.format(field,spw,linetype)
            imagetitle = '{0}.{1}.channel.{2}.pbcor.image'.format(field,lineid,linetype)
            outfile = '{0}.{1}.channel.{2}.pbcor.image.{3}.spec.pdf'.format(field,lineid,linetype,region)
            # extract spectrum
            specdata = dump_spec(imagename,region,fluxtype)
            if specdata is None:
                # outside of primary beam
                continue
            # fit RRL
            line_brightness, e_line_brightness, line_fwhm, e_line_fwhm, \
              line_center, e_line_center, cont_brightness, rms = \
              fit_line(imagetitle,region,fluxtype,specdata,outfile,auto=auto)
            if line_brightness is None:
                # skipping line
                continue
            channel_width = config.getfloat("Clean","chanwidth")
            linesnr = 0.7*line_brightness/rms * (line_fwhm/channel_width)**0.5
            # calc Te
            line_to_cont, e_line_to_cont, elec_temp, e_elec_temp = \
              calc_te(line_brightness, e_line_brightness, line_fwhm,
                      e_line_fwhm, line_center, e_line_center,
                      cont_brightness, rms, restfreq)
            #
            # Check crazy, wonky fits if we're in auto mode
            #
            if auto:
                if np.any(line_brightness > 1.e6): # 1000 Jy
                    continue
                if np.any(line_to_cont > 10.):
                    continue
                if np.any(np.isinf(e_line_fwhm)) or np.any(np.isinf(e_line_center)):
                    continue
            #
            # Sort line parameters by brightness
            #
            sortind = np.argsort(line_brightness)[::-1]
            # write line
            multcomps = ['(a)','(b)','(c)','(d)','(e)']
            for multcomp,c,e_c,b,e_b,fw,e_fw,l2c,e_l2c,te,e_te,snr in \
                zip(multcomps,line_center[sortind],e_line_center[sortind],
                    line_brightness[sortind],e_line_brightness[sortind],
                    line_fwhm[sortind],e_line_fwhm[sortind],
                    line_to_cont[sortind],e_line_to_cont[sortind],
                    elec_temp[sortind],e_elec_temp[sortind],
                    linesnr[sortind]):
                if len(line_brightness) == 1:
                    mylineid = lineid
                else:
                    mylineid = lineid+multcomp
                f.write(rowfmt.format(mylineid, restfreq,
                                      c,e_c,b,e_b,fw,e_fw,
                                      cont_brightness, rms,
                                      l2c,e_l2c,te,e_te,snr))
            goodplots.append(outfile)
        #
        # Fit stacked RRLs
        #
        for my_stackedspws, stackedlabel in zip(stackedspws,stackedlabels):
            # extract spectrum from each image
            specdatas = []
            specfreqs = []
            weights = []
            avgspecdata = None
            for spw in my_stackedspws.split(','):
                imagename = '{0}.spw{1}.channel.{2}.pbcor.image'.format(field,spw,linetype)
                specdata = dump_spec(imagename,region,fluxtype)
                if specdata is None:
                    # outside of primary beam
                    continue
                avgspecdata = specdata
                specdatas.append(specdata['flux'])
                specfreqs.append(np.nanmean(specdata['freq']))
                # estimate rms
                if weight:
                    rms = calc_rms(specdata['flux'])
                    weights.append(np.nanmean(specdata['flux'])/rms**2.)
            specdatas = np.array(specdatas)
            specfreqs = np.array(specfreqs)
            weights = np.array(weights)
            # average spectrum
            if weight:
                specaverage = np.average(specdatas,axis=0,weights=weights)
                stackedrestfreq = np.average(specfreqs,weights=weights)
                outfile = '{0}.{1}.channel.{2}.pbcor.image.{3}.wt.spec.pdf'.format(field,stackedlabel,linetype,region)
                imagetitle = '{0}.{1}.channel.{2}.pbcor.image (wt)'.format(field,stackedlabel,linetype)
            else:
                specaverage = np.nanmean(specdatas,axis=0)
                stackedrestfreq = np.nanmean(specfreqs)
                outfile = '{0}.{1}.channel.{2}.pbcor.image.{3}.spec.pdf'.format(field,stackedlabel,linetype,region)
                imagetitle = '{0}.{1}.channel.{2}.pbcor.image'.format(field,stackedlabel,linetype)
            avgspecdata['flux'] = specaverage
            # fit RRL
            line_brightness, e_line_brightness, line_fwhm, e_line_fwhm, \
              line_center, e_line_center, cont_brightness, rms = \
              fit_line(imagetitle,region,fluxtype,avgspecdata,outfile,auto=auto)
            if line_brightness is None:
                # skipping line
                continue
            channel_width = config.getfloat("Clean","chanwidth")
            linesnr = 0.7*line_brightness/rms * (line_fwhm/channel_width)**0.5
            # calc Te
            line_to_cont, e_line_to_cont, elec_temp, e_elec_temp = \
              calc_te(line_brightness, e_line_brightness, line_fwhm,
                      e_line_fwhm, line_center, e_line_center,
                      cont_brightness, rms, stackedrestfreq)
            #
            # Check crazy, wonky fits if we're in auto mode
            #
            if auto:
                if np.any(line_brightness > 1.e6): # 1000 Jy
                    continue
                if np.any(line_to_cont > 10.):
                    continue
                if np.any(np.isinf(e_line_fwhm)) or np.any(np.isinf(e_line_center)):
                    continue
            #
            # Sort line parameters by line_brightness
            #
            sortind = np.argsort(line_brightness)[::-1]
            # write line
            multcomps = ['(a)','(b)','(c)','(d)','(e)']
            for multcomp,c,e_c,b,e_b,fw,e_fw,l2c,e_l2c,te,e_te,snr in \
                zip(multcomps,line_center[sortind],e_line_center[sortind],
                    line_brightness[sortind],e_line_brightness[sortind],
                    line_fwhm[sortind],e_line_fwhm[sortind],
                    line_to_cont[sortind],e_line_to_cont[sortind],
                    elec_temp[sortind],e_elec_temp[sortind],
                    linesnr[sortind]):
                if len(line_brightness) == 1:
                    mylineid = stackedlabel
                else:
                    mylineid = stackedlabel+multcomp
                f.write(rowfmt.format(mylineid, stackedrestfreq,
                                      c,e_c,b,e_b,fw,e_fw,
                                      cont_brightness, rms,
                                      l2c,e_l2c,te,e_te,snr))
            goodplots.append(outfile)
    #
    # Generate TeX file of all plots
    #
    logger.info("Generating PDF...")
    # fix filenames so LaTeX doesn't complain
    plots = ['{'+fn.replace('.pdf','')+'}.pdf' for fn in goodplots]
    fname = '{0}.{1}.spectra.tex'.format(region,linetype)
    if weight:
        fname = '{0}.{1}.wt.spectra.tex'.format(region,linetype)
    with open(fname,'w') as f:
        f.write(r"\documentclass{article}"+"\n")
        f.write(r"\usepackage{graphicx}"+"\n")
        f.write(r"\usepackage[margin=0.1cm]{geometry}"+"\n")
        f.write(r"\begin{document}"+"\n")
        for i in range(0,len(plots),6):
            f.write(r"\begin{figure}"+"\n")
            f.write(r"\centering"+"\n")
            if len(plots) > i: f.write(r"\includegraphics[width=0.45\textwidth]{"+plots[i]+"}\n")
            if len(plots) > i+3: f.write(r"\includegraphics[width=0.45\textwidth]{"+plots[i+3]+"}\n")
            if len(plots) > i+1: f.write(r"\includegraphics[width=0.45\textwidth]{"+plots[i+1]+"}\n")
            if len(plots) > i+4: f.write(r"\includegraphics[width=0.45\textwidth]{"+plots[i+4]+"}\n")
            if len(plots) > i+2: f.write(r"\includegraphics[width=0.45\textwidth]{"+plots[i+2]+"}\n")
            if len(plots) > i+5: f.write(r"\includegraphics[width=0.45\textwidth]{"+plots[i+5]+"}\n")
            f.write(r"\end{figure}"+"\n")
            f.write(r"\clearpage"+"\n")
        f.write(r"\end{document}")
    os.system('pdflatex -interaction=batchmode {0}'.format(fname))
    logger.info("Done.")
Exemplo n.º 36
0
        if not script:
            continue
    except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
        continue

    hook_manager.register_hook(hook, script)
    logger.info("Registered script '%s' for hook '%s'." % (script, hook))

# Initialize the tunnel manager.
tunnel_manager = broker.TunnelManager(
    hook_manager=hook_manager,
    max_tunnels=config.getint('broker', 'max_tunnels'),
    tunnel_id_base=config.getint('broker', 'tunnel_id_base'),
    tunnel_port_base=config.getint('broker', 'port_base'),
    namespace=config.get('broker', 'namespace'),
    connection_rate_limit=config.getfloat('broker', 'connection_rate_limit'),
    pmtu_fixed=config.getint('broker', 'pmtu'),
    log_ip_addresses=config.getboolean('log', 'log_ip_addresses'),
)
tunnel_manager.initialize()

logger.info("Maximum number of tunnels is %d." % tunnel_manager.max_tunnels)
logger.info("Tunnel identifier base is %d." % tunnel_manager.tunnel_id_base)
logger.info("Tunnel port base is %d." % tunnel_manager.tunnel_port_base)
logger.info("Namespace is %s." % tunnel_manager.namespace)

# Initialize one broker for each port.
brokers = []
broker_host = config.get('broker', 'address')
for port in config.get('broker', 'port').split(','):
    try:
Exemplo n.º 37
0
    logging.config.fileConfig(sololink_conf)
    logger = logging.getLogger("pair")

    logger.info("pair_solo.py starting")

    config = ConfigParser.SafeConfigParser()

    # if the config file is not found, an empty list is returned and the "get"
    # operations below fail
    config.read(sololink_conf)

    # read configuration items
    try:
        controller_link_port = config.getint("pairing", "controller_link_port")
        wifi_connect_timeout = \
                config.getfloat("pairing", "wifi_connect_timeout")
        connect_request_interval = \
                config.getfloat("pairing", "connect_request_interval")
        connect_ack_timeout = config.getfloat("pairing", "connect_ack_timeout")
        button_filename = config.get("pairing", "button_filename")
        solo_ip = config.get("solo", "soloIp")
        controller_ip = config.get("solo", "artooIp")
    except:
        logger.error("error reading config from %s", sololink_conf)
        sys.exit(1)

    try:
        check_versions = config.getboolean("solo", "pairCheckVersions")
    except:
        check_versions = True  # default
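The reads above imply a sololink.conf along these lines; a sketch with illustrative values (actual ports, paths and addresses may differ):

from configparser import ConfigParser

config = ConfigParser()
config.read_string("""
[pairing]
controller_link_port = 5501
wifi_connect_timeout = 20.0
connect_request_interval = 1.0
connect_ack_timeout = 5.0
button_filename = /dev/input/event0

[solo]
soloIp = 10.1.1.10
artooIp = 10.1.1.1
pairCheckVersions = true
""")
assert config.getfloat("pairing", "connect_ack_timeout") == 5.0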
Exemplo n.º 38
0
def get_dict_from_old_falcon_cfg(config):
    global job_type  # TODO: Stop using global for wait_for_file().
    job_type = "SGE"
    section = 'General'
    if config.has_option(section, 'job_type'):
        job_type = config.get(section, 'job_type')

    pa_concurrent_jobs = 8
    if config.has_option(section, 'pa_concurrent_jobs'):
        pa_concurrent_jobs = config.getint(section, 'pa_concurrent_jobs')

    cns_concurrent_jobs = 8
    if config.has_option(section, 'cns_concurrent_jobs'):
        cns_concurrent_jobs = config.getint(section, 'cns_concurrent_jobs')

    ovlp_concurrent_jobs = 8
    if config.has_option(section, 'ovlp_concurrent_jobs'):
        ovlp_concurrent_jobs = config.getint(section, 'ovlp_concurrent_jobs')

    #appending = False
    #if config.has_option(section, 'appending'):
    #    appending = config.get(section, 'appending')
    #    if appending == "True":
    #        appending = True

    openending = False
    if config.has_option(section, 'openending'):
        openending = config.get(section, 'openending')
        if openending == "True":
            openending = True

    input_type = "raw"
    if config.has_option(section, 'input_type'):
        input_type = config.get(section, 'input_type')

    overlap_filtering_setting = """--max_diff 1000 --max_cov 1000 --min_cov 2"""
    if config.has_option(section, 'overlap_filtering_setting'):
        overlap_filtering_setting = config.get(section, 'overlap_filtering_setting')

    pa_HPCdaligner_option = """-v -dal4 -t16 -e.70 -l1000 -s100"""
    if config.has_option(section, 'pa_HPCdaligner_option'):
        pa_HPCdaligner_option = config.get(section, 'pa_HPCdaligner_option')

    ovlp_HPCdaligner_option = """ -v -dal24 -t32 -h60 -e.96 -l500 -s1000"""
    if config.has_option(section, 'ovlp_HPCdaligner_option'):
        ovlp_HPCdaligner_option = config.get(section, 'ovlp_HPCdaligner_option')

    pa_DBsplit_option = """ -x500 -s200"""
    if config.has_option(section, 'pa_DBsplit_option'):
        pa_DBsplit_option = config.get(section, 'pa_DBsplit_option')

    dust = False
    if config.has_option(section, 'dust'):
        dust = config.getboolean(section, 'dust')

    pa_DBdust_option = "-w128 -t2.5 -m20"
    if config.has_option(section, 'pa_DBdust_option'):
        pa_DBdust_option = config.get(section, 'pa_DBdust_option')

    dazcon = False
    if config.has_option(section, 'dazcon'):
        dazcon = config.getboolean(section, 'dazcon')

    pa_dazcon_option = "-j 4 -x -l 500"
    if config.has_option(section, 'pa_dazcon_option'):
        pa_dazcon_option = config.get(section, 'pa_dazcon_option')

    ovlp_DBsplit_option = """ -x500 -s200"""
    if config.has_option(section, 'ovlp_DBsplit_option'):
        ovlp_DBsplit_option = config.get(section, 'ovlp_DBsplit_option')

    falcon_sense_option = """ --output_multi --min_idt 0.70 --min_cov 2 --max_n_read 1800 --n_core 6"""
    if config.has_option(section, 'falcon_sense_option'):
        falcon_sense_option = config.get(section, 'falcon_sense_option')
    if 'local_match_count' in falcon_sense_option or 'output_dformat' in falcon_sense_option:
        raise Exception('Please remove obsolete "--local_match_count_*" or "--output_dformat"' +
                        ' from "falcon_sense_option" in your cfg: %s' %repr(falcon_sense_option))

    falcon_sense_skip_contained = False
    if config.has_option(section, 'falcon_sense_skip_contained'):
        falcon_sense_skip_contained = config.get(section, 'falcon_sense_skip_contained')
        if falcon_sense_skip_contained in ["True", "true", "1"]:
            falcon_sense_skip_contained = True
        else:
            falcon_sense_skip_contained = False

    genome_size = 0
    if config.has_option(section, 'genome_size'):
        genome_size = config.getint(section, 'genome_size')

    seed_coverage = 20
    if config.has_option(section, 'seed_coverage'):
        seed_coverage = config.getfloat(section, 'seed_coverage')

    length_cutoff = -1
    if config.has_option(section, 'length_cutoff'):
        length_cutoff = config.getint(section, 'length_cutoff')
    if length_cutoff < 0:
        if genome_size < 1:
            raise Exception('Must specify either length_cutoff>0 or genome_size>0')

    length_cutoff_pr = config.getint(section, 'length_cutoff_pr')
    input_fofn_fn = config.get(section, 'input_fofn')

    # This one depends on length_cutoff_pr for its default.
    fc_ovlp_to_graph_option = ''
    if config.has_option(section, 'fc_ovlp_to_graph_option'):
        fc_ovlp_to_graph_option = config.get(section, 'fc_ovlp_to_graph_option')
    if '--min_len' not in fc_ovlp_to_graph_option:
        fc_ovlp_to_graph_option += ' --min_len %d' %length_cutoff_pr

    bestn = 12
    if config.has_option(section, 'bestn'):
        bestn = config.getint(section, 'bestn')

    if config.has_option(section, 'target'):
        target = config.get(section, 'target')
        if target not in ["overlapping", "pre-assembly", "assembly"]:
            msg = """ Target has to be "overlapping", "pre-assembly" or "assembly" in this verison. You have an unknown target %s in the configuration file.  """ % target
            raise Exception(msg)
    else:
        logger.info(""" No target specified, assuming "assembly" as target """)
        target = "assembly"

    if config.has_option(section, 'stop_all_jobs_on_failure'):
        stop_all_jobs_on_failure = config.getboolean(section, 'stop_all_jobs_on_failure')
    else:
        # Good default. Rarely needed, since we already stop early if *all* tasks fail
        # in a given refresh.
        stop_all_jobs_on_failure = False
    if config.has_option(section, 'use_tmpdir'):
        use_tmpdir = config.getboolean(section, 'use_tmpdir')
    else:
        use_tmpdir = False

    TEXT_FILE_BUSY = 'avoid_text_file_busy'
    if config.has_option(section, TEXT_FILE_BUSY):
        bash.BUG_avoid_Text_file_busy = config.getboolean(section, TEXT_FILE_BUSY)

    hgap_config = {#"input_fofn_fn" : input_fofn_fn, # deprecated
                   "input_fofn" : input_fofn_fn,
                   "target" : target,
                   "job_type" : job_type,
                   "input_type": input_type,
                   "openending": openending,
                   "pa_concurrent_jobs" : pa_concurrent_jobs,
                   "ovlp_concurrent_jobs" : ovlp_concurrent_jobs,
                   "cns_concurrent_jobs" : cns_concurrent_jobs,
                   "overlap_filtering_setting": overlap_filtering_setting,
                   "genome_size" : genome_size,
                   "seed_coverage" : seed_coverage,
                   "length_cutoff" : length_cutoff,
                   "length_cutoff_pr" : length_cutoff_pr,
                   "sge_option_da": config.get(section, 'sge_option_da'),
                   "sge_option_la": config.get(section, 'sge_option_la'),
                   "sge_option_pda": config.get(section, 'sge_option_pda'),
                   "sge_option_pla": config.get(section, 'sge_option_pla'),
                   "sge_option_fc": config.get(section, 'sge_option_fc'),
                   "sge_option_cns": config.get(section, 'sge_option_cns'),
                   "pa_HPCdaligner_option": pa_HPCdaligner_option,
                   "ovlp_HPCdaligner_option": ovlp_HPCdaligner_option,
                   "pa_DBsplit_option": pa_DBsplit_option,
                   "dust": dust,
                   "pa_DBdust_option": pa_DBdust_option,
                   "dazcon": dazcon,
                   "pa_dazcon_option": pa_dazcon_option,
                   "ovlp_DBsplit_option": ovlp_DBsplit_option,
                   "fc_ovlp_to_graph_option": fc_ovlp_to_graph_option,
                   "falcon_sense_option": falcon_sense_option,
                   "falcon_sense_skip_contained": falcon_sense_skip_contained,
                   "stop_all_jobs_on_failure": stop_all_jobs_on_failure,
                   "use_tmpdir": use_tmpdir,
                   TEXT_FILE_BUSY: bash.BUG_avoid_Text_file_busy,
                   }
    provided = dict(config.items(section))
    unused = set(provided) - set(k.lower() for k in hgap_config)
    if unused:
        import warnings
        warnings.warn("Unexpected keys in input config: %s" %repr(unused))

    hgap_config["install_prefix"] = sys.prefix

    return hgap_config
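The repeated has_option/else-default pattern in this function can be factored into a small helper; the sketch below is not part of the original module (names are illustrative):

from configparser import ConfigParser

def cfg_get(config, section, option, default, getter='get'):
    """Look up an option via config.get/getint/getfloat/getboolean, else return default."""
    if config.has_option(section, option):
        return getattr(config, getter)(section, option)
    return default

config = ConfigParser()
config.read_string("[General]\nseed_coverage = 30\n")
assert cfg_get(config, 'General', 'seed_coverage', 20, 'getfloat') == 30.0
assert cfg_get(config, 'General', 'genome_size', 0, 'getint') == 0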
Exemplo n.º 39
0
def get_dict_from_old_falcon_cfg(config):
    job_type = "SGE"
    section = 'General'
    if config.has_option(section, 'job_type'):
        job_type = config.get(section, 'job_type')

    job_queue = "default"
    if config.has_option(section, 'job_queue'):
        job_queue = config.get(section, 'job_queue')

    pwatcher_type = 'fs_based'
    if config.has_option(section, 'pwatcher_type'):
        pwatcher_type = config.get(section, 'pwatcher_type')

    default_concurrent_jobs = 8
    if config.has_option(section, 'default_concurrent_jobs'):
        default_concurrent_jobs = config.getint(section,
                                                'default_concurrent_jobs')

    pwatcher_directory = 'mypwatcher'
    if config.has_option(section, 'pwatcher_directory'):
        pwatcher_directory = config.get(section, 'pwatcher_directory')

    pa_concurrent_jobs = default_concurrent_jobs
    if config.has_option(section, 'pa_concurrent_jobs'):
        pa_concurrent_jobs = config.getint(section, 'pa_concurrent_jobs')

    cns_concurrent_jobs = default_concurrent_jobs
    if config.has_option(section, 'cns_concurrent_jobs'):
        cns_concurrent_jobs = config.getint(section, 'cns_concurrent_jobs')

    ovlp_concurrent_jobs = default_concurrent_jobs
    if config.has_option(section, 'ovlp_concurrent_jobs'):
        ovlp_concurrent_jobs = config.getint(section, 'ovlp_concurrent_jobs')

    #appending = False
    #if config.has_option(section, 'appending'):
    #    appending = config.get(section, 'appending')
    #    if appending == "True":
    #        appending = True

    #openending = False
    #if config.has_option(section, 'openending'):
    #    openending = config.get(section, 'openending')
    #    if openending == "True":
    #        openending = True

    input_type = "raw"
    if config.has_option(section, 'input_type'):
        input_type = config.get(section, 'input_type')

    overlap_filtering_setting = """--max_diff 1000 --max_cov 1000 --min_cov 2"""
    if config.has_option(section, 'overlap_filtering_setting'):
        overlap_filtering_setting = config.get(section,
                                               'overlap_filtering_setting')

    pa_HPCdaligner_option = """-v -D24 -t16 -e.70 -l1000 -s100"""
    if config.has_option(section, 'pa_HPCdaligner_option'):
        pa_HPCdaligner_option = config.get(section, 'pa_HPCdaligner_option')

    ovlp_HPCdaligner_option = """ -v -D24 -t32 -h60 -e.96 -l500 -s1000"""
    if config.has_option(section, 'ovlp_HPCdaligner_option'):
        ovlp_HPCdaligner_option = config.get(section,
                                             'ovlp_HPCdaligner_option')

    pa_HPCdaligner_option = update_HPCdaligner_option(pa_HPCdaligner_option)
    ovlp_HPCdaligner_option = update_HPCdaligner_option(
        ovlp_HPCdaligner_option)

    pa_DBsplit_option = """ -x500 -s200"""
    if config.has_option(section, 'pa_DBsplit_option'):
        pa_DBsplit_option = config.get(section, 'pa_DBsplit_option')

    skip_checks = False
    if config.has_option(section, 'skip_checks'):
        skip_checks = config.getboolean(section, 'skip_checks')

    dust = False
    if config.has_option(section, 'dust'):
        dust = config.getboolean(section, 'dust')

    pa_DBdust_option = "-w128 -t2.5 -m20"
    if config.has_option(section, 'pa_DBdust_option'):
        pa_DBdust_option = config.get(section, 'pa_DBdust_option')

    dazcon = False
    if config.has_option(section, 'dazcon'):
        dazcon = config.getboolean(section, 'dazcon')

    pa_dazcon_option = "-j 4 -x -l 500"
    if config.has_option(section, 'pa_dazcon_option'):
        pa_dazcon_option = config.get(section, 'pa_dazcon_option')

    ovlp_DBsplit_option = """ -x500 -s200"""
    if config.has_option(section, 'ovlp_DBsplit_option'):
        ovlp_DBsplit_option = config.get(section, 'ovlp_DBsplit_option')

    falcon_sense_option = """ --output_multi --min_idt 0.70 --min_cov 2 --max_n_read 1800 --n_core 6"""
    if config.has_option(section, 'falcon_sense_option'):
        falcon_sense_option = config.get(section, 'falcon_sense_option')
    if 'local_match_count' in falcon_sense_option or 'output_dformat' in falcon_sense_option:
        raise Exception(
            'Please remove obsolete "--local_match_count_*" or "--output_dformat"'
            + ' from "falcon_sense_option" in your cfg: %s' %
            repr(falcon_sense_option))

    falcon_sense_skip_contained = False
    if config.has_option(section, 'falcon_sense_skip_contained'):
        falcon_sense_skip_contained = config.get(
            section, 'falcon_sense_skip_contained')
        if falcon_sense_skip_contained in ["True", "true", "1"]:
            falcon_sense_skip_contained = True
        else:
            falcon_sense_skip_contained = False

    genome_size = 0
    if config.has_option(section, 'genome_size'):
        genome_size = config.getint(section, 'genome_size')

    seed_coverage = 20
    if config.has_option(section, 'seed_coverage'):
        seed_coverage = config.getfloat(section, 'seed_coverage')

    length_cutoff = -1
    if config.has_option(section, 'length_cutoff'):
        length_cutoff = config.getint(section, 'length_cutoff')
    if length_cutoff < 0:
        if genome_size < 1:
            raise Exception(
                'Must specify either length_cutoff>0 or genome_size>0')

    length_cutoff_pr = config.getint(section, 'length_cutoff_pr')
    input_fofn_fn = config.get(section, 'input_fofn')

    # This one depends on length_cutoff_pr for its default.
    fc_ovlp_to_graph_option = ''
    if config.has_option(section, 'fc_ovlp_to_graph_option'):
        fc_ovlp_to_graph_option = config.get(section,
                                             'fc_ovlp_to_graph_option')
    if '--min_len' not in fc_ovlp_to_graph_option:
        fc_ovlp_to_graph_option += ' --min_len %d' % length_cutoff_pr

    bestn = 12
    if config.has_option(section, 'bestn'):
        bestn = config.getint(section, 'bestn')

    if config.has_option(section, 'target'):
        target = config.get(section, 'target')
        if target not in ["overlapping", "pre-assembly", "assembly"]:
            msg = """ Target has to be "overlapping", "pre-assembly" or "assembly" in this verison. You have an unknown target %s in the configuration file.  """ % target
            raise Exception(msg)
    else:
        logger.info(""" No target specified, assuming "assembly" as target """)
        target = "assembly"

    if config.has_option(section, 'stop_all_jobs_on_failure'):
        stop_all_jobs_on_failure = config.getboolean(
            section, 'stop_all_jobs_on_failure')
    else:
        # Good default. Rarely needed, since we already stop early if *all* tasks fail
        # in a given refresh.
        stop_all_jobs_on_failure = False
    if config.has_option(section, 'use_tmpdir'):
        tmpdir = config.get(section, 'use_tmpdir')
        if '/' in tmpdir:
            tempfile.tempdir = tmpdir
            use_tmpdir = True
        else:
            use_tmpdir = config.getboolean(section, 'use_tmpdir')
    else:
        use_tmpdir = False

    TEXT_FILE_BUSY = 'avoid_text_file_busy'
    if config.has_option(section, TEXT_FILE_BUSY):
        bash.BUG_avoid_Text_file_busy = config.getboolean(
            section, TEXT_FILE_BUSY)

    hgap_config = {#"input_fofn_fn" : input_fofn_fn, # deprecated
                   "input_fofn" : input_fofn_fn,
                   "target" : target,
                   "job_type" : job_type,
                   "job_queue" : job_queue,
                   "input_type": input_type,
                   #"openending": openending,
                   "pa_concurrent_jobs" : pa_concurrent_jobs,
                   "ovlp_concurrent_jobs" : ovlp_concurrent_jobs,
                   "cns_concurrent_jobs" : cns_concurrent_jobs,
                   "overlap_filtering_setting": overlap_filtering_setting,
                   "genome_size" : genome_size,
                   "seed_coverage" : seed_coverage,
                   "length_cutoff" : length_cutoff,
                   "length_cutoff_pr" : length_cutoff_pr,
                   "sge_option_da": config.get(section, 'sge_option_da'),
                   "sge_option_la": config.get(section, 'sge_option_la'),
                   "sge_option_pda": config.get(section, 'sge_option_pda'),
                   "sge_option_pla": config.get(section, 'sge_option_pla'),
                   "sge_option_fc": config.get(section, 'sge_option_fc'),
                   "sge_option_cns": config.get(section, 'sge_option_cns'),
                   "pa_HPCdaligner_option": pa_HPCdaligner_option,
                   "ovlp_HPCdaligner_option": ovlp_HPCdaligner_option,
                   "pa_DBsplit_option": pa_DBsplit_option,
                   "skip_checks": skip_checks,
                   "dust": dust,
                   "pa_DBdust_option": pa_DBdust_option,
                   "dazcon": dazcon,
                   "pa_dazcon_option": pa_dazcon_option,
                   "ovlp_DBsplit_option": ovlp_DBsplit_option,
                   "fc_ovlp_to_graph_option": fc_ovlp_to_graph_option,
                   "falcon_sense_option": falcon_sense_option,
                   "falcon_sense_skip_contained": falcon_sense_skip_contained,
                   "stop_all_jobs_on_failure": stop_all_jobs_on_failure,
                   "use_tmpdir": use_tmpdir,
                   "pwatcher_type": pwatcher_type,
                   "pwatcher_directory": pwatcher_directory,
                   TEXT_FILE_BUSY: bash.BUG_avoid_Text_file_busy,
                   }
    provided = dict(config.items(section))
    unused = set(provided) - set(k.lower() for k in hgap_config)
    if unused:
        import warnings
        warnings.warn("Unexpected keys in input config: %s" % repr(unused))

    hgap_config["install_prefix"] = sys.prefix

    return hgap_config
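Note how use_tmpdir above is overloaded: a value containing '/' is taken as a tempdir path, anything else is parsed as a boolean. A standalone sketch of that dispatch (function name and values are illustrative, not from the source):

def parse_use_tmpdir(raw):
    """Return (enabled, tempdir_or_None) from the overloaded option value."""
    if '/' in raw:
        return True, raw
    return raw.strip().lower() in ('1', 'yes', 'true', 'on'), None

assert parse_use_tmpdir('/scratch/tmp') == (True, '/scratch/tmp')
assert parse_use_tmpdir('false') == (False, None)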
Exemplo n.º 40
0
import os
import sys
import logging
import logging.config

import cv2
import matplotlib.pyplot as plt
from utils import process_image, sliding_window

# import the configuration
from smokeDetection_config import config

from importlib import import_module
feature = import_module(config.get("feature", "feature_file"))

logging.config.fileConfig("logger.conf")
logger = logging.getLogger("smoke_logger")

# threshold
threshold = config.getfloat("model", "threshold")

# sliding-window size and stride
winH = config.getint("window", "winH")
winW = config.getint("window", "winW")
stepSize = config.getint("window", "stepSize")

if __name__ == '__main__':
    if len(sys.argv) != 2:
        logger.critical("Please provide the path to a test image!")
        sys.exit()
    else:
        image_path = sys.argv[1]
        # overall (whole-image) model
        overallModel_file = config["model"]["overallModel_file"]
        with open(overallModel_file, 'rb') as fid:
Exemplo n.º 41
0
def getPixelAR(ref):
    if config.has_option("Server", "par"):
        try:
            # index a (True, par) tuple with the 0/1 flag ref
            return (True, config.getfloat("Server", "par"))[ref]
        except (NoOptionError, ValueError):
            pass
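The (True, config.getfloat(...))[ref] expression indexes a two-element tuple with the 0/1 flag ref; note that, unlike a conditional expression, the getfloat call runs even when ref selects True. A minimal sketch of the idiom (the par value is illustrative):

par = 1.5
for ref in (0, 1):
    assert (True, par)[ref] == (par if ref else True)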
Exemplo n.º 42
0
sys.path.append("/usr/share/archivematica/dashboard")
from main.models import Job, SIP, Task, WatchedDirectory

global countOfCreateUnitAndJobChainThreaded
countOfCreateUnitAndJobChainThreaded = 0

config = ConfigParser.SafeConfigParser()
config.read("/etc/archivematica/MCPServer/serverConfig.conf")

#time to sleep to allow db to be updated with the new location of a SIP
dbWaitSleep = 2


limitTaskThreads = config.getint('Protocol', "limitTaskThreads")
limitTaskThreadsSleep = config.getfloat('Protocol', "limitTaskThreadsSleep")
limitGearmanConnectionsSemaphore = threading.Semaphore(value=config.getint('Protocol', "limitGearmanConnections"))
reservedAsTaskProcessingThreads = config.getint('Protocol', "reservedAsTaskProcessingThreads")
debug = False #Used to print additional debugging information
stopSignalReceived = False #Tracks whether a sigkill has been received or not

def isUUID(uuid):
    """Return whether the string looks like a hyphenated UUID (8-4-4-4-12)."""
    split = uuid.split("-")
    if [len(part) for part in split] != [8, 4, 4, 4, 12]:
        return False
Exemplo n.º 43
0
                    messages += groups[error['id']]
                raise Exception("Failed to flush %d groups: %s" % (len(br.errors), br.errors))
        except Exception, e:
            if retries:
                log.warn("Error flushing %d messages, trying smaller batch (%d tries left)",
                        len(messages), retries, exc_info=e)
                flush_fn(messages[::2], retries - 1)
                flush_fn(messages[1::2], retries - 1)
            else:
                log.error("Failed to flush %d messages", len(messages), exc_info=e)
                for message in messages:
                    log.error("Failed: %s", message)

    kwargs = {"flush_fn": flush_fn}
    if config.has_option("Flush", "Bytes"):
        kwargs["flush_bytes"] = config.getfloat("Flush", "Bytes")
    else:
        kwargs["flush_bytes"] = 250000 # Safely below SQS limit
    if config.has_option("Flush", "Seconds"):
        kwargs["flush_seconds"] = config.getfloat("Flush", "Seconds")
    if config.has_option("Flush", "Lines"):
        flush_lines = config.getint("Flush", "Lines")
        if flush_single and flush_lines > 10:
            raise Error("Cannot send more than 10 messages to SQS in a flush")
        kwargs["flush_lines"] = flush_lines

    _queue = TimeAndSizeFlushingQueue(**kwargs)
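    # Aside: the retry strategy above splits a failing batch into its two
    # interleaved halves (messages[::2] and messages[1::2]) and retries each,
    # isolating bad messages in O(log n) rounds. A standalone sketch of the
    # same idea (names are illustrative, not part of this module):
    def flush_with_split(messages, send, retries=3):
        """Send a batch; on failure, recursively retry the interleaved halves."""
        try:
            send(messages)
        except Exception:
            if retries and len(messages) > 1:
                flush_with_split(messages[::2], send, retries - 1)
                flush_with_split(messages[1::2], send, retries - 1)
            else:
                raise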

def deinit():
    log.info("Deinitializing Python SQS")
    global _queue
Exemplo n.º 44
0
from temperatureHumiditySensor import temperatureHumiditySensor

# Read parameters from configuration file
config = ConfigParser.ConfigParser()
config.read('./livDB.config')

thSensorOn = config.getboolean('ON_BOARD_SENSORS', 'temp_humidity_sensor')
apSensorOn = config.getboolean('ON_BOARD_SENSORS', 'air_pressure_sensor')
co2SensorOn = config.getboolean('ON_BOARD_SENSORS', 'co2_sensor')

apAddress = config.get('AIR_PRESSURE_SENSOR', 'I2C_address')

thType = config.get('TEMP_HUMIDITY_SENSOR', 'type')
thGPIO = config.get('TEMP_HUMIDITY_SENSOR', 'gpio_no')

sensorReadTime = config.getfloat('READ_CYCLE', 'read_time')
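# For reference, the livDB.config sections read above might look like this
# (sketch; option values are illustrative, not from the source):
#
#   [ON_BOARD_SENSORS]
#   temp_humidity_sensor = true
#   air_pressure_sensor = true
#   co2_sensor = false
#
#   [AIR_PRESSURE_SENSOR]
#   I2C_address = 0x77
#
#   [TEMP_HUMIDITY_SENSOR]
#   type = DHT22
#   gpio_no = 4
#
#   [READ_CYCLE]
#   read_time = 30.0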

# logging.basicConfig(filename='liv.log', level=logging.DEBUG, format='%(asctime)s %(message)s')

logging.config.fileConfig('logging.ini')
logger = logging.getLogger(__name__)

logger.info('--------------------------------------------')
logger.info('LIV STARTED')

temperatureString = 'NO TEMP SENSOR'
humidityString = 'NO HUM SENSOR'
airPressureString = 'NO AP SENSOR'
co2String = 'NO CO2 SENSOR'

# Initialize airPressure sensor if present
Exemplo n.º 45
0
    try:
        script = config.get('hooks', hook).strip()
        if not script:
            continue
    except (configparser.NoOptionError, configparser.NoSectionError):
        continue

    hook_manager.register_hook(hook, script)
    logger.info("Registered script '%s' for hook '%s'." % (script, hook))

# Initialize the tunnel manager.
tunnel_manager = broker.TunnelManager(
    hook_manager=hook_manager,
    max_tunnels=config.getint('broker', 'max_tunnels'),
    tunnel_id_base=config.getint('broker', 'tunnel_id_base'),
    connection_rate_limit=config.getfloat('broker', 'connection_rate_limit'),
    pmtu_fixed=config.getint('broker', 'pmtu'),
    log_ip_addresses=config.getboolean('log', 'log_ip_addresses'),
)
tunnel_manager.initialize()

logger.info("Maximum number of tunnels is %d." % tunnel_manager.max_tunnels)
logger.info("Tunnel identifier base is %d." % tunnel_manager.tunnel_id_base)

# Initialize one broker for each port.
brokers = []
broker_host = config.get('broker', 'address')
for port in config.get('broker', 'port').split(','):
    try:
        broker_instance = broker.Broker(
            (broker_host, int(port)),
Exemplo n.º 46
0
    def __init__(self,
                 loadedconfig,
                 problem,
                 logmanager=DefaultRunLog,
                 **kwargs):
        config, self.arnconfig = loadedconfig
        logging.config.fileConfig(config.get('default', 'logconf'),
                                  disable_existing_loggers=False)
        log.info('Setting up evolutionary workbench...')
        self.runlog = logmanager(log)
        self.trace = config.getboolean('default', 'ancestortrace')
        mainmod = __import__('__main__')

        self.problem = problem
        self.popsize = config.getint('default', 'popsize')
        self.parentpsize = config.getint('default', 'parentpopsize')
        self.maxiters = config.getint('default', 'numiters')
        self.popratio = self.popsize / self.parentpsize

        opnames = config.get('default', 'operators')
        oprates = config.get('default', 'oprates')
        self.opargs = config.get('default', 'opargs').split(',')
        self.ops_, self.oprates = _initialize_ops(opnames, oprates)
        log.debug(self.ops_)
        log.debug(self.oprates)

        self.xover_ = config.get('default', 'xover')
        if self.xover_:
            log.info('Initializing crossover operator...')
            self.xover_ = getattr(mainmod, self.xover_)
            self.xrate = config.getfloat('default', 'xrate')

        aclass = config.get('default', 'agent').split('.')
        mod = aclass[0] + "." + aclass[1]
        self.device = __import__(mod, fromlist=aclass[2])
        self.problem.eval_ = partial(self.problem.eval_, device=self.device)
        log.info("CoDe module: %s" % (mod, ))
        log.info("Agent class: %s" % (aclass[2], ))
        self.agentclass = partial(getattr(self.device, aclass[2]),
                                  config=self.arnconfig,
                                  problem=problem)
        self.mutrate = config.getfloat('default', 'mutrate')
        self.orig_mutrate = self.mutrate
        self.mutate_ = partial(bitflipmutation, mutrate=self.mutrate)
        self.improves = 0
        self.tempevals = 0
        self.adfcount = 0

        self.localsearch = config.get('default', 'localsearch')
        if self.localsearch:
            log.info('Initializing local search holder')
            self.localsearch = getattr(mainmod, self.localsearch)(5, codefun)

        self.basicadf = config.get('default', 'adf')
        if self.basicadf:
            log.info('Initializing multiplex adf skeleton')
            self.basicadf = getattr(mainmod, self.basicadf)

        self.interactive = config.get('default', 'gui')
        if self.interactive:
            log.info('Interactive mode enabled. Initializing GUI.')
            _guiclass = getattr(mainmod, self.interactive)
            self.gui = _guiclass(self.popsize, problem=self.problem)

        self.numevals = None
        self.population = None
        self.parents = None
        self.best = None
        self.itercount = None

        self._selectionop = config.get('default', 'selectionop')
        if self._selectionop:
            log.info('Initializing selection operator...')
            self._selectionop = getattr(mainmod, self._selectionop)
        else:
            self._selectionop = greedyselection
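The agent class above is resolved from a dotted path in the config ('package.module.ClassName') via __import__. A self-contained sketch of the same plugin-style lookup using importlib (the target class is a stand-in, not from the source):

from importlib import import_module

def load_class(dotted_path):
    """'package.module.ClassName' -> the class object."""
    module_path, _, class_name = dotted_path.rpartition('.')
    return getattr(import_module(module_path), class_name)

AgentClass = load_class('collections.OrderedDict')  # stand-in target
assert AgentClass().__class__.__name__ == 'OrderedDict'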
Exemplo n.º 47
0
def getPixelAR(ref):
    if config.has_option('Server', 'par'):
        try:
            return (True, config.getfloat('Server', 'par'))[ref]
        except (NoOptionError, ValueError):
            pass