Example #1
def load_config(
    config_file_path: Path = DEFAULT_CONFIG_FILE_PATH,
) -> Tuple[Path, Path, str, Path]:
    """Load configuration settings from a config file (yaml)

    :param config_file_path: pathlib.Path object to the config file.
    :returns: CONFIG_FILE_PATH, DATA_FOLDER_PATH, DB_BACKEND, DB_PATH
    """
    try:
        config_data = YAML().load(config_file_path)
    except (AttributeError, FileNotFoundError) as e:
        config_data = None
        click.echo(f"Failed to load config file!\n{e}", err=True)
    finally:
        if config_data is None:
            config_data = dict()
    # The YAML value may be a plain string; normalise it to a Path.
    data_folder_path = Path(
        config_data.get("data folder", DEFAULT_DATA_FOLDER_PATH))
    """Get configuration of the database.

    + DB backend
    + DB path
    """
    db_config: dict = config_data.get("db", dict())
    # Note: if we drop support for Python < 3.8,
    # switch to using the walrus operator here.
    db_backend: str = db_config.get("backend", "sqlite").lower()
    if db_backend not in SUPPORTED_DB_BACKEND:
        raise ValueError(
            f"Only values from {SUPPORTED_DB_BACKEND} are supported. "
            f"{db_backend} was supplied instead.")
    db_path: Path = data_folder_path.joinpath(db_config.get(
        "path", "tasks.db"))
    return (config_file_path, data_folder_path, db_backend, db_path)
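
A minimal, hypothetical sketch of the YAML shape this loader reads, with the "data folder" and "db" keys taken from the get calls above (the file name is illustrative):

from pathlib import Path
from ruamel.yaml import YAML

# Hypothetical config file mirroring the keys read above.
Path("config.yaml").write_text(
    "data folder: /tmp/tasks-data\n"
    "db:\n"
    "  backend: sqlite\n"
    "  path: tasks.db\n")
config_data = YAML().load(Path("config.yaml"))
print(config_data["data folder"])    # /tmp/tasks-data
print(config_data["db"]["backend"])  # sqlite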
Example #2
def init():
    'Program initialisation'
    # Process command line options
    opt = argparse.ArgumentParser(description=__doc__.strip())
    opt.add_argument('-c', '--config', help='alternative configuration file')
    opt.add_argument('-p', '--plugin-dir', help='alternative plugin dir')
    opt.add_argument('-s', '--sleep', type=float, help=argparse.SUPPRESS)
    opt.add_argument('-i', '--inhibit', help=argparse.SUPPRESS)
    args = opt.parse_args()

    # This instance may be a child invocation merely to run and check
    # the plugin while it is inhibiting.
    if args.inhibit:
        cmd = shlex.split(args.inhibit)
        while True:
            time.sleep(args.sleep)
            res = subprocess.run(cmd)
            if res.returncode != SUSP_CODE:
                sys.exit(res.returncode)

    prog = Path(sys.argv[0]).resolve()

    # Work out plugin and base dirs for this installation
    for bdir in (f'/usr/share/{prog.name}', f'/usr/local/share/{prog.name}'):
        plugin_dir = Path(bdir) / 'plugins'
        if plugin_dir.exists():
            base_dir = plugin_dir.parent
            break
    else:
        plugin_dir = None
        base_dir = None

    # Determine config file path
    cname = prog.name + '.conf'
    cfile = Path(args.config).expanduser() if args.config else \
            Path(f'/etc/{cname}')

    if not cfile.exists():
        print(f'Configuration file {cfile} does not exist.', file=sys.stderr)
        if base_dir and not args.config:
            print(f'Copy {base_dir}/{cname} to /etc and edit appropriately.',
                  file=sys.stderr)
        sys.exit()

    from ruamel.yaml import YAML
    conf = YAML(typ='safe').load(cfile)

    plugins = conf.get('plugins')
    if not plugins:
        sys.exit('No plugins configured')

    # Work out plugin dir
    plugin_dir = args.plugin_dir or conf.get('plugin_dir', plugin_dir)

    # Iterate to create each configured plugin
    for index, plugin in enumerate(plugins, 1):
        Plugin(index, prog, plugin, plugin_dir)
Example #3
def load_config() -> None:
    """Read and load the instance configuration."""
    # Read args
    parser = argparse.ArgumentParser(prog=config.PACKAGE_NAME,
                                     description="IRC URL title posting bot")
    parser.add_argument(
        "--config-path",
        required=True,
        help="Configuration file path, e.g. /some/dir/config.yaml")
    instance_config_path = Path(parser.parse_args().config_path)

    # Read user config
    log.debug("Reading instance configuration file %s", instance_config_path)
    instance_config = YAML().load(instance_config_path)
    instance_config = json.loads(json.dumps(
        instance_config))  # Convert recursively to plain dicts and lists.

    # Log user config
    logged_instance_config = instance_config.copy()
    if "sites" in logged_instance_config:
        del logged_instance_config["sites"]
    log.info(
        "Read user configuration file %s having excerpted configuration: %s",
        instance_config_path, logged_instance_config)
    for site, site_config in instance_config.get("sites", {}).items():
        log.info("User configuration for site %s is: %s", site, site_config)

    # Set alerts channel
    if "alerts_channel" not in instance_config:
        instance_config[
            "alerts_channel"] = config.ALERTS_CHANNEL_FORMAT_DEFAULT
    instance_config["alerts_channel"] = instance_config[
        "alerts_channel"].format(nick=instance_config["nick"])
    if instance_config["alerts_channel"] not in instance_config["channels"]:
        instance_config["channels"].append(instance_config["alerts_channel"])

    # Process user config
    instance_config["nick:casefold"] = instance_config["nick"].casefold()
    instance_config["channels:casefold"] = [
        channel.casefold() for channel in instance_config["channels"]
    ]
    instance_config["ignores:casefold"] = [
        ignore.casefold() for ignore in instance_config.get("ignores", [])
    ]

    # Process blacklist
    blacklists = instance_config["blacklist"] = instance_config.get(
        "blacklist", {})
    blacklists["title"] = set(blacklists.get("title", set()))
    blacklists["title"] = {entry.casefold() for entry in blacklists["title"]}
    blacklists["url"] = set(blacklists.get("url", set()))
    blacklists["url"] = {entry.casefold() for entry in blacklists["url"]}

    config.INSTANCE = instance_config
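
The json.loads(json.dumps(...)) round trip above is a compact way to turn ruamel's CommentedMap/CommentedSeq containers into plain dicts and lists recursively; it only works while every value is JSON-serializable. A minimal sketch:

import json
from ruamel.yaml import YAML

doc = YAML().load("nick: mybot\nchannels: ['#chan1', '#chan2']\n")
print(type(doc).__name__)    # CommentedMap
plain = json.loads(json.dumps(doc))
print(type(plain).__name__)  # dict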
Example #4
def load_config() -> Dict[str, Any]:
    # Include custom channels
    custom_channel_path = utils.get_custom_modules_path()
    if custom_channel_path not in sys.path:
        sys.path.insert(0, custom_channel_path)

    conf_path = utils.get_config_path()
    if not os.path.exists(conf_path):
        raise FileNotFoundError("Config File does not exist. (%s)" % conf_path)
    with open(conf_path) as f:
        data = YAML().load(f)

        # Verify configuration

        # - Master channel
        if not isinstance(data.get("master_channel", None), str):
            raise ValueError("Master Channel path must be a string.")
        channel = utils.locate_module(data['master_channel'], 'master')
        if not channel:
            raise ValueError("\"%s\" is not found." % data['master_channel'])
        if not issubclass(channel, EFBChannel):
            raise ValueError("\"%s\" is not a channel." %
                             data['master_channel'])
        if not channel.channel_type == ChannelType.Master:
            raise ValueError("\"%s\" is not a master channel." %
                             data['master_channel'])

        # - Slave channels
        if not isinstance(data.get("slave_channels", None), list):
            raise ValueError("Slave Channel paths must be a list.")
        for i in data['slave_channels']:
            channel = utils.locate_module(i, 'slave')
            if not channel:
                raise ValueError("\"%s\" is not found." % i)
            if not issubclass(channel, EFBChannel):
                raise ValueError("\"%s\" is not a channel." % i)
            if not channel.channel_type == ChannelType.Slave:
                raise ValueError("\"%s\" is not a slave channel." % i)

        # - Middlewares
        if data.get("middlewares", None) is not None:
            if not isinstance(data.get("middlewares"), list):
                raise ValueError("Middleware paths must be a list")
            for i in data['middlewares']:
                middleware = utils.locate_module(i, 'middleware')
                if not middleware:
                    raise ValueError("\"%s\" is not found." % i)
                if not issubclass(middleware, EFBMiddleware):
                    raise ValueError("\"%s\" is not a middleware." % i)
        else:
            data['middlewares'] = list()
    return data
Example #5
class Configuration(object):
    def __init__(self, config_file):
        with open(config_file) as f:
            self.data = YAML(typ='safe').load(f)
        self.sender = self.data[SENDER]

    def get_delay(self):
        return self.data.get(DELAY) or DEFAULT_DELAY

    def get_prefix(self):
        return self.data[PREFIX]

    def get_relay_type(self):
        return self.data[RECEIVER]

    def get_mqtt(self):
        return self.data[MQTT]

    def get_carbon(self):
        return self.data[CARBON]

    def get_amqp(self):
        return self.data[AMQP]

    def get_fping(self):
        return self.data[FPING]
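
The uppercase names (SENDER, DELAY, PREFIX, MQTT, ...) are key constants defined elsewhere in the module, as is DEFAULT_DELAY. A minimal sketch under assumed values:

from ruamel.yaml import YAML

# Assumed constant values; the real module defines them elsewhere.
SENDER, DELAY, DEFAULT_DELAY = "sender", "delay", 60

data = YAML(typ="safe").load("sender: mqtt\nprefix: net\n")
print(data[SENDER])                      # mqtt
print(data.get(DELAY) or DEFAULT_DELAY)  # 60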
Example #6
    def python_version(self):
        """
        Detect the Python version for a given environment.yml

        Will return 'x.y' if found, or Falsy '' if not.
        """
        environment_yml = self.binder_path('environment.yml')
        if not os.path.exists(environment_yml):
            return ''

        if not hasattr(self, '_python_version'):
            py_version = None
            with open(environment_yml) as f:
                env = YAML().load(f)
                for dep in env.get('dependencies', []):
                    if not isinstance(dep, str):
                        continue
                    match = PYTHON_REGEX.match(dep)
                    if not match:
                        continue
                    py_version = match.group(1)
                    break

            # extract major.minor
            if py_version:
                if len(py_version) == 1:
                    self._python_version = self.major_pythons.get(
                        py_version[0])
                else:
                    # return major.minor
                    self._python_version = '.'.join(py_version.split('.')[:2])
            else:
                self._python_version = ''

        return self._python_version
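
PYTHON_REGEX and major_pythons are defined elsewhere in the class. A plausible, hypothetical stand-in for the regex, matching conda-style python=3.6 pins:

import re

# Hypothetical stand-in for the PYTHON_REGEX referenced above.
PYTHON_REGEX = re.compile(r"python\s*=+\s*(\d+(?:\.\d+)*)")

match = PYTHON_REGEX.match("python=3.6")
print(match.group(1))  # 3.6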
Example #7
def get_deployment_output(project, deployment, resource, name):
    manifest = get_manifest(project, deployment)
    layout = YAML().load(manifest.layout)
    for r in layout.get('resources', []):
        if r['name'] != resource:
            continue
        for output in r.get('outputs', []):
            if output['name'] == name:
                return output['finalValue']
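
A sketch of the nested layout structure this walks, as implied by the loop (resource and output names are illustrative):

from ruamel.yaml import YAML

layout = YAML().load(
    "resources:\n"
    "- name: my-vm\n"
    "  outputs:\n"
    "  - name: ip\n"
    "    finalValue: 10.0.0.5\n")
for r in layout.get("resources", []):
    for output in r.get("outputs", []):
        print(output["name"], output["finalValue"])  # ip 10.0.0.5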
Example #8
    def __init__(self):
        self.use_cache = False
        path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser('~/.config'))
        path = os.path.join(path, 'igdiscover.conf')
        if os.path.exists(path):
            with open(path) as f:
                config = YAML().load(f)
            if config is None:
                return
            self.use_cache = config.get('use_cache', False)
Example #9
    @classmethod
    def _from_text(cls, filename, is_json=False):
        """deserialize from a json/yaml file"""

        if not is_json:
            if not filename.split('.')[-1].startswith('y'):
                filename += '.yml'
        else:
            if not filename.endswith('.json'):  # pragma: no branch
                filename += '.json'

        with open(filename, 'r') as f:
            data = YAML().load(f)

        excel = _CompiledImporter(filename, data)
        excel_compiler = cls(excel=excel, cycles=data.get('cycles', False))
        excel.compiler = excel_compiler
        if 'cycles' in data:
            del data['cycles']

        def add_line_numbers(cell_addr, line_number):
            formula = excel_compiler.cell_map[cell_addr].formula
            if formula is not None:
                formula.lineno = line_number
                formula.filename = filename

        # populate the cells
        range_todos = []
        for address, python_code in data['cell_map'].items():
            lineno = data['cell_map'].lc.data[address][0] + 1
            address = AddressRange(address)
            if address.is_range:
                range_todos.append((address, lineno))
            else:
                excel_compiler._make_cells(address)
                add_line_numbers(address.address, lineno)

        # populate the ranges and dependent graph
        for address, lineno in range_todos:
            excel_compiler._make_cells(address)
            add_line_numbers(address.address, lineno)

        excel_compiler._process_gen_graph()
        del data['cell_map']

        # process the rest of the data from the file
        excel_compiler._excel_file_md5_digest = data['excel_hash']
        del data['excel_hash']
        excel_compiler.extra_data = data

        # remove "excel" file references for GC
        excel_compiler.excel = None
        return excel_compiler
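
The data['cell_map'].lc.data[address][0] lookup works because ruamel's round-trip loader records source positions on the containers it builds. A minimal sketch of that .lc API:

from ruamel.yaml import YAML

data = YAML().load("cell_map:\n  A1: '=1+1'\n  A2: '=A1*2'\n")
# .lc.data maps each key to [key_line, key_col, value_line, value_col], 0-based.
print(data["cell_map"].lc.data["A2"][0] + 1)  # 3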
Example #10
    def load_config(self):
        """
        Load configuration from path specified by the framework.

        Configuration file is in YAML format.
        """
        config_path = efb_utils.get_config_path(self.channel_id)
        if not config_path.exists():
            raise FileNotFoundError(
                self._("Config File does not exist. ({path})").format(
                    path=config_path))
        with config_path.open() as f:
            data = YAML().load(f)

            # Verify configuration
            if not isinstance(data.get('token', None), str):
                raise ValueError(self._('Telegram bot token must be a string'))
            if isinstance(data.get('admins', None), int):
                data['admins'] = [data['admins']]
            if isinstance(data.get('admins', None),
                          str) and data['admins'].isdigit():
                data['admins'] = [int(data['admins'])]
            if not isinstance(data.get('admins', None),
                              list) or not data['admins']:
                raise ValueError(self._(
                    "Admins' user IDs must be a list of one number or more."))
            for i in range(len(data['admins'])):
                if isinstance(data['admins'][i],
                              str) and data['admins'][i].isdigit():
                    data['admins'][i] = int(data['admins'][i])
                if not isinstance(data['admins'][i], int):
                    raise ValueError(self._(
                        'Admin ID is expected to be an int, but {data} is found.'
                    ).format(data=data['admins'][i]))

            self.config = data.copy()
Example #11
def main():
    raw_req = YAML(typ="safe").load(Path("templates/echo.yaml"))

    req = {"_" + k: v for k, v in raw_req.items() if k != "parameters"}
    req.update(raw_req.get("parameters", {}))

    futures = []
    sys_client = SystemClient(**load_config(), blocking=False)

    start = datetime.now()

    for _ in range(250_000):
        futures.append(sys_client.send_bg_request(**req))
Example #12
    def acronyms_from(path):
        """Load definitions from a file. Return a dictionary."""
        try:
            with open(path, "r") as stream:
                content = YAML(typ="safe").load(stream)
        except FileNotFoundError as err:
            # File has disappeared or was incorrectly put in exclude_files or prefer_files.
            logging.error("%s: %s", err.strerror, err.filename)
            sys.exit(1)

        acronyms = content.get("acronyms", {})
        for _, val in acronyms.items():
            val["source"] = path  # Show where acronym came from.
        return acronyms
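
A sketch of the file shape acronyms_from expects, inferred from the loop that stamps each definition with its source (path and content are illustrative):

from ruamel.yaml import YAML

content = YAML(typ="safe").load(
    "acronyms:\n"
    "  API:\n"
    "    definition: Application Programming Interface\n")
acronyms = content.get("acronyms", {})
for _, val in acronyms.items():
    val["source"] = "glossary.yaml"  # illustrative path
print(acronyms["API"]["source"])  # glossary.yaml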
Example #13
    def __init__(self, spec_path):
        """
        Args:
          spec_path: a YAML panoptic parts dataset specification
        """
        with open(spec_path) as fd:
            spec = YAML().load(fd)

        self._spec_version = spec['version']
        self._dataset_name = spec['name']
        # describes the semantic information layer
        self._scene_class2part_classes = spec['scene_class2part_classes']
        # describes the instance information layer
        self._scene_classes_with_instances = spec[
            'scene_classes_with_instances']
        self._scene_class2color = spec.get('scene_class2color')
        if self._scene_class2color is None:
            raise ValueError(
                '"scene_class2color" in dataset_spec must be provided for now. '
                'In the future random color assignment will be implemented.')
        self._countable_pids_groupings = spec.get('countable_pids_groupings')

        self._extract_attributes()
Example #14
    def python_version(self):
        """Detect the Python version for a given `environment.yml`

        Will return 'x.y' if version is found (e.g '3.6'),
        or a Falsy empty string '' if not found.

        """
        environment_yml = self.binder_path('environment.yml')
        if not os.path.exists(environment_yml):
            return ''

        if not hasattr(self, '_python_version'):
            py_version = None
            with open(environment_yml) as f:
                env = YAML().load(f)
                # If the env file is empty, fall back to an empty dictionary.
                if env is None:
                    env = {}
                # Check that the env file provides a dict-like mapping, not a list or other structure.
                if not isinstance(env, Mapping):
                    raise TypeError(
                        "environment.yml should contain a dictionary. Got %r" %
                        type(env))
                for dep in env.get('dependencies', []):
                    if not isinstance(dep, str):
                        continue
                    match = PYTHON_REGEX.match(dep)
                    if not match:
                        continue
                    py_version = match.group(1)
                    break

            # extract major.minor
            if py_version:
                if len(py_version) == 1:
                    self._python_version = self.major_pythons.get(
                        py_version[0])
                else:
                    # return major.minor
                    self._python_version = '.'.join(py_version.split('.')[:2])
            else:
                self._python_version = ''

        return self._python_version
Example #15
def main():
    """Main application entry point."""
    if len(sys.argv) != 3:
        print("Usage: yc-calc <input-file> <output-file>")
        sys.exit(1)

    infile = sys.argv[1]
    outfile = sys.argv[2]

    mapping_tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG
    sequence_tag = yaml.resolver.BaseResolver.DEFAULT_SEQUENCE_TAG

    yaml.add_constructor(mapping_tag, dict_constructor,
                         Loader=RoundTripConstructor)
    yaml.add_constructor(sequence_tag, list_constructor,
                         Loader=RoundTripConstructor)

    yaml.add_representer(CalcDict, dict_representer,
                         Dumper=RoundTripRepresenter)
    yaml.add_representer(CalcList, list_representer,
                         Dumper=RoundTripRepresenter)

    try:
        with open(infile) as infp:
            top = YAML().load(infp)

            if not isinstance(top, CalcDict):
                type_name = type(top).__name__
                err("Top level element should be dict not {0}".format(type_name))

            defs = {}
            defs_str = top.get("DEFS", "")

            try:
                exec(defs_str, defs)
            except Exception as exc:
                err("Error executing DEFS: {0}".format(exc))

            CalcContainer.set_top(defs, top)
            write(top, outfile)
    except IOError as exc:
        err("Error opening file: {0}".format(exc))
    except yaml.YAMLError as exc:
        err("Error parsing input: {0}".format(exc))
Example #16
    @classmethod
    def load(cls, path: Path, basename: str) -> "Schema":
        data = YAML().load(path / f"{basename}.pvi.yaml")
        local = data.get("local", None)
        if local:
            local_path = path / local.replace("$(basename)", basename)
            overrides = YAML().load(local_path)
            for k, v in overrides.items():
                if k == "components":
                    # Merge overrides into existing components by name
                    by_name = {}
                    for existing in walk_dicts(data["components"]):
                        by_name[existing["name"]] = existing
                    for component in v:
                        by_name[component["name"]].update(component)
                else:
                    # Replace the value wholesale
                    data[k] = v
        schema = cls(**data)
        return schema
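
walk_dicts is a helper from the surrounding project; the merge above only needs it to yield every mapping in the nested components tree. A hypothetical stand-in:

def walk_dicts(components):
    # Hypothetical helper: yield every dict in a nested list-of-dicts tree.
    for item in components:
        if isinstance(item, dict):
            yield item
            for value in item.values():
                if isinstance(value, list):
                    yield from walk_dicts(value)

tree = [{"name": "x", "children": [{"name": "y"}]}]
print(sorted(d["name"] for d in walk_dicts(tree)))  # ['x', 'y']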
Example #17
def run_user():
    'Run as user to read user config and save environment'
    from ruamel.yaml import YAML
    # Search for configuration file. Use file given as command line
    # argument, else look for file in search dir order.
    if args.conffile:
        conffile = Path(args.conffile)
        if not conffile.exists():
            return f'Conf file "{conffile}" does not exist.'
    else:
        for confdir in CONFDIRS:
            conffile = Path(confdir, CONFNAME)
            if conffile.exists():
                break
        else:
            dirs = ' or '.join(CONFDIRS)
            return f'No file {CONFNAME} in {dirs}.'

    conf = YAML(typ='safe').load(conffile)

    # Clonedir may either be a single dir, or a list of dirs
    clonedir = conf.get('clonedir', [])
    if isinstance(clonedir, str):
        clonedirs = [Path(clonedir).expanduser()]
    else:
        clonedirs = [Path(c).expanduser() for c in clonedir]

    # Can immediately filter out dirs which don't exist
    clonedirs = [c for c in clonedirs if c.exists()]

    # Save ordinary user environment to reference for running as root
    fp = tempfile.NamedTemporaryFile()
    pickle.dump(clonedirs, fp)
    fp.flush()

    # Pass ssh auth so that root uses sudo user's ssh cached key and
    # also pass user environment
    sock = os.getenv('SSH_AUTH_SOCK')
    cmd = ['/usr/bin/sudo', f'SSH_AUTH_SOCK={sock}'] + sys.argv + \
            [f'--env={fp.name}']
    return subprocess.run(cmd).returncode
Example #18
def get_default_event_log_dir(**kwargs) -> Optional[str]:
    if "access_key" not in kwargs or "secret_key" not in kwargs:
        access_key, secret_key = get_aws_credentials(**kwargs)
    else:
        access_key, secret_key = kwargs["access_key"], kwargs["secret_key"]
    if access_key is None:
        log.warning(
            "Since no AWS credentials were provided, spark event logging "
            "will be disabled"
        )
        return None

    try:
        with open(DEFAULT_SPARK_RUN_CONFIG) as fp:
            spark_run_conf = YAML().load(fp.read())
    except Exception as e:
        log.warning(f"Failed to load {DEFAULT_SPARK_RUN_CONFIG}: {e}")
        log.warning("Returning empty default configuration")
        spark_run_conf = {}

    try:
        account_id = (
            boto3.client(
                "sts", aws_access_key_id=access_key, aws_secret_access_key=secret_key
            )
            .get_caller_identity()
            .get("Account")
        )
    except Exception as e:
        log.warning("Failed to identify account ID, error: {}".format(str(e)))
        return None

    for conf in spark_run_conf.get("environments", {}).values():
        if account_id == conf["account_id"]:
            default_event_log_dir = conf["default_event_log_dir"]
            print(f"default event logging at: {default_event_log_dir}")
            return default_event_log_dir
    return None
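
YAML().load accepts a string as readily as a file object, so fp.read() above is equivalent to passing fp directly. The environments mapping it walks looks roughly like this (values illustrative):

from ruamel.yaml import YAML

spark_run_conf = YAML().load(
    "environments:\n"
    "  prod:\n"
    "    account_id: '123456789012'\n"
    "    default_event_log_dir: s3a://some-bucket/logs\n")
for conf in spark_run_conf.get("environments", {}).values():
    print(conf["account_id"], conf["default_event_log_dir"])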
Example #19
def build_page(path_ntbk,
               path_html_output,
               path_media_output=None,
               execute=False,
               path_template=None,
               verbose=False,
               kernel_name=None):
    """Build the HTML for a single notebook page.

    Inputs
    ======

    path_ntbk : string
        The path to a notebook or text file we want to convert. If a text
        file, then Jupytext will be used to convert into a notebook. This
        will also cause the notebook to be *run* (e.g. execute=True).
    path_html_output : string
        The path to the folder where the HTML will be output.
    path_media_output : string | None
        If a string, the path to where images should be extracted. If None,
        images will be embedded in the HTML.
    execute : bool
        Whether to execute the notebook before converting
    path_template : string
        A path to the template used in conversion.
    kernel_name : string
        The name of the kernel to use if we execute notebooks.
    """

    ########################################
    # Load in the notebook
    notebook_name, suff = op.splitext(op.basename(path_ntbk))

    is_raw_markdown_file = False
    if suff in ['.md', '.markdown']:
        # If it's a markdown file, we need to check whether it's a jupytext format
        with open(path_ntbk, 'r') as ff:
            lines = ff.readlines()
            yaml_lines, content = _split_yaml(lines)
            yaml = YAML().load(''.join(yaml_lines))

        if (yaml is not None) and yaml.get('jupyter', {}).get('jupytext'):
            # If we have jupytext metadata, then use it to read the markdown file
            ntbk = jpt.reads(''.join(lines), 'md')
        else:
            # Otherwise, create an empty notebook and add all of the file contents as a markdown file
            is_raw_markdown_file = True
            ntbk = nbf.v4.new_notebook()
            ntbk['cells'].append(
                nbf.v4.new_markdown_cell(source=''.join(content)))
    else:
        # If it's not markdown, we assume it's either ipynb or a jupytext format
        ntbk = jpt.read(path_ntbk)

    if _is_jupytext_file(ntbk):
        execute = True

    ########################################
    # Notebook cleaning

    # Minor edits to cells
    _clean_markdown_cells(ntbk)

    #############################################
    # Conversion to HTML
    # create a configuration object that changes the preprocessors
    c = Config()

    c.FilesWriter.build_directory = path_html_output

    # Remove cell elements using tags
    c.TagRemovePreprocessor.remove_cell_tags = ("remove_cell", "removecell")
    c.TagRemovePreprocessor.remove_all_outputs_tags = ('remove_output', )
    c.TagRemovePreprocessor.remove_input_tags = ('remove_input', )

    # Remove any cells that are *only* whitespace
    c.RegexRemovePreprocessor.patterns = ["\\s*\\Z"]

    c.HTMLExporter.preprocessors = [
        'nbconvert.preprocessors.TagRemovePreprocessor',
        'nbconvert.preprocessors.RegexRemovePreprocessor',
        # So the images are written to disk
        'nbconvert.preprocessors.ExtractOutputPreprocessor',
        # Wrap cells in Jekyll raw tags
        _RawCellPreprocessor,
    ]

    # The text used as the anchor-link text.
    # TEMPORARILY set to empty since we'll use anchor.js for the links.
    # Once https://github.com/jupyter/nbconvert/pull/1101 is fixed,
    # set to '<i class="fas fa-link"> </i>'
    c.HTMLExporter.anchor_link_text = ' '

    # Excluding input/output prompts
    c.HTMLExporter.exclude_input_prompt = True
    c.HTMLExporter.exclude_output_prompt = True

    # Execution of the notebook if we wish
    if execute is True:
        ntbk = run_ntbk(ntbk, op.dirname(path_ntbk))

    # Define the path to images and then the relative path to where they'll originally be placed
    if isinstance(path_media_output, str):
        path_media_output_rel = op.relpath(path_media_output, path_html_output)

    # Generate HTML from our notebook using the template
    output_resources = {
        'output_files_dir': path_media_output_rel,
        'unique_key': notebook_name
    }
    exp = HTMLExporter(template_file=path_template, config=c)
    html, resources = exp.from_notebook_node(ntbk, resources=output_resources)
    html = '<main class="jupyter-page">\n' + html + '\n</main>\n'

    # Now write the markdown and resources
    writer = FilesWriter(config=c)
    writer.write(html, resources, notebook_name=notebook_name)

    # Add the frontmatter to the yaml file in case it's wanted
    if is_raw_markdown_file and len(yaml_lines) > 0:
        with open(op.join(path_html_output, notebook_name + '.html'),
                  'r') as ff:
            md_lines = ff.readlines()
        md_lines.insert(0, '---\n')
        for iline in yaml_lines[::-1]:
            md_lines.insert(0, iline + '\n')
        md_lines.insert(0, '---\n')
        with open(op.join(path_html_output, notebook_name + '.html'),
                  'w') as ff:
            ff.writelines(md_lines)

    if verbose:
        print("Finished writing notebook to {}".format(path_html_output))
Example #20
def main(arguments: argparse.Namespace) -> None:
    with open(arguments.config, "r", encoding=CONFIG_ENCODING) as file:
        config = YAML().load(file)

    config_global_download_rate = config.get("download")
    config_global_upload_rate = config.get("upload")
    if arguments.speed_test:
        logger.info("Running speed test...")

        try:
            result = test_speed()
        except MissingDependencyError as error:
            logger.error("Missing dependency: {}", error)
            result = None
        except DependencyOutputError as error:
            logger.error("Dependency output error: {}", error)
            result = None

        if result:
            logger.info(
                "Determined download speed: {}bps, upload speed: {}bps",
                *result)
            config_global_download_rate, config_global_upload_rate = result
        else:
            logger.error(
                "Failed to automatically determine download and upload speed, falling "
                "back to configuration values")

    if config_global_download_rate is None:
        logger.info(
            "No global download rate specified, download traffic prioritization won't "
            "work")
        global_download_rate = MAX_RATE
    else:
        global_download_rate = config_global_download_rate

    if config_global_upload_rate is None:
        logger.info(
            "No global upload rate specified, upload traffic prioritization won't work"
        )
        global_upload_rate = MAX_RATE
    else:
        global_upload_rate = config_global_upload_rate

    # Determine the priority we want the global default classes to have: this is n+1
    # where n is the lowest defined (=highest integer) priority for any processes in the
    # configuration file. Processes that do not explicitly specify a priority will use
    # this default priority and therefore have the same priority as the global default
    # classes
    lowest_priority = -1
    for name, process in (config.get("processes", {}) or {}).items():
        lowest_priority = max(process.get("upload-priority", -1),
                              lowest_priority)
        lowest_priority = max(process.get("download-priority", -1),
                              lowest_priority)
    lowest_priority += 1

    global_download_priority = config.get("download-priority", lowest_priority)
    global_upload_priority = config.get("upload-priority", lowest_priority)

    config_global_download_minimum_rate = config.get("download-minimum")
    global_download_minimum_rate = (GLOBAL_MINIMUM_DOWNLOAD_RATE if
                                    config_global_download_minimum_rate is None
                                    else config_global_download_minimum_rate)
    if config_global_download_rate is not None:
        logger.info(
            "Setting up global class with max download rate: {} (minimum: {}) and "
            "priority: {}",
            global_download_rate,
            global_download_minimum_rate,
            global_download_priority,
        )
    else:
        logger.info(
            "Setting up global class with unlimited download rate (minimum: {}) and "
            "priority: {}",
            global_download_minimum_rate,
            global_download_priority,
        )

    config_global_upload_minimum_rate = config.get("upload-minimum")
    global_upload_minimum_rate = (GLOBAL_MINIMUM_UPLOAD_RATE
                                  if config_global_upload_minimum_rate is None
                                  else config_global_upload_minimum_rate)
    if config_global_upload_rate is not None:
        logger.info(
            "Setting up global class with max upload rate: {} (minimum: {}) and "
            "priority: {}",
            global_upload_rate,
            global_upload_minimum_rate,
            global_upload_priority,
        )
    else:
        logger.info(
            "Setting up global class with unlimited upload rate (minimum: {}) and "
            "priority: {}",
            global_upload_minimum_rate,
            global_upload_priority,
        )

    ingress_qdisc, egress_qdisc = tc_setup(
        arguments.device,
        global_download_rate,
        global_download_minimum_rate,
        global_upload_rate,
        global_upload_minimum_rate,
        global_download_priority,
        global_upload_priority,
    )
    atexit.register(_clean_up, ingress_qdisc.device, egress_qdisc.device)

    process_filter_predicates = []
    class_ids: Dict[_TrafficType, Dict[str, int]] = {
        _TrafficType.Ingress: {},
        _TrafficType.Egress: {},
    }
    for name, process in (config.get("processes", {}) or {}).items():
        # Prepare process filter predicates to match network connections
        conditions = [
            list(match.items())[0] for match in process.get("match", [])
        ]
        if not conditions:
            logger.warning(
                "No conditions for: {!r} specified, it will never be matched",
                name)
            continue

        predicate = ProcessFilterPredicate(name, conditions,
                                           process.get("recursive", False))
        process_filter_predicates.append(predicate)

        # Set up classes for download/upload limiting
        config_download_rate = process.get("download")
        config_download_minimum_rate = process.get("download-minimum")
        config_download_priority = process.get("download-priority")
        download_rate = (global_download_rate if config_download_rate is None
                         else config_download_rate)
        download_minimum_rate = (MINIMUM_DOWNLOAD_RATE
                                 if config_download_minimum_rate is None else
                                 config_download_minimum_rate)
        download_priority = (lowest_priority
                             if config_download_priority is None else
                             config_download_priority)

        config_upload_rate = process.get("upload")
        config_upload_minimum_rate = process.get("upload-minimum")
        config_upload_priority = process.get("upload-priority")
        upload_rate = (global_upload_rate
                       if config_upload_rate is None else config_upload_rate)
        upload_minimum_rate = (MINIMUM_UPLOAD_RATE
                               if config_upload_minimum_rate is None else
                               config_upload_minimum_rate)
        upload_priority = (lowest_priority if config_upload_priority is None
                           else config_upload_priority)

        if config_download_rate is not None:
            logger.info(
                "Setting up class for: {!r} with max download rate: {} (minimum: {}) "
                "and priority: {}",
                name,
                download_rate,
                download_minimum_rate,
                download_priority,
            )
            ingress_class_id = tc_add_htb_class(
                ingress_qdisc,
                download_rate,
                download_minimum_rate,
                download_priority,
            )
            class_ids[_TrafficType.Ingress][name] = ingress_class_id
        elif config_download_priority is not None:
            logger.info(
                "Setting up class for: {!r} with unlimited download rate (minimum: {}) "
                "and priority: {}",
                name,
                download_minimum_rate,
                download_priority,
            )
            ingress_class_id = tc_add_htb_class(
                ingress_qdisc,
                download_rate,
                download_minimum_rate,
                download_priority,
            )
            class_ids[_TrafficType.Ingress][name] = ingress_class_id

        if config_upload_rate is not None:
            logger.info(
                "Setting up class for: {!r} with max upload rate: {} (minimum: {}) and "
                "priority: {}",
                name,
                upload_rate,
                upload_minimum_rate,
                upload_priority,
            )
            egress_class_id = tc_add_htb_class(
                egress_qdisc,
                upload_rate,
                upload_minimum_rate,
                upload_priority,
            )
            class_ids[_TrafficType.Egress][name] = egress_class_id
        elif config_upload_priority is not None:
            logger.info(
                "Setting up class for: {!r} with unlimited upload rate (minimum: {}) "
                "and priority: {}",
                name,
                upload_minimum_rate,
                upload_priority,
            )
            egress_class_id = tc_add_htb_class(
                egress_qdisc,
                upload_rate,
                upload_minimum_rate,
                upload_priority,
            )
            class_ids[_TrafficType.Egress][name] = egress_class_id

    port_to_filter_id: Dict[_TrafficType, Dict[int, str]] = {
        _TrafficType.Ingress: {},
        _TrafficType.Egress: {},
    }

    def add_ingress_filter(port: int, class_id: int) -> None:
        filter_id = tc_add_u32_filter(
            ingress_qdisc,
            f"match ip dport {port} 0xffff",
            class_id,
        )
        port_to_filter_id[_TrafficType.Ingress][port] = filter_id

    def add_egress_filter(port: int, class_id: int) -> None:
        filter_id = tc_add_u32_filter(
            egress_qdisc,
            f"match ip sport {port} 0xffff",
            class_id,
        )
        port_to_filter_id[_TrafficType.Egress][port] = filter_id

    def remove_filters(port: int) -> None:
        ingress_filter_id = port_to_filter_id[_TrafficType.Ingress].get(port)
        if ingress_filter_id:
            tc_remove_u32_filter(ingress_qdisc, ingress_filter_id)
            del port_to_filter_id[_TrafficType.Ingress][port]

        egress_filter_id = port_to_filter_id[_TrafficType.Egress].get(port)
        if egress_filter_id:
            tc_remove_u32_filter(egress_qdisc, egress_filter_id)
            del port_to_filter_id[_TrafficType.Egress][port]

    filtered_ports: DefaultDict[str, Set[int]] = collections.defaultdict(set)
    while True:
        filtered_connections = filter_net_connections(
            process_filter_predicates)
        for name, connections in filtered_connections.items():
            ports = set(connection.laddr.port for connection in connections)
            active_ingress_class_id = class_ids[_TrafficType.Ingress].get(name)
            active_egress_class_id = class_ids[_TrafficType.Egress].get(name)

            # Add new port filters
            new_ports = sorted(ports.difference(filtered_ports[name]))
            if new_ports:
                logger.info(
                    "Shaping traffic for {!r} on local ports {}",
                    name,
                    ", ".join(map(str, new_ports)),
                )
                for port in new_ports:
                    if active_ingress_class_id:
                        add_ingress_filter(port, active_ingress_class_id)
                    if active_egress_class_id:
                        add_egress_filter(port, active_egress_class_id)

            # Remove old port filters
            freed_ports = sorted(filtered_ports[name].difference(ports))
            if freed_ports:
                logger.info(
                    "Removing filters for {!r} on local ports {}",
                    name,
                    ", ".join(map(str, freed_ports)),
                )
                for port in freed_ports:
                    remove_filters(port)

            filtered_ports[name] = ports

        # Remove freed ports for unmatched processes (process died or predicate
        # conditions stopped matching)
        for name in set(filtered_ports).difference(filtered_connections):
            freed_ports = sorted(filtered_ports[name])
            if freed_ports:
                logger.info(
                    "Removing filters for {!r} on local ports {}",
                    name,
                    ", ".join(map(str, freed_ports)),
                )
                for port in freed_ports:
                    remove_filters(port)
            del filtered_ports[name]

        time.sleep(arguments.delay)
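
A hedged sketch of the configuration shape this function reads, assembled from the config.get and process.get keys above (process names and rates are illustrative):

from ruamel.yaml import YAML

config = YAML().load(
    "download: 10000000\n"  # illustrative global rates
    "upload: 2000000\n"
    "processes:\n"
    "  browser:\n"
    "    download-priority: 1\n"
    "    match:\n"
    "    - name: firefox\n")
conditions = [list(m.items())[0] for m in config["processes"]["browser"]["match"]]
print(conditions)  # [('name', 'firefox')]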
Example #21
def main(arguments):
    logger.stop(0)
    logger.add(sys.stderr, level=arguments.logging_level)
    with open(arguments.config, 'r', encoding=CONFIG_ENCODING) as file:
        config = YAML().load(file)

    # TODO: Parse download rate
    global_download_rate = config.get('download')
    global_upload_rate = config.get('upload')
    if global_download_rate:
        logger.info('Setting up global download limiter with max rate {}',
                    global_download_rate)
    if global_upload_rate:
        logger.info('Setting up global upload limiter with max rate {}',
                    global_upload_rate)

    ingress, egress = tc_setup(arguments.device, global_download_rate,
                               global_upload_rate)
    ingress_interface, ingress_qdisc_id, ingress_root_class_id = ingress
    egress_interface, egress_qdisc_id, egress_root_class_id = egress

    atexit.register(_clean_up, ingress_interface, egress_interface)

    process_filter_predicates = []
    class_ids = {'ingress': {}, 'egress': {}}
    for name, process in (config.get('processes', {}) or {}).items():
        # Prepare process filter predicates to match network connections
        conditions = [
            list(match.items())[0] for match in process.get('match', [])
        ]
        if not conditions:
            logger.warning(
                'No conditions for {!r} specified, it will never be matched',
                name)

        predicate = ProcessFilterPredicate(name, conditions)
        process_filter_predicates.append(predicate)

        # Set up classes for download/upload limiting
        download_rate = process.get('download')
        upload_rate = process.get('upload')
        if download_rate:
            logger.info(
                'Setting up download limiter for {!r} with max rate {}', name,
                download_rate)
            ingress_class_id = tc_add_htb_class(ingress_interface,
                                                ingress_qdisc_id,
                                                ingress_root_class_id,
                                                download_rate)
            class_ids['ingress'][name] = ingress_class_id
        if upload_rate:
            logger.info('Setting up upload limiter for {!r} with max rate {}',
                        name, upload_rate)
            egress_class_id = tc_add_htb_class(egress_interface,
                                               egress_qdisc_id,
                                               egress_root_class_id,
                                               upload_rate)
            class_ids['egress'][name] = egress_class_id

    port_to_filter_id = {'ingress': {}, 'egress': {}}

    def add_ingress_filter(port, class_id):
        filter_id = tc_add_u32_filter(ingress_interface,
                                      f'match ip dport {port} 0xffff',
                                      ingress_qdisc_id, class_id)
        port_to_filter_id['ingress'][port] = filter_id

    def add_egress_filter(port, class_id):
        filter_id = tc_add_u32_filter(egress_interface,
                                      f'match ip sport {port} 0xffff',
                                      egress_qdisc_id, class_id)
        port_to_filter_id['egress'][port] = filter_id

    def remove_filters(port):
        ingress_filter_id = port_to_filter_id['ingress'].get(port)
        if ingress_filter_id:
            tc_remove_u32_filter(ingress_interface, ingress_filter_id,
                                 ingress_qdisc_id)
            del port_to_filter_id['ingress'][port]

        egress_filter_id = port_to_filter_id['egress'].get(port)
        if egress_filter_id:
            tc_remove_u32_filter(egress_interface, egress_filter_id,
                                 egress_qdisc_id)
            del port_to_filter_id['egress'][port]

    filtered_ports = collections.defaultdict(set)
    while True:
        filtered_connections = filter_net_connections(
            process_filter_predicates)
        for name, connections in filtered_connections.items():
            ports = set(connection.laddr.port for connection in connections)
            ingress_class_id = class_ids['ingress'].get(name)
            egress_class_id = class_ids['egress'].get(name)

            # Add new port filters
            new_ports = sorted(ports.difference(filtered_ports[name]))
            if new_ports:
                logger.info('Filtering traffic for {!r} on local ports {}',
                            name, ', '.join(map(str, new_ports)))
                for port in new_ports:
                    if ingress_class_id:
                        add_ingress_filter(port, ingress_class_id)
                    if egress_class_id:
                        add_egress_filter(port, egress_class_id)

            # Remove old port filters
            freed_ports = sorted(filtered_ports[name].difference(ports))
            if freed_ports:
                logger.info('Removing filters for {!r} on local ports {}',
                            name, ', '.join(map(str, freed_ports)))
                for port in freed_ports:
                    remove_filters(port)

            filtered_ports[name] = ports

        # Remove freed ports for unmatched processes (process died or predicate conditions stopped matching)
        for name in set(filtered_ports).difference(filtered_connections):
            freed_ports = sorted(filtered_ports[name])
            if freed_ports:
                logger.info('Removing filters for {!r} on local ports {}',
                            name, ', '.join(map(str, freed_ports)))
                for port in freed_ports:
                    remove_filters(port)
            del filtered_ports[name]

        time.sleep(arguments.delay)
Example #22
def main(arguments: argparse.Namespace) -> None:
    with open(arguments.config, "r", encoding=CONFIG_ENCODING) as file:
        config = YAML().load(file)

    # TODO: Parse download rate
    global_download_rate = config.get("download")
    global_upload_rate = config.get("upload")
    if global_download_rate:
        logger.info("Setting up global download limiter with max rate {}",
                    global_download_rate)
    if global_upload_rate:
        logger.info("Setting up global upload limiter with max rate {}",
                    global_upload_rate)

    ingress, egress = tc_setup(arguments.device, global_download_rate,
                               global_upload_rate)
    ingress_interface, ingress_qdisc_id, ingress_root_class_id = ingress
    egress_interface, egress_qdisc_id, egress_root_class_id = egress

    atexit.register(_clean_up, ingress_interface, egress_interface)

    process_filter_predicates = []
    class_ids: Dict[_TrafficType, Dict[str, int]] = {
        _TrafficType.Ingress: {},
        _TrafficType.Egress: {},
    }
    for name, process in (config.get("processes", {}) or {}).items():
        # Prepare process filter predicates to match network connections
        conditions = [
            list(match.items())[0] for match in process.get("match", [])
        ]
        if not conditions:
            logger.warning(
                "No conditions for {!r} specified, it will never be matched",
                name)

        predicate = ProcessFilterPredicate(name, conditions)
        process_filter_predicates.append(predicate)

        # Set up classes for download/upload limiting
        download_rate = process.get("download")
        upload_rate = process.get("upload")
        if download_rate:
            logger.info(
                "Setting up download limiter for {!r} with max rate {}",
                name,
                download_rate,
            )
            ingress_class_id = tc_add_htb_class(
                ingress_interface,
                ingress_qdisc_id,
                ingress_root_class_id,
                download_rate,
            )
            class_ids[_TrafficType.Ingress][name] = ingress_class_id
        if upload_rate:
            logger.info("Setting up upload limiter for {!r} with max rate {}",
                        name, upload_rate)
            egress_class_id = tc_add_htb_class(egress_interface,
                                               egress_qdisc_id,
                                               egress_root_class_id,
                                               upload_rate)
            class_ids[_TrafficType.Egress][name] = egress_class_id

    port_to_filter_id: Dict[_TrafficType, Dict[int, str]] = {
        _TrafficType.Ingress: {},
        _TrafficType.Egress: {},
    }

    def add_ingress_filter(port: int, class_id: int) -> None:
        filter_id = tc_add_u32_filter(
            ingress_interface,
            f"match ip dport {port} 0xffff",
            ingress_qdisc_id,
            class_id,
        )
        port_to_filter_id[_TrafficType.Ingress][port] = filter_id

    def add_egress_filter(port: int, class_id: int) -> None:
        filter_id = tc_add_u32_filter(egress_interface,
                                      f"match ip sport {port} 0xffff",
                                      egress_qdisc_id, class_id)
        port_to_filter_id[_TrafficType.Egress][port] = filter_id

    def remove_filters(port: int) -> None:
        ingress_filter_id = port_to_filter_id[_TrafficType.Ingress].get(port)
        if ingress_filter_id:
            tc_remove_u32_filter(ingress_interface, ingress_filter_id,
                                 ingress_qdisc_id)
            del port_to_filter_id[_TrafficType.Ingress][port]

        egress_filter_id = port_to_filter_id[_TrafficType.Egress].get(port)
        if egress_filter_id:
            tc_remove_u32_filter(egress_interface, egress_filter_id,
                                 egress_qdisc_id)
            del port_to_filter_id[_TrafficType.Egress][port]

    filtered_ports: DefaultDict[str, Set[int]] = collections.defaultdict(set)
    while True:
        filtered_connections = filter_net_connections(
            process_filter_predicates)
        for name, connections in filtered_connections.items():
            ports = set(connection.laddr.port for connection in connections)
            ingress_class_id = class_ids[_TrafficType.Ingress].get(name)
            egress_class_id = class_ids[_TrafficType.Egress].get(name)

            # Add new port filters
            new_ports = sorted(ports.difference(filtered_ports[name]))
            if new_ports:
                logger.info(
                    "Filtering traffic for {!r} on local ports {}",
                    name,
                    ", ".join(map(str, new_ports)),
                )
                for port in new_ports:
                    if ingress_class_id:
                        add_ingress_filter(port, ingress_class_id)
                    if egress_class_id:
                        add_egress_filter(port, egress_class_id)

            # Remove old port filters
            freed_ports = sorted(filtered_ports[name].difference(ports))
            if freed_ports:
                logger.info(
                    "Removing filters for {!r} on local ports {}",
                    name,
                    ", ".join(map(str, freed_ports)),
                )
                for port in freed_ports:
                    remove_filters(port)

            filtered_ports[name] = ports

        # Remove freed ports for unmatched processes (process died or predicate
        # conditions stopped matching)
        for name in set(filtered_ports).difference(filtered_connections):
            freed_ports = sorted(filtered_ports[name])
            if freed_ports:
                logger.info(
                    "Removing filters for {!r} on local ports {}",
                    name,
                    ", ".join(map(str, freed_ports)),
                )
                for port in freed_ports:
                    remove_filters(port)
            del filtered_ports[name]

        time.sleep(arguments.delay)
Example #23
def load_instance_config(log_details: bool = True) -> None:  # pylint: disable=too-many-locals
    """Read and load the instance configuration."""
    # Read args
    parser = argparse.ArgumentParser(prog=config.PACKAGE_NAME,
                                     description="IRC RSS feed posting bot")
    parser.add_argument(
        "--config-path",
        required=True,
        help="Configuration file path, e.g. /some/dir/config.yaml")
    instance_config_path = Path(parser.parse_args().config_path)

    # Read instance config
    log.debug("Reading instance configuration file %s", instance_config_path)
    instance_config = YAML().load(instance_config_path)
    instance_config = json.loads(json.dumps(
        instance_config))  # Convert recursively to plain dicts and lists.
    log.info("Read user configuration file %s", instance_config_path)
    if "taxonomies" in instance_config:
        del instance_config["taxonomies"]

    if instance_config.get("tracemalloc"):
        TraceMalloc().start()

    if not instance_config["feeds"]:
        instance_config["feeds"] = {}

    url_counter = collections.Counter(
        feed_url for channel_cfg in instance_config["feeds"].values()
        for feed_cfg in channel_cfg.values()
        for feed_url in ensure_list(feed_cfg["url"]))

    if log_details:

        # Log instance config
        logged_instance_config = instance_config.copy()
        del logged_instance_config["feeds"]
        log.info(
            "The excerpted configuration for %s channels with %s feeds having %s unique URLs is:\n%s",
            len(instance_config["feeds"]),
            len([
                feed for channel in instance_config["feeds"].values()
                for feed in channel
            ]),
            len(url_counter),
            logged_instance_config,
        )

        # Log channel config
        for channel, channel_config in instance_config["feeds"].items():
            feed_names = sorted(channel_config)
            log.info("%s has %s feeds: %s", channel, len(feed_names),
                     ", ".join(feed_names))
            for feed, feed_config in channel_config.items():
                log.debug("%s has feed %s having config: %s", channel, feed,
                          feed_config)

        # Log unused channel colors
        unclear_colors = {"white", "black", "grey", "silver"}
        clear_colors = config.IRC_COLORS - unclear_colors
        for channel, channel_config in instance_config["feeds"].items():
            if not (used_colors := {
                    fg_color
                    for feed_config in channel_config.values()
                    if (fg_color := feed_config.get("style", {}).get(
                        "name", {}).get("fg")) is not None
            }):
                log.info("%s has no foreground colors in use.", channel)
                continue
            if not (unused_colors := clear_colors - used_colors):  # pylint: disable=superfluous-parens
                log.info("%s has all foreground colors in use.", channel)
                continue
            log.info("%s has %s unused foreground colors: %s", channel,
                     len(unused_colors), ", ".join(sorted(unused_colors)))
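
ensure_list is a small helper from the project; the URL counting above only needs it to wrap a scalar in a list and pass lists through. A hypothetical one-liner:

def ensure_list(value):
    # Hypothetical stand-in for the helper used above.
    return value if isinstance(value, list) else [value]

print(ensure_list("https://example.com/feed"))  # ['https://example.com/feed']
print(ensure_list(["a", "b"]))                  # ['a', 'b']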
Example #24
os.makedirs(args.output, exist_ok=True)

# cd to the directory which the specified configuration is in
if (dir_ := os.path.dirname(args.configuration)):
    os.chdir(dir_)

# unpickle context
context_f = os.path.abspath(os.path.expandvars(config["context"]))
_logger.info("Unpickling architecture context: {}".format(config["context"]))
context = Context.unpickle(context_f)

r = FileRenderer(os.path.join(os.path.dirname(__file__), "templates"))
config = deepcopy(config)
config["context"] = context_f

# generate Verilog-to-Bitstream project
_logger.info("Generating Verilog-to-Bitstream project")
generate_v2b_project(context, r, config,
                     (v2b_dir := os.path.join(output, "app")))

# if there are tests, generate test projects
for test_name, test in config.get("tests", {}).items():
    _logger.info(
        "Generating verification Makefile for test: {}".format(test_name))
    generate_verif_makefile(context.summary, r, v2b_dir, config_f,
                            config, test_name,
                            os.path.join(output, "tests", test_name))

r.render()
_logger.info("CAD project generated. Bye")
Exemple #25
0
def create_local_provider(provider_name, yaml_input, args):
    monkeyfs_path = os.path.join(os.getcwd(), "ansible/monkeyfs")
    local_monkeyfs_path = monkeyfs_path
    monkeyfs_path = args.monkeyfs_path or "~/monkeyfs"
    monkeyfs_scratch = args.monkeyfs_scratch or "~/monkey-scratch"
    monkeyfs_public_ip = args.monkeyfs_public_ip or "localhost"
    monkeyfs_public_port = args.monkeyfs_public_port or "22"
    local_instances_file = args.local_instances_file or "local.yml"
    if not args.noinput:
        if not args.monkeyfs_path:
            monkeyfs_path = input(
                f"Set remote filesystem mount path ({monkeyfs_path}): "
            ) or monkeyfs_path
        print(f"Monkeyfs mount path: {monkeyfs_path}")
        if not args.monkeyfs_scratch:
            monkeyfs_scratch = input(
                f"Set remote scratch ({monkeyfs_scratch}): "
            ) or monkeyfs_scratch
        print(f"Monkeyfs scratch path: {monkeyfs_scratch}")
        ip_found = scan_for_local_ip() or monkeyfs_public_ip
        if not args.monkeyfs_public_ip:
            monkeyfs_public_ip = input(
                f"SSHable IP from remote computers ({ip_found}): ") or ip_found
        print(f"Monkeyfs public ip: {monkeyfs_public_ip}")
        if not args.monkeyfs_public_port:
            monkeyfs_public_port = input(
                f"SSH port ({monkeyfs_public_port}): ") or monkeyfs_public_port
        print(f"Monkeyfs public port: {monkeyfs_public_port}")
        if not args.local_instances_file:
            local_instances_file = input(
                f"Set a file for local instance details ({local_instances_file}): "
            ) or local_instances_file
        print(f"Local Instance information file: {local_instances_file}")
    print("\nWriting local vars file...")
    local_vars = round_trip_load(
        str({
            "name": provider_name,
            "type": "local",
            "monkeyfs_path": monkeyfs_path,
            "monkeyfs_scratch": monkeyfs_scratch,
            "local_monkeyfs_path": local_monkeyfs_path,
            "local_instance_details": local_instances_file,
            "monkeyfs_public_ip": monkeyfs_public_ip,
            "monkeyfs_public_port": monkeyfs_public_port,
            "monkeyfs_user": getpass.getuser(),
        }))
    if args.localhost_only:
        local_vars["localhost_only"] = True
    local_vars.fa.set_block_style()
    local_vars.yaml_set_start_comment(
        "\nLocal Provider: {}".format(provider_name))
    local_vars.yaml_add_eol_comment("Defaults to ~/monkeyfs", "monkeyfs_path")
    local_vars.yaml_add_eol_comment("Defaults to ~/monkey-scratch",
                                    "monkeyfs_scratch")
    local_vars.yaml_add_eol_comment(f"Defaults to local.yml",
                                    "local_instance_details")
    write_vars_to_provider(yaml_input, local_vars)
    write_vars_file(local_vars)
    create_local_monkeyfs()

    # Load existing instance details if the file exists; otherwise start
    # from an empty mapping.
    instance_details_yaml = CommentedMap()
    existing_hosts = OrderedDict()
    try:
        with open(local_instances_file) as f:
            instance_details_yaml = YAML().load(f) or CommentedMap()
            existing_hosts = instance_details_yaml.get("hosts", OrderedDict())
    except OSError:
        print(f"No Local Instances File found: {local_instances_file}...\n"
              f"Creating {local_instances_file}")

    print(f"{len(existing_hosts)} existing hosts found")

    local_provider = MonkeyProviderLocal(local_vars)
    for host in existing_hosts:
        print(f"Checking integrity for host: {host}")
        instance = local_provider.create_local_instance(name=host,
                                                        hostname=host)
        if instance is None:
            print(f"FAILED: to create instance {host}")

    if not args.noinput:
        check_inventory_file_for_more_hosts(
            local_provider=local_provider,
            local_vars=local_vars,
            existing_hosts=existing_hosts,
            instance_details_yaml=instance_details_yaml)
    else:
        if args.local_hosts:
            print("Adding specified local hosts...")
            for host_items in args.local_hosts:
                if not host_items:
                    print("Please provide the local_host name to add")
                    continue
                new_host = host_items[0]
                add_and_test_host(local_provider=local_provider,
                                  local_vars=local_vars,
                                  instance_details_yaml=instance_details_yaml,
                                  existing_hosts=existing_hosts,
                                  new_host=new_host)

        write_instance_details(local_vars["local_instance_details"],
                               instance_details_yaml, existing_hosts)
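
The vars file above is built by feeding str(dict) back through ruamel.yaml's
round-trip loader so that comments can be attached before dumping. A minimal
sketch of that technique (round_trip_load is deprecated in newer ruamel.yaml
in favour of YAML(typ='rt').load, but still works):

import sys
from ruamel.yaml import YAML, round_trip_load

local_vars = round_trip_load(str({"name": "demo", "type": "local"}))
local_vars.fa.set_block_style()                            # dump as block mapping
local_vars.yaml_set_start_comment("Local Provider: demo")  # header comment
local_vars.yaml_add_eol_comment("provider type", "type")   # end-of-line comment
YAML().dump(local_vars, sys.stdout)
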
def init():
    'Program initialisation'
    # Process command line options
    opt = argparse.ArgumentParser(description=__doc__.strip())
    opt.add_argument('-c', '--config', help='alternative configuration file')
    opt.add_argument('-p', '--plugin-dir', help='alternative plugin dir')
    opt.add_argument('-s', '--sleep', type=float, help=argparse.SUPPRESS)
    opt.add_argument('-i', '--inhibit', help=argparse.SUPPRESS)
    args = opt.parse_args()

    # This instance may be a child invocation merely to run and check
    # the plugin while it is inhibiting.
    if args.inhibit:
        cmd = shlex.split(args.inhibit)
        while True:
            time.sleep(args.sleep)
            res = subprocess.run(cmd)
            if res.returncode != SUSP_CODE:
                sys.exit(res.returncode)

    prog = Path(sys.argv[0]).resolve()

    # Work out what sleep inhibitor program to use
    inhibitor_prog = None
    for iprog in SYSTEMD_SLEEP_PROGS:
        try:
            res = subprocess.run(f'{iprog} --version'.split(),
                                 check=True,
                                 universal_newlines=True,
                                 stderr=subprocess.DEVNULL,
                                 stdout=subprocess.PIPE)
        except Exception:
            continue

        vers = res.stdout.split('\n')[0].strip()
        print(f'{prog.name} using {iprog}, {vers}')
        inhibitor_prog = iprog
        break

    if not inhibitor_prog:
        opts = ' or '.join(SYSTEMD_SLEEP_PROGS)
        sys.exit(f'No systemd-inhibitor app installed from one of {opts}.')

    # Work out plugin and base dirs for this installation
    for bdir in (f'/usr/share/{prog.name}', f'/usr/local/share/{prog.name}'):
        plugin_dir = Path(bdir) / 'plugins'
        if plugin_dir.exists():
            base_dir = plugin_dir.parent
            break
    else:
        plugin_dir = None
        base_dir = None

    # Determine config file path
    cname = prog.name + '.conf'
    cfile = Path(args.config).expanduser() if args.config else \
            Path(f'/etc/{cname}')

    if not cfile.exists():
        print(f'{prog.name} configuration file {cfile} does not exist.',
              file=sys.stderr)
        if base_dir and not args.config:
            print(f'Copy {base_dir}/{cname} to /etc and edit appropriately.',
                  file=sys.stderr)
        sys.exit()

    from ruamel.yaml import YAML
    conf = YAML(typ='safe').load(cfile)

    plugins = conf.get('plugins')
    if not plugins:
        sys.exit('No plugins configured')

    # Work out plugin dir
    plugin_dir = args.plugin_dir or conf.get('plugin_dir', plugin_dir)

    # Iterate to create each configured plugin
    for index, plugin in enumerate(plugins, 1):
        Plugin(index, prog, plugin, plugin_dir, inhibitor_prog)
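
The --version probe above is a portable way to pick the first available
external program. A self-contained sketch of the same idea, assuming a
hypothetical candidate list:

import subprocess

def find_first_available(candidates):
    'Return the first candidate that responds to --version, else None'
    for prog in candidates:
        try:
            subprocess.run([prog, '--version'], check=True,
                           stdout=subprocess.DEVNULL,
                           stderr=subprocess.DEVNULL)
        except (OSError, subprocess.CalledProcessError):
            continue
        return prog
    return None

print(find_first_available(['systemd-inhibit', 'elogind-inhibit']))
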
def upload_codebase():
    job_uid = request.args.get('job_uid', None)
    provider = request.args.get('provider', None)
    run_name = request.args.get('run_name', None)
    checksum = request.args.get('codebase_checksum', None)
    already_uploaded = request.args.get("already_uploaded", False)
    codebase_extension = request.args.get('codebase_extension', None)

    logger.info(f"Already uploaded: {already_uploaded is not None}")
    if already_uploaded is not None:
        try:
            if type(already_uploaded) is str:
                if already_uploaded.lower() == "false":
                    already_uploaded = False
                elif already_uploaded.lower() == "true":
                    already_uploaded = True
                else:
                    already_uploaded = False
        except Exception:
            already_uploaded = False

    codebase_yaml = {
        "run_name": run_name,
        "checksum": checksum,
        "provider": provider,
        "codebase_extension": codebase_extension
    }

    logger.info(f"Received upload codebase request: {job_uid}")
    if job_uid is None or provider is None:
        return jsonify({
            "msg": "Did not provide job_uid or provider",
            "success": False
        })
    monkeyfs_path = get_local_filesystem_for_provider(provider)

    job_folder_path = os.path.join(MONKEYFS_LOCAL_PATH, "jobs", job_uid)
    provider_job_folder_path = os.path.join(monkeyfs_path, "jobs", job_uid)
    os.makedirs(os.path.join(job_folder_path, "logs"), exist_ok=True)
    os.makedirs(os.path.join(provider_job_folder_path, "logs"), exist_ok=True)
    if not os.path.exists(os.path.join(job_folder_path, "logs", "run.log")):
        with open(os.path.join(job_folder_path, "logs", "run.log"), "a") as f:
            f.write("Initializing machines...")
    if not os.path.exists(
            os.path.join(provider_job_folder_path, "logs", "run.log")):
        with open(os.path.join(provider_job_folder_path, "logs", "run.log"),
                  "a") as f:
            f.write("Initializing machines...")
    logger.info("Writing local code.yaml")
    try:
        with open(os.path.join(job_folder_path, "code.yaml"), "r") as f:
            code_yaml = YAML().load(f)
    except Exception:
        code_yaml = round_trip_load("---\ncodebases: []")

    code_array = code_yaml.get("codebases", [])
    code_array.append(codebase_yaml)
    code_yaml["codebases"] = code_array
    with open(os.path.join(job_folder_path, "code.yaml"), "w") as f:
        y = YAML()
        code_yaml.fa.set_block_style()
        y.explicit_start = True
        y.default_flow_style = False
        y.dump(code_yaml, f)

    with open(os.path.join(provider_job_folder_path, "code.yaml"), "w") as f:
        y = YAML()
        code_yaml.fa.set_block_style()
        y.explicit_start = True
        y.default_flow_style = False
        y.dump(code_yaml, f)

    def get_codebase_folder_path(base_path):
        path = os.path.abspath(
            os.path.join(base_path, "code", run_name, checksum, ""))
        os.makedirs(path, exist_ok=True)
        return path

    local_codebase_folder_path = get_codebase_folder_path(MONKEYFS_LOCAL_PATH)
    provider_codebase_folder_path = get_codebase_folder_path(monkeyfs_path)

    logger.info(f"Already uploaded: {already_uploaded}")

    if not already_uploaded:
        destination_path = os.path.join(local_codebase_folder_path,
                                        "code" + codebase_extension)
        logger.info(f"Local Path: {destination_path}")
        FileStorage(request.stream).save(destination_path)

        logger.info(f"Saved file to: {destination_path}")
        with open(os.path.join(local_codebase_folder_path, "code.yaml"),
                  "w") as f:
            y = YAML()
            code_yaml.fa.set_block_style()
            y.explicit_start = True
            y.default_flow_style = False
            y.dump(code_yaml, f)
        logger.info("Syncing codebase folder")
        sync_directories(local_codebase_folder_path,
                         provider_codebase_folder_path)
        logger.info("Syncing codebase folder: DONE")
    else:
        logger.info("Skipping uploading codebase")

    return jsonify({"msg": "Successfully uploaded codebase", "success": True})