Example #1
import configparser

def parse(filename='default.conf'):
    # Pass the interpolation to the constructor instead of patching the
    # private _interpolation attribute after construction
    config = configparser.ConfigParser(
        interpolation=configparser.ExtendedInterpolation())
    config.read(filename)
    return config
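For reference, ExtendedInterpolation resolves ${key} and ${section:key} references when values are read. A minimal sketch of how parse() behaves, with a hypothetical default.conf (file contents and key names are illustrative):

# default.conf (hypothetical contents):
#   [paths]
#   home = /srv/app
#   logs = ${home}/logs
#   [db]
#   url = ${paths:home}/app.db
config = parse('default.conf')
print(config['paths']['logs'])  # -> /srv/app/logs
print(config['db']['url'])      # -> /srv/app/app.db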
Example #2
    parser.add_argument(
        "--trapping_clips",
        help="A csv containing urls of all trapping clips. "
        "Columns 'trapping_clips' and 'trapping_ans'")
    parser.add_argument('--num_responses_per_clip',
                        help='Number of response per clip required',
                        default=5,
                        type=int)
    # check input arguments
    args = parser.parse_args()

    assert os.path.exists(args.cfg), f"No config file found at {args.cfg}"

    cfg = CP.ConfigParser(interpolation=CP.ExtendedInterpolation())
    cfg.read(args.cfg)

    if args.clips:
        assert os.path.exists(
            args.clips), f"No csv file containing clips in {args.clips}"
    elif cfg.has_option('RatingClips', 'RatingClipsConfigurations'):
        assert len(cfg['RatingClips']['RatingClipsConfigurations']
                   ) > 0, "No cloud store for clips specified in config"
    else:
        assert False, "Neither clips file nor cloud store provided for rating clips"

    if args.gold_clips:
        assert os.path.exists(
            args.gold_clips
        ), f"No csv file containing gold clips in {args.gold_clips}"
Example #3
import uuid

import boto3
import configparser

from utils import message_generator

settings = configparser.ConfigParser(
    interpolation=configparser.ExtendedInterpolation())
settings.read('config.ini')

# Create SQS client
sqs = boto3.client('sqs')

queue_url = settings.get('AccessKeys', 'queue_url')


num_msgs = 100

for i in range(num_msgs):
    file_name = str(uuid.uuid1())
    data = message_generator.gen_req(file_name, i)
    # Send message to SQS queue
    response = sqs.send_message(
        QueueUrl=queue_url,
        DelaySeconds=10,
        MessageAttributes=data['MessageAttributes'],
        MessageBody=data['MessageBody']
    )

    print(i, response['MessageId'])


Example #4
#!/usr/bin/python

import praw
import urllib.request
import os
import time
import datetime
import re
import configparser

# Parse config file. The interpolation must be passed to the constructor;
# assigning _DEFAULT_INTERPOLATION after construction has no effect.
config = configparser.ConfigParser(
    interpolation=configparser.ExtendedInterpolation())
config.read('config.ini')

# Path to save images to.
PATH = str(config.get('APPLICATION', 'path'))

# Get bot data and subreddit instance.
reddit = praw.Reddit('BOT')
subreddit_str = str(input("Enter subreddit: "))
subreddit = reddit.subreddit(subreddit_str)

print("")
print("Bot running. Parsing posts...")
print("")

# Iterate through posts in hot section of subreddit. Only iterates limit number of times.
for submission in subreddit.hot(limit=60):
    valid_types = (".jpg", ".png")
    url_string = str(submission.url)
Example #5
def configure_suite(config_file):
    """
    Populates a global datastructure with the parameters from the suite's
    configuration file.

    This function performs the following steps:
    1. read in the file ``config_file`` via the ConfigParser module using
       extended interpolation
    2. check that the sections ``variables`` and ``paths`` are disjoint
    3. extract the environment variables given in the ``ENV`` section
    4. save all entries from the ``variables`` section in the global
       datastructure
    5. interpret all entries in the ``paths`` section as relative paths from the
       configuration file, expand them to absolute paths and save them in the
       global datastructure

    For further information concerning the rationale behind this, please consult
    the documentation in ``doc.md``.
    """

    if not os.path.exists(config_file):
        raise ValueError("Test suite config file {:s} does not exist".format(
            os.path.abspath(config_file)))

    config = configparser.ConfigParser(
        interpolation=configparser.ExtendedInterpolation(),
        delimiters=(':',),
        comment_prefixes=('#',))
    config.read(config_file)

    _parameters["suite_root"] = os.path.split(os.path.abspath(config_file))[0]
    _parameters["timeout"] = config.getfloat("General",
                                             "timeout",
                                             fallback=1.0)

    if 'variables' in config and 'paths' in config:
        intersecting_keys = set(config["paths"].keys())\
                            .intersection(set(config["variables"].keys()))
        if len(intersecting_keys) > 0:
            raise ValueError(
                "The sections 'paths' and 'variables' must not share keys, "
                "but they have the following common key{:s}: {:s}".format(
                    's' if len(intersecting_keys) > 1 else '',
                    ', '.join(k for k in intersecting_keys)))

    # extract variables from the environment
    for key in config['ENV']:
        if key in config['ENV fallback']:
            fallback = config['ENV fallback'][key]
        else:
            fallback = ""
        config['ENV'][key] = os.getenv(config['ENV'][key]) or fallback

    if 'variables' in config:
        for key in config['variables']:
            _parameters[key] = config['variables'][key]

    if 'paths' in config:
        for key in config['paths']:
            rel_path = config['paths'][key]
            abs_path = os.path.abspath(
                os.path.join(_parameters["suite_root"], rel_path))
            if not os.path.exists(abs_path):
                raise ValueError(
                    "Path replacement for {short}: {abspath} does not exist"
                    " (was expanded from {rel})".format(short=key,
                                                        abspath=abs_path,
                                                        rel=rel_path))
            _parameters[key] = abs_path
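To make the expected file layout concrete, here is a hypothetical suite config consistent with the parser settings above (':' as the only key/value delimiter, '#' for comment lines); the section names are the ones the code reads, while all keys and values below are illustrative:

# suite.conf (hypothetical):
#   [General]
#   timeout: 2.5
#
#   [ENV]
#   # the value names the environment variable to read
#   binary_dir: TEST_BINARY_DIR
#
#   [ENV fallback]
#   binary_dir: /usr/local/bin
#
#   [variables]
#   encoding: utf-8
#
#   [paths]
#   # resolved relative to the config file; must exist
#   data_dir: ../data
configure_suite("suite.conf")
print(_parameters["timeout"], _parameters["data_dir"])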
Example #6
 def __init__(self, configfile, **kwargs):
     kwargs.setdefault("interpolation",
                       configparser.ExtendedInterpolation())
     super().__init__(**kwargs)
     assert configfile is not None
     self.configfile = configfile
Example #7
def read_config(path):
    config = My_Config_Parser(inline_comment_prefixes=['#'], interpolation=configparser.ExtendedInterpolation())
    assert os.path.isfile(path)
    config.read(path)

    return config
Example #8
 def __init__(self, config_file, env):
     self.config = configparser.ConfigParser(
         interpolation=configparser.ExtendedInterpolation())
     self.config.read(config_file)
     self.env = env
Example #9
    def parse(self, sections=None):

        # Parameters are put directly into this object, based on their name in the config file
        #   or the command line.
        # NOTE:  Parameters with default values which evaluate to TRUE will ALWAYS override the file!
        #
        # self.configValues is the result of reading the config file
        # self.args is the result from the parser

        # Parse the command line (in case the configfile has been overridden)
        self.args = self.parser.parse_args()

        # Set values from the configfile
        configParser = configparser.ConfigParser(
            interpolation=configparser.ExtendedInterpolation())
        configParser.read(self.args.configfile)

        # Get the default section
        configValues = dict(configParser[configParser.default_section])

        # Add any sections we've been asked to include.  If there is a collision between earlier and
        #   later sections, the later section wins; note that the default section is the earliest one.
        #   We use the consolidated dictionary for interpolation of values from the command line.
        if sections:
            if isinstance(sections, str):
                sections = (sections, )
            for s in sections:
                for name in configParser[s]:
                    configValues[name] = configParser[s][name]

        # Now, put values from the configfile into the parms object:
        for name in configValues:
            self.__dict__[name] = configValues[name]

        # Override with non-false values from the command line (or the default).
        # If no value is in the configfile, use the command line or default whether it's true or false.
        # Do interpolation on strings against the consolidated dictionary of values from the configfile.

        # We use section and key 'xyzzy' for interpolation; by this point, everything we might
        # interpolate against has been promoted to the configValues dictionary.
        configParser.remove_section('xyzzy')
        resolver_name = 'xyzzy'
        resolver_section = 'xyzzy'
        configParser.add_section(resolver_section)
        args = vars(self.args)
        for name in list(args.keys()):
            if (args[name] or name not in self.__dict__):
                if isinstance(args[name], str):
                    try:
                        configParser.set(resolver_section, resolver_name,
                                         args[name])
                        args[name] = configParser.get(resolver_section,
                                                      resolver_name,
                                                      vars=configValues)
                    except ValueError:
                        pass  # Ignore interpolation problems here
                self.__dict__[name] = args[name]

        # And handle dbhost specially to make sure it exists:
        if 'dbhost' not in self.__dict__ or not self.dbhost:
            self.dbhost = 'localhost'
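The 'xyzzy' scratch-section trick above can be shown in isolation: write a raw string into a throwaway option, then read it back with vars= so extended interpolation runs against an arbitrary dictionary. A minimal sketch with hypothetical names:

import configparser

cp = configparser.ConfigParser(
    interpolation=configparser.ExtendedInterpolation())
cp.add_section('xyzzy')
config_values = {'outdir': '/tmp/run1'}  # e.g. values promoted from a config file
cp.set('xyzzy', 'xyzzy', '${outdir}/report.txt')  # raw command-line string
print(cp.get('xyzzy', 'xyzzy', vars=config_values))  # -> /tmp/run1/report.txt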
Example #10
def parse_config(cfg_file):
    """Parse configuration file for essential variables during flux analysis.

    Parameters
    ----------
    cfg_file : str
        Path to configuration file.

    Returns
    -------
    collections.OrderedDict
        Ordered dictionary with variables for each section.  A list of
        input files found, given the input directory and file pattern, is
        also appended to the 'Inputs' section.

    """
    # Ordered dictionary based on dflts to give to the parser
    dflt_dict = OrderedDict((
        ("EC Inputs",
         OrderedDict((
             ("input_directory", _DFLTS["ec_input_directory"]),
             ("file_pattern", _DFLTS["ec_file_pattern"]),
             ("colnames", _DFLTS["ec_colnames"]),
             ("sample_frequency", _DFLTS["ec_sample_frequency"]),
         ))
        ),
        ("EC Outputs",
         OrderedDict((
             ("summary_file", _DFLTS["ec_summary_file"]),
         ))
        ),
        ("EC Despiking",
         OrderedDict((
             ("despike_win_width", _DFLTS["ec_despike_win_width"]),
             ("despike_step", _DFLTS["ec_despike_step"]),
             ("despike_nreps", _DFLTS["ec_despike_nreps"]),
         ))
        ),
        ("EC Motion Correction",
         OrderedDict((
             ("sonic_xoffset",
              _DFLTS["ec_sonic_xoffset"]),
             ("imu_xoffset",
              _DFLTS["ec_imu_xoffset"]),
             ("tilt_window_width",
              _DFLTS["ec_tilt_window_width"]),
             ("imu2anemometer_pos",
              _DFLTS["imu2anemometer_pos"]),
             ("imu_xyz_idx",
              _DFLTS["imu_xyz_idx"]),
             ("imu2rhs_linaccel_mult",
              _DFLTS["imu2rhs_linaccel_mult"]),
             ("imu2rhs_angaccel_mult",
              _DFLTS["imu2rhs_angaccel_mult"]),
             ("complementary_filter_period",
              _DFLTS["ec_complementary_filter_period"]),
             ("accel_highpass_cutoff",
              _DFLTS["ec_accel_highpass_cutoff"]),
         ))
        ),
        ("UW Inputs",
         OrderedDict((
             ("input_directory", _DFLTS["uw_input_directory"]),
             ("file_pattern", _DFLTS["uw_file_pattern"]),
             ("colnames", _DFLTS["uw_colnames"]),
             ("uw_intake_depth", _DFLTS["uw_intake_depth"]),
             ("uw_regress_temperature_external",
              _DFLTS["uw_regress_temperature_external"]),
             ("uw_temperature_external_coefs",
              _DFLTS["uw_temperature_external_coefs"]),
             ("anemometer2d_height", _DFLTS["anemometer2d_height"]),
         ))
        ),
        ("UW Outputs",
         OrderedDict((
             ("pco2_directory", _DFLTS["uw_pco2_directory"]),
         ))
        ),))

    # Set up the parser to interpolate across sections
    config = cfg.ConfigParser(interpolation=cfg.ExtendedInterpolation())
    config.read_dict(dflt_dict)     # set up our specific defaults
    # Replace defaults with what we're given
    with open(cfg_file) as cfg_fh:
        config.read_file(cfg_fh)
    # Copy where we'll replace strings with other types.  Note this is a
    # shallow copy: the nested section dicts are shared with dflt_dict, but
    # each option is compared against its default before being overwritten.
    py_dict = dflt_dict.copy()
    # Loop through all items and clean them up to generate our variables as
    # lists of strings
    for sec in config.sections():
        for opt in config.options(sec):
            opt_value = config.get(sec, opt)
            # Just replace and skip if we have the same as defaults
            if dflt_dict[sec][opt] == opt_value:
                py_dict[sec][opt] = opt_value.split(",")
            else:
                # Otherwise move on and remove double quotes, newlines, and
                # spaces
                clean_opt = re.sub('["\n ]+', "", opt_value)
                config.set(sec, opt, clean_opt)
                # Replace values with lists, splitting on comma character,
                # on our local dictionary
                py_dict[sec][opt] = clean_opt.split(",")
            # Extract single elements, and convert to floats and arrays where
            # appropriate
            if len(py_dict[sec][opt]) == 1:
                py_dict[sec][opt] = py_dict[sec][opt][0]
            if sec in _SCALAR_OPTS and opt in _SCALAR_OPTS[sec]:
                py_dict[sec][opt] = float(py_dict[sec][opt])
            if sec in _VECTOR_OPTS and opt in _VECTOR_OPTS[sec]:
                py_dict[sec][opt] = [float(x) for x in py_dict[sec][opt]]

    # Check input directories exist
    if not osp.exists(py_dict["EC Inputs"]["input_directory"]):
        raise Exception("Input directory doesn't exist")
    if not osp.exists(py_dict["UW Inputs"]["input_directory"]):
        raise Exception("Input directory doesn't exist")

    # Check if we have a valid sampling frequency
    if py_dict["EC Inputs"]["sample_frequency"] <= 0:
        raise Exception("Sampling frequency must be greater than zero")

    # Check if we have a valid tilt window width
    if py_dict["EC Motion Correction"]["tilt_window_width"] <= 0:
        raise Exception("Tilt window width must be greater than zero")

    # Check if we have valid despiking parameters
    if py_dict["EC Despiking"]["despike_win_width"] <= 0:
        raise Exception("Despiking window width must be greater than zero")
    if py_dict["EC Despiking"]["despike_step"] <= 0:
        raise Exception("Despiking step size must be greater than zero")
    if py_dict["EC Despiking"]["despike_nreps"] < 0:
        raise Exception(("The number of despiking iterations"
                         "cannot be negative"))

    # Sort input file lists
    ec_input_files = glob.glob(osp.join(config["EC Inputs"]["input_directory"],
                                        config["EC Inputs"]["file_pattern"]))
    ec_input_files.sort()
    py_dict["EC Inputs"]["input_files"] = ec_input_files
    uw_input_files = glob.glob(osp.join(config["UW Inputs"]["input_directory"],
                                        config["UW Inputs"]["file_pattern"]))
    uw_input_files.sort()
    py_dict["UW Inputs"]["input_files"] = uw_input_files

    # Check if we have all legal names for header of input files (don't
    # care about first 2 time columns).
    illegal_names = ((set(py_dict["EC Inputs"]["colnames"][2:]) |
                      set(py_dict["UW Inputs"]["colnames"][2:])) -
                     set(_INCOLS_ALL.keys()))
    legal_names = set(_INCOLS_ALL.keys()) - set(illegal_names)
    if len(illegal_names) > 0:
        logger.info("There are unknown column names in config file: %s",
                    list(illegal_names))

    ec_legal_names = set(py_dict["EC Inputs"]["colnames"][2:]) & legal_names
    if len(ec_legal_names) < 1:
        logger.error("There are no legal EC column names in config file")
    else:
        ec_dtypes = {key: _INCOLS_ALL[key] for key in ec_legal_names}
        py_dict["EC Inputs"]["dtypes"] = ec_dtypes

    uw_legal_names = set(py_dict["UW Inputs"]["colnames"][2:]) & legal_names
    if len(uw_legal_names) < 1:
        logger.error("There are no legal UW column names in config file")
    else:
        uw_dtypes = {key: _INCOLS_ALL[key] for key in uw_legal_names}
        py_dict["UW Inputs"]["dtypes"] = uw_dtypes

    return py_dict
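A hypothetical call site; the file name is made up, while the section and option names follow the defaults set up above:

py_cfg = parse_config("flux.cfg")
print(py_cfg["EC Inputs"]["input_directory"])
print(len(py_cfg["EC Inputs"]["input_files"]), "EC input files found")
print(py_cfg["EC Despiking"]["despike_win_width"])  # a float, via _SCALAR_OPTS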
Example #11
 def load(self, interpolation=None):
     # Create the interpolation per call instead of sharing one default
     # instance across all calls
     if interpolation is None:
         interpolation = configparser.ExtendedInterpolation()
     config = configparser.ConfigParser(interpolation=interpolation)
     config.read(self.location, encoding="utf-8")
     return config
Example #12
 def __init__(self, *args, **kwargs):
     kwargs['interpolation'] = configparser.ExtendedInterpolation()
     kwargs['default_section'] = self.defaultsect
     super().__init__(*args, **kwargs)
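A sketch of how such a subclass might be declared and used; the class name, section names, and the defaultsect class attribute are illustrative assumptions:

import configparser

class MyParser(configparser.ConfigParser):
    defaultsect = 'common'  # hypothetical default-section name

    def __init__(self, *args, **kwargs):
        kwargs['interpolation'] = configparser.ExtendedInterpolation()
        kwargs['default_section'] = self.defaultsect
        super().__init__(*args, **kwargs)

parser = MyParser()
parser.read_string("[common]\nroot = /srv\n[app]\nlog = ${root}/log\n")
print(parser['app']['log'])  # -> /srv/log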
Example #13
def main():
    # Format ISO date
    dateiso = datetime.datetime.now().strftime('%Y%m%d')

    # Config
    config = configparser.ConfigParser(
        interpolation=configparser.ExtendedInterpolation())
    config.read('config.ini')

    # Folder and file names
    xmlfilename = config['paths']['xmlfilename']

    xmlfolder = config['paths']['xmlfoldername']

    if config['paths']['prefix'] == 'yes':
        if config['paths']['xmlfilename'] == '':
            xmlfilename = dateiso
        else:
            xmlfilename = '%s_%s' % (dateiso, xmlfilename)
    elif config['paths']['prefix'] in ('no', ''):
        if config['paths']['xmlfilename'] == '':
            print('xmlfilename = empty.\nEnter a name in config.ini.')
            exit()

    xmlout = ('%s/%s.xml' % (xmlfolder, xmlfilename))

    print('\nfolder/xmlfile: %s\n' % xmlout)

    # SBID List
    with open(config['paths']['sbidlistname']) as f:
        sbidlist = [line.strip() for line in f]

    print('SBID list: ')

    for i in sbidlist:
        print(i)

    # Build XML object
    xmldocs = json2xml(config=config, sbidlist=sbidlist)

    # Check and create xml folder output
    if xmldocs and not os.path.exists(xmlfolder):
        os.mkdir(xmlfolder)

    # Write the XML file
    with open(xmlout, encoding='utf-8-sig', mode='w') as f:
        # Declaration with quotation marks
        f.write(u'<?xml version="1.0" encoding="utf-8"?>\n')
        f.write(xmldocs.decode('utf-8'))
Example #14
    def _preprocess(self, host_chroot_spec, host_chroot_sources):
        rpkg_conf_path = os.path.join(host_chroot_sources, 'rpkg.conf')
        force_enable = self.opts.get('force_enable', False)

        if not force_enable:
            if not os.path.isfile(rpkg_conf_path):
                self.log.info("rpkg.conf not found. "
                              "Skipping rpkg preprocessing step.")
                return

            parser = configparser.ConfigParser(
                interpolation=configparser.ExtendedInterpolation())

            try:
                parser.read(rpkg_conf_path)
            except configparser.ParsingError as e:
                raise PkgError("Parsing of %s failed with error: %s" % (rpkg_conf_path, repr(e)))

            try:
                preprocess_spec = parser.getboolean('rpkg', 'preprocess_spec')
            except (configparser.Error, ValueError):
                self.log.warning(
                    "Could not get boolean value of rpkg.preprocess_spec option from rpkg.conf.")
                preprocess_spec = False

            if not preprocess_spec:
                self.log.info("preprocess_spec not enabled in rpkg.conf. "
                              "Skipping rpkg preprocessing step.")
                return

        # try to locate spec file in SOURCES, which will be our input
        host_chroot_sources_spec = os.path.join(host_chroot_sources,
                                                os.path.basename(host_chroot_spec))

        if not os.path.isfile(host_chroot_sources_spec):
            raise PkgError("%s is not a file. Spec file needs to be among sources." %
                           host_chroot_sources_spec)

        self.log.info("Installing rpkg preprocessing requires...")
        self._install_requires(self.opts.get('requires', []))

        # get rid of host rootdir prefixes
        rootdir_prefix = self.buildroot.make_chroot_path()
        chroot_spec = host_chroot_spec.replace(rootdir_prefix, '')
        chroot_sources = host_chroot_sources.replace(rootdir_prefix, '')
        chroot_sources_spec = host_chroot_sources_spec.replace(rootdir_prefix, '')

        command_str = self.opts.get('cmd') % {'source_spec': chroot_sources_spec,
                                              'target_spec': chroot_spec}
        command = shlex.split(command_str)

        # determine whether to use private network or not based on rpmbuild_networking
        private_network = (not self.config.get('rpmbuild_networking', False))

        self.buildroot.doChroot(
            command,
            shell=False,
            cwd=chroot_sources,
            logger=self.buildroot.build_log,
            uid=self.buildroot.chrootuid,
            gid=self.buildroot.chrootgid,
            user=self.buildroot.chrootuser,
            unshare_net=private_network,
            nspawn_args=self.config.get('nspawn_args', []),
            printOutput=self.config.get('print_main_output', True)
        )
Example #15
def read_layer_config(layer_config_filepath: Path) -> dict:
    # Init
    if not layer_config_filepath.exists():
        raise Exception(
            f"Layer config file not found: {layer_config_filepath}")

    # Read config file...
    layer_config = configparser.ConfigParser(
        interpolation=configparser.ExtendedInterpolation(),
        converters={
            'list': lambda x: [i.strip() for i in x.split(',')],
            'listint': lambda x: [int(i.strip()) for i in x.split(',')],
            'dict': lambda x: None if x is None else json.loads(x),
            'path': lambda x: Path(x)
        })
    layer_config.read(layer_config_filepath)

    # Prepare data
    image_layers = {}
    for image_layer in layer_config.sections():
        # First check if the image_layer code doesn't contain 'illegal' characters
        if any(illegal_char in image_layer
               for illegal_char in illegal_chars_in_codes):
            raise Exception(
                f"Section name [{image_layer}] in layer config should not contain any of the following characters: {illegal_chars_in_codes}, in {layer_config_filepath}"
            )

        # Init layer with all parameters in the section as dict
        image_layers[image_layer] = dict(layer_config[image_layer])

        # If the layer source(s) are specified in a json parameter, parse it
        if 'layersources' in image_layers[image_layer]:
            image_layers[image_layer]['layersources'] = layer_config[
                image_layer].getdict('layersources')

            # Give default values to some optional properties of a server
            for layersource in image_layers[image_layer]['layersources']:
                if 'random_sleep' not in layersource:
                    layersource['random_sleep'] = 0

        else:
            # If not, the layer source should be specified in separate parameters
            layersource = {}
            layersource['wms_server_url'] = layer_config[image_layer].get(
                'wms_server_url')
            layersource['wms_version'] = layer_config[image_layer].get(
                'wms_version', fallback='1.3.0')
            # The layer names and layer styles are lists
            layersource['layernames'] = layer_config[image_layer].getlist(
                'wms_layernames')
            layersource['layerstyles'] = layer_config[image_layer].getlist(
                'wms_layerstyles')
            # Some more properties
            layersource['bands'] = layer_config[image_layer].getlist(
                'bands', fallback=None)
            layersource['random_sleep'] = layer_config[image_layer].getint(
                'random_sleep', fallback=0)
            image_layers[image_layer]['layersources'] = [layersource]

        # Read nb_concurrent calls param
        image_layers[image_layer]['nb_concurrent_calls'] = (
            layer_config[image_layer].getint('nb_concurrent_calls',
                                             fallback=6))

        # Check if a region of interest is specified as file or bbox
        roi_filepath = layer_config[image_layer].getpath('roi_filepath',
                                                         fallback=None)
        image_layers[image_layer]['roi_filepath'] = roi_filepath
        bbox_tuple = None
        if layer_config.has_option(image_layer, 'bbox'):
            bbox_list = layer_config[image_layer].getlist('bbox')
            bbox_tuple = (float(bbox_list[0]), float(bbox_list[1]),
                          float(bbox_list[2]), float(bbox_list[3]))
        image_layers[image_layer]['bbox'] = bbox_tuple

        # Check if the grid xmin and xmax are specified
        grid_xmin = 0
        if layer_config.has_option(image_layer, 'grid_xmin'):
            grid_xmin = layer_config[image_layer].getfloat('grid_xmin')
        image_layers[image_layer]['grid_xmin'] = grid_xmin
        grid_ymin = 0
        if layer_config.has_option(image_layer, 'grid_ymin'):
            grid_ymin = layer_config[image_layer].getfloat('grid_ymin')
        image_layers[image_layer]['grid_ymin'] = grid_ymin

        # Check if a image_pixels_ignore_border is specified
        image_pixels_ignore_border = layer_config[image_layer].getint(
            'image_pixels_ignore_border', fallback=0)
        image_layers[image_layer][
            'image_pixels_ignore_border'] = image_pixels_ignore_border
    return image_layers
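A hypothetical layer config exercising the custom converters registered above (getlist, getpath) together with ${...} interpolation; the option names follow the code, while the section name and values are made up:

# layers.ini (hypothetical):
#   [DEFAULT]
#   base_url = https://example.com/wms
#
#   [agriculture]
#   wms_server_url = ${base_url}?service=WMS
#   wms_layernames = crops,parcels
#   wms_layerstyles = default,default
#   bbox = 150000,170000,160000,180000
#   nb_concurrent_calls = 4
image_layers = read_layer_config(Path("layers.ini"))
print(image_layers["agriculture"]["bbox"])  # -> (150000.0, 170000.0, 160000.0, 180000.0)
print(image_layers["agriculture"]["layersources"][0]["wms_server_url"])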
Example #16
    def setParameters(self, configfile):
        # SafeConfigParser is a deprecated alias of ConfigParser in Python 3
        param = configparser.ConfigParser(
            interpolation=configparser.ExtendedInterpolation())
        # Read the file and expand $PWD to the config file's directory
        with open(configfile) as cfg_fh:
            configfile_str = cfg_fh.read()
        configfile_str = configfile_str.replace(
            "$PWD", os.path.abspath(os.path.dirname(configfile)))
        param.read_string(configfile_str)
        self.dir_model_output = param.get('PATH', 'dir_model_output')
        self.dir_vocab_output = param.get('PATH', 'dir_vocab_output')
        self.dir_model = param.get('PATH', 'dir_model')
        self.path_log = param.get('PATH', 'path_log')

        self.filename_train = param.get("PATH", 'filename_train')
        self.filename_test = param.get('PATH', 'filename_test')
        self.filename_dev = param.get('PATH', 'filename_dev')

        self.filename_words = param.get('PATH', 'filename_words')
        self.filename_tags = param.get('PATH', 'filename_tags')
        self.filename_chars = param.get('PATH', 'filename_chars')

        #embedding types: Glove, w2v, fasttext
        try:
            self.embedding_type = param.get("EMBEDDINGS", "embedding_type")
        except configparser.NoOptionError:
            self.embedding_type = "Glove"

        self.dim_word = param.getint('EMBEDDINGS', 'dim_word')
        self.dim_char = param.getint('EMBEDDINGS', 'dim_char')
        self.filename_embeddings = param.get('EMBEDDINGS',
                                             'filename_embeddings')
        self.filename_embeddings_trimmed = param.get(
            'EMBEDDINGS', 'filename_embeddings_trimmed')
        self.use_pretrained = param.getboolean('EMBEDDINGS', 'use_pretrained')

        self.use_large_embeddings = param.getboolean('EMBEDDINGS',
                                                     'use_large_embeddings')

        self.oov_size = 0
        self.oov_current_size = 0
        if param.has_option('EMBEDDINGS', 'oov_size'):
            self.oov_size = param.getint('EMBEDDINGS', 'oov_size')

        self.max_iter = param['PARAM']['max_iter']

        if self.embedding_type != "fasttext" and self.oov_size > 0:
            sys.stderr.write(
                "Embeddings for unknown words cannot be generated for " +
                self.embedding_type +
                ", thus the parameter oov_size will be set to zero\n")
            self.oov_size = 0

        if not self.use_pretrained and self.use_large_embeddings:
            sys.stderr.write(
                "If you want to train embeddings from scratch the use_large_embeddings option is not valid\n"
            )

        if self.max_iter == "None":
            self.max_iter = None
        self.train_embeddings = param.getboolean('PARAM', 'train_embeddings')

        try:
            self.lowercase = param.getboolean("PARAM", "lowercase")
        except configparser.NoOptionError:
            self.lowercase = True

        self.nepochs = param.getint('PARAM', 'nepochs')
        self.dropout = param.getfloat('PARAM', 'dropout')
        self.batch_size = param.getint('PARAM', 'batch_size')
        self.lr_method = param.get('PARAM', 'lr_method')
        self.lr = param.getfloat('PARAM', 'lr')
        self.lr_decay = param.getfloat('PARAM', 'lr_decay')
        self.clip = param.getint('PARAM', 'clip')
        self.nepoch_no_imprv = param.getint('PARAM', 'nepoch_no_imprv')
        self.hidden_size_char = param.getint('PARAM', 'hidden_size_char')
        self.hidden_size_lstm = param.getint('PARAM', 'hidden_size_lstm')
        self.use_crf = param.getboolean('PARAM', 'use_crf')
        self.use_chars = param.getboolean('PARAM', 'use_chars')

        self.oov_words = []
Example #17
    def parse_conf_file(self):
        """
        Parse the file ``dirs.USER_CONF_DIR_FILE`` with the following sections

        :[Common]:
            Try to find user directories; if they exist, add them to
            `dirs.USER_DIRS` and `sys.path`

        For the other sections, OrderedDicts are returned with the class names
        as keys and dictionaries with options as values.

        :[Input Widgets]:
            Store (user) input widgets in `fb.input_dict`

        :[Plot Widgets]:
            Store (user) plot widgets in `fb.plot_dict`

        :[Filter Designs]:
            Store (user) filter designs in `fb.filter_dict`
            
        :[Fixpoint Widgets]:
            Store (user) fixpoint widgets in `fb.fixpoint_dict`


        Parameters
        ----------
        None

        Returns
        -------
        None

        """

        CONF_VERSION = 2
        try:
            # Test whether user config file is readable, this is necessary as
            # configParser quietly fails when the file doesn't exist
            if not os.access(dirs.USER_CONF_DIR_FILE, os.R_OK):
                raise IOError('Config file "{0}" cannot be read.'.format(
                    dirs.USER_CONF_DIR_FILE))

            # -----------------------------------------------------------------
            # setup an instance of config parser: allow keys without value and
            # allow interpolation across sections, e.g. ${Dirs:dir1}
            # -----------------------------------------------------------------
            self.conf = configparser.ConfigParser(
                allow_no_value=True,
                interpolation=configparser.ExtendedInterpolation())
            # preserve case of parsed options by overriding optionxform():
            # set it to the function str()
            self.conf.optionxform = str
            self.conf.read(dirs.USER_CONF_DIR_FILE)
            logger.info(
                'Parsing config file\n\t"{0}"\n\t\twith sections:\n\t{1}'.
                format(dirs.USER_CONF_DIR_FILE, str(self.conf.sections())))

            # -----------------------------------------------------------------
            # Parsing [Common]
            #------------------------------------------------------------------
            self.commons = self.parse_conf_section("Common")

            if 'version' not in self.commons or int(
                    self.commons['version'][0]) != CONF_VERSION:
                logger.critical("Config file\n\t'{0:s}'\n\thas the wrong version '{2}' "
                                "(required: '{1}').\n"
                                "\tYou can either edit the file or delete it.\n\tWhen deleted, "
                                "a new configuration file will be created at restart."\
                                .format(dirs.USER_CONF_DIR_FILE, CONF_VERSION, int(self.commons['version'][0])))
                sys.exit()

            if 'user_dirs' in self.commons:
                for d in self.commons['user_dirs']:
                    d = os.path.abspath(os.path.normpath(d))
                    if os.path.isdir(d):
                        dirs.USER_DIRS.append(d)
                        if d not in sys.path:
                            sys.path.append(d)
                    else:
                        logger.warning(
                            "User directory doesn't exist:\n\t{0}\n".format(d))

            if dirs.USER_DIRS:
                logger.info("User directory(s):\n\t{0}\n".format(
                    dirs.USER_DIRS))
            else:
                logger.warning(
                    'No valid user directory found in "{0}\n.'.format(
                        dirs.USER_CONF_DIR_FILE))

            # -----------------------------------------------------------------
            # Parsing [Input Widgets]
            #------------------------------------------------------------------
            fb.input_classes = self.build_class_dict("Input Widgets",
                                                     "input_widgets")
            # -----------------------------------------------------------------
            # Parsing [Plot Widgets]
            #------------------------------------------------------------------
            fb.plot_classes = self.build_class_dict("Plot Widgets",
                                                    "plot_widgets")
            # -----------------------------------------------------------------
            # Parsing [Filter Designs]
            #------------------------------------------------------------------
            fb.filter_classes = self.build_class_dict("Filter Designs",
                                                      "filter_designs")
            # currently, option "opt" can only be an association with a fixpoint
            # widget, so replace key "opt" by key "fix":
            # Convert to list in any case
            for c in fb.filter_classes:
                if 'opt' in fb.filter_classes[c]:
                    fb.filter_classes[c]['fix'] = fb.filter_classes[c].pop(
                        'opt')
                if 'fix' in fb.filter_classes[c] and type(
                        fb.filter_classes[c]['fix']) == str:
                    fb.filter_classes[c]['fix'] = fb.filter_classes[c][
                        'fix'].split(',')
            # -----------------------------------------------------------------
            # Parsing [Fixpoint Widgets]
            #------------------------------------------------------------------
            fb.fixpoint_classes = self.build_class_dict(
                "Fixpoint Widgets", "fixpoint_widgets")
            logger.info("\nFixpoint_widgets: \n{0}\n".format(
                fb.fixpoint_classes))

            # First check whether fixpoint options of the filter designs are
            # valid fixpoint classes by comparing them to the verified items of
            # fb.fixpoint_classes:
            for c in fb.filter_classes:
                if 'fix' in fb.filter_classes[c]:
                    for w in fb.filter_classes[c]['fix']:
                        if w not in fb.fixpoint_classes:
                            logger.warning('Removing invalid fixpoint module\n\t"{0}" for filter class "{1}".'\
                                           .format(w,c))
                            fb.filter_classes[c]['fix'].remove(w)
            # merge fb.filter_classes info "filter class":[fx_class1, fx_class2]
            # and fb.fixpoint_classes info "fixpoint class":[fil_class1, fil_class2]
            # into the fb.filter_classes dict
            for c in fb.filter_classes:
                # collect all fixpoint widgets (keys in fb.fixpoint_classes)
                # which have the class name c as a value
                fix_wdg = {
                    k
                    for k, val in fb.fixpoint_classes.items()
                    if c in val['opt']
                }
                if len(fix_wdg) > 0:
                    if 'fix' in fb.filter_classes[c]:
                        #... and merge it with the fixpoint options of class c
                        fix_wdg = fix_wdg.union(fb.filter_classes[c]['fix'])

                    fb.filter_classes[c].update({'fix': list(fix_wdg)})

        # ----- Exceptions ----------------------
        except configparser.DuplicateSectionError as e:
            logger.critical('{0} in config file "{1}".'.format(
                e, dirs.USER_CONF_DIR_FILE))
            sys.exit()
        except configparser.ParsingError as e:
            logger.critical('Parsing Error in config file "{0}:\n{1}".'.format(
                dirs.USER_CONF_DIR_FILE, e))
            sys.exit()
        except configparser.Error as e:
            logger.critical('{0} in config file "{1}".'.format(
                e, dirs.USER_CONF_DIR_FILE))
            sys.exit()
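For orientation, a minimal user config consistent with parse_conf_file() might look as follows (the widget and class names are purely illustrative; allow_no_value=True permits the bare keys, and the str optionxform keeps their case):

#   [Common]
#   version = 2
#   user_dirs = ~/pyfda_widgets
#
#   [Input Widgets]
#   Input_Specs
#
#   [Plot Widgets]
#   Plot_Hf
#
#   [Filter Designs]
#   Equiripple
#
#   [Fixpoint Widgets]
#   FIR_DF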
Example #18
def get_config():
    CONFIG_FILE = os.environ.get("CONFIG_FILE", 'config_files/config.dev.ini')
    config = configparser.ConfigParser(interpolation=configparser.ExtendedInterpolation())
    config.read(CONFIG_FILE)
    return config
Example #19
def main_body():
    '''Main body of this file'''

    parser = argparse.ArgumentParser()

    # Configurations: read noisyspeech_synthesizer.cfg and gather inputs
    parser.add_argument(
        '--cfg',
        default='pdns_synthesizer_icassp2022.cfg',
        help='Read noisyspeech_synthesizer.cfg for all the details')
    parser.add_argument('--cfg_str', type=str, default='noisy_speech')
    args = parser.parse_args()

    params = dict()
    params['args'] = args
    cfgpath = args.cfg
    # alternatively: os.path.join(os.path.dirname(__file__), args.cfg)
    assert os.path.exists(cfgpath), f'No configuration file found at [{cfgpath}]'

    cfg = CP.ConfigParser(interpolation=CP.ExtendedInterpolation())
    cfg.read(cfgpath)
    # note: _sections holds the raw, uninterpolated option values
    params['cfg'] = cfg._sections[args.cfg_str]
    cfg = params['cfg']

    clean_dir = os.path.join('datasets/clean')

    if cfg['speech_dir'] != 'None':
        clean_dir = cfg['speech_dir']

    if not os.path.exists(clean_dir):
        assert False, ('Clean speech data is required')

    clean_dir2 = None
    if cfg['speech_dir2'] != 'None':
        clean_dir2 = cfg['speech_dir2']

    spkid_csv = None
    if cfg['spkid_csv'] != 'None':
        spkid_csv = cfg['spkid_csv']

    if clean_dir2 is None or not os.path.exists(clean_dir2):
        assert False, ('Clean speech2 data is required')

    if cfg['rir_dir'] != 'None':
        rir_dir = cfg['rir_dir']

    if cfg['noise_dir'] != 'None':
        noise_dir = cfg['noise_dir']
    if not os.path.exists(noise_dir):
        assert False, ('Noise data is required')

    print(clean_dir)
    print(clean_dir2)
    print(noise_dir)
    print(spkid_csv)
    print(rir_dir)


    params['fs'] = int(cfg['sampling_rate'])
    params['audioformat'] = cfg['audioformat']
    params['audio_length'] = float(cfg['audio_length'])
    params['silence_length'] = float(cfg['silence_length'])
    params['total_hours'] = float(cfg['total_hours'])

    # clean singing speech
    params['clean_singing'] = str(cfg['clean_singing'])
    params['singing_choice'] = int(cfg['singing_choice'])

    # rir
    params['rir_choice'] = int(cfg['rir_choice'])
    params['lower_t60'] = float(cfg['lower_t60'])
    params['upper_t60'] = float(cfg['upper_t60'])
    params['rir_table_csv'] = str(cfg['rir_table_csv'])
    params['clean_speech_t60_csv'] = str(cfg['clean_speech_t60_csv'])

    if cfg['fileindex_start'] != 'None' and cfg['fileindex_end'] != 'None':
        params['num_files'] = int(cfg['fileindex_end']) - int(
            cfg['fileindex_start'])
        params['fileindex_start'] = int(cfg['fileindex_start'])
        params['fileindex_end'] = int(cfg['fileindex_end'])
    else:
        params['num_files'] = int(
            (params['total_hours'] * 60 * 60) / params['audio_length'])
        params['fileindex_start'] = 0
        params['fileindex_end'] = int(params['num_files'])

    print('Number of files to be synthesized:', params['num_files'])

    params['is_test_set'] = utils.str2bool(cfg['is_test_set'])
    params['clean_activity_threshold'] = float(cfg['clean_activity_threshold'])
    params['noise_activity_threshold'] = float(cfg['noise_activity_threshold'])
    params['snr_lower'] = int(cfg['snr_lower'])
    params['snr_upper'] = int(cfg['snr_upper'])

    params['randomize_snr'] = utils.str2bool(cfg['randomize_snr'])
    params['target_level_lower'] = int(cfg['target_level_lower'])
    params['target_level_upper'] = int(cfg['target_level_upper'])

    if 'snr' in cfg.keys():
        params['snr'] = int(cfg['snr'])
    else:
        params['snr'] = int((params['snr_lower'] + params['snr_upper']) / 2)

    params['noisyspeech_dir'] = utils.get_dir(cfg, 'noisy_destination',
                                              'noisy')
    params['clean_proc_dir'] = utils.get_dir(cfg, 'clean_destination', 'clean')
    params['noise_proc_dir'] = utils.get_dir(cfg, 'noise_destination', 'noise')

    if 'speech_csv' in cfg.keys() and cfg['speech_csv'] != 'None':
        cleanfilenames = pd.read_csv(cfg['speech_csv'])
        cleanfilenames = cleanfilenames['filename']
    else:
        cleanfilenames = []
        for path in Path(cfg['speech_dir']).rglob('*.wav'):
            cleanfilenames.append(str(path.resolve()))

    selected_primary = []
    selected_secondary = []

    with open(spkid_csv, 'r') as file:
        my_reader = csv.reader(file, delimiter=',')
        for row in my_reader:
            if row[1] == 'primary':
                selected_primary.append(row)
            elif row[1] == 'secondary':
                selected_secondary.append(row)

    cleanfilenames = []
    for row in selected_primary:
        cleanfilenames.append(row[0])

    cleanfilenames2 = []
    for row in selected_secondary:
        cleanfilenames2.append(row[0])

    params['cleanfilenames'] = cleanfilenames

    shuffle(cleanfilenames2)
    params['cleanfilenames2'] = cleanfilenames2

    rirfilenames = []
    for path in Path(cfg['rir_dir']).rglob('*.wav'):
        rirfilenames.append(str(path.resolve()))

    shuffle(rirfilenames)
    params['myrir'] = rirfilenames

    if 'noise_csv' in cfg.keys() and cfg['noise_csv'] != 'None':
        noisefilenames = pd.read_csv(cfg['noise_csv'])
        noisefilenames = noisefilenames['filename']
    else:
        noisefilenames = glob.glob(
            os.path.join(noise_dir, params['audioformat']))

    if len(noisefilenames) != 0:
        shuffle(noisefilenames)
        params['noisefilenames'] = noisefilenames
    else:
        noisedirs = glob.glob(os.path.join(noise_dir, '*'))
        if cfg['noise_types_excluded'] != 'None':
            dirstoexclude = cfg['noise_types_excluded'].split(',')
            for dirs in dirstoexclude:
                noisedirs.remove(dirs)
        shuffle(noisedirs)
        params['noisedirs'] = noisedirs

    # Call main_gen() to generate audio
    clean_source_files, clean_clipped_files, clean_low_activity_files, \
    noise_source_files, noise_clipped_files, noise_low_activity_files = main_gen(params)

    # Create log directory if needed, and write log files of clipped and low activity files
    log_dir = utils.get_dir(cfg, 'log_dir', 'Logs')

    utils.write_log_file(log_dir, 'source_files.csv',
                         clean_source_files + noise_source_files)
    utils.write_log_file(log_dir, 'clipped_files.csv',
                         clean_clipped_files + noise_clipped_files)
    utils.write_log_file(log_dir, 'low_activity_files.csv', \
                         clean_low_activity_files + noise_low_activity_files)

    # Compute and print stats about percentage of clipped and low activity files
    total_clean = len(clean_source_files) + len(clean_clipped_files) + len(
        clean_low_activity_files)
    total_noise = len(noise_source_files) + len(noise_clipped_files) + len(
        noise_low_activity_files)

    pct_clean_clipped = round(len(clean_clipped_files) / total_clean * 100, 1)
    pct_noise_clipped = round(len(noise_clipped_files) / total_noise * 100, 1)
    pct_clean_low_activity = round(
        len(clean_low_activity_files) / total_clean * 100, 1)
    pct_noise_low_activity = round(
        len(noise_low_activity_files) / total_noise * 100, 1)

    print("Of the " + str(total_clean) + " clean speech files analyzed, " + \
          str(pct_clean_clipped) + "% had clipping, and " + str(pct_clean_low_activity) + \
          "% had low activity " + "(below " + str(params['clean_activity_threshold']*100) + \
          "% active percentage)")
    print("Of the " + str(total_noise) + " noise files analyzed, " + str(pct_noise_clipped) + \
          "% had clipping, and " + str(pct_noise_low_activity) + "% had low activity " + \
          "(below " + str(params['noise_activity_threshold']*100) + "% active percentage)")
Example #20
    def load_plugin(self, name, config_names, plugin_params=None):
        if config_names is not None:
            self.logger.info("Loading plugin [%s] with configuration(s) [%s].",
                             name, config_names)
        else:
            self.logger.info("Loading plugin [%s] with default configuration.",
                             name)

        root_path = self._plugin_root_path(name)
        if not config_names:
            # maybe we only have a config folder but nothing else (e.g. if there is only an install hook)
            if io.exists(root_path):
                return PluginDescriptor(name=name,
                                        config=config_names,
                                        root_path=root_path)
            else:
                core_plugin = self._core_plugin(name)
                if core_plugin:
                    return core_plugin
                # If we just have a plugin name then we assume that this is a community plugin and the user has specified a download URL
                else:
                    self.logger.info(
                        "The plugin [%s] is neither a configured nor an official plugin. Assuming that this is a community "
                        "plugin not requiring any configuration and you have set a proper download URL.",
                        name)
                    return PluginDescriptor(name)
        else:
            variables = {}
            config_paths = []
            # used for deduplication
            known_config_bases = set()
            # used to determine whether this is a core plugin
            core_plugin = self._core_plugin(name)

            for config_name in config_names:
                config_file = self._plugin_file(name, config_name)
                # Do we have an explicit configuration for this plugin?
                if not io.exists(config_file):
                    if core_plugin:
                        raise exceptions.SystemSetupError(
                            "Plugin [%s] does not provide configuration [%s]. List the available plugins "
                            "and configurations with %s list elasticsearch-plugins "
                            "--distribution-version=VERSION." %
                            (name, config_name, PROGRAM_NAME))
                    else:
                        raise exceptions.SystemSetupError(
                            "Unknown plugin [%s]. List the available plugins with %s list "
                            "elasticsearch-plugins --distribution-version=VERSION."
                            % (name, PROGRAM_NAME))

                config = configparser.ConfigParser(
                    interpolation=configparser.ExtendedInterpolation())
                # Do not modify the case of option keys but read them as is
                config.optionxform = lambda option: option
                config.read(config_file)
                if "config" in config and "base" in config["config"]:
                    config_bases = config["config"]["base"].split(",")
                    for base in config_bases:
                        if base and base not in known_config_bases:
                            config_paths.append(
                                os.path.join(root_path, base, "templates"))
                        known_config_bases.add(base)

                if "variables" in config.sections():
                    for k, v in config["variables"].items():
                        variables[k] = v
                # add all plugin params here to override any defaults
                if plugin_params:
                    variables.update(plugin_params)

            # maybe one of the configs is really just for providing variables. However, we still require one config base overall.
            if len(config_paths) == 0:
                raise exceptions.SystemSetupError(
                    "At least one config base is required for plugin [%s]" %
                    name)
            return PluginDescriptor(name=name,
                                    core_plugin=core_plugin is not None,
                                    config=config_names,
                                    root_path=root_path,
                                    config_paths=config_paths,
                                    variables=variables)
Example #21
def main():
    try:
        # ------------------------------------------
        # Platform check
        #-------------------------------------------

        sys.tracebacklimit = 0

        if platform.system() == 'Windows':
            iswindows = True
            slash = '\\'
        else:
            iswindows = False
            slash = '/'

        # ------------------------------------------
        # Read the config file
        #-------------------------------------------

        settings = configparser.ConfigParser(
            interpolation=configparser.ExtendedInterpolation())

        # ConfigParser.read() silently skips missing files, so check the
        # list of successfully parsed files instead of catching an exception
        if not settings.read('config.ini'):
            sys.exit(
                '\n**ERROR**\nCould not open configuration file. It should be in the same folder as the script and named \'config.ini\'\n'
            )

        print(
            "\n-------------------------\nMemespector Python script\n-------------------------"
        )

        try:
            projectFolder = settings['Project']['ProjectFolder']

            dir_path = os.path.dirname(os.path.realpath(__file__))
            dataFolder = dir_path + slash + settings['Folders'][
                'DataFolder'] + slash
            cacheFolder = dir_path + slash + settings['Folders'][
                'CacheFolder'] + slash
            outputsFolder = dir_path + slash + settings['Folders'][
                'OutputsFolder'] + slash + projectFolder + slash
            imageCpFolder = outputsFolder + settings['Folders'][
                'ImageCopyFolder'] + slash
            cacheCopyFolder = outputsFolder + settings['Folders'][
                'CacheCopyFolder'] + slash

            makeNetwork = f.yn(settings['OutputConfiguration']['MakeNetwork'])
            makeWebNetwork = f.yn(
                settings['OutputConfiguration']['MakeWebEntitiesNetwork'])

            imagesRemote = f.yn(
                settings['SourceImagesLocation']['ImagesRemote'])
            absolutePath = f.yn(
                settings['SourceImagesLocation']['AbsolutePath'])
            forceBase64 = f.yn(settings['SourceImagesLocation']['ForceBase64'])
            saveImageCopy = f.yn(
                settings['SourceImagesLocation']['SaveImageCopy'])
            inputImageFolder = settings['SourceImagesLocation'][
                'InputImageFolder']

            labelThreshold = float(
                settings['OutputConfiguration']['LabelScoreFilter'])
            includeScore = f.yn(
                settings['OutputConfiguration']['IncludeScoreInSpreadsheet'])

            inputFileName = settings['InputConfiguration']['InputFile']
            inputFilePath = dataFolder + inputFileName
            delimiter = settings['InputConfiguration']['Delimiter'].encode(
                'utf-8').decode('unicode_escape')

            imagesColumn = settings['InputConfiguration']['ImagesColumn']
            linkColumn = settings['InputConfiguration']['LinkColumn']
            if linkColumn == "":
                linkColumn = imagesColumn

            procLimit = int(settings['InputConfiguration']['Limit'])

            labelDetection = f.yn(settings['ApiRequestFeatures']['Label'])
            safeSearchDetection = f.yn(
                settings['ApiRequestFeatures']['SafeSearch'])
            textDetection = f.yn(settings['ApiRequestFeatures']['Text'])
            webDetection = f.yn(settings['ApiRequestFeatures']['Web'])
            faceDetection = f.yn(settings['ApiRequestFeatures']['Face'])

            maxResults = settings['ApiRequestFeatures']['MaxResults']
            apiKey = settings['ApiRequestFeatures']['ApiKey']

            if maxResults == "0":
                setMaxResults = False
            else:
                setMaxResults = True

        except Exception:
            sys.exit(
                "\n**ERROR**\nCould not parse at least one of the settings from the config file. Please verify its contents carefully."
            )

        # ------------------------------------------
        # Create folders
        #-------------------------------------------

        if not os.path.exists(cacheFolder):
            os.makedirs(cacheFolder)

        if os.path.exists(outputsFolder):
            answer = input(
                "\nATTENTION: Project folder already exists. There is risk of overwriting files. Continue? Y or N > "
            )
            if answer.lower() == 'n':
                sys.exit('Rename project in config file.')
            elif answer.lower() == 'y':
                print('Continuing overwriting existing files.')
            else:
                sys.exit('Answer not understood. Exiting.')
        else:
            os.makedirs(outputsFolder)

        if not os.path.exists(imageCpFolder):
            os.makedirs(imageCpFolder)

        if not os.path.exists(cacheCopyFolder):
            os.makedirs(cacheCopyFolder)

        # ------------------------------------------
        # Configure Processing attributes
        # ------------------------------------------

        apirequest = VisionApiRequest(apiKey, setMaxResults, maxResults,
                                      labelDetection, safeSearchDetection,
                                      textDetection, webDetection,
                                      faceDetection)

        apirequest.printModuleConfiguration()

        print("HANDLING OF IMAGE SOURCE\n")
        if imagesRemote:
            print("\tOnline images")
            if forceBase64:
                print(
                    "\tLocal intermediation of data submission (forcing base64)"
                )
            else:
                print("\tRemotely retrieved by Google")
            if saveImageCopy:
                print("\tSaving local copies of processed images")
            else:
                print("\tNot saving local copies")
        else:
            print("\tProcessing local images")
            if absolutePath:
                print("\tReading file paths as absolute")
            else:
                print("\tReading file paths as relative to: " +
                      inputImageFolder)
        print()

        # ------------------------------------------
        # Get input
        # ------------------------------------------

        try:
            inputFile = open(inputFilePath, encoding='utf8')
        except Exception:
            sys.exit(
                '\n**ERROR**\nInput file could not be opened. Please check the configuration file.\n'
            )

        print("Delimiter: " + delimiter)

        csvDialect = csv.Sniffer().sniff(inputFile.read(1024),
                                         delimiters=delimiter)

        if not csvDialect.escapechar:
            csvDialect.escapechar = "\\"
        print("Escape char: " + str(csvDialect.escapechar))

        inputFile.seek(0)
        inputCSV = csv.reader(inputFile, csvDialect)
        inputHeader = next(inputCSV)

        if imagesColumn in inputHeader:
            imagesColumnIdx = inputHeader.index(imagesColumn)
        else:
            print(inputHeader)
            sys.exit(
                '\n**ERROR**\nImage column could not be found in input file. Please check the configuration file.\n'
            )

        if makeNetwork or makeWebNetwork:
            if linkColumn in inputHeader:
                linkColumnIdx = inputHeader.index(linkColumn)
            else:
                print(inputHeader)
                sys.exit(
                    '\n**ERROR**\nLink column could not be found in input file. Please check the configuration file\n'
                )

        # Workaround for Facebook URLs messing with filename extensions
        if ("created_time_unix" in inputHeader) or ("post_published_unix"
                                                    in inputHeader):
            isFacebook = True
        else:
            isFacebook = False

        numImages = sum(1 for row in inputCSV)
        inputFile.seek(0)

        if procLimit > numImages or procLimit == 0:
            procLimit = numImages

        print("DATA FILE\n")
        print("\tSource file path\n\t\t" + inputFilePath)
        print("\tNumber of entries\n\t\t" + str(numImages))
        print("\tProcessing limit\n\t\t" + str(procLimit))
        print("\tImage column header\n\t\t" + imagesColumn + "(" +
              str(imagesColumnIdx) + ")")
        print("\n-------------------------")

        # ------------------------------------------
        # Create output files
        # ------------------------------------------

        outputFileName = "annotated_" + inputFileName
        outputFilePath = outputsFolder + outputFileName

        try:
            outputFile = open(outputFilePath, 'w', newline='', encoding='utf8')
        except Exception:
            sys.exit(
                '\n**ERROR**\nOutput file could not be created. Please check the configuration file.\n'
            )

        outputCSV = csv.writer(outputFile, csvDialect)

        # Add columns to input file
        outputHeader = inputHeader + [
            'original_filename', 'image_id', 'file_ext', 'copy_filename',
            'gv_ss_adult', 'gv_ss_spoof', 'gv_ss_medical', 'gv_ss_violence',
            'gv_labels', 'gv_text', 'gv_text_lang', 'gv_web_entities',
            'gv_web_full_matching_images', 'gv_web_partial_matching_images',
            'gv_web_pages_with_matching_images',
            'gv_web_visually_similar_images', 'gv_num_faces', 'gv_face_joy',
            'gv_face_sorrow', 'gv_face_anger', 'gv_face_surprise'
        ]

        outputCSV.writerow(outputHeader)

        if makeNetwork:
            graph = nx.Graph()
            graphfilename = "img-label-net_" + inputFileName.split(
                ".")[0] + ".gexf"
            graphfilepath = outputsFolder + graphfilename
            foundlabels = []

        if makeWebNetwork:
            webgraph = nx.Graph()
            webgraphfilename = "img-webentities-net_" + inputFileName.split(
                ".")[0] + ".gexf"
            webgraphfilepath = outputsFolder + webgraphfilename
            foundwebentities = []

        # ------------------------------------------
        # Process images
        # ------------------------------------------

        next(inputCSV)  # Skip the header row (the file was rewound after counting)

        for i in range(procLimit):

            print("\nImage %s of %s" % (i + 1, procLimit))

            inputRow = next(inputCSV)
            outputRow = inputRow

            # Retrieve image path from input
            imagePath = inputRow[imagesColumnIdx]

            # If image is local and path is not absolute, make up absolute path
            if not imagesRemote and not absolutePath:
                imagePath = inputImageFolder + imagePath

            # Clean file name and extension from path if Facebook data
            if isFacebook:
                originalFileName = os.path.basename(
                    re.findall(r".+/(.+?)\?", imagePath)[0])
            else:
                originalFileName = os.path.basename(imagePath)

            extension = os.path.splitext(originalFileName)[1]

            # Create hash for image url
            hashObj = hashlib.sha1(imagePath.encode('utf-8'))
            imageHash = hashObj.hexdigest()
            print("\tImage ID: %s" % (imageHash))

            # Make image copy
            copyFilename = imageHash + extension
            if saveImageCopy:
                copyFilePath = imageCpFolder + copyFilename
                if not os.path.isfile(copyFilePath):
                    print("\tCopying image...", end="")
                    sys.stdout.flush()
                    try:
                        image = requests.get(imagePath, allow_redirects=True)
                    except Exception:
                        print(
                            '\n\t**ERROR**\n\tCould not retrieve remote image. Check data, internet connection, configuration.\n'
                        )
                        outputCSV.writerow(outputRow)
                        continue
                    open(copyFilePath, "wb").write(image.content)
                    print("done")
                else:
                    print("\tCopy already existed")

            if makeNetwork:
                nodelink = inputRow[linkColumnIdx]
                graph.add_node(imageHash,
                               type='image',
                               label='_image',
                               file=copyFilename,
                               link=nodelink)

            if makeWebNetwork:
                nodelink = inputRow[linkColumnIdx]
                webgraph.add_node(imageHash,
                                  type='image',
                                  label='_image',
                                  file=copyFilename,
                                  link=nodelink)

            # Process image

            responseFile = cacheFolder + imageHash + '.json'
            responseFileCp = cacheCopyFolder + imageHash + '.json'

            if not os.path.isfile(responseFile):
                if imagesRemote and forceBase64 and saveImageCopy:
                    # If images are remote but are to be processed through local base64 coding and copies have been made, use copies for network traffic efficiency.
                    apirequest.annotateImage(copyFilePath, isRemote=False)
                    responseData = apirequest.getResponse()
                    if not responseData:
                        outputCSV.writerow(outputRow)
                        continue
                else:
                    apirequest.annotateImage(imagePath,
                                             isRemote=imagesRemote,
                                             base64Encode=forceBase64)
                    responseData = apirequest.getResponse()
                    if not responseData:
                        outputCSV.writerow(outputRow)
                        continue
                with open(responseFile, 'w', encoding='utf8') as outFile:
                    json.dump(responseData, outFile, indent=4, sort_keys=True)
                copyfile(responseFile, responseFileCp)

            else:
                # If a JSON annotation file is saved in the cache, use it instead of processing the image again.
                print(
                    "\t*ATTENTION* Using cached content (remove the files in the cache folder if you see this message but the tool is not working as expected)"
                )
                copyfile(responseFile, responseFileCp)
                responseData = json.load(open(responseFile, encoding='utf8'))

            # Parse API response
            try:
                response = responseData['responses'][0]
            except Exception:
                print(
                    '\t*ATTENTION* Vision API returned an error for this image. Check JSON file for details.\n\tMoving on to next image.\n'
                )
                outputCSV.writerow(outputRow)
                continue

            # Safe Search
            if 'safeSearchAnnotation' in response:
                gv_ss_adult = response['safeSearchAnnotation']['adult']
                gv_ss_spoof = response['safeSearchAnnotation']['spoof']
                gv_ss_medical = response['safeSearchAnnotation']['medical']
                gv_ss_violence = response['safeSearchAnnotation']['violence']
            else:
                gv_ss_adult = "NONE"
                gv_ss_spoof = "NONE"
                gv_ss_medical = "NONE"
                gv_ss_violence = "NONE"

            # Labels
            if 'labelAnnotations' in response:
                gv_labels = []
                for label in response['labelAnnotations']:
                    if label['score'] < labelThreshold:
                        continue
                    if label['mid'] == '':
                        label_id = "_" + label['description']
                    else:
                        label_id = label['mid']
                    if makeNetwork:
                        if label_id not in foundlabels:
                            foundlabels.append(label_id)
                            graph.add_node(label_id,
                                           type='gv_label',
                                           label=label['description'],
                                           mid=label['mid'],
                                           description=label['description'])
                        graph.add_edge(imageHash,
                                       label_id,
                                       score=label['score'],
                                       topicality=label['topicality'])
                    if includeScore:
                        gv_labels.append(label['description'] + "(" +
                                         str(label['score']) + ")")
                    else:
                        gv_labels.append(label['description'])
                gv_labels = ",".join(gv_labels)
            else:
                gv_labels = "NONE"

            # Text
            if 'textAnnotations' in response:
                gv_text = response['textAnnotations'][0]['description']
                gv_text = re.sub("[\n\t\r]", " ", gv_text)
                gv_text_lang = response['textAnnotations'][0]['locale']
            else:
                gv_text = "NONE"
                gv_text_lang = "NONE"

            # Web Detection
            if 'webDetection' in response:
                if 'fullMatchingImages' in response['webDetection']:
                    gv_web_full_matching_images = []
                    for url in response['webDetection']['fullMatchingImages']:
                        gv_web_full_matching_images.append(url['url'].replace(
                            ",", "%2C"))
                    gv_web_full_matching_images = ",".join(
                        gv_web_full_matching_images)
                else:
                    gv_web_full_matching_images = "NONE"
                if 'pagesWithMatchingImages' in response['webDetection']:
                    gv_web_pages_with_matching_images = []
                    for url in response['webDetection'][
                            'pagesWithMatchingImages']:
                        gv_web_pages_with_matching_images.append(
                            url['url'].replace(",", "%2C"))
                    gv_web_pages_with_matching_images = ",".join(
                        gv_web_pages_with_matching_images)
                else:
                    gv_web_pages_with_matching_images = "NONE"
                if 'partialMatchingImages' in response['webDetection']:
                    gv_web_partial_matching_images = []
                    for url in response['webDetection'][
                            'partialMatchingImages']:
                        gv_web_partial_matching_images.append(
                            url['url'].replace(",", "%2C"))
                    gv_web_partial_matching_images = ",".join(
                        gv_web_partial_matching_images)
                else:
                    gv_web_partial_matching_images = "NONE"
                if 'visuallySimilarImages' in response['webDetection']:
                    gv_web_visually_similar_images = []
                    for url in response['webDetection'][
                            'visuallySimilarImages']:
                        gv_web_visually_similar_images.append(
                            url['url'].replace(",", "%2C"))
                    gv_web_visually_similar_images = ",".join(
                        gv_web_visually_similar_images)
                else:
                    gv_web_visually_similar_images = "NONE"
                if 'webEntities' in response['webDetection']:
                    gv_web_entities = []
                    for entity in response['webDetection']['webEntities']:
                        if 'entityId' in entity and 'description' in entity and 'score' in entity:
                            entity_id = entity['entityId']
                            description = entity['description']
                            gv_web_entities.append(description + "(" +
                                                   str(entity['score']) + ")")
                            if makeWebNetwork:
                                if entity_id not in foundwebentities:
                                    foundwebentities.append(entity_id)
                                    # This likely adds redundant attributes but may make it more compatible with the label network graph
                                    webgraph.add_node(entity_id,
                                                      type='gv_web_entity',
                                                      label=description,
                                                      mid=entity_id,
                                                      description=description)
                                webgraph.add_edge(imageHash,
                                                  entity_id,
                                                  score=entity['score'],
                                                  topicality=entity['score'])
                        else:
                            description = "NONE"
                    gv_web_entities = ",".join(gv_web_entities)
                else:
                    gv_web_entities = "NONE"
            else:
                gv_web_full_matching_images = "NONE"
                gv_web_pages_with_matching_images = "NONE"
                gv_web_partial_matching_images = "NONE"
                gv_web_visually_similar_images = "NONE"
                gv_web_entities = "NONE"

            # Face
            if 'faceAnnotations' in response:
                gv_num_faces = 0
                gv_face_joy = 'VERY_UNLIKELY'
                gv_face_sorrow = 'VERY_UNLIKELY'
                gv_face_anger = 'VERY_UNLIKELY'
                gv_face_surprise = 'VERY_UNLIKELY'
                for face in response['faceAnnotations']:
                    gv_face_joy = f.likelihoodCompare(gv_face_joy,
                                                      face['joyLikelihood'])
                    gv_face_sorrow = f.likelihoodCompare(
                        gv_face_sorrow, face['sorrowLikelihood'])
                    gv_face_anger = f.likelihoodCompare(
                        gv_face_anger, face['angerLikelihood'])
                    gv_face_surprise = f.likelihoodCompare(
                        gv_face_surprise, face['surpriseLikelihood'])
                    gv_num_faces += 1
            else:
                gv_face_joy = 'NONE'
                gv_face_sorrow = 'NONE'
                gv_face_anger = 'NONE'
                gv_face_surprise = 'NONE'
                gv_num_faces = 0  # keep the face count numeric, as in the counting branch

            # Add values to output row
            outputRow.append(originalFileName)
            outputRow.append(imageHash)
            outputRow.append(extension)
            outputRow.append(copyFilename)
            outputRow.append(gv_ss_adult)
            outputRow.append(gv_ss_spoof)
            outputRow.append(gv_ss_medical)
            outputRow.append(gv_ss_violence)
            outputRow.append(gv_labels)
            outputRow.append(gv_text)
            outputRow.append(gv_text_lang)
            outputRow.append(gv_web_entities)
            outputRow.append(gv_web_full_matching_images)
            outputRow.append(gv_web_partial_matching_images)
            outputRow.append(gv_web_pages_with_matching_images)
            outputRow.append(gv_web_visually_similar_images)
            outputRow.append(gv_num_faces)
            outputRow.append(gv_face_joy)
            outputRow.append(gv_face_sorrow)
            outputRow.append(gv_face_anger)
            outputRow.append(gv_face_surprise)

            # Write results to output file
            outputCSV.writerow(outputRow)
        if makeNetwork:
            nx.write_gexf(graph, graphfilepath)
        if makeWebNetwork:
            nx.write_gexf(webgraph, webgraphfilepath)
    except KeyboardInterrupt:
        if makeNetwork:
            if graph:
                nx.write_gexf(graph, graphfilepath)
        if makeWebNetwork:
            if webgraph:
                nx.write_gexf(webgraph, webgraphfilepath)
        print("\n\n**Script interrupted by user**\n\n")
    except Exception:
        traceback.print_exc(file=sys.stdout)
    sys.exit(0)
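
The Vision API script above imports a small helper module as `f`, which is not part of this excerpt. A minimal sketch of the two helpers it calls, assuming `yn()` parses yes/no config flags and `likelihoodCompare()` keeps the strongest likelihood string seen so far (a reconstruction, not the original module):

# Hypothetical reconstruction of the helper module `f` used by the script above.
LIKELIHOODS = ['UNKNOWN', 'VERY_UNLIKELY', 'UNLIKELY', 'POSSIBLE',
               'LIKELY', 'VERY_LIKELY']

def yn(value):
    """Interpret a yes/no setting from the config file as a boolean."""
    return value.strip().lower() in ('y', 'yes', 'true', '1')

def likelihoodCompare(current, new):
    """Return the stronger of two Vision API likelihood strings."""
    if LIKELIHOODS.index(new) > LIKELIHOODS.index(current):
        return new
    return current
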
Example #22
def main():

    # load configurations from settings file using configparser()
    settingsfile = '../settings/settings'

    if os.path.exists(settingsfile):
        config = configparser.ConfigParser()
        config._interpolation = configparser.ExtendedInterpolation()
        config.read(settingsfile)
        paths = config['Paths']

        outputdir = paths['output_dir'] + "/"
        emailsentfilecsv = paths['emailsentfile']
        logincredfile = paths['login_cred']
    else:
        print(
            "No settings file in ../settings/settings. Using default values.")
        parent_directory = "../"
        inputdir = parent_directory + 'datainput/'
        outputdir = parent_directory + 'dataoutput/'
        logincredfile = inputdir + 'login.cred'
        # Assumed default: without a settings file, keep the sent-mail log
        # in the output folder (the settings file normally provides this path).
        emailsentfilecsv = outputdir + 'emailsent.csv'

    if os.path.exists(emailsentfilecsv):
        dfsentemails = pd.read_csv(emailsentfilecsv)
    else:
        dfsentemails = pd.DataFrame({'date': [], 'email': []})

    print("Saving data to: ", emailsentfilecsv)

    # parse command-line arguments using argparse()
    parser = argparse.ArgumentParser()
    parser.add_argument('-v',
                        '--verbose',
                        help='Verbose mode [Default=Off]',
                        action='store_true')
    parser.add_argument('-l',
                        '--login',
                        help='Enter login info manually [Default=Off]',
                        action='store_true')
    parser.add_argument('-n',
                        '--dryrun',
                        help='Dry run (not send emails)',
                        action='store_true')

    args = parser.parse_args()

    verbose = args.verbose
    login = args.login
    dryrun = args.dryrun

    if dryrun:
        print(
            "Dry run: the [emailsentfilecsv] file is not updated, so repeated runs produce the same output."
        )

    # BEGIN MANUAL LOGIN
    # login credentials from file to avoid exposing them in code
    if not login:
        if os.path.exists(logincredfile):
            with open(logincredfile, "r") as f:
                uname = f.readline().strip()
                pwd = f.readline().strip()
        else:
            print('Login credentials file not found. Please enter them manually.')
            uname, pwd = getlogininfo()
    else:
        uname, pwd = getlogininfo()

    # set up the SMTP server
    s = smtplib.SMTP(host="smtp-mail.outlook.com", port=587)
    s.starttls()
    print("Logging in")
    s.login(uname, pwd)

    emailto = ""
    emailmsg = ""
    counter = 0

    # For each contact, send the email:
    for emailfile in glob.glob(outputdir + '*.txt'):

        coursename = re.sub(
            ' ', '',
            re.sub(r'letter-', '',
                   re.sub(r'\.txt$', '',
                          os.path.basename(emailfile)).strip(" ")))
        emailsubject = "[" + coursename + "] " + "Unattempted assignments"

        if verbose:
            print('+' * 80, "\n")
            print(emailsubject, "\n")

        with open(emailfile, 'r') as f:
            line = f.readline()

            while line:

                if line == '=' * 70 + '\n':
                    createmessage(dfsentemails, s, emailsentfilecsv, counter,
                                  emailto, emailsubject, emailmsg, verbose,
                                  dryrun)
                    # after sending the previous email start a new email message
                    counter = counter + 1
                    emailto = f.readline()
                    emailmsg = ""

                elif line == "EOF":
                    createmessage(dfsentemails, s, emailsentfilecsv, counter,
                                  emailto, emailsubject, emailmsg, verbose,
                                  dryrun)

                    # after sending the previous email start a new email message
                    counter = 0
                    emailto = ""
                    emailmsg = ""

                else:
                    emailmsg = emailmsg + line

                line = f.readline()
                # print(line)

                # if counter > 1:
                #     break

    s.quit()
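
The `getlogininfo()` helper called above is not shown in this excerpt; a plausible sketch of an interactive fallback, assuming it simply prompts for credentials (getpass keeps the password from being echoed):

import getpass

def getlogininfo():
    # Hypothetical sketch: prompt for the SMTP credentials interactively.
    uname = input("Email address: ")
    pwd = getpass.getpass("Password: ")
    return uname, pwd
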
Example #23
 def parser(cls):
     cfg = configparser.ConfigParser(
         interpolation=configparser.ExtendedInterpolation())
     cfg.optionxform = str
     return cfg
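
A possible way to wire up and use such a factory method (the surrounding class and the config contents are made up; the point is the case-sensitive, extended-interpolation parser it returns):

import configparser

class Config:
    @classmethod
    def parser(cls):
        cfg = configparser.ConfigParser(
            interpolation=configparser.ExtendedInterpolation())
        cfg.optionxform = str  # keep option names case sensitive
        return cfg

cfg = Config.parser()
cfg.read_string("[Paths]\nBase = /srv\nData = ${Base}/data\n")
print(cfg['Paths']['Data'])  # -> /srv/data
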
Example #24
 def __init__(self, grid_conf_file, args):
     self.conf = configparser.ConfigParser(
         interpolation=configparser.ExtendedInterpolation())
     self.conf.read_file(open(grid_conf_file, 'r'))
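
For illustration, a made-up grid configuration that this reader would resolve; ${base} is interpolated within its own section and ${paths:output} across sections:

import configparser
import io

sample = """\
[paths]
base = /data/grid
output = ${base}/out

[job]
logfile = ${paths:output}/job.log
"""

conf = configparser.ConfigParser(
    interpolation=configparser.ExtendedInterpolation())
conf.read_file(io.StringIO(sample))
print(conf['job']['logfile'])  # -> /data/grid/out/job.log
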
Example #25
    def __init__(self, config_name, test=False):
        self.logger = logging.getLogger('%s.%s' % (__name__, config_name))
        self.logger.setLevel(logging.DEBUG)

        self.error = True
        self.status = 'Backup failed!'
        self.pid_created = False
        script_dir = os.path.dirname(os.path.abspath(sys.argv[0]))

        # Load the global configuration file
        configfile_global = os.path.join(script_dir, 'rsync-backup.conf')
        self.global_config = configparser.ConfigParser(
            interpolation=configparser.ExtendedInterpolation())
        self.global_config.read_file(open(configfile_global))

        # Load the backup configuration file
        configfile_backup = os.path.join(script_dir, 'conf.d',
                                         '%s.conf' % config_name)
        self.config = configparser.ConfigParser(
            interpolation=configparser.ExtendedInterpolation())
        self.config.read_file(open(configfile_backup))

        self.test = test
        current_datetime = datetime.now()
        self.rules = configfile_backup.replace('.conf', '.rules')
        self.timestamp = current_datetime.strftime('%Y-%m-%d-%H%M%S')
        self.backup_root = os.path.join(
            self.global_config.get('general', 'backup_root'),
            self.config.get('general', 'label'))
        self.log_dir = os.path.join(self.backup_root, 'logs')
        self.log_file = os.path.join(self.log_dir, '%s.log' % self.timestamp)
        self.to_addrs = set(
            self.config.get('reporting',
                            'to_addrs',
                            fallback=self.global_config.get(
                                'reporting', 'to_addrs')).split(','))
        self.pidfile = '/var/run/backup/backup-%s.pid' % (self.config.get(
            'general', 'label'))
        self.cache_dir = os.path.join(self.backup_root, 'cache')
        self.backups_dir = os.path.join(self.backup_root, 'backups')
        self.last_verification_file = os.path.join(self.cache_dir,
                                                   'last_verification')
        self.umask = int(
            self.global_config.get('general', 'umask', fallback='0o077'), 8)
        os.umask(self.umask)

        # Configure backup intervals
        self.intervals = {
            'snapshot': {
                'retention':
                self.config.getint('retention',
                                   'snapshot',
                                   fallback=self.global_config.getint(
                                       'retention', 'snapshot')),
            },
            'daily': {
                'retention':
                self.config.getint('retention',
                                   'daily',
                                   fallback=self.global_config.getint(
                                       'retention', 'daily')),
            },
            'monthly': {
                'retention':
                self.config.getint('retention',
                                   'monthly',
                                   fallback=self.global_config.getint(
                                       'retention', 'monthly')),
            },
            'yearly': {
                'retention':
                self.config.getint('retention',
                                   'yearly',
                                   fallback=self.global_config.getint(
                                       'retention', 'yearly')),
            }
        }

        # Check if backup is already running and set up logging
        self._is_running()
        self._create_dirs()
        self._prepare_logging()
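
The repeated `fallback=` pattern above (per-backup value first, global value second) is the heart of this setup; a tiny standalone illustration with made-up retention values:

import configparser

global_config = configparser.ConfigParser(
    interpolation=configparser.ExtendedInterpolation())
global_config.read_string("[retention]\nsnapshot = 24\ndaily = 7\n")

config = configparser.ConfigParser(
    interpolation=configparser.ExtendedInterpolation())
config.read_string("[retention]\nsnapshot = 48\n")  # overrides snapshot only

snapshot = config.getint('retention', 'snapshot',
                         fallback=global_config.getint('retention', 'snapshot'))
daily = config.getint('retention', 'daily',
                      fallback=global_config.getint('retention', 'daily'))
print(snapshot, daily)  # -> 48 7
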
Example #26
    def configTest(self):
        # Set up the configuration parser. SafeConfigParser was renamed to
        # ConfigParser in Python 3.2 and removed in Python 3.12.
        self.config = configparser.ConfigParser(
            interpolation=configparser.ExtendedInterpolation())

        # Set the config parser to make option names case sensitive.
        self.config.optionxform = str

        # fill the ENV section with environment variables
        self.config.add_section('ENV')
        for k,v in os.environ.items():
            self.config.set('ENV', k, v.replace("$","$$"))

        # log the list of files - note that the argument parser
        # puts the input files in a list, with the first member
        # being the list of input files
        self.log['inifiles'] = self.args.ini_files[0]
        # initialize the list of active sections
        self.actives = []
        # if they specified a list to execute, then use it
        sections = []
        if self.args.section:
            sections = self.args.section.split(",")
            skip = False
        elif self.args.skipsections:
            sections = self.args.skipsections.split(",")
            skip = True
        else:
            sections = None
        # cycle through the input files, checking that each one exists
        for testFile in self.log['inifiles']:
            if not os.path.isfile(testFile):
                print("Test description file", testFile, "not found!")
                sys.exit(1)
        # read all input files once; read() accepts a list of filenames
        self.config.read(self.log['inifiles'])
        for section in self.config.sections():
            if section.startswith("SKIP") or section.startswith("skip"):
                # users often want to temporarily ignore a section
                # of their test definition file, but don't want to
                # remove it lest they forget what it did. So let
                # them just mark the section as "skip" to be ignored
                continue
            # if we are to filter the sections, then do so
            takeus = True
            if sections is not None:
                found = False
                for sec in sections:
                    if sec == section:
                        found = True
                        sections.remove(sec)
                        if skip:
                            takeus = False
                        break
                if not found and not skip:
                    takeus = False
            if takeus:
                self.actives.append(section)
            if self.logger is not None:
                self.logger.verbose_print("SECTION: " + section)
                self.logger.verbose_print(self.config.items(section))
        if sections is not None and 0 != len(sections) and not skip:
            print("ERROR: sections were specified for execution and not found:",sections)
            sys.exit(1)
        return
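
The ENV section populated above is what lets test files reference environment variables through extended interpolation; a self-contained sketch of the same trick:

import configparser
import os

cfg = configparser.ConfigParser(
    interpolation=configparser.ExtendedInterpolation())
cfg.optionxform = str  # environment variable names are case sensitive

cfg.add_section('ENV')
for k, v in os.environ.items():
    cfg.set('ENV', k, v.replace('$', '$$'))  # escape literal '$' for the parser

cfg.read_string("[test]\nworkdir = ${ENV:HOME}/scratch\n")
print(cfg['test']['workdir'])  # e.g. /home/alice/scratch on POSIX
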
Example #27
import configparser

conf = configparser.ConfigParser(interpolation=configparser.ExtendedInterpolation())

# conf['DEFAULT'] = {}
# conf['DEFAULT']['path'] = 'c:/f/d'

# conf['lh'] = {}
# # Reference the DEFAULT section explicitly: a bare ${path} inside [lh]
# # would resolve to [lh]'s own 'path' and raise InterpolationDepthError.
# conf['lh']['path'] = '${DEFAULT:path}/init.com'

# # Write with 'w', not 'a': appending duplicates the sections on re-runs,
# # which makes the later read() fail with DuplicateSectionError.
# with open('1.txt', 'w', encoding='utf-8') as f:
#     conf.write(f)

conf.read('1.txt')
print(conf.get('lh', 'path'))  # -> c:/f/d/init.com
Example #28
import os
import select
import configparser
import time
import sys

timeout = 1

log = open("log.log", "w")
try:
    os.mkdir("logs-" + sys.argv[1])
except FileExistsError:
    pass

programs = configparser.ConfigParser(
    interpolation=configparser.ExtendedInterpolation(), default_section="")
programs.read("entrylist.ini")

stats = {}
test_data = [
    i.split() for i in open(sys.argv[1]).read().split("\n") if i.strip()
]


def pad(s, l):
    return s + (l - len(s)) * " "


def print(*args, print=print, **kwargs):
    # Tee-style override: log everything, then forward to the real print,
    # which was captured as the default value before the name was shadowed.
    s = " ".join(str(i) for i in args)
    log.write(s + kwargs.get("end", "\n"))
    print(*args, **kwargs)
Example #29
tmpDir = 'tmp'
headerPart = os.path.join(tmpDir, '~header')
binPart = os.path.join(tmpDir, '~bin')
footerPart = os.path.join(tmpDir, '~footer')

# Command line args
if len(sys.argv) == 1:
    print("Usage: pack.py <config file>")
    print("Example: pack.py configs/letv-x355pro.ini")
    quit()

configFile = sys.argv[1]

# Parse config file
config = configparser.ConfigParser(
    interpolation=configparser.ExtendedInterpolation())
#config = configparser.ConfigParser()
config.read(configFile)

# Main
main = config['Main']
firmwareFileName = main['FirmwareFileName']
projectFolder = main['ProjectFolder']
useHexValuesPrefix = utils.str2bool(main['useHexValuesPrefix'])

SCRIPT_FIRMWARE_FILE_NAME = main['SCRIPT_FIRMWARE_FILE_NAME']
DRAM_BUF_ADDR = main['DRAM_BUF_ADDR']
MAGIC_FOOTER = main['MAGIC_FOOTER']
HEADER_SIZE = utils.sizeInt(main['HEADER_SIZE'])

# Header
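
The excerpt assumes a local `utils` module providing `str2bool` and `sizeInt`; plausible sketches of both (assumptions, not the original helpers):

def str2bool(s):
    """Interpret typical truthy config strings as True."""
    return s.strip().lower() in ('1', 'true', 'yes', 'on')

def sizeInt(s):
    """Parse a size field; base 0 accepts decimal and 0x-prefixed hex."""
    return int(s, 0)
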
Example #30
def main():
    """Main procedure for training the GAN.

    It will run through all classes and train the GANs
    for each class.
    The following command line arguments are supported:
    --traintool: script to call
    --tfrecords: path to the TFrecords
    --gans: path to the GANs folder
    --size: image size (part of folder name)
    --gpus: number of GPUs to use
    --kimg: number of training rounds (in 1000s of images)
    """

    # read config file with defaults
    config = configparser.ConfigParser(
        interpolation=configparser.ExtendedInterpolation())
    config.read('config.ini')
    # first step: parse config-file related arguments
    cfg_parser = argparse.ArgumentParser(description='Train GAN',
                                         add_help=False)
    cfg_parser.add_argument(
        '-e',
        '--env',
        type=str,
        default=configparser.DEFAULTSECT,
        help='base environment to use (in file `config.ini`)')
    # parse config file related args
    cfg_args = cfg_parser.parse_known_args()[0]
    # add defaults from environment
    defaults = dict(config.items(cfg_args.env))
    # add other argument parser arguments
    parser = argparse.ArgumentParser(parents=[cfg_parser])
    parser.set_defaults(**defaults)
    parser.add_argument('--traintool', type=str, help='script to call')
    parser.add_argument('--tfrecords', type=str, help='path to the TFrecords')
    parser.add_argument('--gans', type=str, help='path to the GANs folder')
    parser.add_argument('--size',
                        type=str,
                        help='image size (part of folder name)')
    parser.add_argument('--gpus', type=int, help='number of GPUs to use')
    parser.add_argument('--kimg',
                        type=int,
                        help='number of training rounds (in 1000s of images)')
    # parse command line arguments
    args = parser.parse_args()
    # does the source folder exist?
    train_path = f"{args.tfrecords}_{args.size}/"
    if not os.path.isdir(train_path):
        print(f"Error: source folder `{train_path}` does not exist!",
              file=sys.stderr)
        os._exit(1)
    # run through classes
    for cls in range(5):
        # source path for this class
        source_path = train_path + str(cls)  # train_path already ends in '/'
        if not os.path.isdir(source_path):
            print(f"Error: Class folder `{source_path}` not found",
                  file=sys.stderr)
            os._exit(2)
        # start existing script
        print(f"Training GAN for class {cls}...")
        subprocess.run([
            sys.executable, args.traintool, f"--num-gpus={args.gpus}",
            f"--data-dir={train_path}", f"--dataset={cls}",
            f"--total-kimg={args.kimg}",
            f"--result-dir={args.gans}_{args.size}", "--config=config-f",
            "--mirror-augment=true"
        ])
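
The two-stage argparse setup above (a first parser that only reads --env, then a second parser seeded via set_defaults() from that config section) is worth isolating; a minimal runnable sketch with made-up values:

import argparse
import configparser

config = configparser.ConfigParser(
    interpolation=configparser.ExtendedInterpolation())
config.read_string("""
[DEFAULT]
gpus = 1
kimg = 1000

[cluster]
gpus = 4
""")

parser = argparse.ArgumentParser()
parser.set_defaults(**dict(config.items('cluster')))
parser.add_argument('--gpus', type=int)
parser.add_argument('--kimg', type=int)

# The config supplies gpus=4; the command line overrides kimg.
# argparse applies the declared type to string defaults as well.
args = parser.parse_args(['--kimg', '5000'])
print(args.gpus, args.kimg)  # -> 4 5000
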