Example #1
def wmts_dict_merge(target, *args):
    # Recursively merge layer configs, skipping any layer whose
    # projection 'source' conflicts between the two inputs
    obj = args[0]['layers']
    conf = args[1]['layers']

    if not isinstance(obj, dict):
        return obj
    for k, v in obj.items():
        if k in conf and isinstance(conf[k], dict):
            foundSourceMisMatch = False
            if 'projections' in v and 'projections' in conf[k]:
                conf_projections = conf[k]['projections']
                for projectionKey, projection in v['projections'].items():
                    source = projection['source']
                    if projectionKey in conf_projections:
                        if 'source' in conf_projections[projectionKey]:
                            if source != conf_projections[projectionKey][
                                    'source']:
                                foundSourceMisMatch = True
                                break
            if foundSourceMisMatch:
                continue
            if k in target and isinstance(target[k], dict):
                dict_merge(target[k], v, conf[k])
            else:
                target[k] = dict_merge(v, conf[k])
    return target
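None of these examples define `dict_merge` itself, and its exact semantics vary by project. As a reading aid, here is a minimal recursive sketch consistent with how most of the call sites below use it (mutating the first mapping in place and returning it, accepting one or more sources); this is an assumption, not any project's actual implementation:

def dict_merge(target, *sources):
    # Sketch only: recursively merge one or more source dicts into target.
    # Nested dicts are merged key by key; any other source value
    # overwrites the corresponding key in target.
    for src in sources:
        for k, v in src.items():
            if isinstance(v, dict) and isinstance(target.get(k), dict):
                dict_merge(target[k], v)
            else:
                target[k] = v
    return target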
Example #2
    def updateconf(self):
        log.info("updating config")

        self.collect_enabled_features()
        self.process_rules()

        self.kconf_enable  = self.config.get_kconf_enable()
        self.kconf_module  = self.config.get_kconf_module()
        self.kconf_setting = self.config.get_kconf_setting()

        self.allsym = self.config.get_kconf_all()

        # process enabled feature list
        for fname, fobj in self.enabled_features.items():
            self.kconf_enable  |= fobj.get_kconf_enable()
            self.kconf_module  |= fobj.get_kconf_module()
            dict_merge(self.kconf_setting, fobj.get_kconf_setting())

        for x in self.kconf_enable.intersection(self.kconf_module):
            log.info("module forced builtin: %s: " % x)
            self.kconf_module.remove(x)

        self.kconf_unset = (self.allsym - self.kconf_enable
                                        - self.kconf_module
                                        - dict_keys(self.kconf_setting))

        self.processed = True
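`dict_keys` is another helper the snippet does not define; since its result is subtracted from a set, it presumably returns the mapping's keys as a set. A plausible one-liner under that assumption:

def dict_keys(d):
    # Assumed behavior: expose the keys as a set so they can take part
    # in the set arithmetic above.
    return set(d.keys())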
Example #3
    def set_metadata(self, metadata):
        metadata = util.build_recursive(metadata)
        if 'Metadata' in metadata:
            metadata = metadata['Metadata']
        self['Metadata'] = util.dict_merge(self.get('Metadata', {}),
                                           metadata)
        self.dirty_children()
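`util.build_recursive` is not shown either. Judging by how the sMAP loader in Example #12 feeds it flat config sections before merging, a sketch that expands path-like keys into nested dicts would fit; the separator is an assumption:

def build_recursive(flat, sep='/'):
    # Hypothetical sketch: expand {'Metadata/Location/City': 'Berkeley'}
    # into {'Metadata': {'Location': {'City': 'Berkeley'}}}.
    out = {}
    for key, value in flat.items():
        parts = key.split(sep)
        node = out
        for part in parts[:-1]:
            node = node.setdefault(part, {})
        node[parts[-1]] = value
    return out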
Example #4
def get_dataloader(config, device):
    """ Get train / valid dataloader. """
    # read data
    ratings_df = pd.read_csv(config['ratings'])

    # sizes
    user_size = int(ratings_df.userId.max() + 1)
    item_size = int(ratings_df.itemId.max() + 1)

    # split
    (users,
     items,
     ratings) = (torch.from_numpy(ratings_df.userId.values).long().to(device),
                 torch.from_numpy(ratings_df.itemId.values).long().to(device),
                 torch.from_numpy(ratings_df.rating.values).float().to(device))
    dataset = TensorDataset(users, items, ratings)
    valid_size = int(len(dataset) * config['args']['valid'])
    (trainset,
     validset) = random_split(dataset, [len(dataset) - valid_size, valid_size])
    batch_size = config['args']['batch_size']
    (trainloader,
     validloader) = (DataLoader(trainset, batch_size=batch_size),
                     DataLoader(validset, batch_size=batch_size))

    # update config
    config['args'] = dict_merge(config['args'], {
        'user_size': user_size,
        'item_size': item_size,
    })

    return trainloader, validloader
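For illustration, a hypothetical `config` covering the keys this loader reads (the file path and values are made up):

config = {
    'ratings': 'data/ratings.csv',  # CSV with userId, itemId, rating columns
    'args': {'valid': 0.1, 'batch_size': 256},
}
trainloader, validloader = get_dataloader(config, torch.device('cpu'))
# Afterwards config['args'] also carries the merged user_size and item_size.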
Example #5
def run():  # pylint: disable=too-many-locals
    """ Run! """
    args = parse_args()

    # load config
    # use context managers so the config files are closed promptly
    with open(args.default_config) as f:
        default_config = yaml.safe_load(f)
    with open(args.config) as f:
        config = yaml.safe_load(f)
    print('config loaded:', config)
    config = dict_merge(default_config, config)

    # init
    init_seed(config['args']['seed'])
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # load data
    preprocess_src = 'preprocess.{}'.format(Path(config['preprocess']).stem)
    preprocess_module = importlib.import_module(preprocess_src).get_dataloader
    (trainloader, validloader) = preprocess_module(config, device)

    # init trainer
    print('config:', config)
    trainer_src = 'trainer.{}'.format(Path(config['trainer']).stem)
    trainer_class = importlib.import_module(trainer_src).Trainer
    trainer = trainer_class(config, device)

    # train!
    model_dir = Path(config['model_dir'])
    if not model_dir.is_dir():
        model_dir.mkdir()
    trainer.train(trainloader, validloader, config['args']['epoch_size'],
                  model_dir)
Example #6
def process_requests(wv_product_dict):
    futures = []
    with ThreadPoolExecutor() as executor:
        for wv_id, val in wv_product_dict.items():
            c_id = val['conceptId']
            futures.append(executor.submit(get_cmr_data, wv_id, c_id))
            futures.append(executor.submit(get_cmr_umm_data, wv_id, c_id))
    for f in futures:
        try:
            f.result()
        except Exception as e:
            print("%s: ERROR: %s" % (prog, e))

    collection_data = dict_merge(cmr_data, cmr_umm_data)
    no_data_count = 0
    for value in collection_data.values():
        if not value:
            # TODO eventually should probably delete empty dict entries.
            # For now, may be useful to see which products are not returning metadata
            no_data_count += 1

    success_count = len(collection_data) - no_data_count
    print("%s: %s collections returned metadata." % (prog, success_count))
    print("%s: %s collections did not reutrn any metadata." %
          (prog, no_data_count))
    return collection_data
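The function relies on module-level state that the snippet does not show; presumably something along these lines exists near the top of the script (names inferred from the calls above, the value of `prog` is a placeholder):

prog = 'cmr_script'   # program name used in log messages (assumed value)
cmr_data = {}         # filled in by get_cmr_data
cmr_umm_data = {}     # filled in by get_cmr_umm_data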
Example #7
def get_cmr_data(concept_id_dict):
    concept_id = concept_id_dict["value"]
    if cmr_data.get(concept_id) is None:
        response = requests.get(cmr_collection_url + concept_id)
        # default to an empty list so a missing 'entry' doesn't break len()
        entry = response.json().get('feed', {}).get('entry', [])
        if len(entry) == 1:
            cmr_data[concept_id] = process_entries(entry[0], cmr_keys_map)
        elif len(entry) > 1:
            print("%s: WARNING: multiple entries found for %s" % (prog, concept_id))
    # dict_merge is assumed to mutate its first argument and return it;
    # .get() avoids a KeyError when no metadata was cached for this id
    concept_id_dict = dict_merge(concept_id_dict, cmr_data.get(concept_id, {}))
Example #8
    async def get_summoner_names_by_ids(self, summoner_ids):
        """Get summoner names by their ids

        Keyword arguments:
        summoner_ids -- list of summoner ids to query
        """
        results = []
        for subset in util.grouper(summoner_ids, 40):
            url = self.base_summoner_url + ','.join(str(summoner_id) for summoner_id in subset if summoner_id) + '/name'
            results.append(await self.get(url))

        return util.dict_merge(results)
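`util.grouper` batches the ids into chunks of 40, and the `if summoner_id` filter suggests the last chunk is padded with a falsy fill value, which matches the classic itertools grouper recipe:

from itertools import zip_longest

def grouper(iterable, n, fillvalue=None):
    # Collect data into fixed-length chunks; the last chunk is padded
    # with fillvalue (hence the truthiness filter at the call sites).
    args = [iter(iterable)] * n
    return zip_longest(*args, fillvalue=fillvalue)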
Example #9
    async def get_summoners_info_by_names(self, summoner_names):
        """Get info about summoners by summoner names

        Keyword arguments:
        summoner_names -- list of summoner names to query
        """
        results = []
        for subset in util.grouper(summoner_names, 40):
            url = self.base_summoner_url + 'by-name/' + ','.join(name for name in subset if name)
            results.append(await self.get(url))

        return util.dict_merge(results)
Example #10
    def get_summoners_info_by_names(self, summoner_names):
        """Get info about summoners by summoner names

        Keyword arguments:
        summoner_names -- list of summoner names to query
        """

        results = []
        for subset in util.grouper(summoner_names, 40):
            url = self.base_url + "by-name/" + ",".join(name for name in subset if name)
            results.append(LeagueRequest.get(url))

        return util.dict_merge(results)
Example #11
    def get_summoner_names_by_ids(self, summoner_ids):
        """Get summoner names by their ids

        Keyword arguments:
        summoner_ids -- list of summoner ids to query
        """

        results = []
        for subset in util.grouper(summoner_ids, 40):
            url = self.base_url + ",".join(str(summoner_id) for summoner_id in subset if summoner_id) + "/name"
            results.append(LeagueRequest.get(url))

        return util.dict_merge(results)
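Note that in Examples #8 through #11 `util.dict_merge` receives a single list of response dicts rather than a target plus sources, so it is presumably a fold over that list; a sketch under that assumption:

def dict_merge(dicts):
    # Hypothetical list-folding variant: combine a sequence of API
    # response dicts into one shallow result.
    merged = {}
    for d in dicts:
        merged.update(d)
    return merged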
Example #12
def load(file, sections=[], **instargs):
    """Create a sMAP instance based on the representation stored in a file.

The configuration file contains sections which refer to either
reporting instances, or paths in the sMAP heirarchy.  Any section
whose name starts with ``/`` is treated as a resource name; sections
starting with ``report`` are treated as reports.

The file must contain at least one section named ``/``, which must
contain a ``uuid`` key to set the root identifier for the source.

:param string file: filename of the configuration file
:param instargs: arguments passed to the :py:class:`~smap.core.SmapInstance` constructor.
:return smap.core.SmapInstance: the created instance
:raise smap.loader.SmapLoadError: an error is encountered processing the file
:raise smap.core.SmapError: some other error is encountered validating the loaded object
    """
    found = None
    for l in ['', os.getcwd(), sys.prefix]:
        path = os.path.join(l, file)
        if os.path.isfile(path):
            found = path
    if not found:
      raise Exception("Config file %s not found." % file)
    print "Loading config file:", found

    conf = configobj.ConfigObj(found, indent_type='  ')

    # if there's a server section, override the default server
    # configuration with that
    if 'server' in conf:
        smapconf.SERVER = util.dict_merge(smapconf.SERVER, 
                                          dict(((k.lower(), v) for (k, v) in 
                                                conf['server'].iteritems())))
    if 'logging' in conf:
        smapconf.LOGGING = util.dict_merge(smapconf.LOGGING, 
                                           dict(((k.lower(), v) for (k, v) in 
                                                 conf['logging'].iteritems())))

    # we need the root to have a uuid
    inst = core.SmapInstance(conf['/']['uuid'], **instargs)
    inst.loading = True
    reports = []

    for s in conf:
        print "Loading section", s
        if s.startswith('report'):
            resource = conf[s].get('ReportResource', '/+')
            format = conf[s].get('Format', 'json')
            max_age = conf[s].get('MaxAge', None)
            max_age = int(max_age) if max_age != None else None

            dest = [conf[s]['ReportDeliveryLocation']]
            for i in xrange(0, 10):
                if 'ReportDeliveryLocation%i' % i in conf[s]:
                    dest.append(conf[s]['ReportDeliveryLocation%i' % i])

            reportinst = {
                'ReportDeliveryLocation' : dest,
                'ReportResource' : resource,
                'Format': format,
                'uuid' : inst.uuid(s),
                'MaxAge' : max_age,
                }
            for o in ['MinPeriod', 'MaxPeriod']:
                if o in conf[s]:
                    reportinst[o] = conf[s][o]
            for o in ['ClientCertificateFile', 'ClientPrivateKeyFile', 'CAFile']:
                if o in conf[s]:
                    reportinst[o] = os.path.expanduser(conf[s][o])

            reports.append(reportinst)
            continue
                      
        elif not s.startswith('/'):
            # path sections must start with a '/'
            # other sections might be present and could be parsed by
            # other parts of the program
            print "Warning: skipping section", s, "since it does not begin with a '/'"
            continue
        elif sections and util.norm_path(s) not in sections:
            # skip all but the listed sections if we were asked to
            continue

        s = util.norm_path(s)

        # build the UUID for the item
        props = util.build_recursive(dict(conf[s].items()))
        id = None
        if 'uuid' in conf[s]:
            key = None
            id = uuid.UUID(conf[s]['uuid'])
        elif 'key' in conf[s]:
            key = conf[s]['key']
        else:
            # default to the path if neither a uuid nor a key is given
            key = s
        if key:
            id = inst.uuid(key)
            # raise SmapLoadError("Every config file section must have a uuid or a key!")

        # create the timeseries or collection
        if (s == '/' or 
            conf[s].get("type", None) == 'Collection' or 
            inst.get_collection(s) != None):
            if s == '/':
                c = inst.get_collection('/')
            elif inst.get_collection(s) != None:
                # sometimes you will have collections created twice,
                # for instance if a driver creates it and then we want
                # to tag it with metadata
                c = inst.get_collection(s)
            else:
                c = core.Collection(s, inst)
                inst.add_collection(s, c)
        elif conf[s].get("type", "Timeseries") == "Timeseries":
            if inst.get_timeseries(s) != None:
                c = inst.get_timeseries(s)
            else:   
                try:
                    props['Properties']['UnitofMeasure']
                except KeyError:
                    raise SmapLoadError("A Timeseries must have at least "
                                        "the Properites/UnitofMeasure key")
                
                # the Timeseries uses defaults if the conf file doesn't
                # contain the right sections.
                c = core.Timeseries(id, props['Properties']['UnitofMeasure'],
                                    data_type=props['Properties'].get('ReadingType', 
                                                                      core.Timeseries.DEFAULTS['Properties/ReadingType']),
                                    timezone=props['Properties'].get('Timezone', 
                                                                     core.Timeseries.DEFAULTS['Properties/Timezone']),
                                    buffersz=int(props.get('BufferSize', core.Timeseries.DEFAULTS['BufferSize'])))
                inst.add_timeseries(s, c)
        else:
            if not id:
                raise SmapLoadError("A driver must have a key or uuid to generate a namespace")
            
            # load a new driver manager layer
            newdrv = driver.SmapDriver.get_driver(inst, conf[s]['type'], s, id)
            # create a collection and add it at the attachment point
            c = inst.get_collection(s)
            if not c:
                c = core.Collection(s, inst)
                inst.add_collection(s, c)
            
            # Add config file specified checkers for the driver
            check = checkers.get(inst, newdrv, conf[s])
            if check:
                inst.checkers.append(check)

            # get the driver to add its points
            newdrv.setup(conf[s])

        # Metadata and Description are shared between both Collections
        # and Timeseries
        if props.has_key('Metadata'):
            # the driver may have added metadata; however config file
            # metadata overrides it
            c['Metadata'] = util.dict_merge(c.get('Metadata', {}),
                                            props['Metadata'])
        if props.has_key('Description'):
            c['Description'] = props['Description']
        if key:
            setattr(c, 'key', key)

    # since the sections could come in any order, update the reporting
    # instance to make sure all the topics are set right.
    for reportinst in reports:
        if not inst.reports.update_report(reportinst):
            inst.reports.add_report(reportinst)
    inst.reports.update_subscriptions()
    inst.loading = False
    return inst
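The docstring above describes the expected configuration layout; a hypothetical minimal file in configobj/INI syntax (all values illustrative) could look like:

[/]
uuid = 00000000-0000-0000-0000-000000000000

[/sensor0]
type = Timeseries
Properties/UnitofMeasure = kW

[report0]
ReportDeliveryLocation = http://localhost:8079/add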
Example #13
new_conf["layers"] = {}
new_conf["sources"] = {}

with open(output_file) as fp:
    output_data = json.load(fp)
file_count = 0
for file in os.listdir(input_dir):
    try:
        if not file.endswith(".json"):
            continue
        file_count += 1
        with open(os.path.join(input_dir, file)) as fp:
            data = json.load(fp)
        new_conf["layers"] = wmts_dict_merge(new_conf["layers"], data,
                                             output_data)
        new_conf["sources"] = dict_merge(new_conf["sources"], data['sources'],
                                         output_data['sources'])
    except Exception as e:
        sys.stderr.write("ERROR: %s: %s\n" %
                         (os.path.join(input_dir, file), str(e)))
        sys.exit(1)
new_conf = dict_merge(new_conf, output_data)
json_options = {"indent": 2, "separators": (',', ': ')}

with open(output_file, "w") as fp:
    json.dump(new_conf, fp, **json_options)

print("%s: %s file(s) merged into %s" %
      (prog, file_count, os.path.basename(output_file)))
Example #14
    def get_recursive_dict(self, name):
        lst = dict_from(self.cf, name)
        for i in self.imports:
            dict_merge(lst, self.imports[i].get_recursive_dict(name))
        return lst
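`dict_from` presumably reads section `name` from the config object `self.cf` as a plain dict. One plausible shape, assuming a configparser-style interface (a guess; the real helper may differ):

def dict_from(cf, name):
    # Hypothetical helper: return section `name` as a dict, or an
    # empty dict when the section is absent.
    if cf.has_section(name):
        return dict(cf.items(name))
    return {}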
Example #15
    experiments = {}
    custom_config = {
        "callbacks": {
            "on_episode_end": tune.function(on_episode_end)
        }
    }

    if args.dgx:
        custom_config["num_workers"] = os.cpu_count() - 4
        custom_config["num_gpus"] = 16
        custom_config["num_envs_per_worker"] = 1

    if args.ppo:
        config = ppo.DEFAULT_CONFIG.copy()
        dict_merge(config, custom_config)
        dict_merge(config, alg_config.ppo["v1"])
        experiments["ppo"] = {
            "run": "PPO",
            "env": DeepLogisticsA10M20x20D4,
            "stop": {
                "episode_reward_mean": 500
            },
            "config": config
        }

    if args.a3c:
        config = a3c.DEFAULT_CONFIG.copy()
        dict_merge(config, custom_config)

        if args.dgx:
Example #16
input_dir = args[0]
output_file = args[1]

conf = {}
file_count = 0
for root, dirs, files in os.walk(input_dir):
    dirs.sort()
    for file in sorted(files):
        try:
            if not file.endswith(".json"):
                continue
            file_count += 1
            with open(os.path.join(root, file)) as fp:
                data = json.load(fp)
            dict_merge(conf, data)
        except Exception as e:
            sys.stderr.write("ERROR: %s: %s\n" %
                             (os.path.join(root, file), str(e)))
            sys.exit(1)

json_options = {"indent": 2, "separators": (',', ': ')}

with open(output_file, "w") as fp:
    json.dump(conf, fp, **json_options)

print("%s: %s file(s) merged into %s" %
      (prog, file_count, os.path.basename(output_file)))
Example #17
def load(file, sections=[], **instargs):
    """Create a sMAP instance based on the representation stored in a file.

The configuration file contains sections which refer to either
reporting instances, or paths in the sMAP hierarchy.  Any section
whose name starts with ``/`` is treated as a resource name; sections
starting with ``report`` are treated as reports.

The file must contain at least one section named ``/``, which must
contain a ``uuid`` key to set the root identifier for the source.

:param string file: filename of the configuration file
:param instargs: arguments passed to the :py:class:`~smap.core.SmapInstance` constructor.
:return smap.core.SmapInstance: the created instance
:raise smap.loader.SmapLoadError: an error is encountered processing the file
:raise smap.core.SmapError: some other error is encountered validating the loaded object
    """
    found = None
    for l in ['', os.getcwd(), sys.prefix]:
        path = os.path.join(l, file)
        if os.path.isfile(path):
            found = path
    if not found:
        raise Exception("Config file %s not found." % file)
    print "Loading config file:", found

    conf = configobj.ConfigObj(found, indent_type='  ')

    # if there's a server section, override the default server
    # configuration with that
    if 'server' in conf:
        smapconf.SERVER = util.dict_merge(
            smapconf.SERVER,
            dict(((k.lower(), v) for (k, v) in conf['server'].iteritems())))

    # we need the root to have a uuid
    inst = core.SmapInstance(conf['/']['uuid'], **instargs)
    inst.loading = True
    reports = []

    for s in conf:
        print "Loading section", s
        if s.startswith('report'):
            resource = conf[s].get('ReportResource', '/+')
            format = conf[s].get('Format', 'json')
            max_age = conf[s].get('MaxAge', None)
            max_age = int(max_age) if max_age != None else None

            dest = [conf[s]['ReportDeliveryLocation']]
            for i in xrange(0, 10):
                if 'ReportDeliveryLocation%i' % i in conf[s]:
                    dest.append(conf[s]['ReportDeliveryLocation%i' % i])

            reportinst = {
                'ReportDeliveryLocation': dest,
                'ReportResource': resource,
                'Format': format,
                'uuid': inst.uuid(s),
                'MaxAge': max_age,
            }
            for o in ['MinPeriod', 'MaxPeriod']:
                if o in conf[s]:
                    reportinst[o] = conf[s][o]
            for o in [
                    'ClientCertificateFile', 'ClientPrivateKeyFile', 'CAFile'
            ]:
                if o in conf[s]:
                    reportinst[o] = os.path.expanduser(conf[s][o])

            reports.append(reportinst)
            continue

        elif not s.startswith('/'):
            # path sections must start with a '/'
            # other sections might be present and could be parsed by
            # other parts of the program
            print "Warning: skipping section", s, "since it does not begin with a '/'"
            continue
        elif sections and util.norm_path(s) not in sections:
            # skip all but the listed sections if we were asked to
            continue

        s = util.norm_path(s)

        # build the UUID for the item
        props = util.build_recursive(dict(conf[s].items()))
        id = None
        if 'uuid' in conf[s]:
            key = None
            id = uuid.UUID(conf[s]['uuid'])
        elif 'key' in conf[s]:
            key = conf[s]['key']
        else:
            # default to the path if neither a uuid nor a key is given
            key = s
        if key:
            id = inst.uuid(key)
            # raise SmapLoadError("Every config file section must have a uuid or a key!")

        # create the timeseries or collection
        if (s == '/' or conf[s].get("type", None) == 'Collection'
                or inst.get_collection(s) != None):
            if s == '/':
                c = inst.get_collection('/')
            elif inst.get_collection(s) != None:
                # sometimes you will have collections created twice,
                # for instance if a driver creates it and then we want
                # to tag it with metadata
                c = inst.get_collection(s)
            else:
                c = core.Collection(s, inst)
                inst.add_collection(s, c)
        elif conf[s].get("type", "Timeseries") == "Timeseries":
            if inst.get_timeseries(s) != None:
                c = inst.get_timeseries(s)
            else:
                try:
                    props['Properties']['UnitofMeasure']
                except KeyError:
                    raise SmapLoadError("A Timeseries must have at least "
                                        "the Properites/UnitofMeasure key")

                # the Timeseries uses defaults if the conf file doesn't
                # contain the right sections.
                c = core.Timeseries(
                    id,
                    props['Properties']['UnitofMeasure'],
                    data_type=props['Properties'].get(
                        'ReadingType',
                        core.Timeseries.DEFAULTS['Properties/ReadingType']),
                    timezone=props['Properties'].get(
                        'Timezone',
                        core.Timeseries.DEFAULTS['Properties/Timezone']),
                    buffersz=int(
                        props.get('BufferSize',
                                  core.Timeseries.DEFAULTS['BufferSize'])))
                inst.add_timeseries(s, c)
        else:
            if not id:
                raise SmapLoadError(
                    "A driver must have a key or uuid to generate a namespace")

            # load a new driver manager layer
            newdrv = driver.SmapDriver.get_driver(inst, conf[s]['type'], s, id)
            # create a collection and add it at the attachment point
            c = inst.get_collection(s)
            if not c:
                c = core.Collection(s, inst)
                inst.add_collection(s, c)

            # Add config file specified checkers for the driver
            check = checkers.get(inst, newdrv, conf[s])
            if check:
                inst.checkers.append(check)

            # get the driver to add its points
            newdrv.setup(conf[s])

        # Metadata and Description are shared between both Collections
        # and Timeseries
        if props.has_key('Metadata'):
            # the driver may have added metadata; however config file
            # metadata overrides it
            c['Metadata'] = util.dict_merge(c.get('Metadata', {}),
                                            props['Metadata'])
        if props.has_key('Description'):
            c['Description'] = props['Description']
        if key:
            setattr(c, 'key', key)

    # since the sections could come in any order, update the reporting
    # instance to make sure all the topics are set right.
    for reportinst in reports:
        if not inst.reports.update_report(reportinst):
            inst.reports.add_report(reportinst)
    inst.reports.update_subscriptions()
    inst.loading = False
    return inst