Example #1
 def __getitem__(self, attr):
     # if using star notation -> return dict of all keys that match
     if "*" in attr:
         return search(self, attr, separator=DEFAULT_STAR_SEPARATOR)
     # otherwise -> basic dict.get
     else:
         return dict.get(self, attr)
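A minimal, self-contained sketch of how this getter behaves; the class name StarDict and the sample data are invented for illustration, and it assumes dpath 2.x (where search is importable from the top-level package) with DEFAULT_STAR_SEPARATOR = "/":

from dpath import search

DEFAULT_STAR_SEPARATOR = "/"

class StarDict(dict):
    def __getitem__(self, attr):
        # star notation -> nested dict of all matching paths
        if "*" in attr:
            return search(self, attr, separator=DEFAULT_STAR_SEPARATOR)
        # plain key -> dict.get, so missing keys yield None instead of KeyError
        return dict.get(self, attr)

d = StarDict({"a": {"x": 1, "y": 2}, "b": {"x": 3}})
print(d["*/x"])  # {'a': {'x': 1}, 'b': {'x': 3}}
print(d["a"])    # {'x': 1, 'y': 2}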
Example #2
async def show_keys(
        client: influx.QueryClient,
        database: Optional[str] = None,
        measurement: Optional[str] = None,
        **_  # allow, but discard all other kwargs
) -> dict:
    """Selects available keys (without data) from Influx."""
    query = 'SHOW FIELD KEYS'

    if measurement:
        query += f' FROM "{measurement}"'

    params = _prune(locals(), ['query', 'database', 'measurement'])
    query_response = await client.query(**params)

    response = dict()

    for path, meas_name in dpath.search(query_response,
                                        'results/*/series/*/name',
                                        yielded=True,
                                        dirs=False):

        # results/[index]/series/[index]/values/*/0
        values_glob = '/'.join(path.split('/')[:-1] + ['values', '*', '0'])
        response[meas_name] = dpath.values(query_response, values_glob)

    return response
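The glob arithmetic is the interesting part: for every matched .../name path, the loop rebuilds a sibling values/*/0 glob and collects the first column of each row. A runnable sketch against a hand-built payload shaped like an InfluxDB SHOW FIELD KEYS response (the data is invented; assumes dpath 2.x):

import dpath

query_response = {
    "results": [{
        "series": [
            {"name": "cpu", "values": [["usage_idle", "float"], ["usage_user", "float"]]},
            {"name": "mem", "values": [["used", "integer"]]},
        ]
    }]
}

response = {}
for path, meas_name in dpath.search(query_response, "results/*/series/*/name",
                                    yielded=True):
    # e.g. 'results/0/series/0/name' -> 'results/0/series/0/values/*/0'
    values_glob = "/".join(path.split("/")[:-1] + ["values", "*", "0"])
    response[meas_name] = dpath.values(query_response, values_glob)

print(response)  # {'cpu': ['usage_idle', 'usage_user'], 'mem': ['used']}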
Example #3
    def marketise_all_tickers_for_stock_cash(cls,
                                             start_date: datetime,
                                             end_date: datetime.date,
                                             overwrite: bool = False) -> None:
        """
        This function marketise all tickers for cash stocks
        :parm start_date: the date from which we marketise
        :parm end_date: the date will which we marketise (inclusive)
        :parm overwrite: a flag that determines if we overwrite existing data or not
        """
        mkt_cfg = mkt_classes.mkt_data_cfg()
        xpath = '{0}/{1}'.format("equity", "single stock").upper()
        search = dpath.search(mkt_cfg, xpath, yielded=True)
        equity_tickers_to_marketise = [
            i for i in search
        ].pop()[1]  # take the value of the single (path, value) match: all tickers and their metadata

        for ticker, metadata in equity_tickers_to_marketise.items():
            cls.marketise_equity_index_ticker(ticker,
                                              metadata['default_source'],
                                              start_date,
                                              end_date,
                                              overwrite=overwrite)

        print('finished the marketisation process for {}'.format(xpath))
Example #4
def _set_if_absent(d, path, value):
    if '*' in path:
        [pre, post] = path.split('*')
        elem_count = len(du.values(d, f'{pre}*'))
        for i in range(elem_count):
            _set_if_absent(d, f'{pre}{i}{post}', value)
    elif du.search(d, path) == {}:
        du.new(d, path, value())
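With _set_if_absent in scope and du bound to dpath (as the calls suggest; assumes dpath 2.x), a wildcard path fans out to one concrete path per existing element, and value is a zero-argument factory so each absent slot gets a fresh object:

import dpath as du  # the alias _set_if_absent expects

d = {"jobs": [{"retries": 3}, {}]}
_set_if_absent(d, "jobs/*/retries", lambda: 0)
print(d)  # {'jobs': [{'retries': 3}, {'retries': 0}]} -- existing values untouched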
Example #5
    def get(self, path, default=None, separator='/'):
        for found_path, value in dpath_util.search(self.source.obj,
                                                   path,
                                                   yielded=True,
                                                   separator=separator):
            if path == '{}{}'.format(separator, found_path):
                return value

        return default
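Because dpath yields matched paths without the leading separator, the equality check re-prefixes it, which turns search into an exact-path lookup with a default. A minimal sketch with hypothetical Source/Config stand-ins for whatever self.source.obj wraps:

import dpath.util as dpath_util

class Source:
    def __init__(self, obj):
        self.obj = obj

class Config:
    def __init__(self, obj):
        self.source = Source(obj)

    def get(self, path, default=None, separator='/'):
        for found_path, value in dpath_util.search(self.source.obj, path,
                                                   yielded=True,
                                                   separator=separator):
            if path == '{}{}'.format(separator, found_path):
                return value
        return default

cfg = Config({"db": {"host": "localhost"}})
print(cfg.get("/db/host"))        # 'localhost'
print(cfg.get("/db/port", 5432))  # 5432 -- default for a missing path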
Example #6
 def __setitem__(self, attr, val):
     # if using star notation -> search and set all keys matching
     if "*" in attr:
         for k, _ in search(self,
                            attr,
                            yielded=True,
                            separator=DEFAULT_STAR_SEPARATOR):
             setattr(self, k, val)
     # otherwise -> just __setitem__
     else:
         dict.__setitem__(self, attr, val)
Example #7
def save_snp_500_tickers(tickers: list) -> None:
    """update the YAML market coordinates config with the SNP500 tickers"""
    mkt_class = "equity".upper()
    mkt_type = "single stock".upper()
    market_coordinates = mkt_classes.mkt_data_cfg()
    # let's load the defaults, then see if there is a tsdb YAML that overrides the base defaults
    defaults = mkt_coord_defaults.defaults.copy()
    mkt_default_cfg_load = mkt_classes.mkt_defaults_cfg()
    dp.merge(defaults, mkt_default_cfg_load)

    equity_defaults = [
        i for i in dp.search(
            defaults, '{0}/{1}'.format(mkt_class, mkt_type), yielded=True)
    ].pop()[1]

    for ticker in tickers:
        mkt_asset = ticker
        points_default = [
            i for i in dp.search(market_coordinates,
                                 '{0}/{1}/{2}/points'.format(
                                     mkt_class, mkt_type, mkt_asset),
                                 yielded=True)
        ]
        points_default = points_default.pop()[1] if len(points_default) else []
        points = list(set(points_default))
        value = {'points': points}
        value.update(equity_defaults)

        xpath = '{0}/{1}/{2}'.format(mkt_class, mkt_type, mkt_asset)
        dp.new(market_coordinates, xpath, value)

    mkt_data_cfg = {
        'market_coordinates': market_coordinates,
        'defaults': defaults
    }

    with open(mkt_classes.tsdb_path() + 'market_coord_cfg.YAML', "w") as f:
        yaml.dump(mkt_data_cfg, f)

    "added snp500 tickers to the config"
Example #8
 def to_csv(self, tag, filename):
     with open(filename, 'w') as f:
         f.write('ct, tag')
         for vname in du.get(self.memory, '/memory/var_names'):
             f.write(f', {vname}')
         f.write('\n')
         data = du.search(self.memory, f'/data/*/{tag}/*')['data']
         for ct in data.keys():
             f.write(f'{ct}, {tag}')
             for vname in du.get(self.memory, '/memory/var_names'):
                 vvalue = data[ct][tag].get(vname, 'n.a')
                 f.write(f', {vvalue}')
             f.write('\n')
     return
Example #9
 def __getitem__(self, attr):
     # if using pipe-and-star notation: split attribute -> search -> filter
     if ("|" in attr) and ("*" in attr):
         assert attr.count("|") == 1, f"Only one filter allowed: {attr}"
         search_attr, filter_out = attr.split("|")
         # recursive call __getitem__ without filtering substring
         searched = self.__getitem__(search_attr)
         # filter
         return {k: v for k, v in searched.items() if filter_out not in k}
     # if using star notation -> return dict of all keys that match
     if "*" in attr:
         return search(self, attr, separator=DEFAULT_STAR_SEPARATOR)
     # otherwise -> basic dict.get
     else:
         return dict.get(self, attr)
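Under the same assumptions as Example #1 (a hypothetical StarDict dict subclass with search from dpath), the pipe splits the key into a glob and a substring to exclude from the result:

d = StarDict({"loss_train": 0.5, "loss_val": 0.4, "loss_tmp": 9.9})
print(d["loss_*|tmp"])  # {'loss_train': 0.5, 'loss_val': 0.4}
print(d["loss_*"])      # all three keys, unfiltered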
Example #10
 def __setitem__(self, attr, val):
     # if using pipe-and-star notation: split attribute -> search -> filter
     if ("|" in attr) and ("*" in attr):
         assert attr.count("|") == 1, f"Only one filter allowed: {attr}"
         search_attr, filter_out = attr.split("|")
         attr = search_attr
     else:
         # otherwise, the filter is a long random string that will not match any key
         filter_out = "".join(
             random.choice(string.ascii_lowercase) for _ in range(30))
     # if using star notation -> search and set all keys matching
     if "*" in attr:
         for k, _ in search(self,
                            attr,
                            yielded=True,
                            separator=DEFAULT_STAR_SEPARATOR):
             if filter_out not in k:
                 setattr(self, k, val)
     # otherwise -> just __setitem__
     else:
         dict.__setitem__(self, attr, val)
Example #12
File: netsim.py Project: gagon/dino
def DoGetAll(dictionary, path):
    vals = list(dpu.search(dictionary, path, yielded=True))
    return vals
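DoGetAll simply materialises the lazy yielded search into a list of (path, value) pairs; a quick standalone check (assumes dpath 2.x for the dpu alias):

import dpath as dpu

def DoGetAll(dictionary, path):
    vals = list(dpu.search(dictionary, path, yielded=True))
    return vals

print(DoGetAll({"a": {"b": 1, "c": 2}}, "a/*"))  # [('a/b', 1), ('a/c', 2)]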
Example #13
def dump_results_to_db(device_name, lldp_infos) -> None:
    nodes_list: List[Tuple[Dict[str, str], Dict[str, str]]] = []
    links_list: List[Tuple[Dict[str, str], Dict[str, str]]] = []

    # Each item of the lists is composed of the "query" (so the DB knows
    # which entry to update) and the actual data.
    dev_name = device_name.lower()
    query = {"device_name": dev_name}
    # We add the device if it doesn't exist
    nodes_list.append((query, query))

    for lldp_nei in lldp_infos:
        # Getting neigh node infos and adding it to nodes_list
        _, neigh_name = next(
            search(lldp_nei,
                   f"{NEEDED_MIBS['lldp_neigh_name']}*",
                   yielded=True))
        if not neigh_name:
            continue
        neigh_name = neigh_name.lower()

        # The IP is a little special since it is written in the OID itself (yes, weird)
        neigh_ip_oid, _ = next(
            search(lldp_nei, f"{NEEDED_MIBS['lldp_neigh_ip']}*", yielded=True))
        neigh_ip = ".".join(neigh_ip_oid.split(".")[-4:])
        query_neigh = {"device_name": neigh_name}
        nodes_list.append((query_neigh, {
            "device_name": neigh_name,
            "device_ip": neigh_ip
        }))

        # Getting neigh and local ifaces infos and adding them to link list
        _, local_iface = next(
            search(lldp_nei,
                   f"{NEEDED_MIBS['lldp_local_iface']}*",
                   yielded=True))
        _, neigh_iface = next(
            search(lldp_nei,
                   f"{NEEDED_MIBS['lldp_neigh_iface']}*",
                   yielded=True))
        # Stripping "Et, Ethernet, E,... " which can be different per equipment
        dev_iface = "/".join(
            "".join(x)
            for is_number, x in groupby(local_iface, key=str.isdigit)
            if is_number is True)
        neigh_iface = "/".join(
            "".join(x)
            for is_number, x in groupby(neigh_iface, key=str.isdigit)
            if is_number is True)

        query_link = {
            "device_name": dev_name,
            "iface_name": dev_iface,
            "neighbor_name": neigh_name,
            "neighbor_iface": neigh_iface,
        }

        links_list.append((query_link, query_link))

        query_neigh_link = {
            "device_name": neigh_name,
            "iface_name": neigh_iface,
            "neighbor_name": dev_name,
            "neighbor_iface": dev_iface,
        }
        links_list.append((query_neigh_link, query_neigh_link))

    try:
        bulk_update_collection(NODES_COLLECTION, nodes_list)
        bulk_update_collection(LINKS_COLLECTION, links_list)
    except InvalidOperation:
        print(
            "Nothing to dump to db (wasn't able to scrap devices?), passing..")
Example #14
def dump_results_to_db(device_name, ifaces_infos) -> None:  # pylint: disable=too-many-locals
    utilization_list: List[Tuple[Dict[str, str], Dict[str, str]]] = []
    stats_list: List[Dict[str, str]] = []
    for iface in ifaces_infos:
        _, ifname = next(
            search(iface, f"{NEEDED_MIBS['iface_name']}*", yielded=True))
        ifname = ifname.lower()
        if (ifname.startswith("se") or ifname.startswith("nu")
                or ifname.startswith("lo") or ifname.startswith("mgm")
                or ifname.startswith("mana") or ifname.startswith("po")
                or ifname == "vlan1"):
            # TODO: Mgmt ifaces/lo & po could actually be interesting... Need to think about this
            continue
        _, mtu = next(search(iface, f"{NEEDED_MIBS['mtu']}*", yielded=True))
        _, mac = next(search(iface, f"{NEEDED_MIBS['mac']}*", yielded=True))
        _, speed = next(search(iface, f"{NEEDED_MIBS['speed']}*",
                               yielded=True))
        _, in_disc = next(
            search(iface, f"{NEEDED_MIBS['in_disc']}*", yielded=True))
        _, in_err = next(
            search(iface, f"{NEEDED_MIBS['in_err']}*", yielded=True))
        _, out_disc = next(
            search(iface, f"{NEEDED_MIBS['out_disc']}*", yielded=True))
        _, out_err = next(
            search(iface, f"{NEEDED_MIBS['out_err']}*", yielded=True))
        _, in_octets = next(
            search(iface, f"{NEEDED_MIBS['in_octets']}*", yielded=True))
        _, in_ucast_pkts = next(
            search(iface, f"{NEEDED_MIBS['in_ucast_pkts']}*", yielded=True))
        _, in_mcast_pkts = next(
            search(iface, f"{NEEDED_MIBS['in_mcast_pkts']}*", yielded=True))
        _, in_bcast_pkts = next(
            search(iface, f"{NEEDED_MIBS['in_bcast_pkts']}*", yielded=True))
        _, out_octets = next(
            search(iface, f"{NEEDED_MIBS['out_octets']}*", yielded=True))
        _, out_ucast_pkts = next(
            search(iface, f"{NEEDED_MIBS['out_ucast_pkts']}*", yielded=True))
        _, out_mcast_pkts = next(
            search(iface, f"{NEEDED_MIBS['out_mcast_pkts']}*", yielded=True))
        _, out_bcast_pkts = next(
            search(iface, f"{NEEDED_MIBS['out_bcast_pkts']}*", yielded=True))

        iface_infos_dict = {
            "mtu": mtu,
            "mac": hexlify(mac.encode()).decode(),
            "speed": speed,
            "in_discards": in_disc,
            "in_errors": in_err,
            "out_discards": out_disc,
            "out_errors": out_err,
            "in_bytes": in_octets,
            "in_ucast_pkts": in_ucast_pkts,
            "in_mcast_pkts": in_mcast_pkts,
            "in_bcast_pkts": in_bcast_pkts,
            "out_bytes": out_octets,
            "out_ucast_pkts": out_ucast_pkts,
            "out_mcast_pkts": out_mcast_pkts,
            "out_bcast_pkts": out_bcast_pkts,
        }

        iface_name = "/".join(
            "".join(x) for is_number, x in groupby(ifname, key=str.isdigit)
            if is_number is True)
        iface_stats_dict = {
            "device_name": device_name,
            "iface_name": iface_name,
            "timestamp": int(time()),
        }
        iface_stats_dict.update(iface_infos_dict)
        stats_list.append(iface_stats_dict)
        # Each item of the list is composed of the "query" (so the DB knows
        # which entry to update) and the actual data.
        query = {"device_name": device_name, "iface_name": iface_name}
        highest = max(int(iface_infos_dict["in_bytes"]),
                      int(iface_infos_dict["out_bytes"]))
        previous_utilization = get_latest_utilization(device_name, iface_name)
        utilization = {
            "device_name": device_name,
            "iface_name": iface_name,
            "prev_utilization": previous_utilization,
            "last_utilization": highest,
        }
        utilization_list.append((query, utilization))

    try:
        bulk_update_collection(UTILIZATION_COLLECTION, utilization_list)
        add_iface_stats(stats_list)
    except InvalidOperation:
        print(
            "Nothing to dump to db (wasn't able to scrap devices?), passing..")
Example #15
def modify_values_yaml(experiment_folder: str, script_location: str, script_parameters: Tuple[str, ...],
                       experiment_name: str, pack_type: str, username: str,
                       cluster_registry_port: int, pack_params: List[Tuple[str, str]] = None,
                       env_variables: List[str] = None):
    log.debug("Modify values.yaml - start")
    pack_params = pack_params if pack_params else []

    values_yaml_filename = os.path.join(experiment_folder, f"charts/{pack_type}/values.yaml")
    values_yaml_temp_filename = os.path.join(experiment_folder, f"charts/{pack_type}/values_temp.yaml")

    with open(values_yaml_filename, "r") as values_yaml_file:

        template = jinja2.Template(values_yaml_file.read())

        rendered_values = template.render(NAUTA={
            'ExperimentName': experiment_name,
            'CommandLine': common.prepare_script_paramaters(script_parameters, script_location),
            'RegistryPort': str(cluster_registry_port),
            'ExperimentImage': f'127.0.0.1:{cluster_registry_port}/{username}/{experiment_name}:latest',
            'ImageRepository': f'127.0.0.1:{cluster_registry_port}/{username}/{experiment_name}:latest'
        })

        v = yaml.safe_load(rendered_values)

        workersCount = None
        pServersCount = None

        regex = re.compile(r"^\[.*|^\{.*")  # Regex used for detecting dicts/arrays in pack params
        for key, value in pack_params:
            if re.match(regex, value):
                try:
                    value = ast.literal_eval(value)
                except Exception as e:
                    raise AttributeError(Texts.CANT_PARSE_VALUE.format(value=value, error=e))
            # Handle boolean params
            elif value in {"true", "false"}:
                value = str(_parse_yaml_boolean(value))
            if key == WORK_CNT_PARAM:
                workersCount = value
            if key == P_SERV_CNT_PARAM:
                pServersCount = value

            dutil.new(v, key, value, '.')

        # setting sum of replicas involved in multinode training if both pServersCount and workersCount are present in
        # the pack or given in the cli
        if (WORK_CNT_PARAM in v or workersCount) and (P_SERV_CNT_PARAM in v or pServersCount):
            number_of_replicas = int(v.get(WORK_CNT_PARAM)) if not workersCount else int(workersCount)
            number_of_replicas += int(v.get(P_SERV_CNT_PARAM)) if not pServersCount else int(pServersCount)
            v[POD_COUNT_PARAM] = number_of_replicas
        elif (WORK_CNT_PARAM in v or workersCount) and (POD_COUNT_PARAM not in v):
            number_of_replicas = int(v.get(WORK_CNT_PARAM)) if not workersCount else int(workersCount)
            v[POD_COUNT_PARAM] = number_of_replicas + 1

        env_variables = env_variables if env_variables else []
        parsed_envs = []
        for variable in env_variables:
            key, value = variable.split("=")
            one_env_map = {"name": key, "value": value}
            parsed_envs.append(one_env_map)

        # Set OMP_NUM_THREADS to be equal to cpu limit if it was not explicitly passed
        if "OMP_NUM_THREADS" not in (env["name"] for env in parsed_envs):
            try:
                cpu_limit = calculate_omp_num_threads(v)
                if cpu_limit:
                    parsed_envs.append({"name": "OMP_NUM_THREADS", "value": str(cpu_limit)})
            except (ValueError, TypeError, KeyError):
                log.exception("Failed to infer OMP_NUM_THREADS value.")

        envs_to_set = {'env', 'worker.env', 'master.env'}  # Env placeholders in values.yaml that we expect
        for env in envs_to_set:
            if dutil.search(v, env, separator='.'):
                dutil.get(v, env, separator='.').extend(parsed_envs)

    with open(values_yaml_temp_filename, "w") as values_yaml_file:
        yaml.safe_dump(v, values_yaml_file)

    shutil.move(values_yaml_temp_filename, values_yaml_filename)
    log.debug("Modify values.yaml - end")