Example #1
    def __setup_log_streaming(cls):
        log_streaming_config = None
        account = None
        error = None
        try:
            # Load keystore
            keystore_file = get(Program.__yaml_config[Program.__env],
                                "/keystore_file")
            with open(keystore_file) as k:
                keystore = load(k)

            account = Web3.toChecksumAddress('0x' + keystore['address'])

            # Get log streaming config (if any)
            log_streaming_config = get(Program.__yaml_config[Program.__env],
                                       "/logging/streaming")
        except KeyError:
            error = InvalidKeyStoreException("Invalid keystore file")

        except Exception as unknown_error:
            error = unknown_error
        finally:
            # Initialize the log streaming module (should be done once)
            import log_streaming
            log_streaming.initialize(account, log_streaming_config)
            if error:
                raise error
Example #2
def update_region(keycloak: KeycloakClient, vault: Vault, region_id, body,
                  user):
    region = model.Region.query.get(region_id)
    if not region:
        return problem(404, 'Not Found', f'Region {region_id} does not exist')

    if not keycloak.user_check_role(user, ADMIN_ROLE):
        if not keycloak.user_check_group(user, region.owner_group):
            raise Forbidden("You don't have write access to this region.")

    try:
        if body.get('users_group'):
            keycloak.group_get(body['users_group'])
    except KeycloakGetError as e:
        logger.exception(e)
        return problem(
            400, 'Users group does not exist',
            f'Users group {body["users_group"]} does not exist in Keycloak, '
            'you have to create group first or use existing group.')

    if 'quota' in body:
        if body['quota']:
            if region.quota is None:
                region.quota = model.Quota(**body['quota'])
            else:
                for k, v in body['quota'].items():
                    setattr(region.quota, k, v)
        else:
            region.quota = None
        del body['quota']

    openstack_credentials = dpath.get(body,
                                      'openstack/credentials',
                                      default=region.openstack_credentials)
    if not isinstance(openstack_credentials, str):
        vault.write(region.openstack_credentials, openstack_credentials)
        dpath.delete(body, 'openstack/credentials')

    satellite_credentials = dpath.get(body,
                                      'satellite/credentials',
                                      default=region.satellite_credentials)
    if not isinstance(satellite_credentials, str):
        vault.write(region.satellite_credentials, satellite_credentials)
        dpath.delete(body, 'satellite/credentials')

    dns_server_key = dpath.get(body,
                               'dns_server/key',
                               default=region.dns_server_key)
    if not isinstance(dns_server_key, str):
        vault.write(region.dns_server_key, dns_server_key)
        dpath.delete(body, 'dns_server/key')

    region.update_from_dict(body)

    db.session.commit()
    logger.info(
        f'Region {region.name} (id {region.id}) updated by user {user}')

    return region.to_dict() | {'_href': _region_href(region)}
Example #3
def namespace_dir(settings):
    """Combine `output_path` & `namespace` to form the output dir."""
    namespace = os.path.join(dpath.get(settings, '/output_path'),
                             dpath.get(settings, '/namespace'))
    # make the namespace dir, if necessary
    os.makedirs(namespace, exist_ok=True)

    return namespace
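A minimal usage sketch for the function above; the settings values here are hypothetical, not from the original source:

settings = {'output_path': '/tmp/site_build', 'namespace': 'docs'}
out_dir = namespace_dir(settings)
# -> '/tmp/site_build/docs', and the directory is created if it does not already exist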
Example #4
    def test_get_single_task(self):
        response = self.tasks.add("single task", "project1", "label1", "today")
        self.assertTrue(self.util.verify_structure(response))

        index = util.get(response, "tasks/0/index")
        response = self.task.get_task(index)

        self.assertTrue(self.util.verify_structure(response))
        self.assertEqual(util.get(response, "tasks/0/text"), "single task")
Example #5
def lido(record, target, attribut, path):
    try:
        if attribut not in target:
            if "@id" not in target:
                target[attribut] = get(record, path)
            else:
                target[attribut] = "hcn-" + str(get(record, path))
    except:
        pass
Example #6
    def test_count_all(self):
        self.tasks.add("task1", "project1", "label1", "today")
        self.tasks.add("task2", "project1", "label1", "today")
        self.tasks.add("task3", "project1", "label1", "today")
        self.tasks.add("task4", "project1", "label1", "today")

        response = self.tasks.count_all()
        self.assertEqual(util.get(response, "snapshot/summary/count"), 4)
        snapshot_list = util.get(response, "snapshot/list")
        self.assertEqual(len(snapshot_list), 1)
Example #7
 def to_csv(self, tag, filename):
     with open(filename, 'w') as f:
         f.write('ct, tag')
         for vname in du.get(self.memory, '/memory/var_names'):
             f.write(f', {vname}')
         f.write('\n')
         data = du.search(self.memory, f'/data/*/{tag}/*')['data']
         for ct in data.keys():
             f.write(f'{ct}, {tag}')
             for vname in du.get(self.memory, '/memory/var_names'):
                 vvalue = data[ct][tag].get(vname, 'n.a')
                 f.write(f', {vvalue}')
             f.write('\n')
     return
Example #8
    def test_delete_task(self):
        self.tasks.add("task1", "project1", "label1", "today")
        self.tasks.add("task2", "project1", "label1", "today")
        response = self.tasks.add("task3", "project1", "label1", "today")

        uuid = util.get(response, "tasks/0/unique_id")
        response = self.task.delete_task(uuid)
        self.assertTrue(self.util.verify_structure(response))
        is_deleted = util.get(response, "tasks/0/deleted")
        self.assertTrue(is_deleted)

        response = self.task.undelete_task(uuid)
        self.assertTrue(self.util.verify_structure(response))
        is_deleted = util.get(response, "tasks/0/deleted")
        self.assertFalse(is_deleted)
Example #9
def transform(args):
    data = args[0]
    config = args[1]

    result = config
    if isinstance(config, dict):
        result = {k: transform((data, v)) for k, v in config.iteritems()}
    elif isinstance(config, Transformation):
        args = [transform((data, v)) for v in config.args]
        kwargs = {k: transform((data, v)) for k, v in config.kwargs.iteritems()}
        result = config.transform(*args, **kwargs)
    elif isinstance(config, Map):
        result = config.get_result(transform((data, config.source)))
    elif isinstance(config, ApplyConf):
        result = config.get_result(transform((data, config.source)))
    elif isinstance(config, Path):
        try:
            if config == "" or config == "/":
                result = data
            else:
                result = util.get(data, config)
        except:
            result = None

    if isinstance(result, list):
        result = [x for x in result if x is not None]
        if len(result) > 0:
            return result
    elif isinstance(result, dict):
        result = {k: v for k, v in result.iteritems() if v is not None}
        if len(result) > 0:
            return result
    else:
        return result
Example #10
    def test_count_by_due_date(self):
        self.tasks.add("task1", "project1", "label1", "2021-07-11")
        self.tasks.add("task2", "project1", "label1", "2021-07-15")
        self.tasks.add("task3", "project1", "label1", "2021-07-21")
        self.tasks.add("task4", "project1", "label1", "2021-07-13")

        response = self.tasks.count_by_due_date("2021-07-11")
        self.assertEqual(util.get(response, "snapshot/summary/count"), 1)
        snapshot_list = util.get(response, "snapshot/list")
        self.assertEqual(len(snapshot_list), 1)

        response = self.tasks.count_by_due_date_range("2021-07-10",
                                                      "2021-07-14")
        self.assertEqual(util.get(response, "snapshot/summary/count"), 2)
        snapshot_list = util.get(response, "snapshot/list")
        self.assertEqual(len(snapshot_list), 2)
Example #11
    def _dereference(self, urlstring):
        """
    Dereference the URL string.

    Returns the parsed URL, the object path extraced from the URL, and the
    dereferenced object.
    """
        # Parse URL
        parsed_url = _url.absurl(urlstring, self.parsed_url)

        # In order to start dereferencing anything in the referenced URL, we have
        # to read and parse it, of course.
        parsed_url, referenced = self._fetch_url(parsed_url)
        obj_path = parsed_url.fragment.split('/')
        while len(obj_path) and not obj_path[0]:
            obj_path = obj_path[1:]

        # In this inner parser's specification, we can now look for the referenced
        # object.
        value = referenced
        if len(obj_path) != 0:
            from dpath import util as dutil
            try:
                value = dutil.get(referenced, obj_path)
            except KeyError:
                raise _url.ResolutionError('Cannot resolve reference "%s"!' %
                                           (urlstring, ))
        return parsed_url, obj_path, value
Example #12
 def get(self, path=''):
     if path == '':
         d = self.data
     else:
         # throws KeyError
         d = dpath.get(self.data, path)
     return d
Example #13
    def assertLenEqual(self, glob, obj, exp):
        ag = attrgetter(glob)
        length = util.get(exp, glob, separator='.')['len']

        self.assertEqual(len(ag(obj)), length)

        return length
Example #14
    def test_edit_task(self):
        response = self.tasks.add("task1", "project1", "label1", "today")
        self.assertTrue(self.util.verify_structure(response))

        index = util.get(response, "tasks/0/index")
        response = self.tasks.edit(index, "task1_1", "project1_1", "label1_1",
                                   "today")
        self.assertTrue(self.util.verify_structure(response))

        text = util.get(response, "tasks/0/text")
        project = util.get(response, "tasks/0/project")
        label = util.get(response, "tasks/0/label")

        self.assertTrue(text == "task1_1")
        self.assertTrue(project == "project1_1")
        self.assertTrue(label == "label1_1")
Example #15
def get_str_by_path(payload: Dict, path: str) -> str:
    """Return the string value from the dict for the path using dpath library."""
    try:
        raw = dpath_util.get(payload, path)
        return str(raw)
    except (IndexError, KeyError, TypeError):
        return None
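A quick illustrative call with a hypothetical payload shape (dpath's default '/' separator applies); note that the function returns None on a missing path despite the str annotation:

payload = {'filing': {'header': {'name': 'annualReport'}}}
get_str_by_path(payload, 'filing/header/name')     # 'annualReport'
get_str_by_path(payload, 'filing/header/missing')  # None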
Example #16
def auth(service, user, password):
    """Authenticates the given user and password.

    Authenticates the given USER and password.
    If successful the API key for the account is printed
    """
    result = service.auth_user(user, password).get('data')
    click.echo(dpath.get(result, 'meta/api-token'))
Example #17
def resolve_symbols(settings, block):
    """
    Resolve any reference symbols in a block name.
    """
    symbol_map = dpath.get(settings, '/symbol_map', default={})

    for brace in re.findall(r'{.*?}', block):
        block = block.replace(brace, symbol_map[brace[1:-1]])
    return block
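A sketch of how the symbol resolution above might behave, using a made-up settings dict (and assuming the dpath get in use supports the default keyword, as the call above does):

settings = {'symbol_map': {'cpu': 'cortex_m4'}}
resolve_symbols(settings, 'startup_{cpu}.s')  # 'startup_cortex_m4.s'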
Example #18
def status(service, script):
    """Displays current status information for a script.

    Display status information a given SCRIPT. If the script is in an error
    condition the error details are displayed.
    """
    script = util.lookup_resource_id(service.get_cloud_scripts, script)
    data = service.get_cloud_script(script).get('data')
    click.echo('Status: ' + dpath.get(data, "attributes/state"))
    try:
        error = dpath.get(data, "attributes/error")
    except KeyError:
        error = None
    # Pull out the details if they're there
    if isinstance(error, dict):
        error = error.get('details')
    if error:
        click.echo('Error: ' + error)
Example #19
 def _dict_from_object(self, keys, obj, auction_index):
     to_patch = {}
     for to_key, from_key in keys.items():
         try:
             value = util.get(obj, from_key.format(auction_index))
         except KeyError:
             continue
         util.new(to_patch, to_key, value)
     return to_patch
Example #20
def user(service):
    """Lists users for the organization.

    Lists the users that are part of the authorized organization.
    """
    org = service.get_org().get('data')
    ## TODO: Once include=user is supported fix up to display 'name'
    users = dpath.get(org, 'relationships/user/data')
    _tabulate_users(users)
Example #21
 def get_session(self, country: str) -> str:
     res = post_response(country=country, **self.details)
     try:
         session: str = dpath.get(res, '/Location')
         time.sleep(0.1)
         session = session.rstrip('/')
         return f'{session}?apikey={KEY}'
     except BaseException:  # noqa
         print('R:', res)
         raise
Example #22
def show(service, script, file):
    """Gets a script file from a given cloud-script.

    Fetches a FILE from a given cloud-SCRIPT.
    """
    script = util.lookup_resource_id(service.get_cloud_scripts, script)
    json = service.get_cloud_script(script).get('data')
    file_urls = [f.encode('utf-8') for f in dpath.get(json, 'meta/scripts')]
    names = dict(zip(util.extract_script_filenames(file_urls), file_urls))
    file_url = names[file]
    click.echo(requests.get(file_url).text)
Example #23
def calculate_omp_num_threads(values_dict: dict) -> int:
    """
    Calculates correct value of OMP_NUM_THREADS according to CPU resources requested in template's values.yaml.
    :param values_dict: Dictionary containing template's values.yaml file
    :return: Calculated OMP_NUM_THREADS value
    :raises ValueError, TypeError, KeyError
    """
    if values_dict.get("cpu") and values_dict.get("cpu") != "null":
        cpu_limit = values_dict.get("cpu")
    elif values_dict.get("resources"):
        cpu_limit = dutil.get(values_dict, "resources.limits.cpu", separator='.')
    elif values_dict.get("worker_resources"):
        cpu_limit = dutil.get(values_dict, "worker_resources.limits.cpu", separator='.')
    else:
        raise ValueError('Unable to find requested CPUs count.')

    # We need to handle cases when CPU is provided either as absolute value, or in millicpu format.
    # Convert_k8s_cpu_resource returns cpu request in millicpus, so we divide it by 1000 to get absolute
    # value of cpus, and we make sure that there will be at least one thread.
    return int(max(convert_k8s_cpu_resource(cpu_limit) // 1000, 1))
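For illustration only, a call with a hypothetical values.yaml dict; convert_k8s_cpu_resource is assumed to return millicpus, as the comment above states:

values = {'resources': {'limits': {'cpu': '2500m'}}}
calculate_omp_num_threads(values)  # 2, i.e. max(2500 // 1000, 1) under that assumption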
Example #24
    def test_group_by_label(self):
        self.tasks.add("task1", "project1", "label1", "today")
        self.tasks.add("task2", "project1", "label2", "today")
        self.tasks.add("task3", "project1", "label3", "today")
        self.tasks.add("task4", "project1", "label1", "today")
        self.tasks.add("task5", "project1", "label2", "today")
        self.tasks.add("task6", "project1", "label3", "today")

        response = self.tasks.group_by_label()
        self.assertTrue(self.util.verify_structure(response))
        self.assertTrue(self.util.count_tasks(response) == 6)

        task1 = self.util.get_by_index(response, 0)
        self.assertTrue(util.get(task1, "label") == "label1")

        task2 = self.util.get_by_index(response, 2)
        self.assertTrue(util.get(task2, "label") == "label2")

        task3 = self.util.get_by_index(response, 4)
        self.assertTrue(util.get(task3, "label") == "label3")
Example #25
def process_stuff(l, record):
    data = json.loads(record)
    target = {}
    # 1:1
    target["@context"] = "http://schema.org"
    target["@type"] = 'http://schema.org/CreativeWork'

    for k, v in schema.items():
        if isinstance(v, dict):
            target[k] = {}
            for c, w in v.items():
                lido(data, target[k], c, w)
        elif isinstance(v, list):
            target[k] = []
            for elem in v:
                temp = {}
                for c, w in elem.items():
                    lido(data, temp, c, w)
                target[k].append(temp)
        elif isinstance(v, str):
            lido(data, target, k, v)
    # generate @id
    if "genre" in target:
        target["genre"]["@type"] = "Text"
    _id = baseuri + str(target["identifier"].rsplit('-')[-1])
    target["@id"] = _id
    # bnodes 1:n
    target['mentions'] = []
    try:
        for i in get(data, "lido:descriptiveMetadata/lido:objectRelationWrap/lido:subjectWrap/lido:subjectSet/lido:subject/lido:subjectConcept"):
            tag = {}
            tag['sameAs'] = get(i, "lido:conceptID/_")
            tag['name'] = get(i, "lido:term")
            target['mentions'].append(tag)
    except:
        pass
    target = checkids(target)
    lock.acquire()
    sys.stdout.write(json.dumps(target) + "\n")
    sys.stdout.flush()
    lock.release()
Example #26
    def __setup_log_streaming(cls):
        # Load keystore
        keystore_file = get(Program.__yaml_config[Program.__env],
                            "/keystore_file")
        with open(keystore_file) as k:
            keystore = load(k)

        # Get account
        account = Web3.toChecksumAddress('0x' + keystore['address'])

        # Get log streaming config (if any)
        log_streaming_config = None
        try:
            log_streaming_config = get(Program.__yaml_config[Program.__env],
                                       "/logging/streaming")
        except KeyError:
            pass

        # Initialize the log streaming module (should be done once)
        import log_streaming
        log_streaming.initialize(account, log_streaming_config)
Example #27
def dotkey(obj: dict, path: str, default=None, separator='.'):
    """
    :param obj: dict like {'some': {'value': 3}}
    :param path: 'some.value'
    :param separator: '.' | '/'
    :param default: default for KeyError
    :return: value or default value
    """
    try:
        return get(obj, path, separator=separator)
    except KeyError:
        return default
Example #28
def lookup_resource_id(list, id_rep, name_path=None, mac=False, **kwargs):
    if hasattr(list, '__call__'):
        list = list().get('data')
    _is_uuid = not mac and is_uuid(id_rep)
    id_rep_lower = id_rep.lower()
    id_rep_len = len(id_rep)
    name_path = name_path or "attributes/name"
    matches = []
    for entry in list:
        entry_id = entry.get('id')
        if _is_uuid:
            if entry_id == id_rep:
                return entry_id
        elif mac:
            try:
                entry_mac = dpath.get(entry, 'meta/mac')
                if entry_mac[-id_rep_len:].lower() == id_rep_lower:
                    matches.append(entry_id.encode('utf8'))
            except KeyError:
                pass

        else:
            short_id = shorten_id(entry_id)
            if short_id == id_rep:
                matches.append(entry_id.encode('utf8'))
            else:
                try:
                    entry_name = dpath.get(entry, name_path)
                    if entry_name[:id_rep_len].lower() == id_rep_lower:
                        matches.append(entry_id.encode('utf8'))
                except KeyError:
                    pass
    if len(matches) == 0:
        raise KeyError('Id: ' + id_rep.encode('utf8') + ' does not exist')
    elif len(matches) > 1:
        short_matches = [shorten_id(id) for id in matches]
        match_list = ' (' + ', '.join(short_matches) + ')'
        raise KeyError('Ambiguous id: ' + id_rep.encode('utf8') + match_list)

    return matches[0]
Example #29
 def retrieve(
     self,
     query,
     default: Any = None,
     document: Optional['DpathMixin'] = None,
 ) -> Optional[Any]:
     '''Get value from settings with key.'''
     if not document:
         document = self
     try:
         return dpath.get(document, query, DpathMixin.separator)
     except KeyError:
         return default
Example #30
 def lookup(self, o, path, **kwargs):
     try:
         if hasattr(path, '__call__'):
             result = path(o)
         else:
             result = dpath.get(o, path)
     except KeyError:
         return ""
     json_opts = kwargs.pop('json', None)
     if isinstance(result, dict) and json_opts:
         result = json.dumps(result, **json_opts)
     return result
Example #31
def dotkey(obj: dict, path: str, default=None, separator='.'):
    """
    Provides an interface to traverse nested dict values by dot-separated paths. Wrapper for ``dpath.util.get``.

    :param obj: dict like ``{'some': {'value': 3}}``
    :param path: ``'some.value'``
    :param separator: ``'.'`` or ``'/'`` or whatever
    :param default: default for KeyError
    :return: dict value or default value
    """
    try:
        return get(obj, path, separator=separator)
    except KeyError:
        return default
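A few illustrative calls against a toy dict, matching the docstring above:

obj = {'some': {'value': 3}}
dotkey(obj, 'some.value')                 # 3
dotkey(obj, 'some.missing', default=0)    # 0
dotkey(obj, 'some/value', separator='/')  # 3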
Example #32
 def getval(self, path):  # Attempts to retrieve value. Returns default value if non-existent.
     try:
         log.debug("Retrieving '%s' from save data.", path)
         val = dpath.get(self.data, path)
         if self.alog:
             log.debug("Value is " + str(val))
         try:  # Is this a number?
             val = float(val)
         except ValueError:
             pass  # Not a number.
         return val
     except KeyError:
         log.debug("Data at '%s' does not exist.", path)
         log.debug("Using default.")
         try:
             val = dpath.get(self.default_data, path)
             try:  # Can I be numerical?
                 val = float(val)
             except ValueError:
                 pass  # I can not.
             return val
         except KeyError:
             log.exception("Default not defined! Somebody f****d up.")
             return None  # This should yield interesting results.
Example #33
def process_templates(templates_path, output_path, context, render_exceptions, pretty_urls):
    with tempfile.TemporaryDirectory() as tmp_dir:
        template_lookup = TemplateLookup(directories=[templates_path],
                                         module_directory=tmp_dir,
                                         input_encoding='utf-8',
                                         output_encoding='utf-8',
                                         encoding_errors='replace')

        # Extract repeat from context
        repeat = context['repeat']

        # For each .mako template
        for template_path in templates_path.rglob('*.mako'):
            # Remove base directory name
            template_path = Path(*template_path.parts[1:])

            # Check for ignored files: <PAGE NAME>.template.mako and <PAGE NAME>.part.mako
            ignored_extensions = ['.template', '.part']
            if len(template_path.suffixes) >= 2 and template_path.suffixes[-2] in ignored_extensions:
                # Skip file
                continue

            # Check if template is repeated page
            repeat_extension = ".repeat"
            if len(template_path.suffixes) >= 2 and template_path.suffixes[-2] == repeat_extension:
                # Get repeat config
                repeat_basename = template_path.stem.split('.')[0]
                repeat_data_path = repeat.get(repeat_basename, False)
                if not repeat_data_path:
                    logging.error('Repeat config for "%s" not found', repeat_basename)
                    sys.exit(1)

                # Fetch collection from context
                repeat_collection = dpath.get(context['data'], repeat_data_path)

                # Render repeat
                for page in repeat_collection:
                    output_name = page['slug']
                    repeat_output_path = output_path / repeat_basename
                    repeat_context = context
                    repeat_context['page'] = page
                    render_template(template_lookup, template_path, repeat_output_path,
                                    output_name, repeat_context, render_exceptions, pretty_urls)

            else:
                # Render page
                output_name = template_path.stem
                render_template(template_lookup, template_path, output_path, output_name, context, render_exceptions, pretty_urls)
Example #34
    def test_filter_by_status(self):
        response = self.tasks.add("task1", "project1", "label1", "today")
        uuid = util.get(response, "tasks/0/unique_id")

        response = self.tasks.filter_by_status("incomplete")
        self.assertTrue(self.util.verify_structure(response))
        self.assertTrue(self.util.count_tasks(response) == 1)

        response = self.task.complete_task(uuid)
        self.assertTrue(self.util.verify_structure(response))
        response = self.tasks.filter_by_status("complete")
        self.assertTrue(self.util.count_tasks(response) == 1)

        response = self.task.incomplete_task(uuid)
        self.assertTrue(self.util.verify_structure(response))
        self.assertTrue(self.util.count_tasks(response) == 1)
Example #35
def config_value(cfg, path, default=None, accept_none=True):
    """
    Extracts a configuration entry from a given configuration dictionary.
    """
    try:
        value = get(cfg, path)
    except KeyError as key_error:
        if default is not None:
            return default

        if accept_none:
            return None

        raise key_error

    return value
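A short sketch of the fallback behaviour, using a made-up config dict (get here is assumed to be dpath-style with '/' paths):

cfg = {'logging': {'level': 'INFO'}}
config_value(cfg, '/logging/level')                 # 'INFO'
config_value(cfg, '/logging/format', default='%s')  # '%s' (default wins)
config_value(cfg, '/logging/format')                # None, because accept_none=True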
Example #36
    def __init__(self, source, key, window_size):
        l = []
        for e in source:
            key_value = util.get(e, key)
            l.append((key_value, e))
        ordered_list = sorted(l)
        buckets = {}

        for i in range(0, len(ordered_list) - 1):
            buckets[i] = []
            for j in range(0, window_size):
                if len(ordered_list) > i + j:
                    buckets[i].append(ordered_list[i + j][1])

        for k, v in buckets.iteritems():
            buckets[k] = combinations(v, 2)

        self._buckets = buckets.itervalues()
Example #37
def _get_joke() -> str:
    """Get a joke randomly from one of several api choices"""
    api_list = db.get_others('joke-api/__list__')
    # randomly choose a single api
    api_key = random.choices(
        [api['key'] for api in api_list],
        weights=[api['weight'] for api in api_list],  # weights
        k=1  # return only one answer
    )[0]
    logging.info(f"Using joke api \"{api_key}\"")

    api = db.get_others('joke-api/' + api_key)

    response = requests.get(api['url'], headers=api.get('headers'))
    if api['result']['type'] == "json":
        return get(response.json(), api['result']['path'])
    elif api['result']['type'] == "text":
        return response.text
Example #38
def determine_linecol(data,
                      paths,
                      max_steps=5) -> Tuple[Optional[int], Optional[int], int]:
    """Determine linecol from the CommentedMap for the `paths` location.

    CommentedMap from `ruamel.yaml` has `.lc` property from which we can read
    `.line` and `.col`. This is available in the collections type,
    i.e. list and dictionaries.

    But this may fail on non-collection types. For example, if the `paths` is
    ['stages', 'metrics'], metrics being a boolean type does not have `lc`
    prop.
    ```
    stages:
      metrics: true
    ```

    To provide some context to the user, we step up to the
    path ['stages'], which being a collection type, will have `lc` prop
    with which we can find line and col.

    This may end up being not accurate, so we try to show the same amount of
    lines of code for `n` number of steps taken upwards. In a worst case,
    it may be just 1 step (as non-collection item cannot have child items),
    but `schema validator` may provide us arbitrary path. So, this caps the
    number of steps upward to just 5. If it does not find any linecols, it'll
    abort.
    """
    from dpath.util import get

    step = 1
    line, col = None, None
    while paths and step < max_steps:
        value = get(data, paths, default=None)
        if value is not None:
            with suppress(AttributeError, TypeError):
                line = value.lc.line + 1
                col = value.lc.col + 1
                break
        step += 1
        *paths, _ = paths

    return line, col, step
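A rough usage sketch, assuming data is loaded with ruamel.yaml's round-trip loader so that nested mappings carry the .lc line/column info described above:

from ruamel.yaml import YAML

data = YAML().load("stages:\n  metrics: true\n")
line, col, step = determine_linecol(data, ['stages', 'metrics'])
# 'metrics' is a plain bool without .lc, so the walk steps up to ['stages'] (step == 2)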
Example #39
async def run_query(client: influx.QueryClient, query: str, params: dict):
    query_response = await client.query(query=query, **params)

    try:
        # Only support single-measurement queries
        response = dpath.get(query_response, 'results/0/series/0')
        # Workaround for https://github.com/influxdata/influxdb/issues/7332
        # The continuous query that fills the downsampled database inserts "key" as "m_key"
        prefix = params.get('prefix')
        if prefix:
            response['columns'] = [
                re.sub(prefix, '', v, count=1)
                for v in response.get('columns', [])
            ]
    except KeyError:
        # Nothing found
        response = dict()

    response['database'] = params.get('database') or influx.DEFAULT_DATABASE
    response['policy'] = params.get('policy') or influx.DEFAULT_POLICY
    return response
Example #40
def extract_from_tuple(data, path):
    """
    Args:
        data (tuple): tuple of records
        path (Path): attribute to extract

    Returns:
        tuple: one attribute for record

    Example:
        data: ({'a':2, 'b':3}, {'a':1, 'b':2})
        path: 'a'
        returns: (2, 1)
    """
    result = []

    for x in data:
        try:
            result.append(util.get(x, path))
        except:
            result.append(None)

    return tuple(result)
Example #41
def test_now_playing_nothing_is_playing(vcr, bt, key, expected):
    with vcr.use_cassette("nothing_is_playing"):
        assert get(bt.now_playing, key) == expected
Example #42
    def __init__(self, source, attributes, no_none=False, debug=False, filter_size=0, string_as_list=False):
        buckets = {}
        blocking_keys = {}

        count = 0

        for e in source:

            count += 1

            if debug and count % 1000 == 0:
                logger.info('tick ' + str(count))

            keys = [[]]
            for a in attributes:
                v = None
                try:
                    v = util.get(e, a)
                except:
                    pass
                if string_as_list and isinstance(v, basestring):
                    v = v.split(' ')
                if isinstance(v, list):
                    new_keys = []
                    for x in keys:
                        for y in v:
                            new_keys.append(x + [y])
                    keys = new_keys
                else:
                    for x in keys:
                        x.append(v)
            for tmp in keys:
                if no_none and None in tmp:
                    continue
                h = hash(tuple(tmp))
                if h not in blocking_keys:
                    blocking_keys[h] = tmp
                if h in buckets:
                    buckets[h].append(e)
                else:
                    buckets[h] = [e]

        if debug:
            for k in sorted(buckets, key=lambda k: len(buckets[k]), reverse=True):
                if len(buckets[k]) == 1:
                    break
                logger.info(str(blocking_keys[k]) + ' appears ' + str(len(buckets[k])) + ' times')

        count = 0

        new_buckets = {}

        for k, v in buckets.iteritems():
            size = (len(v) * (len(v) - 1)) / 2
            if filter_size == 0 or size <= filter_size:
                count += size
                new_buckets[k] = combinations(v, 2)

        if debug:
            logger.info("# candidate pairs: " + str(count))

        self._buckets = new_buckets.itervalues()
Example #43
 def _map_card(json):
     key = unicode(dpath.get(json, 'meta/card/id'))
     return card_type(key, key)
Example #44
 def _map_sensor_count(json):
     return len(dpath.get(json, 'relationships/sensor/data'))
Example #45
def test_bass_capabilities(vcr, bt, key, expected):
    with vcr.use_cassette("bass_capabilities"):
        assert get(bt.bass_capabilities, key) == expected
Example #46
def test_sources(vcr, bt, key, expected):
    with vcr.use_cassette("sources"):
        assert len(bt.sources) == 2
        assert get(bt.sources[0], key) == expected
Example #47
def test_info(vcr, bt, key, expected):
    with vcr.use_cassette("info"):
        assert get(bt.info, key) == expected
Example #48
def test_presets(vcr, bt, key, expected):
    with vcr.use_cassette("presets"):
        assert len(bt.presets) == 6
        p = bt.presets[0]
        assert get(p, key) == expected
Example #49
 def _map_user_count(json):
     return len(dpath.get(json, 'relationships/user/data'))
Example #50
def test_volume_read(vcr, bt, key, expected):
    with vcr.use_cassette("vol_read"):
        assert get(bt.volume, key) == expected
Example #51
def map_script_filenames(json):
    files = dpath.get(json, 'meta/scripts')
    return ', '.join(extract_script_filenames(files))
Example #52
def test_bass_read(vcr, bt, key, expected):
    with vcr.use_cassette("bass_read"):
        assert get(bt.bass, key) == expected