Code example #1
File: fetcher.py Project: kevinyrock/500px-Fetcher
import sys
import urllib.request

from nested_lookup import nested_lookup


def main():
    image_id = 0
    name_id = 0
    photos_retrieved = 0
    photos_needed = 10
    failed = 0

    # `results` (the API search results) and `Photo` are defined elsewhere in fetcher.py
    for photo in results:
        image_id = nested_lookup('id', photo)[0]
        print(image_id)

        try:
            photo = Photo(image_id)
            failed = 0
        except Exception:
            print("Something didn't work, boss.")
            failed = 1

        if failed != 1:
            print(photo.image_url)
            urllib.request.urlretrieve(photo.image_url, "image" + str(name_id) + ".jpg")
            name_id += 1
            photos_retrieved += 1

        if photos_retrieved == photos_needed:
            break

    print("Successfully pulled and saved " + str(photos_retrieved) + " photo(s)!")

    sys.exit()
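
For reference, nested_lookup(key, document) returns a flat list of every value stored under the key at any depth, which is why the loop above takes element [0]. A minimal sketch:

from nested_lookup import nested_lookup

photo = {"photo": {"id": 1025, "user": {"id": 42}}}
print(nested_lookup('id', photo))     # [1025, 42] -- every match, found depth-first
print(nested_lookup('id', photo)[0])  # 1025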
Code example #2
File: elasticache.py Project: NandeeshHD/botoform
    def delete_related_cache_clusters(self, cluster_ids=None):
        """
        delete all cache clusters and subnet groups related to this VPC.

        :param cluster_ids:
            optional list of cache_cluster_ids (names) to delete instead.

        :returns: None
        """
        cluster_descs = self.get_related_cluster_descriptions()
        related_cluster_ids = nested_lookup('CacheClusterId', cluster_descs)

        if cluster_ids is None:
            cluster_ids = related_cluster_ids
        else:
            # We make sure cluster_ids are part of this VPC.
            cluster_ids = [c for c in cluster_ids if c in related_cluster_ids]

        # get sibling cache cluster subnet group ids.
        subnet_ids = []
        for cluster_desc in cluster_descs:
            if cluster_desc['CacheClusterId'] in cluster_ids:
                subnet_ids.append(cluster_desc['CacheSubnetGroupName'])

        for cluster_id in cluster_ids:
            # delete the cache_cluster.
            self.delete_cache_cluster(CacheClusterId = cluster_id)

        # wait for all cluster_ids to reach cache_cluster_deleted state.
        self.wait_for_related_clusters('cache_cluster_deleted', cluster_ids)

        for subnet_id in subnet_ids:
            # delete the sibling cache_cluster_subnet_group.
            self.delete_cache_subnet_group(CacheSubnetGroupName = subnet_id)
Code example #3
File: elasticache.py Project: NandeeshHD/botoform
 def get_related_cluster_descriptions(self):
     """return a list of cache cluster descriptions related to VPC."""
     subnet_descs = self.get_related_subnet_group_descriptions()
     subnet_names = nested_lookup('CacheSubnetGroupName', subnet_descs)
     # filter() is lazy on Python 3, so materialize a list to match the docstring
     return list(filter(
                lambda x: x['CacheSubnetGroupName'] in subnet_names,
                self.get_all_cluster_descriptions()
            ))
Code example #4
def get_shodan_score_delta(ip):
    score = {"malware_score": 0,
             "phishing_score": 0,
             "unwanted_score": 0,
             "unsecure_score": 0}
    try:
        response = api.host(ip)
        score['unsecure_score'] = EXPIRED_FACTOR * nested_lookup('expired', response).count(True)
        return score
    except shodan.APIError:
        return score
Code example #5
File: metsParser.py Project: EUDAT-B2STAGE/http-api
    def _pathListExtractor(self, fileId, fileGroups):
        "get the list of file paths under a single file ID"

        fileList = nested_lookup(fileId, fileGroups)
        self.logger.debug('file ID: ' + fileId)
        pathList = []
        for phisicalFileElement in fileList:
            pathList += phisicalFileElement['locations']
            # 'files' maps child file IDs to child elements; iterate over its keys
            for childKey in phisicalFileElement['files']:
                pathList += self._pathListExtractor(childKey,
                                                    phisicalFileElement['files'])
        self.logger.debug('Path list: ' + pprint.pformat(pathList))

        return pathList
Code example #6
    def create_dhcp_options(self, dhcp_configurations):
        """Create and return a new dhcp_options set."""
        response = self.boto.ec2_client.create_dhcp_options(
                     DhcpConfigurations = dhcp_configurations
                   )

        dhcp_options_id = nested_lookup('DhcpOptionsId', response)[0]
        dhcp_options = self._get_dhcp_options_from_id(dhcp_options_id)

        self.log.emit('tagging dhcp_options (Name:{})'.format(self.evpc.name), 'debug')
        update_tags(dhcp_options, Name = self.evpc.name)

        self.log.emit('associating dhcp_options to {}'.format(self.evpc.name))
        dhcp_options.associate_with_vpc(VpcId = self.evpc.id)
Code example #7
def build_vector(json):
    operators = nested_lookup('Node Type', json)
    cost = json['Total Cost']
    rows = json['Plan Rows']

    codec = [
        'Aggregate', 'Merge Join', 'Sort', 'Nested Loop', 'Seq Scan',
        'Index Scan'
    ]

    counts = Counter(operators)
    arr = [counts[c] for c in codec]
    arr.append(rows)
    arr.append(cost)

    return arr
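
As a quick sanity check of the vectorization above, on a hypothetical plan fragment (values made up, and assuming the Counter and nested_lookup imports are in place):

plan = {'Node Type': 'Aggregate', 'Total Cost': 12.5, 'Plan Rows': 3,
        'Plans': [{'Node Type': 'Seq Scan'}]}
print(build_vector(plan))  # [1, 0, 0, 0, 1, 0, 3, 12.5]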
Code example #8
def criteria3():
    prog_lang = {}
    for u in range(0, len(X.user_name)):
        file = requests.get("https://api.github.com/users/" + X.user_name[u] + "/repos", auth=(myusername, mytoken))
        data = json.loads(file.text)
        # 'full_name' yields "owner/repo" strings, which the repos API below expects
        output = nested_lookup(key='full_name', document=data)

        lang = []
        for x in range(0, len(output)):
            file = requests.get("https://api.github.com/repos/" + output[x] + "/languages", auth=(myusername, mytoken))
            lang_data = json.loads(file.text)
            # the languages endpoint returns {"Python": 1234, ...}; collect its keys
            for language in lang_data:
                lang.append(language)
        prog_lang['{0}'.format(X.user_name[u])] = list(set(lang))
    return jsonify(prog_lang)
Code example #9
File: env_helper.py Project: lgirdk/boardfarm-docsis
    def env_check(self, test_environment):
        """Test environment check (overrides behaviour).

        This is needed as some of the lists in the config boot file do not
        follow the same rules as the lists in the base class.

        :param test_environment: the environment to be checked against the EnvHelper environment
        :type test_environment: dict

        .. note:: raises BftEnvMismatch  if the test_environment is not contained in the env helper environment
        .. note:: recursively checks dictionaries
        """
        if nested_lookup("config_boot", test_environment):
            req_cfg_boot = test_environment["environment_def"]["board"].get(
                "config_boot", {})
            self._check_config_boot(req_cfg_boot)

        if nested_lookup("boot_file", test_environment) and nested_lookup(
                "boot_file", self.env):
            req_boot_file_checks = test_environment["environment_def"][
                "board"].get("boot_file", {})
            self._check_boot_file_conditions(req_boot_file_checks)
            test_environment["environment_def"]["board"].pop("boot_file")
        return super().env_check(test_environment)
Code example #10
    def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx,
                           dataloader_idx):
        lrs = {
            f"lr_group_{i}": param["lr"]
            for i, param in enumerate(
                pl_module.trainer.optimizers[0].param_groups)
        }

        for key, val in lrs.items():
            x_axis = nested_lookup('x_axis', outputs)
            assert len(
                x_axis
            ), 'training_step must return value of x_axis for logging'
            pl_module.logger.experiment.add_scalar(f"LR_Scheduler/{key}", val,
                                                   x_axis[0])
Code example #11
def filter_line(stat, filter_dict, or_and=True, include_absent=True):
    """
    stat - dict which represents JSONL line
    filter_dict - simple dict to use for filtering
    or_and - True for OR logic else AND
    include_absent - if True and filter finds nothing return True

    Returns True if the line has an entry from filter_dict.
    Currently supports simple keys and values.
    """
    found = [filter_dict[k] in nl.nested_lookup(k, stat) for k in filter_dict]
    num_exist = [nl.get_occurrence_of_key(stat, k) for k in filter_dict]

    return ((include_absent and sum(num_exist) == 0)
            or (or_and and any(found))
            or (not or_and and all(found)))
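
The get_occurrence_of_key helper used above counts how many times a key appears at any depth, which is what drives the include_absent short-circuit. A minimal sketch:

import nested_lookup as nl

stat = {"level": "info", "ctx": {"level": "debug", "code": 7}}
print(nl.nested_lookup("level", stat))          # ['info', 'debug']
print(nl.get_occurrence_of_key(stat, "level"))  # 2
print(nl.get_occurrence_of_key(stat, "user"))   # 0 -- an absent filter key is detectable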
Code example #12
    def contingency_check(self, env_req, dev_mgr, env_helper):
        """Register service check plugins based on env_req.

        Reading the key value pairs from env_req, BFPluginManager scans
        for relative hook specs and implementations and loads them into a
        feature PluginManager (use generate_feature_manager).

        Once all plugins are registered, this function will call the hook
        initiating respective service checks.

        :param env_req: ENV request provided by a test
        :type env_req: dict
        """

        logger.info("Executing all contingency service checks under boardfarm")
        # initialize a feature Plugin Manager for Contingency check
        pm = BFPluginManager("contingency")
        # this will load all feature hooks for contingency
        pm.load_hook_specs("feature")
        all_impls = pm.fetch_impl_classes("feature")

        plugins_to_register = [all_impls["boardfarm.DefaultChecks"]]

        # referencing this from boardfarm-lgi
        dns_env = nested_lookup("DNS", env_req.get("environment_def", {}))
        if dns_env:
            plugins_to_register.append(all_impls["boardfarm.DNS"])

        # ACS reference from boardfarm-lgi
        if "tr-069" in env_req.get("environment_def", {}):
            plugins_to_register.append(all_impls["boardfarm.ACS"])
        # Voice reference from boardfarm-lgi
        if "voice" in env_req.get("environment_def", {}):
            plugins_to_register.append(all_impls["boardfarm.Voice"])

        plugins_to_register.append(all_impls["boardfarm.CheckInterface"])

        # since Pluggy executes plugin in LIFO order of registration
        # reverse the list so that Default check is executed first
        for i in reversed(plugins_to_register):
            pm.register(i)
        result = pm.hook.service_check(
            env_req=env_req, dev_mgr=dev_mgr, env_helper=env_helper
        )

        # this needs to be orchestrated by hook wrapper maybe
        BFPluginManager.remove_plugin_manager("contingency")
        return result
Code example #13
    def get_images(self):
        r = self.s.get(self.url)
        text = r.text

        finder_text_start = "window.__additionalDataLoaded('feed',"
        finder_text_start_len = len(finder_text_start) - 1
        finder_text_end = ");</script>"

        all_data_start = text.find(finder_text_start)
        all_data_end = text.find(finder_text_end, all_data_start + 1)
        json_str = text[(all_data_start + finder_text_start_len +
                         1):all_data_end]
        all_data = json.loads(json_str)
        img_sources = nested_lookup('display_url', all_data)
        shuffle(img_sources)
        return img_sources
Code example #14
    def nested_get(self, key, *args, allow_multiple=False, **kwargs):
        """
        Returns all occurrences of :param:`key` in :param:`self` and subdicts

        Parameters
        ----------
        key : str
            the key to search for
        *args :
            positional arguments to provide default value
        allow_multiple: bool
            allow multiple results
        **kwargs :
            keyword arguments to provide default value

        Raises
        ------
        KeyError
            Multiple Values are found for key and :param:`allow_multiple` is
            False (unclear which value should be returned)
            OR
            No Value was found for key and no default value was given

        Returns
        -------
        Any
            value corresponding to key (or default if value was not found)

        """

        if "." in key:
            return self[key]
        results = nested_lookup(key, self)
        if len(results) > 1:
            if allow_multiple:
                return results
            else:
                raise KeyError("Multiple Values found for key %s" % key)
        elif len(results) == 0:
            if "default" in kwargs:
                return kwargs["default"]
            elif args:
                return args[0]
            else:
                raise KeyError("No Value found for key %s" % key)
        else:
            return results[0]
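
Assuming the method above lives on a dict subclass (a hypothetical LookupDict), it behaves like this:

d = LookupDict({"a": {"x": 1}, "b": {"x": 2}, "c": 3})
d.nested_get("c")                       # -> 3 (single occurrence)
d.nested_get("x", allow_multiple=True)  # -> [1, 2]
d.nested_get("x")                       # -> KeyError: multiple values found
d.nested_get("missing", default=None)   # -> None (keyword default)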
Code example #15
 def test_nested_update_in_place_false_list_input(self):
     doc = self.sample_data4
     # get all instances of the given element
     findings = nested_lookup("plz", doc, False, True)
     # alter those instances
     updated_findings = list()
     for key, val in findings.items():
         for elem in val:
             updated_findings.append(elem + 300)
     # update those instances with the altered results
     doc_updated = nested_update(
         doc, "plz", updated_findings, in_place=False, treat_as_element=False
     )
     elem1 = doc_updated["plz"]
     elem2 = doc_updated["vertragsteile"][0]["beitragsDaten"]["plz"]
     self.assertEqual(elem1, 82569)
     self.assertEqual(elem2, 87199)
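
For context, the positional arguments in the lookup above are wild=False and with_keys=True, so findings is a dict mapping each matched key to its list of values, and treat_as_element=False makes nested_update spread a list of replacements across the matched occurrences. A minimal sketch of the same round trip:

from nested_lookup import nested_lookup, nested_update

doc = {"plz": 1, "inner": {"plz": 2}}
findings = nested_lookup("plz", doc, False, True)  # {'plz': [1, 2]}
updated = nested_update(doc, "plz", [11, 12], in_place=False, treat_as_element=False)
print(updated)  # {'plz': 11, 'inner': {'plz': 12}} -- applied in traversal order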
Code example #16
    def _get_ops(cls, self, user_url, user_fpath):
        """Data cleaning/getting ops."""
        st_get = requests.get(user_url)
        st_decode = json.load(StringIO(st_get.content.decode('utf-8')))

        [messages] = nested_lookup('messages', st_decode)
        raw_df = pd.DataFrame(messages)

        syms_df = self._clean_syms(self, st_decode, messages)

        if os.path.isfile(self.prev_fpath):
            raw_df = self._concat_drop(self.prev_fpath, raw_df)
        if os.path.isfile(self.syms_fpath):
            syms_df = self._concat_drop(self.syms_fpath, syms_df)

        self._write_to_parquet(self.prev_fpath, raw_df)
        self._write_to_parquet(self.syms_fpath, syms_df)
Code example #17
def get_python_runtime_image(parsed_template_obj):
    runtimes = nested_lookup('Runtime', parsed_template_obj)

    if not runtimes:
        raise Exception('No Runtime entries found in the template')
    if len(runtimes) > 1:
        raise Exception('Multiple python runtimes are not supported')

    runtime = runtimes[0]
    if 'python' not in runtime.lower():
        raise Exception(f'Only Python runtimes are supported, found {runtime}')

    lambda_python_containers = {
        "python3.8": "lambci/lambda:build-python3.8",
        "python3.7": "lambci/lambda:build-python3.7",
        "python3.6": "lambci/lambda:build-python3.6"
    }

    return runtime, lambda_python_containers[runtime]
Code example #18
    def parse_full_name(self, xml):
        """ Parses XML containing the full title of a publication
        Arguments: xml {str} -- XML string to process
        Returns: str -- title of the publication
        Raises: WrongXmlDataToParse -- wrong data for this parsing """
        res = self.find_in_nested_xml(xml, 'title')
        index = 0
        while index < len(res):
            i = res[index]
            index += 1
            if type(i) == list:
                res.extend(i[1:])
                i = i[0]
            if 'title_proper' in nested_lookup('@title_type', i):
                return " ".join(i['#text'].split())

        return None
Code example #19
    def _parse_task_tags(self):
        for rfile in self._files_registry.get_files():
            if any(fnmatch.fnmatch(rfile, "*/tasks/*." + ext) for ext in YAML_EXTENSIONS):
                with open(rfile, "r", encoding="utf8") as yaml_file:
                    try:
                        data = ruamel.yaml.safe_load(yaml_file)
                    except (
                        ruamel.yaml.composer.ComposerError, ruamel.yaml.scanner.ScannerError
                    ) as e:
                        message = "{} {}".format(e.context, e.problem)
                        self.log.sysexit_with_message(
                            "Unable to read yaml file {}\n{}".format(rfile, message)
                        )

                    tags_found = nested_lookup("tags", data)
                    for tag in tags_found:
                        self._data["tags"][tag] = {}
Code example #20
def findSubject(dataJsonObj):
    if 'mods:mods' in dataJsonObj:
        return nested_lookup(key='mods:topic', document=dataJsonObj)
    else:
        subjects = deep_get(dataJsonObj, "metadata.idinfo.keywords.theme",
                            deep_get(dataJsonObj, "metadata.dataIdInfo.searchKeys", []))
        subs = []
        try:
            for keyword in ["themekey"]:
                for itm in subjects:
                    temp = deep_get(itm, keyword, [])
                    if not isinstance(temp, list):
                        temp = [temp]
                    subs = subs + temp
        except Exception as inst:
            subs.append(str(inst))
        return subs
Code example #21
 def test_wild_nested_lookup(self):
     results = nested_lookup(
         key='mail',
         document={
             'name': 'Russell Ballestrini',
             'email_address': '*****@*****.**',
             'other': {
                 'secondary_email': '*****@*****.**',
                 'EMAIL_RECOVERY': '*****@*****.**',
             },
         },
         wild=True,
     )
     self.assertEqual(3, len(results))
     self.assertIn('*****@*****.**', results)
     self.assertIn('*****@*****.**', results)
     self.assertIn('*****@*****.**', results)
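
As the assertions above suggest, wild=True treats the key as a case-insensitive substring match. A minimal sketch:

from nested_lookup import nested_lookup

doc = {'email_address': 'a@example.com', 'other': {'EMAIL_RECOVERY': 'b@example.com'}}
print(nested_lookup('mail', doc, wild=True))
# ['a@example.com', 'b@example.com'] -- matches both keys despite case and prefix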
Code example #22
    def parse(self, response):
        hrefs = nested_lookup(
            "detailPageUrl",
            json.loads(
                response.css('script::text').re_first(
                    r'window.__WEB_CONTEXT__=\{pageManifest:\s*(\{.*?)\}\s*;\s*'
                )))
        for href in hrefs:
            url = response.urljoin(href)
            yield scrapy.Request(url, callback=self.parseRestaurant)

        next_page_href = response.xpath(
            '//div[contains(@class, "pagination")]//a[contains(@class, "next")]/@href'
        ).get()
        if next_page_href:
            url = response.urljoin(next_page_href)
            yield scrapy.Request(url, self.parse)
Code example #23
File: utils.py Project: zigmax/opencve
def convert_cpes(conf):
    """
    This function takes an object, extracts its CPE uris and transforms them into
    a dictionary representing the vendors with their associated products.
    """
    uris = nested_lookup("cpe23Uri", conf) if not isinstance(conf, list) else conf

    # Create a list of tuple (vendor, product)
    cpes_t = list(set([tuple(uri.split(":")[3:5]) for uri in uris]))

    # Transform it into a nested dictionary
    cpes = {}
    for vendor, product in cpes_t:
        if vendor not in cpes:
            cpes[vendor] = []
        cpes[vendor].append(product)

    return cpes
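
For reference, a cpe23Uri has the form cpe:2.3:a:vendor:product:version:..., so uri.split(":")[3:5] picks out the vendor and product fields. A minimal sketch of the transformation:

from nested_lookup import nested_lookup

conf = {"cpe_match": [
    {"cpe23Uri": "cpe:2.3:a:apache:http_server:2.4.54:*:*:*:*:*:*:*"},
    {"cpe23Uri": "cpe:2.3:a:apache:tomcat:9.0.1:*:*:*:*:*:*:*"},
]}
uris = nested_lookup("cpe23Uri", conf)
print({tuple(u.split(":")[3:5]) for u in uris})
# {('apache', 'http_server'), ('apache', 'tomcat')}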
Code example #24
File: api.py Project: engdan77/hass_assister
    async def update_sensor_status(self):
        """Collect the sensor status."""
        q = f'{self.url.strip("/")}/api/states'
        logger.debug(f"query {q}")
        try:
            async with aiohttp.ClientSession() as s, s.get(
                    q, headers={"Authorization":
                                f"Bearer {self.auth_token}"}) as response:
                self.attributes = await response.json()
                entity_count = len(nested_lookup("entity_id", self.attributes))
                logger.debug(f"{entity_count} entities pulled")
        except aiohttp.ClientConnectorError as e:
            logger.error(f"failed connecting to {q} with error {e}")
        except aiohttp.ContentTypeError as e:
            logger.error(f"invalid JSON from HASS {e}")
            logger.debug(f"{response}")
Code example #25
File: rds.py Project: russellballestrini/botoform
    def delete_related_db_instances(self, db_ids=None, skip_snapshot=False):
        """
        delete all RDS DB instances and subnet groups related to this VPC.

        rds_ids:
            optional list of rds_ids (names) to delete instead.
        """
        date = datetime.now().strftime('%Y-%m-%d-%H%M')
        descriptions = self.get_related_db_descriptions()
        related_db_ids = nested_lookup('DBInstanceIdentifier', descriptions)

        if db_ids is None:
            db_ids = related_db_ids
        else:
            # We make sure db_ids are part of this VPC.
            db_ids = [c for c in db_ids if c in related_db_ids]

        # get sibling db subnet group ids.
        subnet_ids = []
        for desc in descriptions:
            if desc['DBInstanceIdentifier'] in db_ids:
                subnet_ids.append(desc['DBSubnetGroup']['DBSubnetGroupName'])

        for db_id in db_ids:
            # delete the db_instance.
            self.evpc.log.emit('deleting rds: {}'.format(db_id))
            if skip_snapshot:
                self.delete_db_instance(
                    DBInstanceIdentifier = db_id,
                    SkipFinalSnapshot = skip_snapshot,
                )
            else:
                self.delete_db_instance(
                    DBInstanceIdentifier = db_id,
                    FinalDBSnapshotIdentifier = '{}-{}'.format(db_id, date),
                )

        # wait for all db_ids to reach db_instance_deleted state.
        self.evpc.log.emit('waiting for rds delete ...')
        self.wait_for_related_dbs('db_instance_deleted', db_ids)

        for subnet_id in subnet_ids:
            # delete the sibling db subnet group.
            self.delete_db_subnet_group(DBSubnetGroupName = subnet_id)
Code example #26
def criteria5():
    average_commit = {}
    for u in X.user_name:
        flag = 0
        flag1 = 0
        file = requests.get("https://api.github.com/users/" + u + "/repos?per_page=100", auth=(myusername, mytoken))
        json_data = json.loads(file.text)
        output = nested_lookup(key = 'name', document = json_data, wild = False, with_keys = False)
    
        for j in output:
            file = requests.get("https://api.github.com/repos/" + u + "/" + j + "/commits?since=2018-01-01", auth = (myusername, mytoken))
            json_data = json.loads(file.text)
            if (len(json_data) != 0):
                flag = flag + 1
                flag1 = len(json_data) + flag1
            else:
                continue
        average_commit['{0}'.format(u)] = (flag1 / flag) if flag else 0
    return jsonify(average_commit)  
Code example #27
def informer(dist):
    jurl = urlopen("https://api.covid19india.org/state_district_wise.json")
    obj = json.loads(jurl.read())
    val = nested_lookup(dist, obj)
    DeltaDict = DelUd()
    try:
        if DeltaDict[dist] >= 0:
            return "Active cases in your district are: " + str(
                val[0]['active']) + "\n\n" + "The total cases till date are: " + str(
                val[0]["confirmed"]) + "\nTotal deaths till now: " + str(
                val[0]['deceased']) + "\nPatients Recovered: " + str(val[0]["recovered"])
        else:
            return "New Cases in your District are: " + str(
                -DeltaDict[dist]) + "\n\n" + "Active cases in your district are: " + str(
                val[0]['active']) + "\n" + "The total cases till date are: " + str(
                val[0]["confirmed"]) + "\nTotal deaths till now: " + str(
                val[0]['deceased']) + "\nPatients Recovered: " + str(val[0]["recovered"])
    except (KeyError, IndexError):
        return "Please enter correct district. You may check spelling on Google :)"
Code example #28
File: utils.py Project: kevleyski/tensor
def parse_mpd(host):
    r = requests.get(host)
    if r.status_code != 200:
        raise Exception('Host didn\'t return status code 200')
    xml_data = r.text
    data = xmltodict.parse(xml_data,
                           attr_prefix='',
                           force_list={
                               'Period': True,
                               'AdaptationSet': True,
                               'Representation': True,
                               'S': True
                           })['MPD']
    # print(json.dumps(data, indent=2))
    # mpd = MPD(data, host)
    # print(mpd.segments)
    segments = []
    bitrates = sorted(int(x) for x in nested_lookup('bandwidth', data))
    return {'segments': segments, 'bitrates': bitrates}
Code example #29
File: debian.py Project: nickberry17/boardfarm
    def add_hosts(self, addn_host=None, config=None):
        # add extra hosts (dict) to dnsmasq.hosts if DNS has to run in the wan container
        # this is a hack; add_host should have been called from RootFs
        if addn_host is None:
            addn_host = {}
        self.hosts = getattr(self, "hosts", defaultdict(list))
        restart = False

        def _update_host_dict(host_data):
            for host, ip in host_data.items():
                if type(ip) is list:
                    self.hosts[host] += ip
                else:
                    self.hosts[host].append(ip)

        if addn_host:
            _update_host_dict(addn_host)
            restart = True
        else:
            if hasattr(self, "profile"):
                host_dicts = nested_lookup("hosts",
                                           self.profile.get(self.name, {}))
                for i in host_dicts:
                    _update_host_dict(i)
            if config is not None and hasattr(config, "board"):
                for dev in config.devices:
                    d = getattr(config, dev, None)
                    if hasattr(d, "dns"):
                        v4_hosts = d.dns.hosts_v4
                        v6_hosts = d.dns.hosts_v6
                        for host_val in v4_hosts, v6_hosts:
                            for host, ips in host_val.items():
                                for ip in set(ips):
                                    self.hosts[host].append(ip)

        if self.hosts:
            self.sendline("cat > /etc/dnsmasq.hosts << EOF")
            for host, ips in self.hosts.items():
                for ip in set(ips):
                    self.sendline(ip + " " + host)
            self.check_output("EOF")
        if restart:
            self.restart_dns_server()
Code example #30
    def add_hosts(self, addn_host=None, config=None):
        # add extra hosts (dict) to dnsmasq.hosts if DNS has to run in the wan container
        # this is a hack; add_host should have been called from RootFs
        hosts = {}
        if hasattr(self, "profile"):
            host_dicts = nested_lookup("hosts",
                                       self.profile.get(self.name, {}))
            for host_data in host_dicts:
                hosts.update(host_data)
        if config is not None and hasattr(config, "board"):
            for device in config.board['devices']:
                # TODO: this should be different...
                if 'lan' in device['name']:
                    continue
                d = getattr(config, device['name'])
                domain_name = device['name'] + '.boardfarm.com'
                final = None
                if 'wan-static-ip' in str(device):
                    final = str(
                        re.search(
                            'wan-static-ip:' + '(' + ValidIpv4AddressRegex +
                            ')', device['options']).group(1))
                elif 'ipaddr' in device:
                    final = str(device['ipaddr'])
                elif hasattr(d, 'ipaddr'):
                    final = str(d.ipaddr)

                if final == 'localhost':
                    if hasattr(d, 'gw'):
                        final = str(d.gw)
                    elif hasattr(d, 'iface_dut'):
                        final = d.get_interface_ipaddr(d.iface_dut)
                    else:
                        final = None
                if final is not None:
                    hosts[domain_name] = final
        if addn_host:
            hosts.update(addn_host)
        if hosts:
            self.sendline('cat > /etc/dnsmasq.hosts << EOF')
            for host, ip in hosts.items():
                self.sendline(ip + " " + host)
            self.sendline('EOF')
Code example #31
File: scanner.py Project: VNYui/Zi5zb2NpZXR5
def parse_top(s1):
    global console
    global hosts
    #init console log
    console = Console()
    #RETRIEVE KEY FROM NESTED DICT
    hosts = nested_lookup('host', s1)
    portid = nested_lookup('portid', s1)
    state = nested_lookup('state', s1)
    protocol = nested_lookup('protocol', s1)
    service = nested_lookup('service', s1)
    reason = nested_lookup('reason', s1)
    #print(f"\nTOP PORTS SCAN : {hosts[1]}")
    #PARSING PORT / STATE / PROTO / SERVICES / REASON

    console.print("\nTOP PORTS SCAN :", style="bold red")
    for (p, s, proto, serv, res) in zip(portid, state, protocol, service,
                                        reason):
        #print(f"{p}\t{s}\t{proto}\t{serv['name']}\t{res}".expandtabs(20))
        if 'open' in s:
            a = Fore.GREEN + p + Fore.RESET
            b = Fore.GREEN + s + Fore.RESET
            c = Fore.GREEN + proto + Fore.RESET
            d = Fore.GREEN + serv['name'] + Fore.RESET
            e = Fore.GREEN + res + Fore.RESET
            print(a + "\t" + b + "\t" + c + "\t" + d + "\t" + e.expandtabs(10))
            status = 'open'
        else:
            if 'closed' in s:
                a = Fore.RED + p + Fore.RESET
                b = Fore.RED + s + Fore.RESET
                c = Fore.RED + proto + Fore.RESET
                d = Fore.RED + serv['name'] + Fore.RESET
                e = Fore.RED + res + Fore.RESET
                print(a + "\t" + b + "\t" + c + "\t" + d + "\t" +
                      e.expandtabs(10))
                status = 'closed'
            else:
                a = Fore.YELLOW + p + Fore.RESET
                b = Fore.YELLOW + s + Fore.RESET
                c = Fore.YELLOW + proto + Fore.RESET
                d = Fore.YELLOW + serv['name'] + Fore.RESET
                e = Fore.YELLOW + res + Fore.RESET
                print(a + "\t" + b + "\t" + c + "\t" + d + "\t" +
                      e.expandtabs(10))
                status = 'filtered'
        #print('\033[31m' + f"{p}" + "\t" + Fore.WHITE + Fore.GREEN + f"{s}" +"\t" + Fore.WHITE + f"{proto}" + "\t" + Fore.BLUE + f"{serv['name']}" + "\t" + Fore.YELLOW + f"{res}".expandtabs(30))
    print("\n" + Fore.BLUE + f"{hosts[1]}" + Fore.RED + "      [DONE]")
    return hosts, p, s, proto, serv, res
Code example #32
def findCreators(dataJsonObj):
    if 'mods:mods' in dataJsonObj:
        creators = []
        name_tags = nested_lookup(key='mods:name', document=dataJsonObj)
        for name_tag in name_tags:
            if type(name_tag) is list:
                for itm in name_tag:
                    creators = creators + findcreatorParts(itm)
            else:
                creators = creators + findcreatorParts(name_tag)
        return cleanBlanksFromList(creators)
    else:
        creator = deep_get(dataJsonObj, "metadata.idinfo.citation.citeinfo.origin",
                           deep_get(dataJsonObj, "metadata.idinfo.citation.citeinfo.pubinfo.publish",
                                    deep_get(dataJsonObj, "metadata.dataIdInfo.idCitation.citResParty.rpOrgName", [])))
        print("creator:", creator)
        if type(creator) == str:
            creator = [u"{0}".format(creator)]
        print("creator:", creator)
        return cleanBlanksFromList(creator)
Code example #33
def set_directory(recipe: Path, working_directory: str):
    """Set all the workingdir parameters to the working_directory provided"""

    # Read json in
    try:
        Path(working_directory)
        with open(recipe, "r") as recipe_file:
            recipe_json = json.load(recipe_file)
            # print(recipe_json)
            # Check that the working directory parameter exists
            assert nested_lookup(
                "workingdir", recipe_json
            ), f"Did not find occurrences of 'workingdir' in {recipe}"
        # Open file for writing
        with open(recipe, "w") as recipe_file:
            # Get updated recipe
            updated_recipe_json = nested_update(recipe_json, "workingdir",
                                                str(working_directory))
            try:
                json.dump(updated_recipe_json, recipe_file, indent=4)
            except:
                logging.error(f"Issue putting updated value in {recipe}")
                # Clear output and put back the original recipe
                recipe_file.seek(0)
                recipe_file.truncate()
                json.dump(recipe_json, recipe_file, indent=4)
                raise
    except TypeError as e:
        logging.error(
            f"Expected file path as working directory, got {working_directory}\n{e}"
        )
        raise
    except (FileNotFoundError, PermissionError) as e:
        logging.error(f"Could not load json from {recipe}")
        raise
    except json.JSONDecodeError as e:
        logging.error(f"Invalid json found in {recipe}\n{e}")
        raise
    except AssertionError as e:
        logging.error(e)
        raise
Code example #34
    def _check_files_presence(self):
        """
        Verify the list of files now; this prevents stopping after a long
        processing time if a file does not exist.

        The list of files to verify for each subject is :
         - the standardization mask
         - all files in the group_config file
        """
        logging.debug("Verifying files presence")

        # concatenating files from all groups files:
        config_file_list = sum(nested_lookup('files', self.groups_config), [])

        for subj_id in self.all_subjs:
            subj_input_dir = Path(self.root_folder).joinpath(subj_id)

            # Find subject's standardization mask
            if self.std_mask is not None:
                for sub_mask in self.std_mask:
                    sub_std_mask_file = subj_input_dir.joinpath(
                        sub_mask.replace('*', subj_id))
                    if not sub_std_mask_file.is_file():
                        raise FileNotFoundError(
                            "Standardization mask {} not found for subject {}!"
                            .format(sub_std_mask_file, subj_id))

            # Find subject's files from group_config
            for this_file in config_file_list:
                this_file = this_file.replace('*', subj_id)
                if this_file.endswith('/ALL'):
                    logging.debug(
                        "    Keyword 'ALL' detected; we will load all "
                        "files in the folder '{}'"
                        .format(this_file.replace('/ALL', '')))
                else:
                    this_file = subj_input_dir.joinpath(this_file)
                    if not this_file.is_file():
                        raise FileNotFoundError(
                            "File from groups_config ({}) not found for "
                            "subject {}!".format(this_file, subj_id))
Code example #35
    def contingency_check(self, env_req, dev_mgr, env_helper):
        """Register service check plugins based on env_req.

        Reading the key value pairs from env_req, BFPluginManager scans
        for relative hook specs and implementations and loads them into a
        feature PluginManager (use generate_feature_manager).

        Once all plugins are registered, this function will call the hook
        initiating respective service checks.

        :param env_req: ENV request provided by a test
        :type env_req: dict
        """

        logger.info("Executing all contingency service checks under BF Docsis")

        pm = BFPluginManager("contingency")
        pm.load_hook_specs("feature")
        all_impls = pm.fetch_impl_classes("feature")

        plugins_to_register = [all_impls["boardfarm_docsis.DefaultChecks"]]

        if "voice" in env_req.get("environment_def", {}):
            plugins_to_register.append(all_impls["boardfarm_docsis.Voice"])

        plugins_to_register.append(
            all_impls["boardfarm_docsis.CheckInterface"])

        if nested_lookup("cwmp_version", env_req.get("environment_def", {})):
            plugins_to_register.append(all_impls["boardfarm_docsis.Cwmp"])

        for i in reversed(plugins_to_register):
            pm.register(i)
        result = pm.hook.service_check(env_req=env_req,
                                       dev_mgr=dev_mgr,
                                       env_helper=env_helper)

        # this needs to be orchestrated by hook wrapper maybe
        BFPluginManager.remove_plugin_manager("contingency")
        return result
Code example #36
File: git_test.py Project: yangxiyucs/NewRepo
def collaborators():
    contributors = {}
    for user in git.user_names:
        counter = 0
        count = 0
        link = requests.get("https://api.github.com/users/" + user + "/repos?per_page=1000",
                            auth=('yangxiyucs', 'ab112113'))
        json_data = json.loads(link.text)
        results = nested_lookup(key='full_name', document=json_data)
        # date = nested_lookup(key='updated_at', document=json_data)
        # print(date)
        # for r in results:
        #     if (str(date).find('2018') != -1):
        #         contributor = requests.get("https://api.github.com/repos/" + r + "/contributors",
        #                                    auth=('yangxiyucs', 'ab112113'))
        #         data = json.loads(contributor.text)
        #         counter += len(data)
        for r in results:
            result = requests.get("https://api.github.com/repos/" + r + "/commits?since=2018-01-01",
                                  auth=('yangxiyucs', 'ab112113'))
            getresul = json.loads(result.text)

            if (len(getresul) != 0):
                count += 1
                contributor = requests.get("https://api.github.com/repos/" + r + "/contributors",
                                           auth=('yangxiyucs', 'ab112113'))
                if (len(contributor.text) != 0):
                    data = json.loads(contributor.text)
                    counter = counter + len(data)
                else:
                    pass

            else:
                continue
        contributors['{0}'.format(user)] = (counter - count)
    final = json.dumps({'contributors (2018)': contributors})
    git.add.append(final)
    git.send_mail()
    print(final)
    return render_template('index.html', final=final)
Code example #37
File: SzpieGwiazdor.py Project: internety/smarTSO
    def _incoming_traffic_handler(self, response):
        """ logic for handling intercepted traffic (1001 response) """
        resp_1001 = self._amf_raw_response_parser(response)
        pom = nested_lookup('playersOnMap', resp_1001)

        for pd in pom[0]:
            bv = pd.get('availableBuffs_vector')
            if bv:
                pID = pd['userID']
                pN = pd['username_string']
                recent_key = ":".join([str(pID), pN])
                ittl = self.recent_act.ttl(recent_key)
                if ittl:
                    log.info('player/ID %s/%d: ignoring for another %d seconds' % (pN, pID, ittl))
                    return None
                else:
                    adventure_cntr = dict(Counter(b['resourceName_string'] for b in bv if b['buffName_string'] == "Adventure"))
                    adventure_list = [self.StarItem(pID, pN, "Adventure", i[0], i[1]) for i in adventure_cntr.items()]
                    otheritms_list = [self.StarItem(pID, pN, i['resourceName_string'], i['buffName_string'], i['amount']) for i in bv if i['buffName_string'] != "Adventure"]
                    star_menu = adventure_list + otheritms_list
                    self._pom_remove_if_exists(pID, pN)
                    self._pom_insert_star_list(pID, pN, star_menu)
                    log.info('player/ID %s/%d: star menu saved, will be ignored for the next %d seconds' % (pN, pID, self.recent_act.max_age))
                    self.recent_act[recent_key] = True
Code example #38
 def get_related_launch_config_names(self):
     return nested_lookup(
         'LaunchConfigurationName',
         self.get_related_launch_config_descriptions()
     )
Code example #39
File: rds.py Project: russellballestrini/botoform
 def get_related_db_ids(self):
     """return a list of db instance identifiers related to this VPC."""
     return nested_lookup(
                'DBInstanceIdentifier',
                self.get_related_db_descriptions(),
            )
Code example #40
 def test_nested_lookup_wrapped_in_list_in_list(self):
     results = nested_lookup("d", [{}, [self.subject_dict, {}]])
     self.assertEqual(2, len(results))
     self.assertIn(100, results)
     self.assertIn(200, results)
     self.assertSetEqual({100, 200}, set(results))
Code example #41
File: rds.py Project: russellballestrini/botoform
 def get_related_db_endpoints(self):
     """return a list of DB instance endpoints related to this VPC"""
     return nested_lookup('Endpoint', self.get_related_db_descriptions())
Code example #42
def get_region_names(session):
    ec2 = session.create_client('ec2', region_name='us-east-1')
    return nested_lookup('RegionName', ec2.describe_regions())
Code example #43
File: vpc_endpoint.py Project: NandeeshHD/botoform
 def services(self):
     """Return a list of available VPC endpoint services."""
     return nested_lookup(
         'ServiceNames',
         self.evpc.boto.ec2_client.describe_vpc_endpoint_services()
     )[0]
Code example #44
 def get_related_autoscaling_group_names(self):
     return nested_lookup(
         'AutoScalingGroupName',
         self.get_related_autoscaling_group_descriptions()
     )
Code example #45
File: elasticache.py Project: NandeeshHD/botoform
 def get_related_cluster_ids(self):
     """return a list of cache cluster ids related to this VPC"""
     descriptions = self.get_related_cluster_descriptions()
     return nested_lookup('CacheClusterId', descriptions)
Code example #46
File: elb.py Project: NandeeshHD/botoform
 def get_related_elb_names(self):
     """Return a list of related ELB (Load Balancer) names."""
     return nested_lookup(
                'LoadBalancerName',
                self.get_related_elb_descriptions(),
            )
Code example #47
 def test_nested_lookup(self):
     results = nested_lookup("d", self.subject_dict)
     self.assertEqual(2, len(results))
     self.assertIn(100, results)
     self.assertIn(200, results)
     self.assertSetEqual({100, 200}, set(results))
Code example #48
File: vpc_endpoint.py Project: NandeeshHD/botoform
 def related_ids(self):
     """Return VPC endpoint ids related to this VPC."""
     return nested_lookup('VpcEndpointId', self.describe_related())