Example 1
def repository_path_search(apiurl, project, search_project, search_repository):
    queue = []

    # Initialize breadth first search queue with repositories from top project.
    root = ETL.fromstringlist(show_project_meta(apiurl, project))
    for repository in root.xpath(
            'repository[path[@project and @repository]]/@name'):
        queue.append((repository, project, repository))

    # Perform a breadth first search and return the first repository chain with
    # a series of path elements targeting search project and repository.
    for repository_top, project, repository in queue:
        if root.get('name') != project:
            # Consecutive queue entries share a project, so reuse the parsed meta.
            root = ETL.fromstringlist(show_project_meta(apiurl, project))

        paths = root.findall('repository[@name="{}"]/path'.format(repository))
        for path in paths:
            if path.get('project') == search_project and path.get(
                    'repository') == search_repository:
                return repository_top

            queue.append(
                (repository_top, path.get('project'), path.get('repository')))

    return None
Example 2
 def repository_published(self, project):
     root = ET.fromstringlist(
         show_results_meta(self.apiurl,
                           project,
                           multibuild=True,
                           repository=['standard']))
     return not len(root.xpath('result[@state!="published"]'))
Example 3
    def get_build_succeeded_packages(self, project):
        """Get the build succeeded packages from `from_prj` project.
        """

        f = osc.core.show_prj_results_meta(self.apiurl, project)
        root = ET.fromstringlist(f)
        # print ET.dump(root)

        failed_multibuild_pacs = []
        pacs = []
        for node in root.findall('result'):
            if node.get('repository') == 'standard' and node.get('arch') == 'x86_64':
                for pacnode in node.findall('status'):
                    if ':' in pacnode.get('package'):
                        mainpac = pacnode.get('package').split(':')[0]
                        if pacnode.get('code') not in ['succeeded', 'excluded']:
                            failed_multibuild_pacs.append(pacnode.get('package'))
                            if mainpac not in failed_multibuild_pacs:
                                failed_multibuild_pacs.append(mainpac)
                            if mainpac in pacs:
                                pacs.remove(mainpac)
                        else:
                            if mainpac in failed_multibuild_pacs:
                                failed_multibuild_pacs.append(pacnode.get('package'))
                            elif mainpac not in pacs:
                                pacs.append(mainpac)
                        continue
                    if pacnode.get('code') == 'succeeded':
                        pacs.append(pacnode.get('package'))
            else:
                logging.error("Can not find standard/x86_64 results")

        return pacs
Example 4
def sendCmd(host, port, cmd):
    """
    Send a cmd string to the server.
    @host is the host address
    @port is the port on the server to send the command
    @cmd is the string form of an XML document.
    """
    import socket
    from time import sleep
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((host, port))
    s.settimeout(240)
    s.sendall(cmd)
    s.shutdown(socket.SHUT_WR)
    sleep(1)
    buff = s.recv(4096)
    data = []
    data.append(buff)
    while len(buff) > 0:
        buff = s.recv(4096)
        if buff:
            data.append(buff)
        else:
            break
    s.close()
    return etree.fromstringlist(data)
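A hypothetical call, assuming a server speaking this XML-over-TCP protocol is listening locally; the address and payload are illustrative, and the command is passed as bytes so sendall() also works on Python 3:

from lxml import etree  # sendCmd() assumes an etree with this API in scope

root = sendCmd('127.0.0.1', 9000, b'<request type="status"/>')
print(etree.tostring(root, pretty_print=True).decode())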
Example 5
def list_parse():
    try:
        from lxml import etree as etree
    except ImportError:
        import xml.etree.ElementTree as etree
    import gzip

    params = ['series_animedb_id', 'series_title', 'series_type', 'series_episodes',
     'my_id', 'my_watched_episodes', 'my_start_date', 'my_finish_date',
     'my_fansub_group', 'my_rated', 'my_score', 'my_dvd', 'my_storage',
     'my_status', 'my_comments', 'my_times_watched', 'my_rewatch_value',
     'my_downloaded_eps', 'my_tags', 'my_rewatching', 'my_rewatching_ep',
     'update_on_import']

    status = {'p' : 'Plan to Watch',
              'c' : 'Completed',
              'w' : 'Watching',
              'name' : 'my_status'
              }

    ALL = 1

    with gzip.open('zip/animelist_1391893533_-_3199957.xml.gz', 'r') as f:
        #tree = etree.parse(f)
        #root = tree.getroot()
        root = etree.fromstringlist(f)
        #print(len(titles))
        count = 0
        for title in root.findall('anime'):
            if (title.find(status['name']).text == status['c'] or ALL):
                name = title.find('series_title').text
                print(name)
                count += 1
        print()
        print('Count: ', count)
Example 6
def parse_for_starting_pitcher_outings(xml_file):
    with open(xml_file) as f:
        it = itertools.chain('<games>', f, '</games>')
        doc = etree.fromstringlist(it)
        pitcher_outings = []
        games = []
        for boxscore in doc.findall('boxscore'):
            year, month, day = map(lambda x: int(x),
                                   boxscore.get('date').split('/'))
            date = datetime.date(year, month, day)
            site = boxscore.get('site')

            linescore = boxscore.find('linescore')
            total_runs = (int(linescore.get('away_runs')) +
                          int(linescore.get('home_runs')))
            games.append(Game(date, site, total_runs))

            for pitching in boxscore.findall('pitching'):
                for pitcher in pitching.findall('pitcher'):
                    p = PitcherStats(pitcher.attrib)
                    p.out = p.outs  # slight difference
                    if p.gs == 1:
                        pitcher_outings.append(Outing(date, p, site))
                        break

    return pitcher_outings, games
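The itertools.chain() call above wraps a synthetic <games> root around a file whose top level is a sequence of sibling elements, so the whole stream parses as one document. A self-contained sketch of the same trick, with a made-up fragment:

import io
import itertools
from lxml import etree

# Wrap a root element around content that lacks one, then feed it lazily.
fragment = io.StringIO('<boxscore date="2020/07/23" site="NYC"/>\n')
doc = etree.fromstringlist(itertools.chain('<games>', fragment, '</games>'))
print(len(doc.findall('boxscore')))  # 1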
Example 7
    def apply(self, splitter):
        super(StrategyQuick, self).apply(splitter)

        # Leaper accepted which means any extra reviews have been added.
        splitter.filter_add(
            './review[@by_user="******" and @state="accepted"]')

        # No @by_project reviews that are not accepted. If not first round stage
        # this should also ignore previous staging project reviews or already
        # accepted human reviews.
        splitter.filter_add(
            'not(./review[@by_project and @state!="accepted"])')

        # Only allow reviews by whitelisted groups and users as all others will
        # be considered non-quick (like @by_group="legal-auto"). The allowed
        # groups are only those configured as reviewers on the target project.
        meta = ET.fromstringlist(
            show_project_meta(splitter.api.apiurl, splitter.api.project))
        allowed_groups = meta.xpath('group[@role="reviewer"]/@groupid')
        allowed_users = []
        if 'repo-checker' in splitter.config:
            allowed_users.append(splitter.config['repo-checker'])

        self.filter_review_whitelist(splitter, 'by_group', allowed_groups)
        self.filter_review_whitelist(splitter, 'by_user', allowed_users)
Example 8
def project_role_expand(apiurl, project, role='maintainer'):
    """
    All users with a certain role on a project, including those who have the role directly assigned
    and those who are part of a group with that role.
    """
    meta = ETL.fromstringlist(show_project_meta(apiurl, project))
    return meta_role_expand(apiurl, meta, role)
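meta_role_expand() is not shown here; a rough sketch of what it plausibly does, mirroring the maintainers_get() pattern in Example 16 (groups_members() is an assumed helper, not necessarily the project's real one):

def meta_role_expand_sketch(apiurl, meta, role='maintainer'):
    # Users holding the role directly on the entity.
    users = meta.xpath('//person[@role="{}"]/@userid'.format(role))
    # Plus the members of any group holding the same role.
    groups = meta.xpath('//group[@role="{}"]/@groupid'.format(role))
    users.extend(groups_members(apiurl, groups))  # assumed helper
    return users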
Example 9
def e():
    xsltQuery = request.args.get('xml', '')
    xsltStrings = [xsltQuery, "asd", "random"]
    xslt_root = etree.fromstringlist(xsltStrings)

    f = StringIO('<foo><bar></bar></foo>')
    tree = etree.parse(f)
    result = tree.xslt(xslt_root, a="'A'")  # Not OK
Example 10
 async def get_good_list(self) -> None:
     sitemap_body = re.split(
         r'\<\?xml version="1\.0" encoding="UTF\-8"\?\>[\n, \r]', await
         self._get_file(self.url))
     sitemap_xml = etree.fromstringlist(sitemap_body)
     self.url_list = [
         self._gen_dict(items) for items in sitemap_xml.iterchildren()
     ]
Example 11
def repository_published(apiurl, project, repository):
    root = ETL.fromstringlist(
        show_results_meta(apiurl,
                          project,
                          multibuild=True,
                          repository=[repository]))
    return not len(
        root.xpath('result[@state!="published" and @state!="unpublished"]'))
Example 12
def h():
    xsltQuery = '<non><remote><query></query></remote></non>'
    xsltStrings = [xsltQuery, "asd", "random"]
    xslt_root = etree.fromstringlist(xsltStrings)

    f = StringIO('<foo><bar></bar></foo>')
    tree = etree.parse(f)
    result = tree.xslt(xslt_root, a="'A'")  # OK
Example 13
 async def _find_sitemap_nested(self, url: str) -> None:
     logger.debug('find nested sitemap from: %s', url)
     parent = re.split(r'<\?xml version="1\.0" encoding="UTF-8"\?>\r', await
                       self._get_file(url))
     parent_xml = etree.fromstringlist(parent)
     if re.search('sitemapindex', parent_xml.tag) is not None:
         for sitemaps in parent_xml.iterchildren():
             self.sitemap_list.append(
                 self._gen_dict(sitemaps.iterchildren()))
Example 14
    def _perform(self, trans: Transaction) -> ActionResult:
        self.log.debug("config_record_state() with device {0}".format(
            self.dev_name))
        state_name = self.state_name
        self.log.debug("incl_rollbacks=" + str(self.include_rollbacks))
        self.log.debug("style_format=" + str(self.style_format))
        try:
            # list_rollbacks() returns one less rollback than the second argument,
            # i.e. send 2 to get 1 rollback. Therefore the +1
            rollbacks = _ncs.maapi.list_rollbacks(
                trans.maapi.msock,
                int(self.include_rollbacks) + 1)
            # rollbacks are returned 'most recent first', i.e. reverse chronological order
        except _ncs.error.Error:
            rollbacks = []
        self.log.debug("rollbacks=" + str([r.fixed_nr for r in rollbacks]))
        index = 0
        state_filenames = []
        for rb in [None] + rollbacks:
            if rb is None:
                self.log.debug("Recording current transaction state")
            else:
                self.log.debug("Recording rollback" + str(rb.fixed_nr))
                self.log.debug("Recording rollback" + str(rb.nr))
                trans.load_rollback(rb.nr)

            state_name_index = state_name
            if index > 0:
                state_name_index = state_name + "-" + str(index)
            format = 'xml' if 'xml' == str(self.style_format) else 'cfg'
            existing_filename = self.state_name_to_existing_filename(
                state_name_index)
            if existing_filename is not None:
                if not self.overwrite:
                    raise ActionError(
                        "state {} already exists".format(state_name_index))
                self.remove_state_file(existing_filename)
            state_filename = self.format_state_filename(state_name_index,
                                                        format=format)
            device_path = "/ncs:devices/device{" + self.dev_name + "}/config"
            config_type = _ncs.maapi.CONFIG_C
            if format == 'xml':
                config_type = _ncs.maapi.CONFIG_XML
            with open(state_filename, "wb") as state_file:
                save_data = self.save_config(trans, config_type, device_path)
                if format == 'xml':
                    # just pretty_print it
                    tree = etree.fromstringlist(save_data)
                    state_file.write(etree.tostring(tree, pretty_print=True))
                else:
                    for data in save_data:
                        state_file.write(data)
            self.write_metadata(state_filename)
            state_filenames += [state_name_index]
            index += 1
            trans.revert()
        return {'success': "Recorded states " + str(state_filenames)}
Example 15
 def generate_all_archs(self, project):
     meta = ET.fromstringlist(show_project_meta(self.apiurl, project))
     archs = set()
     for arch in meta.findall('.//arch'):
         archs.add(arch.text)
     result = []
     for arch in archs:
         result.append(f"arch_{arch}=1")
     return '&'.join(result)
Example 16
def maintainers_get(apiurl, project, package=None):
    if package is None:
        meta = ETL.fromstringlist(show_project_meta(apiurl, project))
        maintainers = meta.xpath('//person[@role="maintainer"]/@userid')

        groups = meta.xpath('//group[@role="maintainer"]/@groupid')
        maintainers.extend(groups_members(apiurl, groups))

        return maintainers

    # Ugly reparse, but real xpath makes the rest much cleaner.
    root = owner_fallback(apiurl, project, package)
    root = ETL.fromstringlist(ET.tostringlist(root))
    maintainers = root.xpath('//person[@role="maintainer"]/@name')

    groups = root.xpath('//group[@role="maintainer"]/@name')
    maintainers.extend(groups_members(apiurl, groups))

    return maintainers
Example 17
def analysis_config(content):
    ret = {}
    t = etree.fromstringlist(content)
    t = etree.ElementTree(t)
    r = t.getroot()
    for i in r:
        key = i.values()[0]
        val = i.text
        print(key, val)
        ret[key] = val
    return ret
Example 18
def parse(context, data):
    base = {"source_url": data.get("url"), "source_id": 0}
    with context.load_file(data["content_hash"], read_mode="rt") as res:
        xml_data = etree.fromstringlist(res)
        for i, commitment in enumerate(xml_data.findall(".//commitment")):
            base["source_contract_id"] = i
            base["source_line"] = commitment.sourceline
            rows = convert_commitment(base, commitment)
            for row in rows:
                print(row)
                context.emit(data=row)
Example 19
def package_role_expand(apiurl, project, package, role='maintainer', inherit=True):
    """
    All users with a certain role on a package, including those who have the role directly assigned
    and those who are part of a group with that role.
    """
    meta = ETL.fromstringlist(show_package_meta(apiurl, project, package))
    users = meta_role_expand(apiurl, meta, role)

    if inherit:
        users.extend(project_role_expand(apiurl, project, role))

    return users
Example 20
 def test_push_new_work_invalid_data_xml(self):
     # Note: the recorded cassette returns (magically) a proper error.
     pusher = domain_models.OrcidPusher(self.orcid, self.recid,
                                        self.oauth_token)
     data = b'<work:work xmlns:common="http://www.orcid.org/ns/common" xmlns:work="http://www.orcid.orgsens/work"></work:work>'
     invalid_xml = etree.fromstringlist([data])
     with pytest.raises(exceptions.InputDataInvalidException):
         with mock.patch(
                 "inspirehep.orcid.domain_models.OrcidConverter.get_xml",
                 return_value=invalid_xml,
         ):
             pusher.push()
Example 21
def parseXMLTree(inpt, fromstring=False):
    try:
        from lxml import etree
    except ImportError:
        import xml.etree.ElementTree as etree

    if not fromstring:
        doc = etree.parse(inpt)
        root = doc.getroot()
    else:
        root = etree.fromstringlist(inpt)

    return root
Example 22
def maintainer(args):
    if args.group is None:
        # Default is appended to rather than overridden (upstream bug).
        args.group = ['factory-maintainers', 'factory-staging']
    desired = set(args.group)

    apiurl = osc.conf.config['apiurl']
    devel_projects = devel_projects_load(args)
    for devel_project in devel_projects:
        meta = ET.fromstringlist(show_project_meta(apiurl, devel_project))
        groups = meta.xpath('group[@role="maintainer"]/@groupid')
        intersection = set(groups).intersection(desired)
        if len(intersection) != len(desired):
            print('{} missing {}'.format(devel_project, ', '.join(desired - intersection)))
Example 23
def repository_path_search(apiurl, project, search_project, search_repository):
    queue = []

    # Initialize breadth first search queue with repositories from top project.
    root = ETL.fromstringlist(show_project_meta(apiurl, project))
    for repository in root.xpath('repository[path[@project and @repository]]/@name'):
        queue.append((repository, project, repository))

    # Perform a breadth first search and return the first repository chain with
    # a series of path elements targeting search project and repository.
    for repository_top, project, repository in queue:
        if root.get('name') != project:
            # Consecutive queue entries share a project, so reuse the parsed meta.
            root = ETL.fromstringlist(show_project_meta(apiurl, project))

        paths = root.findall('repository[@name="{}"]/path'.format(repository))
        for path in paths:
            if path.get('project') == search_project and path.get('repository') == search_repository:
                return repository_top

            queue.append((repository_top, path.get('project'), path.get('repository')))

    return None
Example 24
    def GetCapabilities(self, options,flags):
        """!Get capabilities from WCS server and print to stdout

        """
        self._debug("GetCapabilities", "started")

        cap  = self._fetchCapabilities(options,flags)
        root = etree.fromstringlist(cap.readlines())
        cov_offering = []
        for label in root.iter('{*}CoverageOfferingBrief'):
            cov_offering.append(label.find('{*}name').text + " : " + label.find('{*}label').text)
        gscript.message("Available layers:")
        gscript.message('\n'.join(cov_offering))
        self._debug("GetCapabilities", "finished")
Example 25
    def GetCapabilities(self, options,flags):
        """!Get capabilities from WCS server and print to stdout

        """
        self._debug("GetCapabilities", "started")

        cap = self._fetchCapabilities(options,flags)
        root = etree.fromstringlist(cap.readlines())
        cov_offering = []
        for label in root.iter('{*}CoverageOfferingBrief'):
            cov_offering.append(label.find('{*}name').text + " : " + label.find('{*}label').text)
        grass.message("Available layers:")
        grass.message('\n'.join(cov_offering))
        self._debug("GetCapabilities", "finished")
Example 26
def fromstringlist(sequence: Sequence[_ParserInputType],
                   parser: Optional[XMLParser] = None) -> etree._Element:
    """Parse XML document from sequence of string fragments.

    :param sequence:
        A list or other sequence of strings containing XML data.
    :param parser:
        Optional parser instance, defaulting to
        :class:`lxml.etree.ETCompatXMLParser`.

    :return:
        An Element instance.
    """
    if parser is None:
        parser = ETCompatXMLParser()
    return etree.fromstringlist(sequence, parser)
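A quick sanity check of the wrapper above, calling lxml directly as the wrapper's body does; the parser is fed incrementally, so the fragments need not align with tag boundaries (the fragments here are illustrative):

from lxml import etree

fragments = ['<root><child name', '="a"/></root>']
root = etree.fromstringlist(fragments)
print(root.tag, root[0].get('name'))  # root a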
Example 27
def revision_index(api):
    if not hasattr(revision_index, 'index'):
        revision_index.index = {}

        project, package = project_pseudometa_package(api.apiurl, api.project)
        try:
            root = ET.fromstringlist(
                get_commitlog(api.apiurl, project, package, None, format='xml'))
        except HTTPError as e:
            return revision_index.index

        for logentry in root.findall('logentry'):
            date = date_parse(logentry.find('date').text)
            revision_index.index[date] = logentry.get('revision')

    return revision_index.index
Example 28
    def project_only(self, project, post_comments=False):
        # self.staging_config needed by target_archs().
        api = self.staging_api(project)

        if not self.force and not self.repository_published(project):
            self.logger.info('{}/standard not published'.format(project))
            return

        build = ET.fromstringlist(
            show_results_meta(self.apiurl,
                              project,
                              multibuild=True,
                              repository=['standard'])).get('state')
        dashboard_content = api.dashboard_content_load('repo_checker')
        if not self.force and dashboard_content:
            build_previous = dashboard_content.splitlines()[0]
            if build == build_previous:
                self.logger.info('{} build unchanged'.format(project))
                return

        comment = [build]
        for arch in self.target_archs(project):
            directory_project = self.mirror(project, arch)

            parse = project if post_comments else False
            results = {
                'cycle': CheckResult(True, None),
                'install': self.install_check(project, [directory_project],
                                              arch, parse=parse),
            }

            if not all(result.success for _, result in results.items()):
                self.result_comment(arch, results, comment)

        text = '\n'.join(comment).strip()
        if not self.dryrun:
            api.dashboard_content_ensure('repo_checker', text + '\n',
                                         'project_only run')
            self.whitelist_clean(project)
        else:
            print(text)

        if post_comments:
            self.package_comments(project)
Example 29
def parseXML(data, user_id):
    log11 = open('log11.txt', 'w')
    [xml, k] = data
    user_errors[user_id] = 2
    error = "OK"
    df = []
    df_c = []
    xmlschema_doc = etree.parse("apps/neww/static/tir4.xsd")
    xmlschema = etree.XMLSchema(xmlschema_doc)
    xml_doc = etree.parse(xml)
    try:
        xmlschema.assertValid(xml_doc)
        with open(xml, 'r') as file:
            root = etree.fromstringlist(
                file, parser=html.HTMLParser(encoding='utf-8'))
    except UnicodeDecodeError:
        error = "Invalid format. Please upload a file with the .xml extension"
        user_errors[user_id] = 1
    except etree.XMLSyntaxError:
        error = "Invalid format. Please upload a file with the .xml extension"
        user_errors[user_id] = 1
    except etree.DocumentInvalid:
        error = "The file failed validation. Check the file formatting rules"
    if error == "OK":
        cols = ['ekp', 'h011', 'h015', 'k030', 'z220', 't100']
        df = pd.DataFrame(columns=cols)
        a = {}
        [a.update({i: 0}) for i in cols]
        result = find_data(root, 0)
        for data in result[0]:
            if data.tag.lower() != "data": continue
            row = {}
            for i in data:
                if i.tag.lower() == 't100':
                    row['t100'] = float(i.text) * k
                else:
                    row[i.tag.lower()] = i.text
            df.loc[df.shape[0]] = row
    quarter, year = pd.to_datetime(
        date, dayfirst=True).quarter, pd.to_datetime(date).year
    if error == "OK":
        df_c = df.copy()
        for i1 in range(df.shape[0]):
            df_c.iloc[i1] = a
    result = [error, df, df_c, quarter, year]
    log11.close()
    return result
Example 30
def package_source_hash_history(apiurl,
                                project,
                                package,
                                limit=5,
                                include_project_link=False):
    try:
        # get_commitlog() reverses the order so newest revisions are first.
        root = ETL.fromstringlist(
            get_commitlog(apiurl, project, package, None, format='xml'))
    except HTTPError as e:
        if e.code == 404:
            return

        raise e

    if include_project_link:
        source_hashes = []

    source_md5s = root.xpath('logentry/@srcmd5')
    for source_md5 in source_md5s[:limit]:
        source_hash = package_source_hash(apiurl, project, package, source_md5)
        yield source_hash

        if include_project_link:
            source_hashes.append(source_hash)

    if include_project_link and (not limit or len(source_md5s) < limit):
        link = entity_source_link(apiurl, project)
        if link is None:
            return
        project = link.get('project')

        if limit:
            limit_remaining = limit - len(source_md5s)

        # Allow small margin for duplicates.
        for source_hash in package_source_hash_history(apiurl, project,
                                                       package, None, True):
            if source_hash in source_hashes:
                continue

            yield source_hash

            if limit:
                limit_remaining += -1
                if limit_remaining == 0:
                    break
Example 31
def repository_published(apiurl, project, repository, archs=[]):
    # In a perfect world this would check for the existence of imports from i586
    # into x86_64, but in an even more perfect world OBS would show archs that
    # depend on another arch for imports as not completed until the dependent
    # arch completes. This is a simplified check that ensures x86_64 repos are
    # not indicated as published when i586 has not finished which is primarily
    # useful for repo_checker when only checking x86_64. The API treats archs as
    # a filter on what to return and thus non-existent archs do not cause an
    # issue nor alter the result.
    if 'x86_64' in archs and 'i586' not in archs:
        # Create a copy to avoid altering caller's list.
        archs = list(archs)
        archs.append('i586')

    root = ETL.fromstringlist(show_results_meta(
        apiurl, project, multibuild=True, repository=[repository], arch=archs))
    return not len(root.xpath('result[@state!="published" and @state!="unpublished"]'))
Example 32
    def source_has_correct_maintainers(self, source_project):
        """Checks whether the source project has the required maintainer

        If a 'required-source-maintainer' is set, it checks whether it is a
        maintainer for the source project. Inherited maintainership is
        intentionally ignored so that the maintainer must be set explicitly.

        source_project - source project name
        """
        self.logger.info(
            'Checking required maintainer from the source project (%s)' % self.required_maintainer
        )
        if not self.required_maintainer: return True

        meta = ETL.fromstringlist(show_project_meta(self.apiurl, source_project))
        maintainers = meta.xpath('//person[@role="maintainer"]/@userid')
        maintainers += ['group:' + g for g in meta.xpath('//group[@role="maintainer"]/@groupid')]

        return self.required_maintainer in maintainers
Example 33
    def insert_entries(self, entries_xml, taxids):
        """insert UniProt entries from XML"""

        # reloading etree is necessary to avoid a memory leak
        if 'etree' in sys.modules:
            importlib.reload(etree)

        parser = etree.XMLParser(collect_ids=False)
        entries = etree.fromstringlist(entries_xml, parser)

        for entry in entries:
            self.insert_entry(entry, taxids)
            entry.clear()
            del entry

        etree.clear_error_log()
        del entries

        self.session.commit()
Example 34
def package_source_hash_history(apiurl, project, package, limit=5, include_project_link=False):
    try:
        # get_commitlog() reverses the order so newest revisions are first.
        root = ETL.fromstringlist(
            get_commitlog(apiurl, project, package, None, format='xml'))
    except HTTPError as e:
        if e.code == 404:
            return

        raise e

    if include_project_link:
        source_hashes = []

    source_md5s = root.xpath('logentry/@srcmd5')
    for source_md5 in source_md5s[:limit]:
        source_hash = package_source_hash(apiurl, project, package, source_md5)
        yield source_hash

        if include_project_link:
            source_hashes.append(source_hash)

    if include_project_link and (not limit or len(source_md5s) < limit):
        link = entity_source_link(apiurl, project)
        if link is None:
            return
        project = link.get('project')

        if limit:
            limit_remaining = limit - len(source_md5s)

        # Allow small margin for duplicates.
        for source_hash in package_source_hash_history(apiurl, project, package, None, True):
            if source_hash in source_hashes:
                continue

            yield source_hash

            if limit:
                limit_remaining += -1
                if limit_remaining == 0:
                    break
Example 35
def treat(xml, url, name):
    if DO_BENCHMARK_ONLY:
        return False
    path = tools.get_current_target_folder()+url+name+".xml"
    xml.find("time").text = "nothing"
    years = xml.find("*/years")
    
    if years is not None:
        years.getparent().remove(years)
        
    if DO_SAVE:
        with open(path, "w") as f:
            f.write(etree.tostring(xml))
    else:
        with open(path, "r") as f:
            xml_ref = etree.fromstringlist(f.readlines())
        
        if etree.tostring(xml_ref) != etree.tostring(xml):
            raise Difference(url, name, xml_ref, xml)
    return False
Example 36
def build_sitemaps():
    sitemap_parts = [
        u'<sitemapindex xmlns="http://www.sitemaps.org/'
        u'schemas/sitemap/0.9">'
    ]
    now = datetime.utcnow()
    timestamp = '%s+00:00' % now.replace(microsecond=0).isoformat()
    index_path = os.path.join(settings.MEDIA_ROOT, 'sitemap.xml')

    for locale in settings.MDN_LANGUAGES:
        queryset = (Document.objects.filter(
            is_template=False, locale=locale, is_redirect=False).exclude(
                title__startswith='User:'******'Talk:'))
        if queryset.count() > 0:
            info = {'queryset': queryset, 'date_field': 'modified'}

            directory = os.path.join(settings.MEDIA_ROOT, 'sitemaps', locale)
            if not os.path.exists(directory):
                os.makedirs(directory)

            with open(os.path.join(directory, 'sitemap.xml'), 'w') as f:
                f.write(
                    smart_str(
                        render_to_string(
                            'wiki/sitemap.xml',
                            {'urls': WikiSitemap(info).get_urls(page=1)})))

            del info  # Force the gc to cleanup

            sitemap_url = absolutify('/sitemaps/%s/sitemap.xml' % locale)
            sitemap_parts.append(SITEMAP_ELEMENT % (sitemap_url, timestamp))

        del queryset  # Force the gc to cleanup

    sitemap_parts.append(u'</sitemapindex>')

    sitemap_tree = etree.fromstringlist(sitemap_parts)
    with open(index_path, 'w') as index_file:
        sitemap_tree.getroottree().write(index_file,
                                         encoding='utf-8',
                                         pretty_print=True)
Example 37
def maintainers_get(apiurl, project, package=None):
    if package:
        try:
            meta = show_package_meta(apiurl, project, package)
        except HTTPError as e:
            if e.code == 404:
                # Fallback to project in the case of new package.
                meta = show_project_meta(apiurl, project)
    else:
        meta = show_project_meta(apiurl, project)
    meta = ET.fromstringlist(meta)

    userids = []
    for person in meta.findall('person[@role="maintainer"]'):
        userids.append(person.get('userid'))

    if len(userids) == 0 and package is not None:
        # Fallback to project if package has no maintainers.
        return maintainers_get(apiurl, project)

    return userids
Example 38
def build_index_sitemap(results):
    """
    A chord callback task that writes a sitemap index file for the
    given results of :func:`~kuma.wiki.tasks.build_locale_sitemap` task.
    """
    sitemap_parts = [SITEMAP_START]

    for result in results:
        # result can be empty if no documents were found
        if result is not None:
            locale, names, timestamp = result
            for name in names:
                sitemap_url = absolutify("/sitemaps/%s/%s" % (locale, name))
                sitemap_parts.append(SITEMAP_ELEMENT % (sitemap_url, timestamp))

    sitemap_parts.append(SITEMAP_END)

    index_path = os.path.join(settings.MEDIA_ROOT, "sitemap.xml")
    sitemap_tree = etree.fromstringlist(sitemap_parts)
    with open(index_path, "w") as index_file:
        sitemap_tree.getroottree().write(index_file, encoding="utf-8", pretty_print=True)
Example 39
    def apply(self, splitter):
        super(StrategyQuick, self).apply(splitter)

        # Leaper accepted which means any extra reviews have been added.
        splitter.filter_add('./review[@by_user="******" and @state="accepted"]')

        # No @by_project reviews that are not accepted. If not first round stage
        # this should also ignore previous staging project reviews or already
        # accepted human reviews.
        splitter.filter_add('not(./review[@by_project and @state!="accepted"])')

        # Only allow reviews by whitelisted groups and users as all others will
        # be considered non-quick (like @by_group="legal-auto"). The allowed
        # groups are only those configured as reviewers on the target project.
        meta = ET.fromstringlist(show_project_meta(splitter.api.apiurl, splitter.api.project))
        allowed_groups = meta.xpath('group[@role="reviewer"]/@groupid')
        allowed_users = []
        if 'repo-checker' in splitter.config:
            allowed_users.append(splitter.config['repo-checker'])

        self.filter_review_whitelist(splitter, 'by_group', allowed_groups)
        self.filter_review_whitelist(splitter, 'by_user', allowed_users)
Example 40
def TransProcess(process, file):
    """Get process describe of wps serive and transform to galaxy tools configure file"""
    
    request = "Service=WPS&Version=1.0.0&request=describeprocess&identifier={0}".format(process)
    wpsreq = pywps.Pywps(pywps.METHOD_GET)
    wpsreq.parseRequest(request)
    wpsreq.performRequest()
    
    if wpsreq.response.find("ExceptionReport") == -1:
        # transform
        xslttree = etree.parse(os.path.join(workdir, 'wpstogalaxy.xsl')).getroot()
        xmltree = etree.fromstringlist(wpsreq.response)
        trans = etree.XSLT(xslttree)
        resxml = trans(xmltree)
        # write result
        with open(file, 'w') as f:
            f.write(str(resxml))
        return
    else:
        # error of service
        return wpsreq.response
Example 41
File: tasks.py Project: Osmose/kuma
def build_sitemaps():
    sitemap_parts = [u'<sitemapindex xmlns="http://www.sitemaps.org/'
                     u'schemas/sitemap/0.9">']
    now = datetime.utcnow()
    timestamp = '%s+00:00' % now.replace(microsecond=0).isoformat()
    index_path = os.path.join(settings.MEDIA_ROOT, 'sitemap.xml')

    for locale in settings.MDN_LANGUAGES:
        queryset = (Document.objects
                            .filter(is_template=False,
                                    locale=locale,
                                    is_redirect=False)
                            .exclude(title__startswith='User:'******'Talk:'))
        if queryset.count() > 0:
            info = {'queryset': queryset, 'date_field': 'modified'}

            directory = os.path.join(settings.MEDIA_ROOT, 'sitemaps', locale)
            if not os.path.exists(directory):
                os.makedirs(directory)

            with open(os.path.join(directory, 'sitemap.xml'), 'w') as f:
                f.write(smart_str(render_to_string('wiki/sitemap.xml',
                                  {'urls': WikiSitemap(info).get_urls(page=1)})))

            del info  # Force the gc to cleanup

            sitemap_url = absolutify('/sitemaps/%s/sitemap.xml' % locale)
            sitemap_parts.append(SITEMAP_ELEMENT % (sitemap_url, timestamp))

        del queryset  # Force the gc to cleanup

    sitemap_parts.append(u'</sitemapindex>')

    sitemap_tree = etree.fromstringlist(sitemap_parts)
    with open(index_path, 'w') as index_file:
        sitemap_tree.getroottree().write(index_file,
                                         encoding='utf-8',
                                         pretty_print=True)
Example 42
 def test_from_element(self):
     self.assertEqual(
         type(String.from_element(etree.fromstringlist("<a>b</a>"))),
         type(String)
     )
Example 43
    def update_and_solve_target(self, api, target_project, target_config, main_repo,
                                project, scope, force, no_checkout,
                                only_release_packages, stop_after_solve):
        self.all_architectures = target_config.get('pkglistgen-archs').split(' ')
        self.repos = self.expand_repos(project, main_repo)
        print('[{}] {}/{}: update and solve'.format(scope, project, main_repo))

        group = target_config.get('pkglistgen-group', '000package-groups')
        product = target_config.get('pkglistgen-product', '000product')
        release = target_config.get('pkglistgen-release', '000release-packages')
        oldrepos = target_config.get('pkglistgen-repos', '000update-repos')

        url = api.makeurl(['source', project])
        packages = ET.parse(http_GET(url)).getroot()
        if packages.find('entry[@name="{}"]'.format(product)) is None:
            if not self.dry_run:
                undelete_package(api.apiurl, project, product, 'revive')
            # TODO disable build.
            print('{} undeleted, skip dvd until next cycle'.format(product))
            return
        elif not force:
            root = ET.fromstringlist(show_results_meta(api.apiurl, project, product,
                                                       repository=[main_repo], multibuild=True))
            if len(root.xpath('result[@state="building"]')) or len(root.xpath('result[@state="dirty"]')):
                print('{}/{} build in progress'.format(project, product))
                return

        drop_list = api.item_exists(project, oldrepos)
        checkout_list = [group, product, release]
        if drop_list and not only_release_packages:
            checkout_list.append(oldrepos)

        if packages.find('entry[@name="{}"]'.format(release)) is None:
            if not self.dry_run:
                undelete_package(api.apiurl, project, release, 'revive')
            print('{} undeleted, skip dvd until next cycle'.format(release))
            return

        # Cache dir specific to hostname and project.
        host = urlparse(api.apiurl).hostname
        cache_dir = CacheManager.directory('pkglistgen', host, project)

        if not no_checkout:
            if os.path.exists(cache_dir):
                shutil.rmtree(cache_dir)
            os.makedirs(cache_dir)

        group_dir = os.path.join(cache_dir, group)
        product_dir = os.path.join(cache_dir, product)
        release_dir = os.path.join(cache_dir, release)
        oldrepos_dir = os.path.join(cache_dir, oldrepos)

        for package in checkout_list:
            if no_checkout:
                print('Skipping checkout of {}/{}'.format(project, package))
                continue
            checkout_package(api.apiurl, project, package, expand_link=True, prj_dir=cache_dir)

        file_utils.unlink_all_except(release_dir, ['weakremovers.inc'])
        if not only_release_packages:
            file_utils.unlink_all_except(product_dir)
        file_utils.copy_directory_contents(group_dir, product_dir,
                                     ['supportstatus.txt', 'groups.yml',
                                      'reference-unsorted.yml', 'reference-summary.yml',
                                      'package-groups.changes'])
        file_utils.change_extension(product_dir, '.spec.in', '.spec')
        file_utils.change_extension(product_dir, '.product.in', '.product')

        self.input_dir = group_dir
        self.output_dir = product_dir

        print('-> do_update')
        # make sure we only calculate existing architectures
        self.filter_architectures(target_archs(api.apiurl, project, main_repo))
        self.update_repos(self.filtered_architectures)

        if only_release_packages:
            self.load_all_groups()
            self.write_group_stubs()
        else:
            summary = self.solve_project(ignore_unresolvable=str2bool(target_config.get('pkglistgen-ignore-unresolvable')),
                                         ignore_recommended=str2bool(target_config.get('pkglistgen-ignore-recommended')),
                                         locale = target_config.get('pkglistgen-local'),
                                         locales_from = target_config.get('pkglistgen-locales-from'))

        if stop_after_solve:
            return

        if drop_list:
            weakremovers_file = os.path.join(release_dir, 'weakremovers.inc')
            self.create_weakremovers(project, target_config, oldrepos_dir, output=open(weakremovers_file, 'w'))

        delete_products = target_config.get('pkglistgen-delete-products', '').split(' ')
        file_utils.unlink_list(product_dir, delete_products)

        print('-> product service')
        product_version = attribute_value_load(api.apiurl, project, 'ProductVersion')
        if not product_version:
            # for stagings the product version doesn't matter (I hope)
            product_version = '1'
        for product_file in glob.glob(os.path.join(product_dir, '*.product')):
            self.replace_product_version(product_file, product_version)
            print(subprocess.check_output(
                [PRODUCT_SERVICE, product_file, product_dir, project], encoding='utf-8'))

        for delete_kiwi in target_config.get('pkglistgen-delete-kiwis-{}'.format(scope), '').split(' '):
            delete_kiwis = glob.glob(os.path.join(product_dir, delete_kiwi))
            file_utils.unlink_list(product_dir, delete_kiwis)
        if scope == 'staging':
            self.strip_medium_from_staging(product_dir)

        spec_files = glob.glob(os.path.join(product_dir, '*.spec'))
        file_utils.move_list(spec_files, release_dir)
        inc_files = glob.glob(os.path.join(group_dir, '*.inc'))
        # filter special inc file
        inc_files = filter(lambda file: file.endswith('weakremovers.inc'), inc_files)
        file_utils.move_list(inc_files, release_dir)

        # do not overwrite weakremovers.inc if it exists
        # we will commit there afterwards if needed
        if not os.path.exists(os.path.join(release_dir, 'weakremovers.inc')):
            file_utils.move_list([os.path.join(group_dir, 'weakremovers.inc')], release_dir)

        file_utils.multibuild_from_glob(release_dir, '*.spec')
        self.build_stub(release_dir, 'spec')
        self.commit_package(release_dir)

        if only_release_packages:
            return

        file_utils.multibuild_from_glob(product_dir, '*.kiwi')
        self.build_stub(product_dir, 'kiwi')
        self.commit_package(product_dir)

        error_output = ''
        reference_summary = os.path.join(group_dir, 'reference-summary.yml')
        if os.path.isfile(reference_summary):
            summary_file = os.path.join(product_dir, 'summary.yml')
            with open(summary_file, 'w') as f:
                f.write('# Summary of packages in groups')
                for group in sorted(summary):
                    # the unsorted group should appear filtered by
                    # unneeded.yml - so we need the content of unsorted.yml
                    # not unsorted.group (this grew a little unnaturally)
                    if group == 'unsorted':
                        continue
                    f.write('\n' + group + ':\n')
                    for package in sorted(summary[group]):
                        f.write('  - ' + package + '\n')

            try:
                error_output += subprocess.check_output(
                    ['diff', '-u', reference_summary, summary_file], encoding='utf-8')
            except subprocess.CalledProcessError as e:
                error_output += e.output
            reference_unsorted = os.path.join(group_dir, 'reference-unsorted.yml')
            unsorted_file = os.path.join(product_dir, 'unsorted.yml')
            try:
                error_output += subprocess.check_output(
                    ['diff', '-u', reference_unsorted, unsorted_file], encoding='utf-8')
            except subprocess.CalledProcessError as e:
                error_output += e.output

        if len(error_output) > 0:
            self.logger.error('Difference in yml:\n' + error_output)
            return True
Example 44
 def tree_loader(self, url, xpath):
     site_map_lines = urlopen(url).readlines()
     tree = etree.fromstringlist(site_map_lines[1:])
     res = tree.xpath(xpath, namespaces=self.sitemap_name_space)
     return res
Example 45
args = parser.parse_args()

# used for calculating the box size for multiline labels in "autosized" boxes
font_map = {
    #'Arial': './fonts/arial.ttf',
    #'Arial Bold': './fonts/arialbd.ttf',
}
image = Image.new("RGB", (1000, 1000))
draw = ImageDraw.Draw(image)
font_warnings = {}

G = nx.DiGraph()
xml = args.input.read()
xml_lines = [l for l in xml.split('\n') if not l.startswith('<!--')]
ns_pat = re.compile('{.*}')
for tag in etree.fromstringlist(xml_lines).findall('.//child'):
    attributes = {}
    for k, v in tag.items():
        if k.startswith('{'):
            k = ns_pat.sub('', k)
        if k in ['width', 'height', 'x', 'y']:
            v = float(v)
        attributes[k] = v
    for child in tag:
        if child.text is not None:
            if child.tag == 'font':
                font, font_type, font_size = child.text.split('-')
                if font_type != 'plain':
                    font = '%s %s' % (font, font_type.capitalize())
                attributes['font'] = font
                attributes['fontSize'] = int(font_size)
Example 46
File: tools.py Project: dag/stutuz
def assert_xml(response, xpath):
    """An XPath query on a response returns a True-like result."""
    doc = etree.fromstringlist(response.obj.data)
    assert_(doc.xpath(xpath),
        "XPath {0!r} didn't match:\n{1}".format(xpath, response.obj.data))
Example 47
import datetime
import json
import os
import time
import urllib2
from lxml import etree

one_second = datetime.timedelta(seconds=1)

apikey = "FIX ME"
username = "******"

baseurl = "http://ws.audioscrobbler.com/2.0/?api_key=%s&user=%s" % (apikey, username)

def getpage(url):
   return urllib2.urlopen(urllib2.Request(url, headers={"User-Agent":"last.fm-histogrammer"})).readlines()

chart_periods_xml = getpage(baseurl + "&method=user.getWeeklyChartList")
chart_periods = []
t = etree.fromstringlist(chart_periods_xml)
for chart in t.xpath("//chart"):
   chart_periods.append((chart.attrib["from"], chart.attrib["to"]))

start_time = datetime.datetime.now()
end_time = datetime.datetime.now()
album_track_plays = {}
print "%s Starting chart fetching" % str(datetime.datetime.now())
for chart in chart_periods:
   cache_file = "chartcache/%s_%s_%s" % (username, chart[0], chart[1])
   end_time = datetime.datetime.now()
   td = end_time - start_time
   td_secs = (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10**6
   if abs(end_time - start_time) < one_second:
      time.sleep(1 - td_secs)
   if os.path.exists(cache_file):
Example 48
def target_archs(apiurl, project, repository='standard'):
    meta = ETL.fromstringlist(show_project_meta(apiurl, project))
    return meta.xpath('repository[@name="{}"]/arch/text()'.format(repository))