Code Example #1
 def derive_all_use_libs(self, build_type, lib_name, arg_exclude_libs=[]):
     """Derive a list of all HDL libraries that the specified HDL lib_name library depends on.
     
        The build_type can be:
         'sim'   uses both the 'hdl_lib_uses_synth' and the 'hdl_lib_uses_sim' keys in the lib_dict,
         'synth' uses only the 'hdl_lib_uses_synth' key in the lib_dict.
         
        Note:
        . Only the generic HDL libraries and the technology specific libraries that match self.technologyNames are used.
        . The hdl_lib_uses_<build type> key only needs to contain all libraries that are declared at the VHDL LIBRARY
          clauses of the source files in this library. This derive_all_use_libs() will recursively find all deeper
          level libraries as well.
        . Pass on exclude_libs through the recursion hierarchy to ensure that the excluded library and all libraries
          that are used by it and not by any other library are excluded as well.
     """
     # take a local copy with list(), so the mutable default argument
     # arg_exclude_libs=[] is not modified across calls
     exclude_libs = list(arg_exclude_libs)
     if lib_name in self.lib_names:
         all_use_libs = [lib_name]
         lib_dict = self.libs.dicts[self.lib_names.index(lib_name)]
         # use recursion to include all used libs
         use_libs, exclude_libs = self.get_used_libs(build_type, lib_dict, exclude_libs)
         for use_lib in use_libs:
             if use_lib in self.lib_names:
                 all_use_libs.append(use_lib)
                 all_use_libs += self.derive_all_use_libs(build_type, use_lib, exclude_libs)
         # remove all duplicates from the list
         return cm.unique(all_use_libs)
     else:
         sys.exit("Error: unknown HDL library name '%s'" % lib_name)
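
Every example on this page hinges on a project-local unique() helper (cm.unique
here, common.unique elsewhere). A minimal sketch of such a helper, assuming its
contract is order-preserving de-duplication:

def unique(in_list):
    """Return a copy of in_list with duplicates removed, keeping the first
    occurrence of each item. A sketch only; the real cm.unique/common.unique
    helpers in these projects may differ in detail."""
    seen = set()
    result = []
    for item in in_list:
        if item not in seen:
            seen.add(item)
            result.append(item)
    return result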
Code Example #2
# Assumed imports for this snippet: the project opens the archive with
# tarfile (aliased as tar) and uses its own common module for unique().
import os
import tarfile as tar

import common

def __get_file_version__(local_repo, kernel_meta_pkg):
    """Get kernel version in included kernel archive"""
    if not os.path.exists(local_repo):
        try:
            tar_file = tar.open("/usr/share/system-installer/kernel.tar.xz")
        except FileNotFoundError:
            # fallback archive name; note that tarfile cannot transparently
            # read 7z compression, so this path presumably expects the file
            # to still be a tar archive despite its extension
            tar_file = tar.open("/usr/share/system-installer/kernel.tar.7z")
        files = tar_file.getnames()
        tar_file.close()
    else:
        files = os.listdir(local_repo)
    # iterate backwards so deleting entries by index stays safe
    for each in range(len(files) - 1, -1, -1):
        if files[each] in ("kernel", "kernel/linux-meta"):
            del files[each]
            continue
        else:
            files[each] = files[each].split("/")[-1]
            files[each] = files[each].split("_")
            if files[each][0] == kernel_meta_pkg:
                del files[each][0]
            if files[each][-1] == "amd64.deb":
                del files[each][-1]
            files[each] = files[each][0]
    files = [each for each in files if "linux" in each]
    version = common.unique(files)[0]
    if version[:6] == "linux-":
        version = version[6:]
    if version[-2:] == "-0":
        version = version[:-2]
    if version[:8] == "headers-":
        version = version[8:]
    if version[:6] == "image-":
        version = version[6:]
    return version
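
The chain of prefix/suffix strips at the end of the function is easiest to see
in isolation. A runnable sketch with a hypothetical package stem such as one
parsed from "kernel/linux-headers-5.8.0-0_all.deb" (the helper name and file
name are illustrative, not part of the project):

def strip_kernel_decorations(version):
    """Sketch of the stripping above, applied in the same order."""
    if version.startswith("linux-"):
        version = version[len("linux-"):]
    if version.endswith("-0"):
        version = version[:-len("-0")]
    if version.startswith("headers-"):
        version = version[len("headers-"):]
    if version.startswith("image-"):
        version = version[len("image-"):]
    return version

assert strip_kernel_decorations("linux-headers-5.8.0-0") == "5.8.0"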
Code Example #3
def __get_file_version__():
    """Get kernel version in included kernel archive"""
    try:
        tar_file = tar.open(
            "/usr/share/system-installer/modules/kernel.tar.xz")
    except FileNotFoundError:
        tar_file = tar.open(
            "/usr/share/system-installer/modules/kernel.tar.7z")
    files = tar_file.getnames()
    tar_file.close()
    for each in range(len(files) - 1, -1, -1):
        if files[each] in ("kernel", "kernel/linux-meta"):
            del files[each]
            continue
        else:
            files[each] = files[each].split("/")[-1]
            files[each] = files[each].split("_")
            if files[each][0] == "linux-xanmod":
                del files[each][0]
            if files[each][-1] == "amd64.deb":
                del files[each][-1]
            files[each] = files[each][0]
    version = common.unique(files)[0]
    if version[:6] == "linux-":
        version = version[6:]
    if version[-2:] == "-0":
        version = version[:-2]
    return version
Code Example #4
File: statements.py Project: blippy/pypms
def create_job_statement(job, all_tasks, exps, times):

    title = "Work Statement: {0}".format(period.mmmmyyyy())
    out = rtf.Rtf()
    out.addTitle(title)
    AddTopInfo(out, job)

    tasks = [o.task for o in times + exps]
    tasks = common.unique(tasks)
    tasks.sort()
    if tasks[0] == "" and len(tasks) > 1:
        tasks = tasks[1:] + [tasks[0]]  # rotate unassigned to end

    # distribute invoice items into sections
    job_code = job["job"]
    sections = []  # a list of Section classes
    totalWork = 0.0
    totalExpenses = 0.0
    for task_key in tasks:

        # work out heading
        if len(task_key) == 0:
            heading = "Expenses not categorised to a specific task"
        else:
            desc = all_tasks[(job_code, task_key)]["TaskDes"]
            heading = "Task {0}: {1}".format(task_key, desc)
        out.add(heading)

        amount_work, num_times = ProcessSubsection(out, times, task_key, "Work subtotal")
        totalWork += amount_work

        amount_expenses, num_expenses = ProcessSubsection(out, exps, task_key, "Expenses subtotal")
        totalExpenses += amount_expenses

        if num_times > 0 and num_expenses > 0:
            subtotal(out, "Task subtotal", amount_work + amount_expenses)
            out.para()

    # output grand summary
    out.add("Overall summary")
    subtotal(out, "Work total", totalWork)
    subtotal(out, "Expenses total", totalExpenses)
    net = totalWork + totalExpenses
    subtotal(out, "Net total", net)

    out.annotation(job)
    outdir = period.perioddir() + "\\statements"
    outfile = job_code + ".rtf"
    out.save(outdir, outfile)

    # remember what we have produced for the invoice summaries
    if job["Weird"] or job["WIP"]:
        billed = 0.0
    else:
        billed = net

    invoice = {"work": totalWork, "expenses": totalExpenses, "net": billed}
    return invoice
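
The rotation right after the sort moves the blank "unassigned" key behind the
named tasks so it is reported last. The idiom in isolation (task codes are
hypothetical):

tasks = ["", "A100", "B200"]        # "" marks items with no task code
if tasks[0] == "" and len(tasks) > 1:
    tasks = tasks[1:] + [tasks[0]]  # rotate the blank key to the end
assert tasks == ["A100", "B200", ""]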
Code Example #5
File: nbc_tv.py Project: AbsMate/bluecop-xbmc-repo
    def addEpisodeList( self ):

        print " \n\n adding episodes \n\n"

        content=common.getHTML(common.args.url)

        # get list of pages of additional episodes, if we have any
        try:
            pagesSegment=re.search('div class="nbcu_pager".+?class="nbcu_pager_last">', content, re.DOTALL).group(0)
            pages=common.unique(re.compile('<a href="(.+?)"').findall(pagesSegment))
            print pages
        except AttributeError:
            # re.search found no pager: only a single page of episodes
            pages=None

        # get episode list per page
        episodeListSegment=re.search('<div class="scet-gallery-content">.+?</div><!-- item list -->', content, re.DOTALL).group(0)

        # title, thumbnail, watchURL, episode, plot
        episodeInfo=re.compile('<li class="list_full_detail_horiz" >.+?href="(.+?)".+?title="(.+?)"><img src="(.+?)".+?<strong>.+?Ep\. (\d+):.+?</div>.+?</li>', re.DOTALL).findall(episodeListSegment)
        print episodeInfo

        # season number
        season=re.compile('<h2>Full Episodes.+?(\d+)</span>').findall(episodeListSegment)[0]
        print season

        # add first page worth of episodes
        for watchURL, title, thumbnail, episode in episodeInfo:
            plot = ''
            # build s0xe0y season/episode header if wanted; includes trailing space!
            seasonEpisodeHeader=('', "s%02de%03d " % (int(season), int(episode)))[common.settings['show_epi_labels']]
            # see if we want plots
            plot=('', common.cleanNames(plot))[common.settings['get_episode_plot']]
            common.addDirectory(common.cleanNames(seasonEpisodeHeader + title), watchURL, 'TV_play_nbc', thumbnail, thumbnail, common.args.fanart, plot, 'genre')

        # now loop through rest of episode pages, if any; skip the first page

        # TODO: see if we can consolidate the code from episodeListSegment down,
        # as it duplicates the first page stuff above

        if pages:
            for page in pages[1:]:
                content=common.getHTML(common.NBC_BASE_URL + page)

                # get episode list per page
                episodeListSegment=re.search('<div class="scet-gallery-content">.+?</div><!-- item list -->', content, re.DOTALL).group(0)

                # title, thumbnail, watchURL, episode, plot
                episodeInfo=re.compile('<li class="list_full_detail_horiz" >.+?href="(.+?)".+?title="(.+?)"><img src="(.+?)".+?<strong>.+?Ep\. (\d+):.+?</div>.+?</li>', re.DOTALL).findall(episodeListSegment)

                # add each add'l page worth of episodes
                for watchURL, title, thumbnail, episode in episodeInfo:
                    plot = ''
                    # build s0xe0y season/episode header if wanted; includes trailing space!
                    seasonEpisodeHeader=('', "s%02de%03d " % (int(season), int(episode)))[common.settings['show_epi_labels']]
                    # see if we want plots
                    plot=('', common.cleanNames(plot))[common.settings['get_episode_plot']]
                    common.addDirectory(common.cleanNames(seasonEpisodeHeader + title), watchURL, 'TV_play_nbc', thumbnail, thumbnail, common.args.fanart, plot, 'genre')
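
The ('', value)[flag] construct used for seasonEpisodeHeader and plot is a
Python 2-era trick: a boolean indexes a two-element tuple, picking '' when the
setting is off. In isolation (the setting value is hypothetical):

show_epi_labels = True                                   # hypothetical setting
header = ('', "s%02de%03d " % (3, 14))[show_epi_labels]  # True indexes as 1
assert header == "s03e014 "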
Code Example #6
 def rankXpaths(self, xpaths):
     """Xpaths are ranked by most common, length, then alphabetical"""
     def cmp(xpath1, xpath2):
         if xpaths.count(xpath1) != xpaths.count(xpath2):
             return xpaths.count(xpath2) - xpaths.count(xpath1)
         if len(str(xpath1)) != len(str(xpath2)):
             return len(str(xpath2)) - len(str(xpath1))
         if str(xpath1) < str(xpath2):
             return -1
         return 1 if str(xpath1) > str(xpath2) else 0
     return unique(sorted(xpaths, cmp=cmp))#[0]
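
The cmp= keyword argument to sorted() only exists in Python 2. A sketch of the
same ranking for Python 3 (most frequent first, then longest, then
alphabetical), assuming the same unique() helper is available:

from collections import Counter

def rank_xpaths_py3(xpaths):
    counts = Counter(xpaths)
    ranked = sorted(xpaths, key=lambda xp: (-counts[xp], -len(str(xp)), str(xp)))
    return unique(ranked)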
Code Example #7
    def addEpisodeList( self ):

        print " \n\n adding episodes \n\n"
        
        content=common.getHTML(common.args.url)
        
        # get list of pages of additional episodes, if we have any
        try:
            pagesSegment=re.search('div class="nbcu_pager".+?class="nbcu_pager_last">', content, re.DOTALL).group(0)
            pages=common.unique(re.compile('<a href="(.+?)"').findall(pagesSegment))
        except AttributeError:
            # re.search found no pager: only a single page of episodes
            pages=None

        # get episode list per page
        episodeListSegment=re.search('<div id="browse_container">.+?</div><!-- #browse_container -->', content, re.DOTALL).group(0)
        
        # title, thumbnail, watchURL, episode, plot
        episodeInfo=re.compile('<li class="list_full_detail">.+?title="(.+?)"><img src="(.+?)".+?<a href="(.+?)".+?<strong>Ep\. (\d+):.+?<p class="list_full_des"><em>(.+?)</em>', re.DOTALL).findall(episodeListSegment)
        
        # season number
        season=re.compile('<h2>Full Episodes.+?(\d+)</span>').findall(episodeListSegment)[0]
        
        # add first page worth of episodes
        for title, thumbnail, watchURL, episode, plot in episodeInfo:
            # build s0xe0y season/episode header if wanted; includes trailing space!
            seasonEpisodeHeader=('', "s%02de%03d " % (int(season), int(episode)))[common.settings['show_epi_labels']]
            # see if we want plots
            plot=('', common.cleanNames(plot))[common.settings['get_episode_plot']]
            common.addDirectory(common.cleanNames(seasonEpisodeHeader + title), watchURL, 'TV_play_nbc', thumbnail, thumbnail, common.args.fanart, plot, 'genre')

        # now loop through rest of episode pages, if any; skip the first page
        
        # TODO: see if we can consolidate the code from episodeListSegment down,
        # as it duplicates the first page stuff above
        
        if pages:
            for page in pages[1:]:
                content=common.getHTML(common.NBC_BASE_URL + page)
                
                # get episode list per page
                episodeListSegment=re.search('<div id="browse_container">.+?</div><!-- #browse_container -->', content, re.DOTALL).group(0)
                
                # title, thumbnail, watchURL, episode, plot
                episodeInfo=re.compile('<li class="list_full_detail">.+?title="(.+?)"><img src="(.+?)".+?<a href="(.+?)".+?<strong>Ep\. (\d+):.+?<p class="list_full_des"><em>(.+?)</em>', re.DOTALL).findall(episodeListSegment)
                
                # add each add'l page worth of episodes
                for title, thumbnail, watchURL, episode, plot in episodeInfo:
                    # build s0xe0y season/episode header if wanted; includes trailing space!
                    seasonEpisodeHeader=('', "s%02de%03d " % (int(season), int(episode)))[common.settings['show_epi_labels']]
                    # see if we want plots
                    plot=('', common.cleanNames(plot))[common.settings['get_episode_plot']]
                    common.addDirectory(common.cleanNames(seasonEpisodeHeader + title), watchURL, 'TV_play_nbc', thumbnail, thumbnail, common.args.fanart, plot, 'genre')
Code Example #8
File: PersonTimeGridFrame.py Project: blippy/pypms
def aggregate_time_items(initials):
    titems = db.GetTimeitems()
    titems = [x for x in titems if x['Person'] == initials]
    #print titems
    
    codings = [ (x['JobCode'], x['Task']) for x in titems]
    codings.sort()
    codings = common.unique(codings)
    
    result = []
    for coding in codings:
        time_vals = []
        for d in range(1, 32):
            total = 0
            for titem in titems:
                if titem['JobCode'] == coding[0] and titem['Task'] == coding[1] and titem['DateVal'].day == d:
                    total += titem['TimeVal']
            time_vals.append(total)
        result.append([coding[0], coding[1], time_vals])
    return result
Code Example #9
File: scrape.py Project: richardpenman/minwrap
def find_columns(url, parsed_content, expected_output):
    """Receives the content of a web page and a dict of the expected output for each field.
    Returns a dict with a selector for each field if they can be found, else None.
    """
    if not expected_output:
        return
    columns = {}
    for field, values in expected_output.items():
        values = common.unique(values)
        paths = collections.defaultdict(list)
        for e, path in parsed_content.find(values):
            # found a selector value we are after in this response
            paths[e].append(path)

        if paths:
            common.logger.debug('AJAX results:')
            for e in paths:
                common.logger.debug('{} {}'.format(
                    e, [str(path) for path in paths[e]]))
        # XXX adjust this threshold for each website?
        if paths and len(paths) > len(values) / 2:
            # found enough matches
            common.logger.info(
                'Content matches expected output: {} {} ({} / {})'.format(
                    url, field, len(paths), len(values)))
            column = common_selector(paths.values())
            if column:
                common.logger.info('Found match for column: {} {}'.format(
                    field, column))
                columns[field] = column
            else:
                common.logger.debug(
                    'Failed to find match for column: {} {}'.format(
                        field, len(paths)))
                return
        else:
            common.logger.debug(
                'Content does not match expected output: {} {} ({} / {})'.
                format(url, field, len(paths), len(values)))
            return
    return columns
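
common_selector() is project code that is not shown here. A hypothetical
sketch consistent with how it is called above (intersect the candidate paths
found for each value and return one shared selector, or None):

def common_selector(path_lists):
    """Hypothetical stand-in: return a selector common to every list of
    candidate paths, or None when the matches share nothing."""
    sets = [set(str(path) for path in paths) for paths in path_lists]
    shared = set.intersection(*sets) if sets else set()
    return min(shared) if shared else None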
Code Example #10
def has_internet():
    """Check for internet, using mirrors and ping counts defined in
    the default `system-installer` config file."""
    # Read Mirrors file
    with open("/etc/system-installer/default.json", "r") as mirrors_file:
        mirrors = json.load(mirrors_file)

    ping_count = mirrors["ping count"]
    mirrors = mirrors["ping servers"]

    # get only the unique mirrors
    mirrors = common.unique(mirrors)
    # Try each server in turn: any successful ping means we have internet.
    # Multiple servers are listed in case one or more is blocked or down.
    for each in mirrors:
        try:
            ping(each, ping_count)
            return True
        except CalledProcessError:
            continue
    return False
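
ping() is defined elsewhere in the project; the CalledProcessError handling
implies it shells out. A hedged sketch of such a helper:

import subprocess
from subprocess import CalledProcessError

def ping(mirror, count):
    """Hypothetical sketch: raise CalledProcessError when `mirror` does not
    answer `count` pings."""
    subprocess.check_call(["ping", "-c", str(count), mirror],
                          stdout=subprocess.DEVNULL,
                          stderr=subprocess.DEVNULL)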
Code Example #11
 def unique_texts(self, sources):
     return unique(
         chain(self.tp_by_source_and_text[sources].keys(),
               self.fp_by_source_and_text[sources].keys(),
               self.fn_by_source_and_text[sources].keys()))