Example #1
    def run(self, entity, discourse, syntax):
        if discourse == 'new':
            if len(self.dbpedia[entity]['givenNames']) > 0:
                givenNames = self.dbpedia[entity]['givenNames']
                first = filter(lambda x: len(x) == min(map(lambda x: len(x), givenNames)), givenNames)[0]

                surnames = self.dbpedia[entity]['surnames']
                last = filter(lambda x: len(x) == min(map(lambda x: len(x), surnames)), surnames)[0]

                name = str(first).strip() + ' ' + str(last).strip()
            else:
                birthNames = self.dbpedia[entity]['birthNames']
                name = str(filter(lambda x: len(x) == min(map(lambda x: len(x), birthNames)), birthNames)[0]).strip()
        else:
            if len(self.dbpedia[entity]['surnames']) > 0:
                surnames = self.dbpedia[entity]['surnames']
                last = filter(lambda x: len(x) == min(map(lambda x: len(x), surnames)), surnames)[0]

                name = str(last).strip()
            else:
                birthNames = self.dbpedia[entity]['birthNames']
                name = str(filter(lambda x: len(x) == min(map(lambda x: len(x), birthNames)), birthNames)[0]).strip().split()[-1]

        name = self.realize(name, syntax)
        return prep.get_label(name, self.dbpedia[entity]), name
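
A note on portability: this snippet indexes the result of filter(), which only works under Python 2, where filter() returns a list. The shortest-candidate selection can be expressed directly with min() and a key function; a minimal sketch, with made-up data:

# Sketch only: same "shortest candidate" rule as the filter/map pairs above.
def shortest(candidates):
    return str(min(candidates, key=len)).strip()

given_names = ['Jonathan', 'Jon']   # invented example data
surnames = ['Smithson', 'Smith']
print(shortest(given_names) + ' ' + shortest(surnames))  # -> Jon Smith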
Example #2
    def _EXPERIMENTAL_VERBAL_PREDICATE_FEATURE_Infinitive(self):
        xcomp_children = filter(lambda x:x.get_parent_relation() in clausal_complement, self.children)
        ret = ([],[])
        for xcomp_child in xcomp_children:
            aux_children = filter(lambda x:x.get_parent_relation() in aux_dependencies, xcomp_child.children)
            to_children = filter(lambda x:x.pos == TO, aux_children)
            if not to_children:
                return (False,False)
            assert (len(to_children)==1)
            to_child = to_children[0]
            subj_children = filter(lambda x:x.get_parent_relation() in subject_dependencies, xcomp_child.children)
            adv_children = filter(lambda x:x.get_parent_relation() in adverb_dependencies, self.children)
#           if subj_children:
#               print(" ".join([self.word,subj_children[0].word,to_child.word,xcomp_child.word]))
#           if adv_children:
#               print(" ".join([adv_children[0].word,self.word,to_child.word,xcomp_child.word]))
            #ids = [x.id for x in [xcomp_child,to_child]]
            words = " ".join([self.word,to_child.word,xcomp_child.word])
            ret[1].extend([self.id,to_child.id,xcomp_child.id])
            # chaining
            childRes = xcomp_child._VERBAL_PREDICATE_FEATURE_Infinitive()
            if childRes[0]:
                words += " "+" ".join(childRes[0][0].split(" ")[1:])


            ret[0].append(words)

        return ret
Example #3
def calculateSparseDictCOO(data_set, data_label_hash, jump=1, valid_flag=False):
	row = []
	col = []
	data = []
	row_valid = []
	col_valid = []
	data_valid = []

	doc_ids = set(sorted(map(lambda row:int(row[0]), data_set)))
	base_ids_list = filter(lambda ids: ids % jump == 0, doc_ids)
	train_ids = base_ids_list
	valid_ids = set()
	if valid_flag:
		valid_index = filter(lambda ids: ids % validation_perc == 0, range(len(base_ids_list)))
		valid_ids = [base_ids_list[i] for i in valid_index]
		base_ids = set(base_ids_list)
		train_ids = sorted(base_ids - set(valid_ids))

	labels = map(lambda trid: int(data_label_hash[trid]), train_ids)
	labels_valid = map(lambda vlid: int(data_label_hash[vlid]), valid_ids)
	for i in range(len(data_set)):
		if int(data_set[i][0]) in train_ids:
			row.append(int(data_set[i][0]))
			col.append(int(data_set[i][1])-1)
			data.append(int(data_set[i][2]))
			# labels.append(int(data_label_hash[int(data_set[i][0])]))
		elif int(data_set[i][0]) in valid_ids:
			row_valid.append(int(data_set[i][0]))
			col_valid.append(int(data_set[i][1])-1)
			data_valid.append(int(data_set[i][2]))
			# labels_valid.append(int(data_label_hash[int(data_set[i][0])]))

	train = translate(row), col, data, labels
	valid = translate(row_valid), col_valid, data_valid, labels_valid
	return train, valid
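
A porting caveat: under Python 3, filter() and map() return single-use iterators, so len(base_ids_list), base_ids_list[i], the repeated membership tests against train_ids, and the labels sequences above would all break or silently misbehave. A sketch of the id-selection block materialized eagerly (validation_perc is assumed to be a module-level constant, as the original implies):

# Eager rewrite of the id selection; behaviour per the original reading.
base_ids_list = sorted(ids for ids in doc_ids if ids % jump == 0)
train_ids = set(base_ids_list)
valid_ids = set()
if valid_flag:
    valid_ids = {base_ids_list[i] for i in range(len(base_ids_list))
                 if i % validation_perc == 0}
    train_ids = set(base_ids_list) - valid_ids
labels = [int(data_label_hash[trid]) for trid in sorted(train_ids)]
labels_valid = [int(data_label_hash[vlid]) for vlid in sorted(valid_ids)]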
Example #4
 def _all_commands(self):
     path = builtins.__xonsh_env__.get('PATH', [])
     # did PATH change?
     path_hash = hash(tuple(path))
     cache_valid = path_hash == self._path_checksum
     self._path_checksum = path_hash
     # did aliases change?
     al_hash = hash(tuple(sorted(builtins.aliases.keys())))
     cache_valid = cache_valid and al_hash == self._alias_checksum
     self._alias_checksum = al_hash
     pm = self._path_mtime
     # did the contents of any directory in PATH change?
     for d in filter(os.path.isdir, path):
         m = os.stat(d).st_mtime
         if m > pm:
             pm = m
             cache_valid = False
     self._path_mtime = pm
     if cache_valid:
         return self._cmds_cache
     allcmds = set()
     for d in filter(os.path.isdir, path):
         allcmds |= set(os.listdir(d))
     allcmds |= set(builtins.aliases.keys())
     self._cmds_cache = frozenset(allcmds)
     return self._cmds_cache
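
The checksum bookkeeping only invalidates correctly when each fresh hash is compared against the stored value before it is stored, as the alias check above does. A standalone sketch of that pattern, with invented names:

class ChecksumCache(object):
    """Toy input-keyed cache; illustrative only."""

    def __init__(self):
        self._checksum = None
        self._result = None

    def compute(self, items):
        checksum = hash(tuple(sorted(items)))
        if checksum == self._checksum and self._result is not None:
            return self._result        # inputs unchanged: serve the cache
        self._checksum = checksum      # compare first, then store
        self._result = frozenset(items)
        return self._result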
Example #5
def fetch_production(country_code='SE', session=None):
    r = session or requests.session()
    timestamp = arrow.now().timestamp * 1000
    url = 'http://driftsdata.statnett.no/restapi/ProductionConsumption/GetLatestDetailedOverview?timestamp=%d' % timestamp
    response = r.get(url)
    obj = response.json()

    data = {
        'countryCode': country_code,
        'production': {
            'nuclear': float(filter(
                lambda x: x['titleTranslationId'] == 'ProductionConsumption.%s%sDesc' % ('Nuclear', country_code),
                obj['NuclearData'])[0]['value'].replace(u'\xa0', '')),
            'hydro': float(filter(
                lambda x: x['titleTranslationId'] == 'ProductionConsumption.%s%sDesc' % ('Hydro', country_code),
                obj['HydroData'])[0]['value'].replace(u'\xa0', '')),
            'wind': float(filter(
                lambda x: x['titleTranslationId'] == 'ProductionConsumption.%s%sDesc' % ('Wind', country_code),
                obj['WindData'])[0]['value'].replace(u'\xa0', '')),
            'unknown':
                float(filter(
                    lambda x: x['titleTranslationId'] == 'ProductionConsumption.%s%sDesc' % ('Thermal', country_code),
                    obj['ThermalData'])[0]['value'].replace(u'\xa0', '')) +
                float(filter(
                    lambda x: x['titleTranslationId'] == 'ProductionConsumption.%s%sDesc' % ('NotSpecified', country_code),
                    obj['NotSpecifiedData'])[0]['value'].replace(u'\xa0', '')),
        },
        'storage': {},
        'source': 'driftsdata.statnett.no',
    }
    data['datetime'] = arrow.get(obj['MeasuredAt'] / 1000).datetime

    return data
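
The four production lookups differ only in the category name, so they collapse into a single helper. A sketch against the same response shape (the helper name is invented; 'Thermal' and 'NotSpecified' still sum to 'unknown'):

def _category_value(obj, category, country_code):
    # First entry whose translation id names this category, with the
    # non-breaking spaces stripped out of the value before parsing.
    key = 'ProductionConsumption.%s%sDesc' % (category, country_code)
    entry = next(x for x in obj[category + 'Data']
                 if x['titleTranslationId'] == key)
    return float(entry['value'].replace(u'\xa0', ''))

production = {
    'nuclear': _category_value(obj, 'Nuclear', country_code),
    'hydro': _category_value(obj, 'Hydro', country_code),
    'wind': _category_value(obj, 'Wind', country_code),
    'unknown': (_category_value(obj, 'Thermal', country_code) +
                _category_value(obj, 'NotSpecified', country_code)),
}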
Example #6
    def test_search_filter_expired(self):
        """ Account.search() with expire_start, expire_stop args. """
        all = _set_of_ids(self._accounts)
        non_expired = _set_of_ids(filter(nonexpired_filter, self._accounts))
        expired = _set_of_ids(filter(expired_filter, self._accounts))

        # Test criteria
        self.assertGreaterEqual(len(non_expired), 1)
        self.assertGreaterEqual(len(expired), 1)

        # Tests: search params, must match
        for params, match_set, fail_set in (
                ({'expire_start': None, 'expire_stop': None,
                  'owner_id': self.db_tools.get_initial_group_id()},
                 all, set()),
                ({'expire_start': '[:now]', 'expire_stop': None,
                  'owner_id': self.db_tools.get_initial_group_id()},
                 non_expired, expired),
                ({'expire_start': None, 'expire_stop': '[:now]',
                  'owner_id': self.db_tools.get_initial_group_id()},
                 expired, non_expired),):
            result = _set_of_ids(self._ac.search(**params))
            self.assertGreaterEqual(len(result), len(match_set))
            self.assertTrue(result.issuperset(match_set))
            self.assertSetEqual(result.intersection(fail_set), set())
Example #7
    def main(self, argv):
        """
        Receives and executes the commands
        """
        global _cs
        #import traceback
        if self.CHIPSEC_LOADED_AS_EXE:
            import zipfile
            myzip = zipfile.ZipFile("library.zip")
            cmds = map( self.map_modname_zip, filter(self.f_mod_zip, myzip.namelist()) )
        else:
            #traceback.print_stack()
            mydir = imp.find_module('chipsec')[1]
            cmds_dir = os.path.join(mydir,os.path.join("utilcmd"))
            cmds = map( self.map_modname, filter(self.f_mod, os.listdir(cmds_dir)) )

        if logger().VERBOSE:
            logger().log( '[CHIPSEC] Loaded command-line extensions:' )
            logger().log( '   %s' % cmds )
        module = None
        for cmd in cmds:
            try:
                #exec 'from chipsec.utilcmd.' + cmd + ' import *'
                cmd_path = 'chipsec.utilcmd.' + cmd
                module = importlib.import_module( cmd_path )
                cu = getattr(module, 'commands')
                self.commands.update(cu)
            except ImportError, msg:
                logger().error( "Couldn't import util command extension '%s'" % cmd )
                raise ImportError, msg
Example #8
    def remove_chain(self, name, wrap=True):
        """Remove named chain.

        This removal "cascades". All rule in the chain are removed, as are
        all rules in other chains that jump to it.

        If the chain is not found, this is merely logged.

        """
        name = get_chain_name(name, wrap)
        chain_set = self._select_chain_set(wrap)

        if name not in chain_set:
            LOG.warn(_('Attempted to remove chain %s which does not exist'),
                     name)
            return

        chain_set.remove(name)
        self.rules = filter(lambda r: r.chain != name, self.rules)
        if wrap:
            jump_snippet = '-j %s-%s' % (binary_name, name)
        else:
            jump_snippet = '-j %s' % (name,)

        self.rules = filter(lambda r: jump_snippet not in r.rule, self.rules)
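
Both passes reassign self.rules to a filter() result; under Python 3 that stores a one-shot iterator on the object, and the second pass would then drain what the first produced. List comprehensions keep the attribute a real list in either version; a sketch of the two assignments:

self.rules = [r for r in self.rules if r.chain != name]
# ... after computing jump_snippet ...
self.rules = [r for r in self.rules if jump_snippet not in r.rule]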
Example #9
def get_vw_nvalues(model_run_uuid):
    """
    Given a model run uuid that contains the lookup table and ESRI .asc with
    vegetation codes, return an ascii file that has the n-values properly
    assigned
    """
    vwc = default_vw_client()

    records = vwc.dataset_search(model_run_uuid=model_run_uuid).records

    downloads = [r['downloads'][0] for r in records]

    asc_url = filter(lambda d: d.keys().pop() == 'ascii',
                     downloads).pop()['ascii']

    xlsx_url = filter(lambda d: d.keys().pop() == 'xlsx',
                      downloads).pop()['xlsx']

    asc_path = 'tmp_' + str(uuid4()) + '.asc'
    vwc.download(asc_url, asc_path)

    xlsx_path = 'tmp_' + str(uuid4()) + '.xlsx'
    vwc.download(xlsx_url, xlsx_path)

    asc_nvals = vegcode_to_nvalue(asc_path, xlsx_path)

    os.remove(asc_path)
    os.remove(xlsx_path)

    return asc_nvals
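
The filter(...).pop() idiom is Python 2 only. Since each download dict here carries a single key, the same selection reads more directly with next(); a sketch under that single-key assumption:

asc_url = next(d for d in downloads if 'ascii' in d)['ascii']
xlsx_url = next(d for d in downloads if 'xlsx' in d)['xlsx']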
Example #10
def collectintargz(target, source, env):
    """ Puts all source files into a tar.gz file. """
    # the rpm tool depends on a source package; until this is changed,
    # this hack that tries to pack all the sources in needs to stay here.
    sources = env.FindSourceFiles()

    # filter out the target we are building the source list for.
    #sources = [s for s in sources if not (s in target)]
    sources = filter(lambda s, t=target: not (s in t), sources)

    # find the .spec file for rpm and add it since it is not necessarily found
    # by the FindSourceFiles function.
    #sources.extend( [s for s in source if str(s).rfind('.spec')!=-1] )
    spec_file = lambda s: string.rfind(str(s), '.spec') != -1
    sources.extend( filter(spec_file, source) )

    # as the source contains the url of the source package this rpm package
    # is built from, we extract the target name
    #tarball = (str(target[0])+".tar.gz").replace('.rpm', '')
    tarball = string.replace(str(target[0])+".tar.gz", '.rpm', '')
    try:
        #tarball = env['SOURCE_URL'].split('/')[-1]
        tarball = string.split(env['SOURCE_URL'], '/')[-1]
    except KeyError, e:
        raise SCons.Errors.UserError( "Missing PackageTag '%s' for RPM packager" % e.args[0] )
Example #11
    def getPrefLabel(self):
        if self.load_on_cuis:
            if len(self.atoms) == 1:
                return self.atoms[0][MRCONSO_STR]

            labels = set([x[MRCONSO_STR] for x in self.atoms])
            if len(labels) == 1:
                return labels.pop()

            #if there's only one ISPREF=Y then that one.
            is_pref_atoms =  filter(lambda x: x[MRCONSO_ISPREF] == 'Y', self.atoms)
            if len(is_pref_atoms) == 1:
                return is_pref_atoms[0][MRCONSO_STR]
            elif len(is_pref_atoms) > 1:
                is_pref_atoms =  filter(lambda x: x[MRCONSO_STT] == 'PF', is_pref_atoms)
                if len(is_pref_atoms) > 0:
                    return is_pref_atoms[0][MRCONSO_STR]
            is_pref_atoms =  filter(lambda x: x[MRCONSO_STT] == 'PF', self.atoms)
            if len(is_pref_atoms) == 1:
                return is_pref_atoms[0][MRCONSO_STR]
            return self.atoms[0][MRCONSO_STR]
        else:
            #if ISPREF=Y is not 1 then we look into MRRANK.
            if len(self.rank) > 0:
                sort_key = \
                lambda x: int(self.rank[self.rank_by_tty[x[MRCONSO_TTY]][0]][MRRANK_RANK])
                mmrank_sorted_atoms = sorted(self.atoms, key=sort_key, reverse=True)
                return mmrank_sorted_atoms[0][MRCONSO_STR]
            #there is no rank to use
            else:
                pref_atom = filter(lambda x: 'P' in x[MRCONSO_TTY], self.atoms)
                if len(pref_atom) == 1:
                    return pref_atom[0][MRCONSO_STR]
            raise AttributeError, "Unable to select pref label"
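
Every rung of this cascade calls len() on, and indexes into, a filter() result, which requires the Python 2 list behaviour. Each rung ports to Python 3 as a list comprehension; a sketch of the ISPREF rung:

is_pref_atoms = [x for x in self.atoms if x[MRCONSO_ISPREF] == 'Y']
if len(is_pref_atoms) == 1:
    return is_pref_atoms[0][MRCONSO_STR]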
Example #12
    def run_filters(self, check_removed=True):
        """
        Run all the filters in self.filters.
        :param check_removed: (bool) If a station has already been added to removed_station_ids by a
         previous filter, skip the remaining filters for it.
        :return:
        """
        # Check if filters have been initialized before running.
        if not bool(self.filters):
            sys.exit("ERROR run_filters: no filters have been initialized.")
        # Get list of all stations in the time series folder
        o_dir = os.getcwd()
        os.chdir(self.ts_path)
        # stations = [n for n in os.listdir('.') if n.isdigit()]  # list of all station folder names

        # Iterate through all the stations and apply filters
        for i, stat in enumerate(self.stations):
            print 'Processing station: %s' % stat
            if self.iter_time_seris:  # Only open and process time series if necessary
                self.ts_df = pd.read_csv('./%s/time_series.csv' % stat, index_col='Timestamp')
                self.ts_df['date'] = [d[0:10] for d in self.ts_df.index]
                self.ts_df['hour'] = [d[-8:-6] for d in self.ts_df.index]
            # Apply all the filters in the self.filters
            for filter in self.filters:
                # TODO setting check_removed to False will cause the OneClass_SVM filtering to break due to empty features (Andrew 16/07/25)
                if check_removed and stat in self.removed_station_ids:
                    break
                filter(str(stat))
        try:
            [self.cleaned_station_ids.remove(s) for s in self.removed_station_ids]  # remove the removed from the cleaned
        except KeyError as e:
            pass
        os.chdir(o_dir)
Example #13
    def get_exportable_members( self, sort=None ):
        """returns the list of internal declarations that should/could be exported"""
        #TODO: obviously this function should be shorter. Almost all logic of this class
        #      should be spread between decl_wrapper classes
        members = [mv for mv in self.public_members if mv.ignore == False and mv.exportable]
        #protected and private virtual functions that are not overridable and not
        #pure virtual should not be exported
        for member in self.protected_members:
            if isinstance( member, declarations.calldef_t ):
                members.append( member )
            else:
                pass

        vfunction_selector = lambda member: isinstance( member, declarations.member_function_t ) \
                                            and member.virtuality == declarations.VIRTUALITY_TYPES.PURE_VIRTUAL
        members.extend( list(filter( vfunction_selector, self.private_members )) )

        def is_exportable( decl ):
            #filter out non-public member operators - `Py++` does not support them right now
            if isinstance( decl, declarations.member_operator_t ) \
               and decl.access_type != declarations.ACCESS_TYPES.PUBLIC:
                return False
            #remove artificial constructors
            if isinstance( decl, declarations.constructor_t ) and decl.is_artificial:
                return False
            if decl.ignore == True or decl.exportable == False:
                return False
            return True
        #-#if declarations.has_destructor( self ) \
        #-#   and not declarations.has_public_destructor( self ):
        members = list(filter( is_exportable, members ))
        sorted_members = members
        if sort:
            sorted_members = sort( members )
        return sorted_members
Example #14
    def find_max_match(self, options_list, el_value):
        """
        Finds the Longest Word Trimmed Match for selecting text in options field.
        @param options_list: The list of options in the options field.
        @param el_value: The text to be matched in the options.
        """
        el_value_list = el_value.split()
        # Remove all words of length = 1 such as hyphens.
        el_value_list = filter(lambda x: len(x) > 1, el_value_list)
        # Initialise max_len as 0 and matched_option as None.
        max_len = 0
        matched_option = None

        for option in options_list:
            text = option.text
            text_list = text.split()
            # Remove all words of length = 1 such as hyphens.
            text_list = filter(lambda x: len(x) > 1, text_list)
            # Find intersection of el_value_list and text_list
            matched_list = list(set(el_value_list).intersection(text_list))
            # matched_len is number of matching words for the current option.
            matched_len = len(matched_list)
            # Save the maximum matched option in matched_option.
            if matched_len > max_len:
                matched_option = option
                max_len = matched_len

        # Return the maximum matched option.
        return matched_option
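
One porting subtlety: el_value_list is reused once per option, so it must be a concrete list; under Python 3 a lazy filter object would be empty from the second option onwards. A sketch of the two trimming lines as comprehensions:

el_value_list = [w for w in el_value.split() if len(w) > 1]
# and, inside the loop:
text_list = [w for w in text.split() if len(w) > 1]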
Example #15
def getWeekdayMeals(day_id):
    "Takes an int in range [0-4] and returns a dict of all meals that day."

    breakfast = tables[day_id].findAll('td', class_='breakfast')
    lunch = tables[day_id].findAll('td', class_='lunch')
    dinner = tables[day_id].findAll('td', class_='dinner')

    breakfast = filter(None, [f.text for f in breakfast])
    lunch = filter(None, [f.text for f in lunch])
    dinner = filter(None, [f.text for f in dinner])
    
    splitComma = lambda s: s.split(', ')
    strStrip = lambda s: s.encode('ascii', 'ignore').strip()
    
    breakfast = map(splitComma, breakfast)
    breakfast = [b for sublist in breakfast for b in sublist]
    breakfast = map(strStrip, breakfast)

    lunch = map(splitComma, lunch)
    lunch = [b for sublist in lunch for b in sublist]
    lunch = map(strStrip, lunch)

    dinner = map(splitComma, dinner)
    dinner = [b for sublist in dinner for b in sublist]
    dinner = map(strStrip, dinner)

    meals_dict = {'breakfast': breakfast,
                  'lunch': lunch,
                  'dinner': dinner}

    return meals_dict
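
Under Python 3 every map() and filter() here yields a lazy iterator, so meals_dict would end up holding one-shot iterators rather than lists (and encode('ascii', 'ignore') would produce bytes, not str). A sketch of the breakfast pipeline as comprehensions; lunch and dinner follow identically:

breakfast = [cell.text for cell in tables[day_id].findAll('td', class_='breakfast')
             if cell.text]
breakfast = [item.strip() for cell in breakfast for item in cell.split(', ')]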
Example #16
def do_image_create(gc, args):
    """Create a new image."""
    # Filter out None values
    fields = dict(filter(lambda x: x[1] is not None, vars(args).items()))

    fields["is_public"] = fields.get("is_public")

    if "is_protected" in fields:
        fields["protected"] = fields.pop("is_protected")

    raw_properties = fields.pop("property")
    fields["properties"] = {}
    for datum in raw_properties:
        key, value = datum.split("=", 1)
        fields["properties"][key] = value

    # Filter out values we can't use
    CREATE_PARAMS = glanceclient.v1.images.CREATE_PARAMS
    fields = dict(filter(lambda x: x[0] in CREATE_PARAMS, fields.items()))

    _set_data_field(fields, args)

    # Only show progress bar for local image files
    if fields.get("data") and args.progress:
        filesize = utils.get_file_size(fields["data"])
        fields["data"] = progressbar.VerboseFileWrapper(fields["data"], filesize)

    image = gc.images.create(**fields)
    _image_show(image, args.human_readable)
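
Both dict(filter(...)) calls translate one-for-one into dict comprehensions, which behave identically in Python 2 and 3; a sketch:

fields = {k: v for k, v in vars(args).items() if v is not None}
# ... and later:
fields = {k: v for k, v in fields.items() if k in CREATE_PARAMS}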
Example #17
 def _move_additional_files(self, old_filename, new_filename):
     """Move extra files, like playlists..."""
     old_path = encode_filename(os.path.dirname(old_filename))
     new_path = encode_filename(os.path.dirname(new_filename))
     patterns = encode_filename(config.setting["move_additional_files_pattern"])
     patterns = filter(bool, [p.strip() for p in patterns.split()])
     try:
         names = os.listdir(old_path)
     except os.error:
         log.error("Error: {} directory not found".format(old_path))
         return
     filtered_names = filter(lambda x: x[0] != '.', names)
     for pattern in patterns:
         pattern_regex = re.compile(fnmatch.translate(pattern), re.IGNORECASE)
         file_names = names
         if pattern[0] != '.':
             file_names = filtered_names
         for old_file in file_names:
             if pattern_regex.match(old_file):
                 new_file = os.path.join(new_path, old_file)
                 old_file = os.path.join(old_path, old_file)
                 # FIXME we shouldn't do this from a thread!
                 if self.tagger.files.get(decode_filename(old_file)):
                     log.debug("File loaded in the tagger, not moving %r", old_file)
                     continue
                 log.debug("Moving %r to %r", old_file, new_file)
                 shutil.move(old_file, new_file)
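
filtered_names is consumed once per pattern in the loop, so it has to be a list rather than a lazy filter object under Python 3 (the same applies to patterns). A sketch of the two affected lines:

patterns = [p.strip() for p in patterns.split() if p.strip()]
filtered_names = [x for x in names if x[0] != '.']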
Example #18
def do_image_update(gc, args):
    """Update a specific image."""
    # Filter out None values
    fields = dict(filter(lambda x: x[1] is not None, vars(args).items()))

    image_arg = fields.pop("image")
    image = utils.find_resource(gc.images, image_arg)

    if "is_protected" in fields:
        fields["protected"] = fields.pop("is_protected")

    raw_properties = fields.pop("property")
    fields["properties"] = {}
    for datum in raw_properties:
        key, value = datum.split("=", 1)
        fields["properties"][key] = value

    # Filter out values we can't use
    UPDATE_PARAMS = glanceclient.v1.images.UPDATE_PARAMS
    fields = dict(filter(lambda x: x[0] in UPDATE_PARAMS, fields.items()))

    if image.status == "queued":
        _set_data_field(fields, args)

        if args.progress:
            filesize = utils.get_file_size(fields["data"])
            fields["data"] = progressbar.VerboseFileWrapper(fields["data"], filesize)

    image = gc.images.update(image, purge_props=args.purge_props, **fields)
    _image_show(image, args.human_readable)
Example #19
def get_task(task_id, src_id):
    print task_id
    print src_id
    task = filter(lambda t: t['dst'][:5] == task_id[:5], tasks)
    new_task = filter(lambda t: t['src'][:5] == src_id[:5], task)
    if len(new_task) == 0:
        print "cannot find the ip " + task_id + " from the database"
        print "calling king service from server"
        print subprocess.call(["../king/bin/king", src_id, task_id], stdout=open('log.txt', 'a'))
        re_tasks = []
        with open('out.txt') as ff:
            lines = ff.readlines()
            for line in lines:
                words = line.split(' ')
                re_task = {'src': words[1],
                           'dst': words[4],
                           'rtt': words[7],
                           'bandwidth': words[11]}
                re_tasks.append(re_task)
        print re_tasks
        _task = filter(lambda t: t['dst'][:5] == task_id[:5], re_tasks)
        inject_task = filter(lambda t: t['src'][:5] == src_id[:5], _task)
        print inject_task
        if len(inject_task) == 0:
            abort(404)
        print inject_task
        new_task = inject_task
    print new_task
    return jsonify({'task': make_public_task(new_task[0])})
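
The two chained filter() calls can be folded into one list comprehension, which also avoids calling len() on a filter object under Python 3; a sketch:

new_task = [t for t in tasks
            if t['dst'][:5] == task_id[:5] and t['src'][:5] == src_id[:5]]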
Example #20
 def _handcoded_match(problem, newcluster, connected):
     if isinstance(newcluster, Rigid) and len(newcluster.vars)>=3:
         matches = []
         rigid1 = newcluster
         glues = filter(lambda o: isinstance(o, Glueable) and len(o.vars.intersection(rigid1.vars))>=3 , connected)
         for o in glues:
             connected2 = set()
             for var in o.vars:
                 dependend = problem.find_dependend(var)
                 dependend = filter(lambda x: problem.is_top_level(x), dependend)
                 connected2.update(dependend)
             rigids2 = filter(lambda r2: isinstance(r2, Rigid) and r2 != rigid1 and len(r2.vars.intersection(o.vars)) >=3, connected2)
             for rigid2 in rigids2:
                 m = Map({
                     "$r1": rigid1, 
                     "$o": o,
                     "$r2": rigid2
                 })
                 matches.append(m)
         return matches
     elif isinstance(newcluster, Glueable):
         matches = []
         glue = newcluster
         rigids = filter(lambda r: isinstance(r, Rigid) and len(r.vars.intersection(glue.vars)) >=3, connected)
         for i in range(len(rigids)):
             for j in range(i+1, len(rigids)):
                 m = Map({
                     "$o": glue, 
                     "$r1": rigids[i],
                     "$r2": rigids[j],
                 })
                 matches.append(m)
         return matches
     else:
         return []
Example #21
    def runTest(self):
        self.setup_test()
        self.log_entries = self.c.run_command_ignore_fail("cat /sys/firmware/opal/msglog |  grep 'PHB#' | grep -i  ' C:'")
        failed_eplist = []
        failed_slotlist = []
        failed_swuplist = []
        match_list = ["[EP  ]", "[LGCY]", "[PCID]", "[ETOX]" ]

        for entry in self.log_entries:
            if entry == '':
                continue

            matchObj = re.match(r"(.*) PHB#(.*) \[(.*)", entry)
            if matchObj:
                bdfn = matchObj.group(2)
            else:
                log.debug(entry)
                bdfn = entry

            ep_present = False
            # Check for a end point PCI device, it should have LOC_CODE label
            for string in match_list:
                if string in entry:
                    ep_present = True
                    if "LOC_CODE" in entry:
                        log.debug("Location code found for entry %s" % bdfn)
                    else:
                        failed_eplist.append(bdfn)
                    break
            else:
                ep_present = False

            if ep_present:
                continue

            if "[SWUP]" in entry:
                if "LOC_CODE" in entry:
                    log.debug("Entry %s has LOC_CODE" % bdfn)
                    continue
                if "SLOT" in entry:
                    log.debug("Entry %s has SLOT" % bdfn)
                    continue
                failed_swuplist.append(bdfn)

            # If it is a pcie slot check for SLOT entry
            if "SLOT" in entry:
                log.debug("Entry %s has the slot label" % bdfn)
            else:
                failed_slotlist.append(bdfn)

        log.debug(repr(failed_eplist))
        log.debug(repr(failed_slotlist))
        log.debug(repr(failed_swuplist))
        if (len(failed_slotlist) == 0) and (len(failed_eplist) == 0):
            return
        failed_eplist = '\n'.join(filter(None, failed_eplist))
        failed_slotlist = '\n'.join(filter(None, failed_slotlist))
        failed_swuplist = '\n'.join(filter(None, failed_swuplist))
        message = "SLOT Label failures: %s\n LOC_CODE failures:%s\nSWUP failures:%s\n" % (failed_slotlist, failed_eplist, failed_swuplist)
        self.assertTrue(False, message)
Example #22
def _extract_metadata(content):
    tree = etree.fromstring(content)
    ns = {'xhtml': 'http://www.w3.org/1999/xhtml'}
    subject = tree.xpath('//xhtml:title', namespaces=ns)[0].text

    metadata_nodes = tree.xpath('//xhtml:meta', namespaces=ns)
    metadata_nodes = [n for n in metadata_nodes if 'name' in n.attrib]
    metadata = {}
    for node in metadata_nodes:
        metadata[node.attrib['name']] = node.attrib['content']

    for n in metadata_nodes:
        n.getparent().remove(n)

    content = etree.tostring(tree, pretty_print=True, encoding=unicode)

    sender = metadata.get('mail-sender', u'')
    to_recipients_txt = metadata.get('mail-to-recipients', u'')
    cc_recipients_txt = metadata.get('mail-cc-recipients', u'')
    bcc_recipients_txt = metadata.get('mail-bcc-recipients', u'')
    to_recipients = filter(None, re.split(r'\s*,\s*', to_recipients_txt))
    cc_recipients = filter(None, re.split(r'\s*,\s*', cc_recipients_txt))
    bcc_recipients = filter(None, re.split(r'\s*,\s*', bcc_recipients_txt))

    return content, subject, sender, to_recipients, cc_recipients, bcc_recipients
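
encoding=unicode and the bare filter() calls mark this as Python 2 code; under Python 3 the recipient variables would be one-shot iterators. A sketch of one recipient line as a comprehension (the other two follow identically):

to_recipients = [r for r in re.split(r'\s*,\s*', to_recipients_txt) if r]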
Example #23
    def _fill_inheritance(self):
        """
        Traverses this class's ancestor list and attempts to fill in
        missing documentation from its ancestor's documentation.

        The first pass connects variables, methods and functions with
        their inherited counterparts. (The templates will decide how to
        display docstrings.) The second pass attempts to add instance
        variables to this class that were only explicitly declared in
        a parent class. This second pass is necessary since instance
        variables are only discoverable by traversing the abstract
        syntax tree.
        """
        mro = filter(lambda c: c != self and isinstance(c, Class),
                     self.module.mro(self))

        def search(d, fdoc):
            for c in mro:
                doc = fdoc(c)
                if d.name in doc and isinstance(d, type(doc[d.name])):
                    return doc[d.name]
            return None
        for fdoc in (lambda c: c.doc_init, lambda c: c.doc):
            for d in fdoc(self).values():
                dinherit = search(d, fdoc)
                if dinherit is not None:
                    d.inherits = dinherit

        # Since instance variables aren't part of a class's members,
        # we need to manually deduce inheritance. Oh lawdy.
        for c in mro:
            for name in filter(lambda n: n not in self.doc_init, c.doc_init):
                d = c.doc_init[name]
                self.doc_init[name] = Variable(d.name, d.module, '', cls=self)
                self.doc_init[name].inherits = d
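
mro is iterated many times: once inside search() for every documented member, and again in the final instance-variable loop. Under Python 3 a filter object would be exhausted after the first pass, so it needs to be materialized; a sketch:

mro = [c for c in self.module.mro(self)
       if c != self and isinstance(c, Class)]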
Example #24
    def setInput(self, index, value):
        temp = self.inputs[:]
        if index >= len(temp):
            temp.append(value)
            if not (
                temp.count(1) == 1
                or list(x.state for x in filter(lambda i: isinstance(i, Connector), temp)).count(1) == 1
            ):
                raise Exception("ERROR: Invalid Input")
            self.inputs.append(value)
            for i in range(len(self.outputType), int(math.log(len(self.inputs), 2))):
                self.outputType.append(0)
                self.outputConnector.append(None)
        else:
            temp[index] = value
            if not (
                temp.count(1) == 1
                or list(x.state for x in filter(lambda i: isinstance(i, Connector), temp)).count(1) == 1
            ):
                raise Exception("ERROR: Invalid Input")
            self.inputs[index] = value

        if isinstance(value, Connector):
            value.tap(self, "input")
            self.trigger()
Example #25
def find_segments(doc, key, use_segment_table = True):
    key_pieces = key.split(':')
    while len(key_pieces) < 3:
        key_pieces.append('*')

    filter_func = lambda x: str(x.ifos) == key_pieces[0] and (str(x.name) == key_pieces[1] or key_pieces[1] == '*') and (str(x.version) == key_pieces[2] or key_pieces[2] == '*') 

    # Find all segment definers matching the criteria
    seg_def_table = lsctables.SegmentDefTable.get_table(doc)
    seg_defs      = filter(filter_func, seg_def_table)
    seg_def_ids   = map(lambda x: str(x.segment_def_id), seg_defs)

    # Find all segments belonging to those definers
    if use_segment_table:
        seg_table     = lsctables.SegmentTable.get_table(doc)
        seg_entries   = filter(lambda x: str(x.segment_def_id) in seg_def_ids, seg_table)
    else:
        seg_sum_table = lsctables.SegmentSumTable.get_table(doc)
        seg_entries   = filter(lambda x: str(x.segment_def_id) in seg_def_ids, seg_sum_table)

    # Combine into a segmentlist
    ret = segmentlist(map(lambda x: segment(x.start_time, x.end_time), seg_entries))

    ret.coalesce()

    return ret
Example #26
def get_possible_destination(maze, row, col, visited, destination, found):
    explore_destination = []

    north = [None, col]
    for i in range(len(maze)):
        if row - i - 1 < 0 or maze[row - i - 1][col] == 1:  # or  visited[row - i - 1][col ]!= None :
            break
        north[0] = row - i - 1

    east = [row, None]
    for i in range(len(maze[0])):
        if col + i + 1 >= len(maze[0]) or maze[row][col + i + 1] == 1:  # or  visited[row][col + i + 1]!= None  :
            break
        east[1] = col + i + 1

    west = [row, None]
    for i in range(len(maze[0])):
        if col - i - 1 < 0 or maze[row][col - i - 1] == 1:  # or visited[row][col - i - 1]!= None:
            break
        west[1] = col - i - 1

    south = [None, col]
    for i in range(len(maze)):
        if row + i + 1 >= len(maze) or maze[row + i + 1][col] == 1:  # or  visited[row + i + 1][col ]!= None  :
            break
        south[0] = row + i + 1

    explore_destination = [north, east, west, south]
    # Remove the ones with None
    explore_destination = list(filter(lambda x: None not in x, explore_destination))
    # Remove the ones already visited
    explore_destination = list(filter(lambda x: not visited[x[0]][x[1]], explore_destination))

    return explore_destination
Example #27
def parseString2Pagenum(parent, string, nodialog=False):
    """ Parse a string with a list of pagenumbers to an integer list with
        page numbers.
        e.g. "1-3,5,7" --> [1,2,3,5,7]
        parent is important
    """
    listFull = string.split(",")
    PageNumbers = list()
    try:
        for item in listFull:
            pagerange = item.split("-")
            start = pagerange[0].strip()
            start = int(filter(type(start).isdigit, start))
            end = pagerange[-1].strip()
            end = int(filter(type(end).isdigit, end))
            for i in np.arange(end-start+1)+start:
                PageNumbers.append(i)
        PageNumbers.sort()
        return PageNumbers
    except:
        errstring = "Invalid syntax in page selection: "+string+\
                    ". Please use a comma separated list with"+\
                    " optional dashes, e.g. '1-3,6,8'."
        if nodialog is False:
            try:
                wx.MessageDialog(parent, errstring, "Error",
                                 style=wx.ICON_ERROR|wx.OK|wx.STAY_ON_TOP)
            except:
                raise ValueError(errstring)
        else:
            raise ValueError(errstring)
        return None
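
int(filter(...)) works under Python 2 because filtering a str yields a str; under Python 3 filter() returns an iterator that int() cannot parse, so the digits must be joined back into a string first. A sketch of the two conversions:

start = int(''.join(ch for ch in start if ch.isdigit()))
end = int(''.join(ch for ch in end if ch.isdigit()))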
Example #28
    def get_fieldsets(self, request, obj=None):
        if self.declared_fieldsets:
            return self.declared_fieldsets

        if self.form:
            fields = set(self.form._meta.fields)
        else:
            fields = ['label']

        in_fields = lambda x: x in fields

        general_fields = filter(in_fields, self.fieldset_general_fields)
        fieldsets = [
            (_('General options'), {'fields': general_fields}),
        ]

        boundries_fields = filter(in_fields, self.fieldset_boundaries_fields)
        if boundries_fields:
            fieldsets.append(
                (_('Min and max values'), {'fields': boundries_fields}))

        required_fields = filter(in_fields, self.fieldset_required_conf_fields)
        if required_fields:
            fieldsets.append(
                (_('Required'), {'fields': required_fields}))

        extra_fields = filter(in_fields, self.fieldset_extra_fields)
        if extra_fields:
            fieldsets.append(
                (_('Extra'), {'fields': extra_fields}))

        return fieldsets
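
A Python 3 trap worth noting: a filter object is always truthy, so checks in the style of 'if boundries_fields:' would append every fieldset even when no field matched. Materializing the groups as lists restores the intended behaviour; a sketch for one group:

boundries_fields = [f for f in self.fieldset_boundaries_fields if f in fields]
if boundries_fields:
    fieldsets.append(
        (_('Min and max values'), {'fields': boundries_fields}))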
Example #29
def trigger_mopage_refresh(obj, event):
    event_pages = filter(None,
                          map(lambda parent: IEventPage(parent, None),
                              aq_chain(obj)))
    if not event_pages:
        # We are not within an event page.
        # We only trigger when publishing an event page
        # or a child of an event page.
        return

    triggers = filter(None,
                      map(lambda parent: IPublisherMopageTrigger(parent, None),
                          aq_chain(obj)))
    if not triggers or not triggers[0].is_enabled():
        return

    for events in event_pages:
        IMopageModificationDate(events).touch()

    from collective.taskqueue import taskqueue

    trigger_url = triggers[0].build_trigger_url()
    callback_path = '/'.join(getSite().getPhysicalPath()
                             + ('taskqueue_events_trigger_mopage_refresh',))
    taskqueue.add(callback_path, params={'target': trigger_url})
Example #30
    def empty_locs_around(self, n, filter_out_blocked=False):
        neighbours = locs_around(n)
        neighbours = filter(lambda x: x not in self, neighbours)
        if filter_out_blocked:
            neighbours = filter(lambda x: x not in self.blocked, neighbours)

        return neighbours