Example #1
    def empty_locs_around(self, n, filter_out_blocked=False):
        neighbours = locs_around(n)
        neighbours = filter((lambda x: x not in self),neighbours)
        if filter_out_blocked:
            neighbours = filter((lambda x: x not in self.blocked),neighbours)

        return neighbours
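
Note: many snippets on this page target Python 2, where filter() returns a list. On Python 3 it returns a lazy iterator, so results that are indexed, length-checked, or iterated more than once need to be materialized. A minimal Python 3-style sketch of the method above (assuming the same locs_around helper, a blocked attribute, and a class that supports "in" checks) would be:

    def empty_locs_around(self, n, filter_out_blocked=False):
        # List comprehensions return real lists on both Python 2 and 3.
        neighbours = [x for x in locs_around(n) if x not in self]
        if filter_out_blocked:
            neighbours = [x for x in neighbours if x not in self.blocked]
        return neighbours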
Example #2
def trigger_mopage_refresh(obj, event):
    event_pages = filter(None,
                          map(lambda parent: IEventPage(parent, None),
                              aq_chain(obj)))
    if not event_pages:
        # We are not within an event page.
        # We only trigger when publishing an event page
        # or a child of an event page.
        return

    triggers = filter(None,
                      map(lambda parent: IPublisherMopageTrigger(parent, None),
                          aq_chain(obj)))
    if not triggers or not triggers[0].is_enabled():
        return

    for events in event_pages:
        IMopageModificationDate(events).touch()

    from collective.taskqueue import taskqueue

    trigger_url = triggers[0].build_trigger_url()
    callback_path = '/'.join(getSite().getPhysicalPath()
                             + ('taskqueue_events_trigger_mopage_refresh',))
    taskqueue.add(callback_path, params={'target': trigger_url})
Example #3
def parseString2Pagenum(parent, string, nodialog=False):
    """ Parse a string with a list of pagenumbers to an integer list with
        page numbers.
        e.g. "1-3,5,7" --> [1,2,3,5,7]
        parent is the wx parent window used for the error dialog
    """
    listFull = string.split(",")
    PageNumbers = list()
    try:
        for item in listFull:
            pagerange = item.split("-")
            start = pagerange[0].strip()
            start = int(filter(type(start).isdigit, start))
            end = pagerange[-1].strip()
            end = int(filter(type(end).isdigit, end))
            for i in np.arange(end-start+1)+start:
                PageNumbers.append(i)
        PageNumbers.sort()
        return PageNumbers
    except:
        errstring = "Invalid syntax in page selection: "+string+\
                    ". Please use a comma separated list with"+\
                    " optional dashes, e.g. '1-3,6,8'."
        if nodialog is False:
            try:
                wx.MessageDialog(parent, errstring, "Error",
                                 style=wx.ICON_ERROR|wx.OK|wx.STAY_ON_TOP)
            except:
                raise ValueError(errstring)
        else:
            raise ValueError(errstring)
        return None
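
The int(filter(type(start).isdigit, start)) calls above only work on Python 2, where filtering a string gives back a string. A Python 3-safe equivalent for those two lines (illustrative only, reusing the pagerange variable from the loop) joins the surviving characters before converting:

# filter() yields single characters on Python 3, so join them first.
start = int("".join(filter(str.isdigit, pagerange[0].strip())))
end = int("".join(filter(str.isdigit, pagerange[-1].strip())))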
Example #4
    def run(self, entity, discourse, syntax):
        if discourse == 'new':
            if len(self.dbpedia[entity]['givenNames']) > 0:
                givenNames = self.dbpedia[entity]['givenNames']
                first = filter(lambda x: len(x) == min(map(lambda x: len(x), givenNames)), givenNames)[0]

                surnames = self.dbpedia[entity]['surnames']
                last = filter(lambda x: len(x) == min(map(lambda x: len(x), surnames)), surnames)[0]

                name = str(first).strip() + ' ' + str(last).strip()
            else:
                birthNames = self.dbpedia[entity]['birthNames']
                name = str(filter(lambda x: len(x) == min(map(lambda x: len(x), birthNames)), birthNames)[0]).strip()
        else:
            if len(self.dbpedia[entity]['surnames']) > 0:
                surnames = self.dbpedia[entity]['surnames']
                last = filter(lambda x: len(x) == min(map(lambda x: len(x), surnames)), surnames)[0]

                name = str(last).strip()
            else:
                birthNames = self.dbpedia[entity]['birthNames']
                name = str(filter(lambda x: len(x) == min(map(lambda x: len(x), birthNames)), birthNames)[0]).strip().split()[-1]

        name = self.realize(name, syntax)
        return prep.get_label(name, self.dbpedia[entity]), name
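
Every filter(lambda x: len(x) == min(map(lambda x: len(x), ...)), ...)[0] expression above just selects the shortest string. min() with a key function does the same on both Python 2 and 3, without indexing a filter result (standalone illustration):

# Version-independent way to pick the shortest candidate (first one wins on ties).
given_names = ['Johannes', 'Jan', 'Johan']
first = min(given_names, key=len)   # 'Jan'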
Example #5
def calculateSparseDictCOO(data_set, data_label_hash, jump=1, valid_flag=False):
	row = []
	col = []
	data = []
	row_valid = []
	col_valid = []
	data_valid = []

	doc_ids = set(sorted(map(lambda row:int(row[0]), data_set)))
	base_ids_list = filter(lambda ids: ids % jump == 0, doc_ids)
	train_ids = base_ids_list
	valid_ids = set()
	if valid_flag:
		valid_index = filter(lambda ids: ids % validation_perc == 0, range(len(base_ids_list)))
		valid_ids = [base_ids_list[i] for i in valid_index]
		base_ids = set(base_ids_list)
		train_ids = sorted(base_ids - set(valid_ids))

	labels = map(lambda trid: int(data_label_hash[trid]), train_ids)
	labels_valid = map(lambda vlid: int(data_label_hash[vlid]), valid_ids)
	for i in range(len(data_set)):
		if int(data_set[i][0]) in train_ids:
			row.append(int(data_set[i][0]))
			col.append(int(data_set[i][1])-1)
			data.append(int(data_set[i][2]))
			# labels.append(int(data_label_hash[int(data_set[i][0])]))
		elif int(data_set[i][0]) in valid_ids:
			row_valid.append(int(data_set[i][0]))
			col_valid.append(int(data_set[i][1])-1)
			data_valid.append(int(data_set[i][2]))
			# labels_valid.append(int(data_label_hash[int(data_set[i][0])]))

	train = translate(row), col, data, labels
	valid = translate(row_valid), col_valid, data_valid, labels_valid
	return train, valid
Example #6
def find_segments(doc, key, use_segment_table = True):
    key_pieces = key.split(':')
    while len(key_pieces) < 3:
        key_pieces.append('*')

    filter_func = lambda x: str(x.ifos) == key_pieces[0] and (str(x.name) == key_pieces[1] or key_pieces[1] == '*') and (str(x.version) == key_pieces[2] or key_pieces[2] == '*') 

    # Find all segment definers matching the criteria
    seg_def_table = lsctables.SegmentDefTable.get_table(doc)
    seg_defs      = filter(filter_func, seg_def_table)
    seg_def_ids   = map(lambda x: str(x.segment_def_id), seg_defs)

    # Find all segments belonging to those definers
    if use_segment_table:
        seg_table     = lsctables.SegmentTable.get_table(doc)
        seg_entries   = filter(lambda x: str(x.segment_def_id) in seg_def_ids, seg_table)
    else:
        seg_sum_table = lsctables.SegmentSumTable.get_table(doc)
        seg_entries   = filter(lambda x: str(x.segment_def_id) in seg_def_ids, seg_sum_table)

    # Combine into a segmentlist
    ret = segmentlist(map(lambda x: segment(x.start_time, x.end_time), seg_entries))

    ret.coalesce()

    return ret
Example #7
    def _EXPERIMENTAL_VERBAL_PREDICATE_FEATURE_Infinitive(self):
        xcomp_children = filter(lambda x:x.get_parent_relation() in clausal_complement, self.children)
        ret = ([],[])
        for xcomp_child in xcomp_children:
            aux_children = filter(lambda x:x.get_parent_relation() in aux_dependencies, xcomp_child.children)
            to_children = filter(lambda x:x.pos == TO, aux_children)
            if not to_children:
                return (False,False)
            assert (len(to_children)==1)
            to_child = to_children[0]
            subj_children = filter(lambda x:x.get_parent_relation() in subject_dependencies, xcomp_child.children)
            adv_children = filter(lambda x:x.get_parent_relation() in adverb_dependencies, self.children)
#           if subj_children:
#               print(" ".join([self.word,subj_children[0].word,to_child.word,xcomp_child.word]))
#           if adv_children:
#               print(" ".join([adv_children[0].word,self.word,to_child.word,xcomp_child.word]))
            #ids = [x.id for x in [xcomp_child,to_child]]
            words = " ".join([self.word,to_child.word,xcomp_child.word])
            ret[1].extend([self.id,to_child.id,xcomp_child.id])
            # chaining
            childRes = xcomp_child._VERBAL_PREDICATE_FEATURE_Infinitive()
            if childRes[0]:
                words += " "+" ".join(childRes[0][0].split(" ")[1:])


            ret[0].append(words)

        return ret
Example #8
    def _fill_inheritance(self):
        """
        Traverses this class's ancestor list and attempts to fill in
        missing documentation from its ancestor's documentation.

        The first pass connects variables, methods and functions with
        their inherited counterparts. (The templates will decide how to
        display docstrings.) The second pass attempts to add instance
        variables to this class that were only explicitly declared in
        a parent class. This second pass is necessary since instance
        variables are only discoverable by traversing the abstract
        syntax tree.
        """
        mro = filter(lambda c: c != self and isinstance(c, Class),
                     self.module.mro(self))

        def search(d, fdoc):
            for c in mro:
                doc = fdoc(c)
                if d.name in doc and isinstance(d, type(doc[d.name])):
                    return doc[d.name]
            return None
        for fdoc in (lambda c: c.doc_init, lambda c: c.doc):
            for d in fdoc(self).values():
                dinherit = search(d, fdoc)
                if dinherit is not None:
                    d.inherits = dinherit

        # Since instance variables aren't part of a class's members,
        # we need to manually deduce inheritance. Oh lawdy.
        for c in mro:
            for name in filter(lambda n: n not in self.doc_init, c.doc_init):
                d = c.doc_init[name]
                self.doc_init[name] = Variable(d.name, d.module, '', cls=self)
                self.doc_init[name].inherits = d
Example #9
def fetch_production(country_code='SE', session=None):
    r = session or requests.session()
    timestamp = arrow.now().timestamp * 1000
    url = 'http://driftsdata.statnett.no/restapi/ProductionConsumption/GetLatestDetailedOverview?timestamp=%d' % timestamp
    response = r.get(url)
    obj = response.json()

    data = {
        'countryCode': country_code,
        'production': {
            'nuclear': float(filter(
                lambda x: x['titleTranslationId'] == 'ProductionConsumption.%s%sDesc' % ('Nuclear', country_code),
                obj['NuclearData'])[0]['value'].replace(u'\xa0', '')),
            'hydro': float(filter(
                lambda x: x['titleTranslationId'] == 'ProductionConsumption.%s%sDesc' % ('Hydro', country_code),
                obj['HydroData'])[0]['value'].replace(u'\xa0', '')),
            'wind': float(filter(
                lambda x: x['titleTranslationId'] == 'ProductionConsumption.%s%sDesc' % ('Wind', country_code),
                obj['WindData'])[0]['value'].replace(u'\xa0', '')),
            'unknown':
                float(filter(
                    lambda x: x['titleTranslationId'] == 'ProductionConsumption.%s%sDesc' % ('Thermal', country_code),
                    obj['ThermalData'])[0]['value'].replace(u'\xa0', '')) +
                float(filter(
                    lambda x: x['titleTranslationId'] == 'ProductionConsumption.%s%sDesc' % ('NotSpecified', country_code),
                    obj['NotSpecifiedData'])[0]['value'].replace(u'\xa0', '')),
        },
        'storage': {},
        'source': 'driftsdata.stattnet.no',
    }
    data['datetime'] = arrow.get(obj['MeasuredAt'] / 1000).datetime

    return data
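
The four production entries above are all fetched with the same filter-and-index pattern. A small helper (hypothetical, not part of the original parser) keeps that pattern in one place and also works on Python 3 by using next():

def _extract_value(rows, key, country_code):
    # Return the first matching row's numeric value, stripping no-break spaces.
    row = next(r for r in rows
               if r['titleTranslationId'] ==
               'ProductionConsumption.%s%sDesc' % (key, country_code))
    return float(row['value'].replace(u'\xa0', ''))

# e.g. 'nuclear': _extract_value(obj['NuclearData'], 'Nuclear', country_code)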
Example #10
    def runTest(self):
        self.setup_test()
        self.log_entries = self.c.run_command_ignore_fail("cat /sys/firmware/opal/msglog |  grep 'PHB#' | grep -i  ' C:'")
        failed_eplist = []
        failed_slotlist = []
        failed_swuplist = []
        match_list = ["[EP  ]", "[LGCY]", "[PCID]", "[ETOX]" ]

        for entry in self.log_entries:
            if entry == '':
                continue

            matchObj = re.match(r"(.*) PHB#(.*) \[(.*)", entry)
            if matchObj:
                bdfn = matchObj.group(2)
            else:
                log.debug(entry)
                bdfn = entry

            ep_present = False
            # Check for an end point PCI device; it should have a LOC_CODE label
            for string in match_list:
                if string in entry:
                    ep_present = True
                    if "LOC_CODE" in entry:
                        log.debug("Location code found for entry %s" % bdfn)
                    else:
                        failed_eplist.append(bdfn)
                    break
            else:
                ep_present = False

            if ep_present:
                continue

            if "[SWUP]" in entry:
                if "LOC_CODE" in entry:
                    log.debug("Entry %s has LOC_CODE".format(bdfn))
                    continue
                if "SLOT" in entry:
                    log.debug("Entry %s has SLOT".format(bdfn))
                    continue
                failed_swuplist.append(bdfn)

            # If it is a pcie slot check for SLOT entry
            if "SLOT" in entry:
                log.debug("Entry %s has the slot label" % bdfn)
            else:
                failed_slotlist.append(bdfn)

        log.debug(repr(failed_eplist))
        log.debug(repr(failed_slotlist))
        log.debug(repr(failed_swuplist))
        if (len(failed_slotlist) == 0) and (len(failed_eplist) == 0):
            return
        failed_eplist = '\n'.join(filter(None, failed_eplist))
        failed_slotlist = '\n'.join(filter(None, failed_slotlist))
        failed_swuplist = '\n'.join(filter(None, failed_swuplist))
        message = "SLOT Label failures: %s\n LOC_CODE failures:%s\nSWUP failures:%s\n" % (failed_slotlist, failed_eplist, failed_swuplist)
        self.assertTrue(False, message)
Example #11
 def _all_commands(self):
     path = builtins.__xonsh_env__.get('PATH', [])
     # did PATH change?
     path_hash = hash(tuple(path))
     cache_valid = path_hash == self._path_checksum
     self._path_checksum = path_hash
     # did aliases change?
     al_hash = hash(tuple(sorted(builtins.aliases.keys())))
     cache_valid = cache_valid and al_hash == self._alias_checksum
     self._alias_checksum = al_hash
     pm = self._path_mtime
     # did the contents of any directory in PATH change?
     for d in filter(os.path.isdir, path):
         m = os.stat(d).st_mtime
         if m > pm:
             pm = m
             cache_valid = False
     self._path_mtime = pm
     if cache_valid:
         return self._cmds_cache
     allcmds = set()
     for d in filter(os.path.isdir, path):
         allcmds |= set(os.listdir(d))
     allcmds |= set(builtins.aliases.keys())
     self._cmds_cache = frozenset(allcmds)
     return self._cmds_cache
Example #12
def get_task(task_id, src_id):
    print task_id
    print src_id
    task = filter(lambda t: t['dst'][:5] == task_id[:5], tasks)
    new_task = filter(lambda t: t['src'][:5] == src_id[:5], task)
    if len(new_task) == 0:
	print "cannot find the ip " + task_id + " from the database"
        print "calling king service from server"
	print subprocess.call(["../king/bin/king", src_id, task_id], stdout=open('log.txt','a'))
	re_tasks = []
	with open('out.txt') as ff:
    		lines = ff.readlines()
    		for line in lines:
    			words = line.split(' ')
			re_task = {'src': words[1],
				'dst': words[4],
				'rtt': words[7],
				'bandwidth': words[11]}
			re_tasks.append(re_task)
	print re_tasks
	_task = filter(lambda t: t['dst'][:5] == task_id[:5], re_tasks)
    	inject_task = filter(lambda t: t['src'][:5] == src_id[:5], _task)
	print inject_task
	if len(inject_task) == 0:
		abort(404)
	print inject_task
	new_task = inject_task
    print new_task
    return jsonify( { 'task': make_public_task(new_task[0]) } )
Example #13
    def main(self, argv):
        """
        Receives and executes the commands
        """
        global _cs
        #import traceback
        if self.CHIPSEC_LOADED_AS_EXE:
            import zipfile
            myzip = zipfile.ZipFile("library.zip")
            cmds = map( self.map_modname_zip, filter(self.f_mod_zip, myzip.namelist()) )
        else:
            #traceback.print_stack()
            mydir = imp.find_module('chipsec')[1]
            cmds_dir = os.path.join(mydir,os.path.join("utilcmd"))
            cmds = map( self.map_modname, filter(self.f_mod, os.listdir(cmds_dir)) )

        if logger().VERBOSE:
            logger().log( '[CHIPSEC] Loaded command-line extensions:' )
            logger().log( '   %s' % cmds )
        module = None
        for cmd in cmds:
            try:
                #exec 'from chipsec.utilcmd.' + cmd + ' import *'
                cmd_path = 'chipsec.utilcmd.' + cmd
                module = importlib.import_module( cmd_path )
                cu = getattr(module, 'commands')
                self.commands.update(cu)
            except ImportError, msg:
                logger().error( "Couldn't import util command extension '%s'" % cmd )
                raise ImportError, msg
Example #14
 def _move_additional_files(self, old_filename, new_filename):
     """Move extra files, like playlists..."""
     old_path = encode_filename(os.path.dirname(old_filename))
     new_path = encode_filename(os.path.dirname(new_filename))
     patterns = encode_filename(config.setting["move_additional_files_pattern"])
     patterns = filter(bool, [p.strip() for p in patterns.split()])
     try:
         names = os.listdir(old_path)
     except os.error:
         log.error("Error: {} directory not found".format(old_path))
         return
     filtered_names = filter(lambda x: x[0] != '.', names)
     for pattern in patterns:
         pattern_regex = re.compile(fnmatch.translate(pattern), re.IGNORECASE)
         file_names = names
         if pattern[0] != '.':
             file_names = filtered_names
         for old_file in file_names:
             if pattern_regex.match(old_file):
                 new_file = os.path.join(new_path, old_file)
                 old_file = os.path.join(old_path, old_file)
                 # FIXME we shouldn't do this from a thread!
                 if self.tagger.files.get(decode_filename(old_file)):
                     log.debug("File loaded in the tagger, not moving %r", old_file)
                     continue
                 log.debug("Moving %r to %r", old_file, new_file)
                 shutil.move(old_file, new_file)
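
A porting note: on Python 3 both patterns and filtered_names above become one-shot filter iterators, and filtered_names is reused for every pattern, so it would be exhausted after the first one. Building plain lists (a small illustrative change to those two lines) preserves the Python 2 behaviour:

# Materialize once so the same lists can be reused for every pattern (Python 3).
patterns = [p.strip() for p in patterns.split() if p.strip()]
filtered_names = [x for x in names if x[0] != '.']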
Example #15
    def test_search_filter_expired(self):
        """ Account.search() with expire_start, expire_stop args. """
        all = _set_of_ids(self._accounts)
        non_expired = _set_of_ids(filter(nonexpired_filter, self._accounts))
        expired = _set_of_ids(filter(expired_filter, self._accounts))

        # Test criterias
        self.assertGreaterEqual(len(non_expired), 1)
        self.assertGreaterEqual(len(expired), 1)

        # Tests: search params, must match
        for params, match_set, fail_set in (
                ({'expire_start': None, 'expire_stop': None,
                  'owner_id': self.db_tools.get_initial_group_id()},
                 all, set()),
                ({'expire_start': '[:now]', 'expire_stop': None,
                  'owner_id': self.db_tools.get_initial_group_id()},
                 non_expired, expired),
                ({'expire_start': None, 'expire_stop': '[:now]',
                  'owner_id': self.db_tools.get_initial_group_id()},
                 expired, non_expired),):
            result = _set_of_ids(self._ac.search(**params))
            self.assertGreaterEqual(len(result), len(match_set))
            self.assertTrue(result.issuperset(match_set))
            self.assertSetEqual(result.intersection(fail_set), set())
Example #16
    def get_fieldsets(self, request, obj=None):
        if self.declared_fieldsets:
            return self.declared_fieldsets

        if self.form:
            fields = set(self.form._meta.fields)
        else:
            fields = ['label']

        in_fields = lambda x: x in fields

        general_fields = filter(in_fields, self.fieldset_general_fields)
        fieldsets = [
            (_('General options'), {'fields': general_fields}),
        ]

        boundries_fields = filter(in_fields, self.fieldset_boundaries_fields)
        if boundries_fields:
            fieldsets.append(
                (_('Min and max values'), {'fields': boundries_fields}))

        required_fields = filter(in_fields, self.fieldset_required_conf_fields)
        if required_fields:
            fieldsets.append(
                (_('Required'), {'fields': required_fields}))

        extra_fields = filter(in_fields, self.fieldset_extra_fields)
        if extra_fields:
            fieldsets.append(
                (_('Extra'), {'fields': extra_fields}))

        return fieldsets
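
Porting note: on Python 3 a filter object is always truthy, so checks like "if boundries_fields:" above would append every optional fieldset even when it matches nothing. Building lists keeps the checks meaningful (a sketch for two of the fieldsets; the others follow the same pattern):

# filter() objects are always truthy on Python 3; use lists so the
# "if boundries_fields:" style checks still mean "any matching fields".
general_fields = [f for f in self.fieldset_general_fields if f in fields]
boundries_fields = [f for f in self.fieldset_boundaries_fields if f in fields]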
Example #17
def getWeekdayMeals(day_id):
    "Takes an int in range [0-4] and returns a dict of all meals that day."

    breakfast = tables[day_id].findAll('td', class_='breakfast')
    lunch = tables[day_id].findAll('td', class_='lunch')
    dinner = tables[day_id].findAll('td', class_='dinner')

    breakfast = filter(None, [f.text for f in breakfast])
    lunch = filter(None, [f.text for f in lunch])
    dinner = filter(None, [f.text for f in dinner])
    
    splitComma = lambda s: s.split(', ')
    strStrip = lambda s: s.encode('ascii', 'ignore').strip()
    
    breakfast = map(splitComma, breakfast)
    breakfast = [b for sublist in breakfast for b in sublist]
    breakfast = map(strStrip, breakfast)

    lunch = map(splitComma, lunch)
    lunch = [b for sublist in lunch for b in sublist]
    lunch = map(strStrip, lunch)

    dinner = map(splitComma, dinner)
    dinner = [b for sublist in dinner for b in sublist]
    dinner = map(strStrip, dinner)

    meals_dict = {'breakfast': breakfast,
                  'lunch': lunch,
                  'dinner': dinner}

    return meals_dict
Example #18
    def setInput(self, index, value):
        temp = self.inputs[:]
        if index >= len(temp):
            temp.append(value)
            if not (
                temp.count(1) == 1
                or list(x.state for x in filter(lambda i: isinstance(i, Connector), temp)).count(1) == 1
            ):
                raise Exception("ERROR: Invalid Input")
                self.inputs.append(value)
            for i in range(len(self.outputType), int(math.log(len(self.inputs), 2))):
                self.outputType.append(0)
                self.outputConnector.append(None)
        else:
            temp[index] = value
            if not (
                temp.count(1) == 1
                or list(x.state for x in filter(lambda i: isinstance(i, Connector), temp)).count(1) == 1
            ):
                raise Exception("ERROR: Invalid Input")
                self.inputs[index] = value

        if isinstance(value, Connector):
            value.tap(self, "input")
            self.trigger()
Example #19
def get_possible_destination(maze, row, col, visited, destination, found):
    explore_destination = []

    north = [None, col]
    for i in range(len(maze)):
        if row - i - 1 < 0 or maze[row - i - 1][col] == 1:  # or  visited[row - i - 1][col ]!= None :
            break
        north[0] = row - i - 1

    east = [row, None]
    for i in range(len(maze[0])):
        if col + i + 1 >= len(maze[0]) or maze[row][col + i + 1] == 1:  # or  visited[row][col + i + 1]!= None  :
            break
        east[1] = col + i + 1

    west = [row, None]
    for i in range(len(maze[0])):
        if col - i - 1 < 0 or maze[row][col - i - 1] == 1:  # or visited[row][col - i - 1]!= None:
            break
        west[1] = col - i - 1

    south = [None, col]
    for i in range(len(maze)):
        if row + i + 1 >= len(maze) or maze[row + i + 1][col] == 1:  # or  visited[row + i + 1][col ]!= None  :
            break
        south[0] = row + i + 1

    explore_destination = [north, east, west, south]
    explore_destination = list(
        filter(lambda x: False if None in x else True, explore_destination))  # Remove the ones with None
    explore_destination = list(filter(lambda x: False if visited[x[0]][x[1]] == True else True,
                                      explore_destination))  # Remove the ones already visited

    return explore_destination
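
The two chained filters above can be written as a single list comprehension, which also sidesteps the Python 2/3 difference in what filter() returns (illustrative, same variable names):

    # Keep only complete coordinates that have not been visited yet.
    explore_destination = [d for d in (north, east, west, south)
                           if None not in d and not visited[d[0]][d[1]]]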
Example #20
 def _handcoded_match(problem, newcluster, connected):
     if isinstance(newcluster, Rigid) and len(newcluster.vars)>=3:
         matches = []
         rigid1 = newcluster
         glues = filter(lambda o: isinstance(o, Glueable) and len(o.vars.intersection(rigid1.vars))>=3 , connected)
         for o in glues:
             connected2 = set()
             for var in o.vars:
                 dependend = problem.find_dependend(var)
                 dependend = filter(lambda x: problem.is_top_level(x), dependend)
                 connected2.update(dependend)
             rigids2 = filter(lambda r2: isinstance(r2, Rigid) and r2 != rigid1 and len(r2.vars.intersection(o.vars)) >=3, connected2)
             for rigid2 in rigids2:
                 m = Map({
                     "$r1": rigid1, 
                     "$o": o,
                     "$r2": rigid2
                 })
                 matches.append(m)
         return matches;
     elif isinstance(newcluster, Glueable):
         matches = []
         glue = newcluster
         rigids = filter(lambda r: isinstance(r, Rigid) and len(r.vars.intersection(glue.vars)) >=3, connected)
         for i in range(len(rigids)):
             for j in range(i+1, len(rigids)):
                 m = Map({
                     "$o": glue, 
                     "$r1": rigids[i],
                     "$r2": rigids[j],
                 })
                 matches.append(m)
         return matches;
     else:
         return []
Example #21
def _extract_metadata(content):
    tree = etree.fromstring(content)
    ns = {'xhtml': 'http://www.w3.org/1999/xhtml'}
    subject = tree.xpath('//xhtml:title', namespaces=ns)[0].text

    metadata_nodes = tree.xpath('//xhtml:meta', namespaces=ns)
    metadata_nodes = [n for n in metadata_nodes if 'name' in n.attrib]
    metadata = {}
    for node in metadata_nodes:
        metadata[node.attrib['name']] = node.attrib['content']

    for n in metadata_nodes:
        n.getparent().remove(n)

    content = etree.tostring(tree, pretty_print=True, encoding=unicode)

    sender = metadata.get('mail-sender', u'')
    to_recipients_txt = metadata.get('mail-to-recipients', u'')
    cc_recipients_txt = metadata.get('mail-cc-recipients', u'')
    bcc_recipients_txt = metadata.get('mail-bcc-recipients', u'')
    to_recipients = filter(None, re.split(r'\s*,\s*', to_recipients_txt))
    cc_recipients = filter(None, re.split(r'\s*,\s*', cc_recipients_txt))
    bcc_recipients = filter(None, re.split(r'\s*,\s*', bcc_recipients_txt))

    return content, subject, sender, to_recipients, cc_recipients, bcc_recipients
Example #22
def do_image_create(gc, args):
    """Create a new image."""
    # Filter out None values
    fields = dict(filter(lambda x: x[1] is not None, vars(args).items()))

    fields["is_public"] = fields.get("is_public")

    if "is_protected" in fields:
        fields["protected"] = fields.pop("is_protected")

    raw_properties = fields.pop("property")
    fields["properties"] = {}
    for datum in raw_properties:
        key, value = datum.split("=", 1)
        fields["properties"][key] = value

    # Filter out values we can't use
    CREATE_PARAMS = glanceclient.v1.images.CREATE_PARAMS
    fields = dict(filter(lambda x: x[0] in CREATE_PARAMS, fields.items()))

    _set_data_field(fields, args)

    # Only show progress bar for local image files
    if fields.get("data") and args.progress:
        filesize = utils.get_file_size(fields["data"])
        fields["data"] = progressbar.VerboseFileWrapper(fields["data"], filesize)

    image = gc.images.create(**fields)
    _image_show(image, args.human_readable)
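
Both dict(filter(...)) calls above can be expressed as dict comprehensions, which behave identically on Python 2 and 3 (a sketch of the two lines, keeping the same variable names):

    # Filter out None values.
    fields = {k: v for k, v in vars(args).items() if v is not None}
    # Filter out values we can't use.
    fields = {k: v for k, v in fields.items() if k in CREATE_PARAMS}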
Example #23
    def remove_chain(self, name, wrap=True):
        """Remove named chain.

        This removal "cascades". All rule in the chain are removed, as are
        all rules in other chains that jump to it.

        If the chain is not found, this is merely logged.

        """
        name = get_chain_name(name, wrap)
        chain_set = self._select_chain_set(wrap)

        if name not in chain_set:
            LOG.warn(_('Attempted to remove chain %s which does not exist'),
                     name)
            return

        chain_set.remove(name)
        self.rules = filter(lambda r: r.chain != name, self.rules)
        if wrap:
            jump_snippet = '-j %s-%s' % (binary_name, name)
        else:
            jump_snippet = '-j %s' % (name,)

        self.rules = filter(lambda r: jump_snippet not in r.rule, self.rules)
Example #24
def collectintargz(target, source, env):
    """ Puts all source files into a tar.gz file. """
    # the rpm tool depends on a source package, until this is changed
    # this hack needs to be here that tries to pack all sources in.
    sources = env.FindSourceFiles()

    # filter out the target we are building the source list for.
    #sources = [s for s in sources if not (s in target)]
    sources = filter(lambda s, t=target: not (s in t), sources)

    # find the .spec file for rpm and add it since it is not necessarily found
    # by the FindSourceFiles function.
    #sources.extend( [s for s in source if str(s).rfind('.spec')!=-1] )
    spec_file = lambda s: string.rfind(str(s), '.spec') != -1
    sources.extend( filter(spec_file, source) )

    # as the source contains the url of the source package this rpm package
    # is built from, we extract the target name
    #tarball = (str(target[0])+".tar.gz").replace('.rpm', '')
    tarball = string.replace(str(target[0])+".tar.gz", '.rpm', '')
    try:
        #tarball = env['SOURCE_URL'].split('/')[-1]
        tarball = string.split(env['SOURCE_URL'], '/')[-1]
    except KeyError, e:
        raise SCons.Errors.UserError( "Missing PackageTag '%s' for RPM packager" % e.args[0] )
Example #25
    def getPrefLabel(self):
        if self.load_on_cuis:
            if len(self.atoms) == 1:
                return self.atoms[0][MRCONSO_STR]

            labels = set([x[MRCONSO_STR] for x in self.atoms])
            if len(labels) == 1:
                return labels.pop()

            #if there's only one ISPREF=Y then that one.
            is_pref_atoms =  filter(lambda x: x[MRCONSO_ISPREF] == 'Y', self.atoms)
            if len(is_pref_atoms) == 1:
                return is_pref_atoms[0][MRCONSO_STR]
            elif len(is_pref_atoms) > 1:
                is_pref_atoms =  filter(lambda x: x[MRCONSO_STT] == 'PF', is_pref_atoms)
                if len(is_pref_atoms) > 0:
                    return is_pref_atoms[0][MRCONSO_STR]
            is_pref_atoms =  filter(lambda x: x[MRCONSO_STT] == 'PF', self.atoms)
            if len(is_pref_atoms) == 1:
                return is_pref_atoms[0][MRCONSO_STR]
            return self.atoms[0][MRCONSO_STR]
        else:
            #if ISPREF=Y is not 1 then we look into MRRANK.
            if len(self.rank) > 0:
                sort_key = \
                lambda x: int(self.rank[self.rank_by_tty[x[MRCONSO_TTY]][0]][MRRANK_RANK])
                mmrank_sorted_atoms = sorted(self.atoms, key=sort_key, reverse=True)
                return mmrank_sorted_atoms[0][MRCONSO_STR]
            #there is no rank to use
            else:
                pref_atom = filter(lambda x: 'P' in x[MRCONSO_TTY], self.atoms)
                if len(pref_atom) == 1:
                    return pref_atom[0][MRCONSO_STR]
            raise AttributeError, "Unable to select pref label"
Example #26
    def get_exportable_members( self, sort=None ):
        """returns list of internal declarations that should\\could be exported"""
        #TODO: obviously this function should be shorter. Almost all logic of this class
        #      should be spread between decl_wrapper classes
        members = [mv for mv in self.public_members if mv.ignore == False and mv.exportable]
        #protected and private virtual functions that not overridable and not pure
        #virtual should not be exported
        for member in self.protected_members:
            if isinstance( member, declarations.calldef_t ):
                members.append( member )
            else:
                pass

        vfunction_selector = lambda member: isinstance( member, declarations.member_function_t ) \
                                            and member.virtuality == declarations.VIRTUALITY_TYPES.PURE_VIRTUAL
        members.extend( list(filter( vfunction_selector, self.private_members )) )

        def is_exportable( decl ):
            #filter out non-public member operators - `Py++` does not support them right now
            if isinstance( decl, declarations.member_operator_t ) \
               and decl.access_type != declarations.ACCESS_TYPES.PUBLIC:
                return False
            #remove artificial constructors
            if isinstance( decl, declarations.constructor_t ) and decl.is_artificial:
                return False
            if decl.ignore == True or decl.exportable == False:
                return False
            return True
        #-#if declarations.has_destructor( self ) \
        #-#   and not declarations.has_public_destructor( self ):
        members = list(filter( is_exportable, members ))
        sorted_members = members
        if sort:
            sorted_members = sort( members )
        return sorted_members
Example #27
    def run_filters(self, check_removed=True):
        """
        Run all the filters in self.filters.
        :param check_removed: (bool) If True, skip the remaining filters for a station that has
         already been added to removed_station_ids by a previous filter.
        :return:
        """
        # Check if filters have been initialized before running.
        if not bool(self.filters):
            sys.exit("ERROR run_filters: no filters have been initialized.")
        # Get list of all stations in the time series folder
        o_dir = os.getcwd()
        os.chdir(self.ts_path)
        # stations = [n for n in os.listdir('.') if n.isdigit()]  # list of all station folder names

        # Iterate through all the stations and apply filters
        for i, stat in enumerate(self.stations):
            print 'Processing station: %s' % stat
            if self.iter_time_seris:  # Only open and process time series if necessary
                self.ts_df = pd.read_csv('./%s/time_series.csv' % stat, index_col='Timestamp')
                self.ts_df['date'] = [d[0:10] for d in self.ts_df.index]
                self.ts_df['hour'] = [d[-8:-6] for d in self.ts_df.index]
            # Apply all the filters in the self.filters
            for filter in self.filters:
                # TODO setting check_removed to False will cause the OneClass_SVM filtering to break due to empty features (Andrew 16/07/25)
                if check_removed and stat in self.removed_station_ids:
                    break
                filter(str(stat))
        try:
            [self.cleaned_station_ids.remove(s) for s in self.removed_station_ids]  # remove the removed from the cleaned
        except KeyError as e:
            pass
        os.chdir(o_dir)
Example #28
def get_vw_nvalues(model_run_uuid):
    """
    Given a model run uuid that contains the lookup table and ESRI .asc with
    vegetation codes, return an ascii file that has the n-values properly
    assigned
    """
    vwc = default_vw_client()

    records = vwc.dataset_search(model_run_uuid=model_run_uuid).records

    downloads = [r['downloads'][0] for r in records]

    asc_url = filter(lambda d: d.keys().pop() == 'ascii',
                     downloads).pop()['ascii']

    xlsx_url = filter(lambda d: d.keys().pop() == 'xlsx',
                      downloads).pop()['xlsx']

    asc_path = 'tmp_' + str(uuid4()) + '.asc'
    vwc.download(asc_url, asc_path)

    xlsx_path = 'tmp_' + str(uuid4()) + '.xlsx'
    vwc.download(xlsx_url, xlsx_path)

    asc_nvals = vegcode_to_nvalue(asc_path, xlsx_path)

    os.remove(asc_path)
    os.remove(xlsx_path)

    return asc_nvals
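
The filter(...).pop() lookups above are doubly Python 2-specific: dict.keys() is not a list on Python 3, and a filter object has no pop(). Assuming, as the original does, that each download record carries a single format key, a next()-based equivalent would be:

    # Pick the first download entry of each format (Python 3-friendly).
    asc_url = next(d for d in downloads if 'ascii' in d)['ascii']
    xlsx_url = next(d for d in downloads if 'xlsx' in d)['xlsx']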
Example #29
    def find_max_match(self, options_list, el_value):
        """
        Finds the Longest Word Trimmed Match for selecting text in options field.
        @param options_list: The list of options in the options field.
        @param el_value: The text to be matched in the options.
        """
        el_value_list = el_value.split()
        # Remove all words of length = 1 such as hyphens.
        el_value_list = filter(lambda x: len(x) > 1, el_value_list)
        # Initialise max_len as 0 and matched_option as None.
        max_len = 0
        matched_option = None

        for option in options_list:
            text = option.text
            text_list = text.split()
            # Remove all words of length = 1 such as hyphens.
            text_list = filter(lambda x: len(x) > 1, text_list)
            # Find intersection of el_value_list and text_list
            matched_list = list(set(el_value_list).intersection(text_list))
            # matched_len is number of matching words for the current option.
            matched_len = len(matched_list)
            # Save the maximum matched option in matched_option.
            if matched_len > max_len:
                matched_option = option
                max_len = matched_len

        # Return the maximum matched option.
        return matched_option
Example #30
def do_image_update(gc, args):
    """Update a specific image."""
    # Filter out None values
    fields = dict(filter(lambda x: x[1] is not None, vars(args).items()))

    image_arg = fields.pop("image")
    image = utils.find_resource(gc.images, image_arg)

    if "is_protected" in fields:
        fields["protected"] = fields.pop("is_protected")

    raw_properties = fields.pop("property")
    fields["properties"] = {}
    for datum in raw_properties:
        key, value = datum.split("=", 1)
        fields["properties"][key] = value

    # Filter out values we can't use
    UPDATE_PARAMS = glanceclient.v1.images.UPDATE_PARAMS
    fields = dict(filter(lambda x: x[0] in UPDATE_PARAMS, fields.items()))

    if image.status == "queued":
        _set_data_field(fields, args)

        if args.progress:
            filesize = utils.get_file_size(fields["data"])
            fields["data"] = progressbar.VerboseFileWrapper(fields["data"], filesize)

    image = gc.images.update(image, purge_props=args.purge_props, **fields)
    _image_show(image, args.human_readable)
Example #31
def main():
    # Parse arguments and pass through unrecognised args
    parser = argparse.ArgumentParser(
        add_help=False,
        usage='%(prog)s [test_runner.py options] [script options] [scripts]',
        description=__doc__,
        epilog='''
    Help text and arguments for individual test script:''',
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument(
        '--ansi',
        action='store_true',
        default=sys.stdout.isatty(),
        help=
        "Use ANSI colors and dots in output (enabled by default when standard output is a TTY)"
    )
    parser.add_argument(
        '--combinedlogslen',
        '-c',
        type=int,
        default=0,
        metavar='n',
        help=
        'On failure, print a log (of length n lines) to the console, combined from the test framework and all test nodes.'
    )
    parser.add_argument(
        '--coverage',
        action='store_true',
        help='generate a basic coverage report for the RPC interface')
    parser.add_argument(
        '--ci',
        action='store_true',
        help=
        'Run checks and code that are usually only enabled in a continuous integration environment'
    )
    parser.add_argument(
        '--exclude',
        '-x',
        help='specify a comma-separated-list of scripts to exclude.')
    parser.add_argument(
        '--extended',
        action='store_true',
        help='run the extended test suite in addition to the basic tests')
    parser.add_argument('--help',
                        '-h',
                        '-?',
                        action='store_true',
                        help='print help text and exit')
    parser.add_argument(
        '--jobs',
        '-j',
        type=int,
        default=4,
        help='how many test scripts to run in parallel. Default=4.')
    parser.add_argument(
        '--keepcache',
        '-k',
        action='store_true',
        help=
        'the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous testrun.'
    )
    parser.add_argument(
        '--quiet',
        '-q',
        action='store_true',
        help='only print dots, results summary and failure logs')
    parser.add_argument('--tmpdirprefix',
                        '-t',
                        default=tempfile.gettempdir(),
                        help="Root directory for datadirs")
    parser.add_argument('--failfast',
                        action='store_true',
                        help='stop execution after the first test failure')
    parser.add_argument('--filter',
                        help='filter scripts to run by regular expression')

    args, unknown_args = parser.parse_known_args()
    if not args.ansi:
        global BOLD, GREEN, RED, GREY
        BOLD = ("", "")
        GREEN = ("", "")
        RED = ("", "")
        GREY = ("", "")

    # args to be passed on always start with two dashes; tests are the remaining unknown args
    tests = [arg for arg in unknown_args if arg[:2] != "--"]
    passon_args = [arg for arg in unknown_args if arg[:2] == "--"]

    # Read config generated by configure.
    config = configparser.ConfigParser()
    configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini"
    config.read_file(open(configfile, encoding="utf8"))

    passon_args.append("--configfile=%s" % configfile)

    # Set up logging
    logging_level = logging.INFO if args.quiet else logging.DEBUG
    logging.basicConfig(format='%(message)s', level=logging_level)

    # Create base test directory
    tmpdir = "%s/test_runner_₿_🏃_%s" % (
        args.tmpdirprefix, datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))

    os.makedirs(tmpdir)

    logging.debug("Temporary test directory at %s" % tmpdir)

    enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND")

    if not enable_bitcoind:
        print("No functional tests to run.")
        print("Rerun ./configure with --with-daemon and then make")
        sys.exit(0)

    # Build list of tests
    test_list = []
    if tests:
        # Individual tests have been specified. Run specified tests that exist
        # in the ALL_SCRIPTS list. Accept names with or without a .py extension.
        # Specified tests can contain wildcards, but in that case the supplied
        # paths should be coherent, e.g. the same path as that provided to call
        # test_runner.py. Examples:
        #   `test/functional/test_runner.py test/functional/wallet*`
        #   `test/functional/test_runner.py ./test/functional/wallet*`
        #   `test_runner.py wallet*`
        #   but not:
        #   `test/functional/test_runner.py wallet*`
        # Multiple wildcards can be passed:
        #   `test_runner.py tool* mempool*`
        for test in tests:
            script = test.split("/")[-1]
            script = script + ".py" if ".py" not in script else script
            if script in ALL_SCRIPTS:
                test_list.append(script)
            else:
                print("{}WARNING!{} Test '{}' not found in full test list.".
                      format(BOLD[1], BOLD[0], test))
    elif args.extended:
        # Include extended tests
        test_list += ALL_SCRIPTS
    else:
        # Run base tests only
        test_list += BASE_SCRIPTS

    # Remove the test cases that the user has explicitly asked to exclude.
    if args.exclude:
        exclude_tests = [
            test.split('.py')[0] for test in args.exclude.split(',')
        ]
        for exclude_test in exclude_tests:
            # Remove <test_name>.py and <test_name>.py --arg from the test list
            exclude_list = [
                test for test in test_list
                if test.split('.py')[0] == exclude_test
            ]
            for exclude_item in exclude_list:
                test_list.remove(exclude_item)
            if not exclude_list:
                print("{}WARNING!{} Test '{}' not found in current test list.".
                      format(BOLD[1], BOLD[0], exclude_test))

    if args.filter:
        test_list = list(filter(re.compile(args.filter).search, test_list))

    if not test_list:
        print(
            "No valid test scripts specified. Check that your test is in one "
            "of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests"
        )
        sys.exit(0)

    if args.help:
        # Print help for test_runner.py, then print help of the first script (with args removed) and exit.
        parser.print_help()
        subprocess.check_call([
            sys.executable,
            os.path.join(config["environment"]["SRCDIR"], 'test', 'functional',
                         test_list[0].split()[0]), '-h'
        ])
        sys.exit(0)

    check_script_list(src_dir=config["environment"]["SRCDIR"],
                      fail_on_warn=args.ci)
    check_script_prefixes()

    if not args.keepcache:
        shutil.rmtree("%s/test/cache" % config["environment"]["BUILDDIR"],
                      ignore_errors=True)

    run_tests(
        test_list=test_list,
        src_dir=config["environment"]["SRCDIR"],
        build_dir=config["environment"]["BUILDDIR"],
        tmpdir=tmpdir,
        jobs=args.jobs,
        enable_coverage=args.coverage,
        args=passon_args,
        combined_logs_len=args.combinedlogslen,
        failfast=args.failfast,
        use_term_control=args.ansi,
    )
Example #32
 def local_fns(self):
     return list(
         filter(lambda each: each.get('imported') is None,
                self.functions.values()))
Example #33
 def remove_relevant_hosts_from_model(self, host_ids):
     """Takes a list of host_ids and deletes the one found on the model
     from there. Return None."""
     relevant_host_ids = filter(self._is_host_in_model_by_host_id, host_ids)
     map(self._remove_single_host_from_model, relevant_host_ids)
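
Here map() is used purely for its side effects. On Python 3, map() is lazy and the iterator is never consumed, so nothing would actually be removed. An explicit loop (a sketch of the same method, assuming the same helper methods) makes the removal unconditional:

def remove_relevant_hosts_from_model(self, host_ids):
    """Remove the given hosts from the model if they are present there."""
    relevant_host_ids = filter(self._is_host_in_model_by_host_id, host_ids)
    # map() is lazy on Python 3 and would never invoke the callback, so use
    # an explicit loop to force the removal side effect.
    for host_id in relevant_host_ids:
        self._remove_single_host_from_model(host_id)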
Example #34
 def names_str(self, sep='\n', *excl_names):
     names = list(filter(lambda x: x not in excl_names, self.names))
     return ((sep + "{}") * len(names)).format(*names)
Example #35
list1 = [1, 2, 3, 4, 5, 6]
update_list = map(lambda value: value**2, list1)
logger.info(update_list)
for item in update_list:
    logger.info(item)

# map(fun, list_of)
logger.info(list(map(int, [1.2, 2.3, 3.4, 65])))
# int(1.2) , int(2.3), int(3.4), int(65)

# update_list = []
# for item in [1.2, 2.3, 3.4, 65]:
#     update_list.append(int(item))
# [int(item) for item in [1.2, 2.3, 3.4, 65]]
# filter(function, iterable)
logger.info(list(filter(None, [1, 2, 3, 4, 5, 6])))

logger.info(list(filter(lambda value: value % 2 == 0, [1, 2, 3, 4, 5, 6])))

from functools import reduce  # needed on Python 3; reduce is a builtin on Python 2

logger.info(reduce(lambda value1, value2: value1 + value2, [1, 2, 3, 4, 5]))
import operator

logger.info(reduce(operator.add, [1, 2, 3, 4, 5]))
logger.info(reduce(lambda str1, str2: str1 + str2,
                   ['name1', 'name2', 'name3']))

# [1,2,3,4,5]
# (1+2)+3)+4)+5) ->  3, 3 , 6, 10,

# isinstance, issubclass, hasattr, delattr, setattr, classmethod, staticmethod
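
A small caveat for Python 3: update_list = map(...) near the top of this example is a one-shot iterator, so logger.info(update_list) logs something like "<map object at 0x...>" rather than the values, and the iterator is empty once the for loop has consumed it. Converting it to a list up front (reusing list1 and logger from the snippet above) keeps both uses working:

update_list = list(map(lambda value: value**2, list1))
logger.info(update_list)      # [1, 4, 9, 16, 25, 36]
for item in update_list:      # a list can be re-iterated freely
    logger.info(item)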
Example #36
            status = 0
    eval_reward = evaluate_Q(test_data, model, price_data, i)
    learning_progress.append((eval_reward))
    print("Epoch #: %s Reward: %f Epsilon: %f" % (i,eval_reward, epsilon))
    #learning_progress.append((reward))
    if epsilon > 0.1: #decrement epsilon over time
        epsilon -= (1.0/epochs)


elapsed = np.round(timeit.default_timer() - start_time, decimals=2)
print("Completed in %f" % (elapsed,))

bt = twp.Backtest(pd.Series(data=[x[0,0] for x in xdata]), signal, signalType='shares')
bt.data['delta'] = bt.data['shares'].diff().fillna(0)

print(bt.data)
unique, counts = np.unique(filter(lambda v: v==v, signal.values), return_counts=True)
print(np.asarray((unique, counts)).T)

plt.figure()
plt.subplot(3,1,1)
bt.plotTrades()
plt.subplot(3,1,2)
bt.pnl.plot(style='x-')
plt.subplot(3,1,3)
plt.plot(learning_progress)

plt.savefig('plt/summary'+'.png', bbox_inches='tight', pad_inches=1, dpi=72)
#plt.show()

Example #37
  def __init__(self,
               config,
               x,
               y,
               x_b,
               y_b,
               x_b_v,
               y_b_v,
               num_classes_a,
               num_classes_b,
               is_training=True,
               ext_wts=None,
               y_sel=None,
               w_class_a=None,
               b_class_a=None,
               nshot=None):
    self._config = config
    self._is_training = is_training
    self._num_classes_a = num_classes_a
    self._num_classes_b = num_classes_b

    if config.backbone_class == 'resnet_backbone':
      bb_config = config.resnet_config
    else:
      assert False, 'Not supported'
    opt_config = config.optimizer_config
    proto_config = config.protonet_config
    transfer_config = config.transfer_config

    self._backbone = get_model(config.backbone_class, bb_config)
    self._inputs = x
    self._labels = y
    # if opt_config.num_gpu > 1:
    #   self._labels_all = allgather(self._labels)
    # else:
    self._labels_all = self._labels
    self._inputs_b = x_b
    self._labels_b = y_b
    self._inputs_b_v = x_b_v
    self._labels_b_v = y_b_v
    # if opt_config.num_gpu > 1:
    #   self._labels_b_v_all = allgather(self._labels_b_v)
    # else:
    self._labels_b_v_all = self._labels_b_v
    self._y_sel = y_sel
    self._mask = tf.placeholder(tf.bool, [], name='mask')

    # global_step = tf.get_variable(
    #     'global_step', shape=[], dtype=tf.int64, trainable=False)
    global_step = tf.contrib.framework.get_or_create_global_step()
    self._global_step = global_step
    log.info('LR decay steps {}'.format(opt_config.lr_decay_steps))
    log.info('LR list {}'.format(opt_config.lr_list))
    learn_rate = tf.train.piecewise_constant(
        global_step, list(
            np.array(opt_config.lr_decay_steps).astype(np.int64)),
        list(opt_config.lr_list))
    self._learn_rate = learn_rate

    opt = self.get_optimizer(opt_config.optimizer, learn_rate)
    # if opt_config.num_gpu > 1:
    #   opt = hvd.DistributedOptimizer(opt)

    with tf.name_scope('TaskA'):
      h_a = self.backbone(x, is_training=is_training, ext_wts=ext_wts)
      self._h_a = h_a

    # Apply BN ops.
    bn_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)

    with tf.name_scope('TaskB'):
      x_b_all = tf.concat([x_b, x_b_v], axis=0)
      if ext_wts is not None:
        h_b_all = self.backbone(
            x_b_all, is_training=is_training, reuse=True, ext_wts=ext_wts)
      else:
        h_b_all = self.backbone(x_b_all, is_training=is_training, reuse=True)

    with tf.name_scope('TaskA'):
      # Calculates hidden activation size.
      h_shape = h_a.get_shape()
      h_size = 1
      for ss in h_shape[1:]:
        h_size *= int(ss)

      if w_class_a is None:
        if ext_wts is not None:
          w_class_a = weight_variable(
              [h_size, num_classes_a],
              init_method='numpy',
              dtype=tf.float32,
              init_param={'val': np.transpose(ext_wts['w_class_a'])},
              wd=config.wd,
              name='w_class_a')
          b_class_a = weight_variable([],
                                      init_method='numpy',
                                      dtype=tf.float32,
                                      init_param={'val': ext_wts['b_class_a']},
                                      wd=0e0,
                                      name='b_class_a')
        else:
          w_class_a = weight_variable([h_size, num_classes_a],
                                      init_method='truncated_normal',
                                      dtype=tf.float32,
                                      init_param={'stddev': 0.01},
                                      wd=bb_config.wd,
                                      name='w_class_a')
          b_class_a = weight_variable([num_classes_a],
                                      init_method='constant',
                                      init_param={'val': 0.0},
                                      name='b_class_a')
        self._w_class_a_orig = w_class_a
        self._b_class_a_orig = b_class_a
      else:
        assert b_class_a is not None
        w_class_a_orig = weight_variable([h_size, num_classes_a],
                                         init_method='truncated_normal',
                                         dtype=tf.float32,
                                         init_param={'stddev': 0.01},
                                         wd=bb_config.wd,
                                         name='w_class_a')
        b_class_a_orig = weight_variable([num_classes_a],
                                         init_method='constant',
                                         init_param={'val': 0.0},
                                         name='b_class_a')
        self._w_class_a_orig = w_class_a_orig
        self._b_class_a_orig = b_class_a_orig

      self._w_class_a = w_class_a
      self._b_class_a = b_class_a
      num_classes_a_dyn = tf.cast(tf.shape(b_class_a)[0], tf.int64)
      num_classes_a_dyn32 = tf.shape(b_class_a)[0]

      if proto_config.cosine_a:
        if proto_config.cosine_tau:
          if ext_wts is None:
            init_val = 10.0
          else:
            init_val = ext_wts['tau'][0]
          tau = weight_variable([],
                                init_method='constant',
                                init_param={'val': init_val},
                                name='tau')
        else:
          tau = tf.constant(1.0)
        w_class_a_norm = self._normalize(w_class_a, 0)
        h_a_norm = self._normalize(h_a, 1)
        dot = tf.matmul(h_a_norm, w_class_a_norm)
        if ext_wts is not None:
          dot += b_class_a
        logits_a = tau * dot
      else:
        logits_a = compute_euc(tf.transpose(w_class_a), h_a)
      self._prediction_a = logits_a
      # if opt_config.num_gpu > 1:
      #   self._prediction_a_all = allgather(self._prediction_a)
      # else:
      self._prediction_a_all = self._prediction_a

      xent_a = tf.nn.sparse_softmax_cross_entropy_with_logits(
          logits=logits_a, labels=y)
      cost_a = tf.reduce_mean(xent_a, name='xent')
      self._cost_a = cost_a
      cost_a += self._decay()
      correct_a = tf.equal(tf.argmax(logits_a, axis=1), y)
      self._correct_a = correct_a
      self._acc_a = tf.reduce_mean(tf.cast(correct_a, cost_a.dtype))

    with tf.name_scope('TaskB'):
      h_b = h_b_all[:tf.shape(x_b)[0]]
      h_b_v = h_b_all[tf.shape(x_b)[0]:]

      # Add new axes for the `batch` dimension.
      h_b_ = tf.expand_dims(h_b, 0)
      h_b_v_ = tf.expand_dims(h_b_v, 0)
      y_b_ = tf.expand_dims(y_b, 0)
      y_b_v_ = tf.expand_dims(y_b_v, 0)

      if transfer_config.old_and_new:
        protos_b = self._compute_protos(num_classes_b, h_b_,
                                        y_b_ - num_classes_a)
      else:
        protos_b = self._compute_protos(num_classes_b, h_b_, y_b_)

      w_class_a_ = tf.expand_dims(tf.transpose(w_class_a), 0)
      if proto_config.protos_phi:
        w_p1 = weight_variable([h_size],
                               init_method='constant',
                               dtype=tf.float32,
                               init_param={'val': 1.0},
                               wd=bb_config.wd,
                               name='w_p1')
      if proto_config.cosine_attention:
        w_q = weight_variable([h_size, h_size],
                              init_method='truncated_normal',
                              dtype=tf.float32,
                              init_param={'stddev': 0.1},
                              wd=bb_config.wd,
                              name='w_q')
        k_b = weight_variable([num_classes_a, h_size],
                              init_method='truncated_normal',
                              dtype=tf.float32,
                              init_param={'stddev': 0.1},
                              wd=bb_config.wd,
                              name='k_b')
        tau_q = weight_variable([],
                                init_method='constant',
                                init_param={'val': 10.0},
                                name='tau_q')
        if transfer_config.old_and_new:
          w_class_b = self._compute_protos_attend_fix(
              num_classes_b, h_b_, y_b_ - num_classes_a_dyn, w_q, tau_q, k_b,
              self._w_class_a_orig)
        else:
          w_class_b = self._compute_protos_attend_fix(
              num_classes_b, h_b_, y_b_, w_q, tau_q, k_b, self._w_class_a_orig)
        assert proto_config.protos_phi
        w_p2 = weight_variable([h_size],
                               init_method='constant',
                               dtype=tf.float32,
                               init_param={'val': 1.0},
                               wd=bb_config.wd,
                               name='w_p2')
        self._k_b = tf.expand_dims(w_p2, 1) * self._w_class_a_orig
        self._k_b2 = k_b
        self.bias = w_class_b
        self.new_protos = w_p1 * protos_b
        self.new_bias = w_p2 * w_class_b
        w_class_b = w_p1 * protos_b + w_p2 * w_class_b
        self.protos = protos_b
        self.w_class_b_final = w_class_b
      else:
        w_class_b = protos_b
        if proto_config.protos_phi:
          w_class_b = w_p1 * w_class_b

      self._w_class_b = w_class_b

      if transfer_config.old_and_new:
        w_class_all = tf.concat([w_class_a_, w_class_b], axis=1)
      else:
        w_class_all = w_class_b

      if proto_config.cosine_softmax_tau:
        tau_b = weight_variable([],
                                init_method='constant',
                                init_param={'val': 10.0},
                                name='tau_b')
      else:
        tau_b = tf.constant(1.0)

      if proto_config.similarity == 'euclidean':
        logits_b_v = compute_logits(w_class_all, h_b_v_)
      elif proto_config.similarity == 'cosine':
        logits_b_v = tau_b * compute_logits_cosine(w_class_all, h_b_v_)
      else:
        raise ValueError('Unknown similarity')
      self._logits_b_v = logits_b_v
      self._prediction_b = logits_b_v[0]
      # if opt_config.num_gpu > 1:
      #   self._prediction_b_all = allgather(self._prediction_b)
      # else:
      self._prediction_b_all = self._prediction_b

      # Mask out the old classes.
      def mask_fn():
        bin_mask = tf.expand_dims(
            tf.reduce_sum(
                tf.one_hot(y_sel, num_classes_a + num_classes_b),
                0,
                keep_dims=True), 0)
        logits_b_v_m = logits_b_v * (1.0 - bin_mask)
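        # subtracting 100 at the masked positions drives those (old-class) logits
        # strongly negative, so softmax assigns them near-zero probability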
        logits_b_v_m -= bin_mask * 100.0
        return logits_b_v_m

      # if transfer_config.old_and_new:
      #   logits_b_v = tf.cond(self._mask, mask_fn, lambda: logits_b_v)
      xent_b_v = tf.nn.sparse_softmax_cross_entropy_with_logits(
          logits=logits_b_v, labels=y_b_v_)
      cost_b = tf.reduce_mean(xent_b_v, name='xent')
      self._cost_b = cost_b

    if transfer_config.old_and_new:
      total_cost = cost_b
    else:
      total_cost = (transfer_config.cost_a_ratio * cost_a +
                    transfer_config.cost_b_ratio * cost_b)
    self._total_cost = total_cost

    if not transfer_config.meta_only:
      # assert False, 'let us go for pretrained model first'
      var_list = tf.trainable_variables()
      var_list = list(filter(lambda x: 'phi' in x.name, var_list))
      layers = self.config.transfer_config.meta_layers
      if layers == "all":
        pass
      elif layers == "4":
        keywords = ['TaskB', 'unit_4_']
        filter_fn = lambda x: any([kw in x.name for kw in keywords])
        var_list = list(filter(filter_fn, var_list))
      else:
        raise ValueError('Unknown finetune layers {}'.format(layers))
      [log.info('Slow weights {}'.format(v.name)) for v in var_list]
    else:
      var_list = []

    if proto_config.cosine_softmax_tau:
      var_list += [tau_b]

    if proto_config.cosine_attention:
      var_list += [w_q, tau_q, k_b, w_p2]

    if proto_config.protos_phi:
      var_list += [w_p1]

    if transfer_config.train_wclass_a:
      if proto_config.similarity == 'euclidean':
        var_list += [w_class_a, b_class_a]
      elif proto_config.similarity == 'cosine':
        var_list += [w_class_a]

    if is_training:
      grads_and_vars = opt.compute_gradients(total_cost, var_list)
      with tf.control_dependencies(bn_ops):
        [log.info('BN op {}'.format(op.name)) for op in bn_ops]
        train_op = opt.apply_gradients(grads_and_vars, global_step=global_step)

      grads_and_vars_b = opt.compute_gradients(cost_b, var_list)
      with tf.control_dependencies(bn_ops):
        train_op_b = opt.apply_gradients(
            grads_and_vars_b, global_step=global_step)

      with tf.control_dependencies(bn_ops):
        train_op_a = opt.minimize(cost_a, global_step=global_step)
      self._train_op = train_op
      self._train_op_a = train_op_a
      self._train_op_b = train_op_b
    self._initializer = tf.global_variables_initializer()
    self._w_class_a = w_class_a
                    reg_exon_end - reg_exon_start
            ) % 3 == 0:  # and then we check whether the regulated exon length is divisible by 3 (it's a good exon); if not, it's a bad one
                fout.write(reg_exon_id + '\t' + "GOOD" + '\t' + str(distance) +
                           '\t' + "NA" + '\t' + "NA" + '\t' + "NA" + '\t' +
                           exon_seq + '\t' + "NA" + '\t' +
                           str(total_exonic_length) + '\t' +
                           str(num_upstream_exons_from_regulated) + '\t' +
                           str(num_downstream_exons_from_regulated) + '\t' +
                           "NA" + '\n')
            else:
                fout.write(reg_exon_id + '\t' + "BAD" + '\t' + str(distance) +
                           '\t' + "NA" + '\t' + "NA" + '\t' + "NA" + '\t' +
                           exon_seq + '\t' + "NA" + '\t' +
                           str(total_exonic_length) + '\t' +
                           str(num_upstream_exons_from_regulated) + '\t' +
                           str(num_downstream_exons_from_regulated) + '\t' +
                           "NA" + '\n')

        line = fin.readline()
    fout.close()
    fin.close()


# main
if sys.argv.__len__() == 3:
    fname_in = sys.argv[1]
    fname_out = sys.argv[2]
    filter(fname_in, fname_out)
else:
    print("You need two arguments to run the script")
Exemple #39
0
def process_junctions(annot, junctions_dict, dict_ref_junctions, direction,
                      rundir, outPrefix, list_of_files, samples,
                      affected_status_dict):
    outFile = "%s/%s_junc_outliers.txt" % (rundir, outPrefix)
    outratio = "%s/%s_junc_ratios.txt" % (rundir, outPrefix)
    with open(annot, 'r') as ref_junc, open(outFile,
                                            'w') as OUT, open(outratio,
                                                              'w') as OUTratio:
        #header= ['gene', 'chr', 'junction', 'annotation_status','Cluster', 'OutlierCase', 'OutlierControl']+samples#+[(str(e)) for e in range(len(list_of_files))]
        header = [
            'chr', 'junction_start', 'junction_end', 'gene',
            'annotation_status', 'Cluster', 'OutlierCase', 'OutlierControl'
        ] + samples
        OUT.write('\t'.join(header) + '\n')
        header_ratio = [
            'chr', 'junction_start', 'junction_end', 'gene',
            'annotation_status', 'Cluster'
        ] + samples
        OUTratio.write('\t'.join(header_ratio) + '\n')
        for ref_junc_num, line in enumerate(ref_junc):
            chrom, start, end, gene = line.rstrip().split('\t')
            if direction == 'donor':
                # list() so that len() and the repeated iteration below also work on Python 3
                valid_jxns = list(filter(lambda jxn: jxn[0] == start,
                                         junctions_dict[chrom].keys()))
            if direction == 'acceptor':
                valid_jxns = list(filter(lambda jxn: jxn[1] == end,
                                         junctions_dict[chrom].keys()))
            if len(valid_jxns) == 1:
                continue
            statuses = is_junc_annot(valid_jxns, dict_ref_junctions, chrom)
            test_dic = {}
            for jxn in valid_jxns:
                test_dic[jxn] = junctions_dict[chrom].get(
                    jxn,
                    len(list_of_files) * [0])
                #test_dic_ctrl[jxn] = junc_control[chrom].get(jxn, len(list_of_controls_files)*[0])
            # add one to every count before computing ratio
            #test_dic={k: np.array(v)+1 for  k, v in test_dic.items()}
            # get sum of counts over junctions
            junc_sum = [sum(x) for x in zip(*test_dic.values())]
            # filter samples always at 0
            junc_sum_filt = [x if x != 0 else np.nan for x in junc_sum]
            # calculate the ratio of counts for each junction and each individual
            junc_ratio_dic = {
                k: get_junc_ratio(v, junc_sum_filt)
                for k, v in test_dic.items()
            }
            # get percentiles for each junction of the dictionary
            #junc_percentiles_dic={k: get_percentiles(v) for k, v in junc_ratio_dic.items()}
            # get outlier indexes
            #junc_outlier_dic={k: get_outlier_indexes(v, junc_percentiles_dic[k]) for k, v in junc_ratio_dic.items()}
            # get number of case and controls for outliers
            #junc_affected_status_dic={k: get_outlier_affected_status(v, samples,affected_status_dict) for k, v in junc_outlier_dic.items()}
            # get zcores for ratios
            junc_zscore_dic = {
                k: get_zscores(v)
                for k, v in junc_ratio_dic.items()
            }
            # get zscore outlier
            junc_outlier_zscore_dic = {
                k: get_outlier_indexes_zscore(v)
                for k, v in junc_zscore_dic.items()
            }
            # count affected status for zscore outliers
            junc_affected_status_zscore = {
                k: get_outlier_zscore_affected_status(v, samples,
                                                      affected_status_dict)
                for k, v in junc_outlier_zscore_dic.items()
            }
            # report results for junction set
            for key_number, key in enumerate(junc_ratio_dic):
                values = [chrom] + [i for i in key] + [
                    gene, statuses[key_number], ref_junc_num
                ] + [
                    outlier_number
                    for outlier_number in junc_affected_status_zscore[key]
                ] + junc_zscore_dic[key].tolist()
                values_ratio = [chrom] + [i for i in key] + [
                    gene, statuses[key_number], ref_junc_num
                ] + junc_ratio_dic[key]
                #values=[gene, chrom,key, statuses[key_number], ref_junc_num]+[outlier_number for outlier_number in junc_affected_status_zscore[key]] +junc_zscore_dic[key].tolist()# [value for value in junc_ratio_dic[key]]
                OUT.write('\t'.join([str(s) for s in values]) + '\n')
                OUTratio.write('\t'.join([str(s)
                                          for s in values_ratio]) + '\n')
Exemple #40
0
def remove_static(id, field):
    service = helpers.get_entity(Service, id)
    # list() keeps the stored value concrete rather than a lazy filter object (Python 3)
    service.statics = list(filter(lambda x: x.get("field") != field,
                                  service.statics))
    service.put()
    return service
def main(args):
    # initialization
    print("Input arguments:")
    for key, val in vars(args).items():
        print("{:16} {}".format(key, val))

    if not os.path.exists(args.snapshot_dir):
        os.makedirs(args.snapshot_dir)
    writer = SummaryWriter(log_dir=os.path.join(args.log_dir, args.method))

    random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    cudnn.benchmark = True

    # conduct seg network
    seg_model = get_model(num_classes=args.num_classes)

    saved_state_dict = torch.load(args.restore_from)
    new_params = seg_model.state_dict().copy()

    if args.init:
        for i in saved_state_dict:
            i_parts = i.split('.')
            if not i_parts[0] == 'fc' and not i_parts[0] == 'last_linear':
                new_params['encoder.' + '.'.join(i_parts[:])] = saved_state_dict[i]
        seg_model.load_state_dict(new_params)
        print('loading params w/o fc')
    else:
        seg_model.load_state_dict(saved_state_dict)
        print('loading params all')

    model = DataParallelModel(seg_model)
    model.float()
    model.cuda()

    # define dataloader
    train_loader = data.DataLoader(DataGenerator(root=args.root, list_path=args.lst,
                                                    crop_size=args.crop_size, training=True),
                                   batch_size=args.batch_size, shuffle=True, num_workers=4, pin_memory=True)
    val_loader = data.DataLoader(DataGenerator(root=args.val_root, list_path=args.val_lst,
                                                  crop_size=args.crop_size, training=False),
                                 batch_size=args.batch_size, shuffle=False, num_workers=4, pin_memory=True)

    # define criterion & optimizer
    criterion = ABRLovaszLoss(ignore_index=args.ignore_label, only_present=True, num_classes=args.num_classes)
    criterion = DataParallelCriterion(criterion).cuda()

    optimizer = optim.SGD(
        [{'params': filter(lambda p: p.requires_grad, seg_model.parameters()), 'lr': args.learning_rate}],
        lr=args.learning_rate, momentum=0.9, weight_decay=5e-4)

    # key points
    best_val_mIoU = 0
    best_val_pixAcc = 0
    start = time.time()

    for epoch in range(0, args.epochs):
        print('\n{} | {}'.format(epoch, args.epochs - 1))
        # training
        _ = train(model, train_loader, epoch, criterion, optimizer, writer)

        # validation
        if epoch % 10 == 0 or epoch > args.epochs-10:
            val_pixacc, val_miou = validation(model, val_loader, epoch, writer)
            # save model
            if val_pixacc > best_val_pixAcc:
                best_val_pixAcc = val_pixacc
            if val_miou > best_val_mIoU:
                best_val_mIoU = val_miou
                model_dir = os.path.join(args.snapshot_dir, args.method + '_miou.pth')
                torch.save(seg_model.state_dict(), model_dir)
                print('Model saved to %s' % model_dir)

    os.rename(model_dir, os.path.join(args.snapshot_dir, args.method + '_miou'+str(best_val_mIoU)+'.pth'))
    print('Complete using', time.time() - start, 'seconds')
    print('Best pixAcc: {} | Best mIoU: {}'.format(best_val_pixAcc, best_val_mIoU))
Exemple #42
0
from functools import reduce

l = [2, 4, 7, 5, 10, 3]
m = [1, 2, 3, 4, 5, 6]
f = list(filter(lambda x: x % 2 == 0, l))
print("filter result: ", f)

m = list(map(lambda x: x * x, l))
print("map result: ", m)

r = reduce(lambda x, y: x + y, l)
print("reduce result: ", r)

# map function to sum two lists element-wise
n = [1, 2, 3]
o = [4, 5, 6]
sums = list(map(lambda x, y: x + y, n, o))  # avoid shadowing the built-in sum()
print(sums)
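
A quick aside on a pattern that recurs throughout these examples: in Python 3, filter() returns a lazy, single-use iterator rather than a list, which is why many of the snippets here wrap it in list(). A minimal standalone sketch of that behaviour:

evens = filter(lambda x: x % 2 == 0, [2, 4, 7, 5, 10, 3])
print(list(evens))  # [2, 4, 10] -- the iterator is consumed here
print(list(evens))  # []         -- already exhausted
# wrap in list() up front if the result is reused or measured with len()
evens = list(filter(lambda x: x % 2 == 0, [2, 4, 7, 5, 10, 3]))
print(len(evens), evens)  # 3 [2, 4, 10]
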
def train_y(device, transforms):
    y_dataset = YDS(args.y_root_dir, transform=transforms)
    y_dataloader = DataLoader(y_dataset, batch_size=args.batch_size, shuffle=True)

    dict_path = os.path.join(args.save_dir, "y_training.pth")
    load_dict = torch.load(dict_path) if os.path.exists(dict_path) else None

    # Ey = model.Encoder(args.z_dim)
    # Gy = model.Generator(args.z_dim)
    Ey = model.Encoder()
    Gy = model.Generator()
    Dy = model.DiscriminatorImage()

    if load_dict:
        print("load the trained model")
        Ey.load_state_dict(load_dict['ey'])
        Gy.load_state_dict(load_dict['gy'])
        Dy.load_state_dict(load_dict['dy'])

    Ey.to(device)
    Gy.to(device)
    Dy.to(device)

    optimizer_vae = torch.optim.Adam(
        itertools.chain(
            filter(lambda p: p.requires_grad, Ey.parameters()),
            filter(lambda p: p.requires_grad, Gy.parameters())
        ),
        lr=args.lr,
        betas=(0.0, 0.999)
    )
    optimizer_d = torch.optim.Adam(
        filter(lambda p: p.requires_grad, Dy.parameters()),
        lr=args.lr,
        betas=(0.0, 0.999)
    )
    # optimizer_ey = torch.optim.Adam(Ey.parameters(), lr=args.lr_encoder)
    # optimizer_gy = torch.optim.Adam(Gy.parameters(), lr=args.lr_generator)
    # optimizer_dy = torch.optim.Adam(Dy.parameters(), lr=args.lr_discimg)

    for epoch in range(1, args.epoch_y+1):
        vae_loss_sum = 0.
        vae_kl_loss = 0.
        vae_rec_loss = 0.
        vae_gan_lsloss = 0.

        disc_loss_sum = 0.
        for idx, y in enumerate(y_dataloader):
            y = y.to(device)
            # train VAE
            optimizer_vae.zero_grad()

            # zy = Ey(y)
            # y_ = Gy(zy)
            # sy_, _ = Dy(y_)
            zy, _ = Ey(y)
            y_ = Gy(zy)
            sy_, _ = Dy(y_)

            # KL_loss = loss.latent_loss(Ey.mu, Ey.logvar)
            KL_loss = Ey.kl_divergence.mean()
            reconstruct_loss = loss.VAE_reconstruct_loss(y_, y)
            lsloss_gen = loss.LSLoss_gen(sy_)

            # vae_loss = args.alpha1 * KL_loss + args.alpha2 * reconstruct_loss
            vae_loss = args.kl_weight * KL_loss + args.rec_weight * reconstruct_loss + args.gen_weight * lsloss_gen
            # vae_loss = KL_loss + reconstruct_loss + lsloss_gen
            vae_loss.backward()

            optimizer_vae.step()
            vae_loss_sum += vae_loss.item()
            vae_kl_loss += KL_loss.item()
            vae_rec_loss += reconstruct_loss.item()
            vae_gan_lsloss += lsloss_gen.item()

            # train discriminator
            optimizer_d.zero_grad()
            zy, _ = Ey(y)
            y_ = Gy(zy)
            sy_, _ = Dy(y_)

            lsloss_disc_fake = loss.LSLoss_disc_fake(sy_)
            disc_loss_sum += lsloss_disc_fake.item()

            sy, _ = Dy(y)
            lsloss_disc_real = loss.LSLoss_disc_real(sy)
            disc_loss_sum += lsloss_disc_real.item()

            lsloss_disc = lsloss_disc_fake + lsloss_disc_real
            lsloss_disc = lsloss_disc * args.disc_weight
            lsloss_disc.backward()
            optimizer_d.step()

            if (idx+1) % 50 == 0:
                # print("--idx %d: VAE LOSS: %f" % (idx+1, vae_loss.item()))
                print(
                    "--idx %d: VAE LOSS: %f, KL LOSS: %f, REC LOSS: %f, GEN LOSS: %f, DISC LOSS: %f " %
                    (idx+1, vae_loss.item(), KL_loss.item(), reconstruct_loss.item(), lsloss_gen.item(), lsloss_disc.item())
                    )
                # print("--idx %d: VAE LOSS: %f, DISC LOSS: %f " % (idx+1, vae_loss.item(), lsloss_disc.item()))
        print(
            "epoch %d: VAE LOSS: %f, KL LOSS: %f, REC LOSS: %f, GEN LOSS: %f, DISC LOSS: %f " %
            (epoch, vae_loss_sum, vae_kl_loss, vae_rec_loss, vae_gan_lsloss, disc_loss_sum)
            )
        if epoch % args.save_epoch == 0:
            save_dict = {
                'ey': Ey.state_dict(),
                'gy': Gy.state_dict(),
                'dy': Dy.state_dict(),
                }
            torch.save(save_dict, os.path.join(args.save_dir, "y_training.pth"))
            print("This checkpoint for y training has been saved.")

    print("Training process for y has finished.")
    save_dict = {
        'ey': Ey.state_dict(),
        'gy': Gy.state_dict(),
        'dy': Dy.state_dict(),
        }
    torch.save(save_dict, os.path.join(args.save_dir, "y_training.pth"))
    print("This checkpoint for y training has been saved.")
Exemple #44
0
def controlHarvestToMicelioSession(document, sheet, file, show_a_e=False):

    ID_EVENT = 0
    ID_SESSION = 1
    ACTIVITY_NAME = 2
    AGENT_NAME = 3
    TIME = 4
    ATT1 = 5
    ATT2 = 6
    ATT3 = 7
    ID_FUNCTION = 9
    ATT1_NAME = 10
    ATT2_NAME = 11
    ATT3_NAME = 12
    ROLE = 13

    data = pods.get_data(document)

    not_empty = lambda x: len(x) > 0

    filtered_data = list(filter(not_empty, data[sheet][1::]))

    day = datetime.now()
    session = {
        'session_id': filtered_data[0][ID_SESSION],
        'name': 'exported by script in ' + day.strftime('%d/%m/%Y'),
        'language': 'N/A',
        'game_stage': '1',
        'date': day.strftime('%Y-%m-%d'),
        'session_group_id': None,
        'start_time': day.strftime('%H:%M:%S'),
        'end-time': None
    }

    # Stores the activities
    activities = []
    # Stores the agents so their names can be looked up when not provided
    agents = {}
    # Stores the entities so their names can be looked up when not provided
    entities = {}
    # Stores the ids of all planted plants, since the ID changes after development
    planted_plants = {}
    # Stores the positions of developed plants, since the position is not known when they are harvested
    evolved_plants = {}
    # Stores the indices of plants whose ids were not found, for another check at the end
    plants_without_id = {}

    for evento in filtered_data:
        activity = {}

        activity['activity_id'] = evento[ID_EVENT]
        activity['name'] = evento[ACTIVITY_NAME]
        activity['time'] = str(evento[TIME])
        activity['influenced_by'] = None

        if str(evento[ID_FUNCTION]) == '2':
            activity['position_x'] = None
            activity['position_y'] = None
            activity['entities'] = []
            activity['agents'] = []

            activity['properties'] = {}
            activity['properties'][ATT1_NAME] = evento[ATT1]
            activity['properties'][ATT2_NAME] = evento[ATT2]
            activity['properties']['planta_sorteada'] = evento[AGENT_NAME]

        elif str(evento[ID_FUNCTION]) == '3':
            activity['position_x'] = None
            activity['position_y'] = None
            activity['entities'] = []
            activity['agents'] = []

            activity['properties'] = {}
            activity['properties'][ATT1_NAME] = evento[ATT1]
            activity['properties']['tipo_de_praga'] = evento[AGENT_NAME]

        elif str(evento[ID_FUNCTION]) == '7':

            if evento[ATT1] in evolved_plants:
                real_plant = evolved_plants[evento[ATT1]]
            else:
                real_plant = {'id': -1, 'x': -1, 'y': -1}
                plants_without_id[evento[ATT1]] = len(activities)

            activity['position_x'] = real_plant['x']
            activity['position_y'] = real_plant['y']
            activity['entities'] = [{
                'entity_id': real_plant['id'],
                'name': evento[AGENT_NAME],
                'position_x': real_plant['x'],
                'position_y': real_plant['y'],
                'properties': {},
                'role': 'planta'
            }]
            activity['agents'] = []

            entities[evento[ATT1]] = evento[AGENT_NAME]

            activity['properties'] = {}

        elif str(evento[ID_FUNCTION]) == '8':

            activity['position_x'] = None
            activity['position_y'] = None
            activity['entities'] = []
            activity['agents'] = [{
                'agent_id': evento[ATT1],
                'name': evento[AGENT_NAME],
                'type': 'CPU',
                'position_x': None,
                'position_y': None,
                'properties': {},
                'role': 'predador'
            }]

            agents[evento[ATT1]] = evento[AGENT_NAME]

            activity['properties'] = {}

        elif str(evento[ID_FUNCTION]) == '9':

            if str(evento[ATT2]) + str(evento[ATT3]) in plants_without_id:
                index = plants_without_id[str(evento[ATT2]) +
                                          str(evento[ATT3])]
                activities[index]['entities'][0]['entity_id'] = evento[ATT1]
                del (plants_without_id[str(evento[ATT2]) + str(evento[ATT3])])

            activity['position_x'] = evento[ATT2]
            activity['position_y'] = evento[ATT3]
            activity['entities'] = [{
                'entity_id': evento[ATT1],
                'name': evento[AGENT_NAME],
                'position_x': evento[ATT2],
                'position_y': evento[ATT3],
                'properties': {},
                'role': 'planta'
            }]
            activity['agents'] = []

            entities[evento[ATT1]] = evento[AGENT_NAME]

            activity['properties'] = {}

            plant_id = str(evento[ATT2]) + str(evento[ATT3])
            planted_plants[plant_id] = evento[ATT1]

        elif str(evento[ID_FUNCTION]) == '9.1':

            if str(evento[ATT2]) + str(evento[ATT3]) in evolved_plants:
                real_id = planted_plants[str(evento[ATT2]) + str(evento[ATT3])]
                if evento[ATT1] in plants_without_id:
                    index = plants_without_id[evento[ATT1]]
                    activities[index]['entities'][0]['entity_id'] = real_id
                    activities[index]['entities'][0]['position_x'] = evento[
                        ATT2]
                    activities[index]['entities'][0]['position_y'] = evento[
                        ATT3]
                    del (plants_without_id[evento[ATT1]])
            else:
                real_id = -1
                plants_without_id[str(evento[ATT2]) +
                                  str(evento[ATT3])] = len(activities)

            activity['position_x'] = evento[ATT2]
            activity['position_y'] = evento[ATT3]
            activity['entities'] = [{
                'entity_id': real_id,
                'name': evento[AGENT_NAME],
                'position_x': evento[ATT2],
                'position_y': evento[ATT3],
                'properties': {},
                'role': 'planta'
            }]
            activity['agents'] = []

            entities[evento[ATT1]] = evento[AGENT_NAME]

            activity['properties'] = {}

            evolved_plants[evento[ATT1]] = {
                'id': real_id,
                'x': evento[ATT2],
                'y': evento[ATT3]
            }

        elif str(evento[ID_FUNCTION]) == '10':
            activity['position_x'] = None
            activity['position_y'] = None
            activity['entities'] = []
            activity['agents'] = []

            activity['properties'] = {}
            activity['properties']['tipo_predador'] = evento[AGENT_NAME]

        elif str(evento[ID_FUNCTION]) == '11' or str(
                evento[ID_FUNCTION]) == '12':
            activity['position_x'] = None
            activity['position_y'] = None
            activity['entities'] = []
            activity['agents'] = []

            activity['properties'] = {}
            activity['properties'][ATT1] = evento[ATT1_NAME]
            activity['properties'][ATT2] = evento[ATT2_NAME]
            activity['properties']['tipo_planta'] = evento[AGENT_NAME]

        elif str(evento[ID_FUNCTION]) == '13':
            activity['position_x'] = None
            activity['position_y'] = None
            activity['entities'] = []
            activity['agents'] = []

            activity['properties'] = {}
            activity['properties'][ATT3] = evento[ATT3_NAME]

        elif str(evento[ID_FUNCTION]) == '14':
            activity['position_x'] = None
            activity['position_y'] = None
            activity['entities'] = []
            activity['agents'] = [{
                'agent_id': evento[ATT1],
                'name': evento[AGENT_NAME],
                'type': 'CPU',
                'position_x': None,
                'position_y': None,
                'properties': {},
                'role': 'inseto'
            }]

            agents[evento[ATT1]] = evento[AGENT_NAME]

            activity['properties'] = {}

        elif str(evento[ID_FUNCTION]) == '15':

            x, y = splitCoordinates(evento[ATT3])
            activity['position_x'] = x
            activity['position_y'] = y

            if evento[ATT2] in entities.values():
                activity['entities'] = [{
                    'agent_id': evento[ATT2],
                    'name': evento[AGENT_NAME],
                    'position_x': x,
                    'position_y': y,
                    'properties': {},
                    'role': 'presa'
                }]
                activity['agents'] = []
                entities[evento[ATT2]] = evento[AGENT_NAME]
            else:
                activity['entities'] = []
                activity['agents'] = [{
                    'agent_id': evento[ATT2],
                    'name': evento[AGENT_NAME],
                    'type': 'CPU',
                    'position_x': x,
                    'position_y': y,
                    'properties': {},
                    'role': 'presa'
                }]

                agents[evento[ATT2]] = evento[AGENT_NAME]

            nome_agente = agents[
                evento[ATT1]] if evento[ATT1] in agents else ''

            activity['agents'].append({
                'agent_id': evento[ATT1],
                'name': nome_agente,
                'type': 'CPU',
                'position_x': x,
                'position_y': y,
                'properties': {},
                'role': 'predador'
            })

            agents[evento[ATT1]] = nome_agente

            activity['properties'] = {}

        elif str(evento[ID_FUNCTION]) == '16':

            x, y = splitCoordinates(evento[ATT3])

            activity['position_x'] = x
            activity['position_y'] = y
            activity['entities'] = []
            activity['agents'] = [{
                'agent_id': evento[ATT1],
                'name': evento[AGENT_NAME],
                'type': 'CPU',
                'position_x': x,
                'position_y': y,
                'properties': {},
                'role': 'inseto_1'
            }, {
                'agent_id': evento[ATT2],
                'name': evento[AGENT_NAME],
                'type': 'CPU',
                'position_x': x,
                'position_y': y,
                'properties': {},
                'role': 'inseto_2'
            }]

            agents[evento[ATT1]] = evento[AGENT_NAME]

            activity['properties'] = {}

        elif str(evento[ID_FUNCTION]) == '17':

            if str(evento[AGENT_NAME]).isnumeric():
                x = evento[ATT2]
                y = evento[ATT3]
                nome_agente_1 = agents[
                    evento[AGENT_NAME]] if evento[AGENT_NAME] in agents else ''
                id_obj_1 = evento[AGENT_NAME]
                id_obj_2 = evento[ATT1]

                if id_obj_2 in entities:
                    activity['name'] += ' (Predação Inseto/Planta)'
                    nome_agente_2 = entities[
                        id_obj_2] if id_obj_2 in entities else ''
                    activity['entities'] = [{
                        'entity_id': id_obj_2,
                        'name': nome_agente_2,
                        'position_x': x,
                        'position_y': y,
                        'properties': {},
                        'role': 'objeto_2'
                    }]
                    activity['agents'] = []
                    if nome_agente_2 != '':
                        entities[id_obj_2] = nome_agente_2
                else:
                    activity['name'] += ' (Predação Inseto/Inseto)'
                    nome_agente_2 = agents[
                        id_obj_2] if id_obj_2 in agents else ''
                    activity['entities'] = []
                    activity['agents'] = [{
                        'agent_id': id_obj_2,
                        'name': nome_agente_2,
                        'type': 'CPU',
                        'position_x': x,
                        'position_y': y,
                        'properties': {},
                        'role': 'objeto_2'
                    }]
                    if nome_agente_2 != '':
                        agents[id_obj_2] = nome_agente_2
            else:
                x, y = splitCoordinates(evento[ATT3])
                nome_agente_1 = evento[AGENT_NAME]
                nome_agente_2 = evento[AGENT_NAME]
                activity['name'] += ' (Reprodução)'
                id_obj_1 = evento[ATT1]
                id_obj_2 = evento[ATT2]
                activity['entities'] = []
                activity['agents'] = [{
                    'agent_id': id_obj_2,
                    'name': nome_agente_2,
                    'type': 'CPU',
                    'position_x': x,
                    'position_y': y,
                    'properties': {},
                    'role': 'objeto_2'
                }]

                agents[evento[ATT2]] = nome_agente_1

            activity['position_x'] = x
            activity['position_y'] = y

            activity['agents'].append({
                'agent_id': id_obj_1,
                'name': nome_agente_1,
                'type': 'CPU',
                'position_x': x,
                'position_y': y,
                'properties': {},
                'role': 'objeto_1'
            })

            activity['properties'] = {}

            if nome_agente_1 != '':
                agents[id_obj_1] = nome_agente_1

        elif str(evento[ID_FUNCTION]) == '18':
            activity['position_x'] = evento[ATT2]
            activity['position_y'] = evento[ATT3]
            activity['entities'] = []
            activity['agents'] = [{
                'agent_id': evento[ATT1],
                'name': evento[AGENT_NAME],
                'type': 'CPU',
                'position_x': evento[ATT2],
                'position_y': evento[ATT3],
                'properties': {},
                'role': 'inseto'
            }]

            agents[evento[ATT1]] = evento[AGENT_NAME]

            activity['properties'] = {}

        else:
            activity['position_x'] = None
            activity['position_y'] = None
            activity['entities'] = []
            activity['agents'] = []

            activity['properties'] = {}

        activities.append(activity)

    session['activities'] = activities

    description(document, file, filtered_data, agents, entities, show_a_e)
    export_json(session, file)
Exemple #45
0
import wepy.configuration as cf
import json
import os
import re
from pprint import pprint
import dotmap

# The file location that is in our config file
path_trips = cf.PATHS.trips

# Now find the files with answers
list_files = os.listdir(path_trips)

r = re.compile('response.txt')
list_files = list(filter(r.search, list_files))
len(list_files)

# loads a particular file
data_file = open(path_trips + list_files[0])

# this builds a dictionnary from the json file
data_answer = json.load(data_file)
data_sent = json.load(
    open(path_trips + str.replace(list_files[0], 'response', 'sent')))

# a first way to print the content
pprint(data_answer)
# now data is a regular dictionary accessible like this
data_answer['fuelEstimation']['co2Emission']
Exemple #46
0
    def dump_geoserver_externals(self, config, settings, target_folder):
        """Scan layers xml and see if there are external references.

        Find references to data outside data dir and include them in
        backup. Also, some references may point to specific url, which
        may not be available later.
        """
        external_folder = os.path.join(target_folder, utils.EXTERNAL_ROOT)

        def copy_external_resource(abspath):
            external_path = os.path.join(external_folder, abspath[1:])
            external_dir = os.path.dirname(external_path)

            if not os.path.isdir(external_dir):
                os.makedirs(external_dir)

            shutil.copy(abspath, external_path)

        def match_filename(key, text, regexp=re.compile("^(.+)$")):
            if key in ('filename', ):
                match = regexp.match(text.decode("utf-8"))
                if match:
                    relpath = match.group(1)
                    abspath = relpath if os.path.isabs(relpath) else \
                        os.path.abspath(
                            os.path.join(os.path.dirname(path), relpath))
                    if os.path.exists(abspath):
                        return abspath

        def match_fileurl(key, text, regexp=re.compile("^file:(.+)$")):
            if key in ('url', ):
                match = regexp.match(text.decode("utf-8"))
                if match:
                    relpath = match.group(1)
                    abspath = relpath if os.path.isabs(relpath) else \
                        os.path.abspath(
                            os.path.join(config.gs_data_dir, relpath))
                    if os.path.exists(abspath):
                        return abspath

        def dump_external_resources_from_xml(path):
            def find_external(tree, key=None):
                if isinstance(tree, dict):
                    for key, value in tree.items():
                        for found in find_external(value, key=key):
                            yield found
                elif isinstance(tree, list):
                    for item in tree:
                        for found in find_external(item, key=key):
                            yield found
                elif isinstance(tree, six.string_types):
                    text = tree.encode('utf-8')
                    for find in (match_fileurl, match_filename):
                        found = find(key, text)
                        if found:
                            yield found

            with open(path) as fd:
                content = fd.read()
                tree = parse_xml(content)
                for found in find_external(tree):
                    if found.find(config.gs_data_dir) != 0:
                        copy_external_resource(found)

        def is_xml_file(filename, regexp=re.compile(".*.xml$")):
            return regexp.match(filename) is not None

        for directory in ('workspaces', 'styles'):
            source = os.path.join(config.gs_data_dir, directory)
            for root, dirs, files in os.walk(source):
                for filename in filter(is_xml_file, files):
                    path = os.path.join(root, filename)
                    dump_external_resources_from_xml(path)
 def unregisterRefObj(self, ob, REQUEST):
   _globals.writeLog( self, "[unregisterRefObj]: %s(%s)"%(ob.id,ob.meta_type))
   ref_by = self.getRefByObjs(REQUEST)
   ref_by = filter( lambda x: x[2:-1].split('/')[-1]!=ob.id,ref_by)
   ##### Set Attribute ####
   setattr(self,'ref_by',ref_by)
args = parser.parse_args()

torch.manual_seed(args.seed)
if torch.cuda.is_available():
    if not args.cuda:
        print("WARNING: You have a CUDA device, so you should probably run with --cuda")
    else:
        torch.cuda.manual_seed(args.seed)


corpus = Corpus(args.task)
model = eval(args.model)(corpus, args)
model.train()
criterion = nn.NLLLoss()

parameters = filter(lambda p: p.requires_grad, model.parameters())
optimizer = optim.Adamax(parameters, lr=args.lr)


if args.cuda:
    model.cuda()
    criterion.cuda()

start_time = time.time()
total_loss = 0
interval = args.interval
save_interval = len(corpus.data_all['train']) // args.batch_size

best_dev_score = -99999
iterations = args.epochs*len(corpus.data_all['train']) // args.batch_size
print('max iterations: '+str(iterations))
'''
Open a text file, then compute and print the number of characters, the number of lines
and the number of words in that file. Also print how many times each letter occurs
in the file (ignoring accented letters).
'''

from collections import Counter

raiz = 'c:/Users/Higor H/Documents/Estudos/Python'
pasta = raiz + '/Curso de Programação em Python (Udemy)/Sessão 13 - Exercícios/arquivo.txt'
alfabeto = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']

with open(pasta, 'r', encoding='UTF-8') as arquivo:
    caracteres = arquivo.read()

with open(pasta, 'r', encoding='UTF-8') as arquivo:
    linhas = arquivo.readlines()

with open(pasta, 'r', encoding='UTF-8') as arquivo:
    palavras = arquivo.read().split()

contador = Counter(filter(lambda x: x in alfabeto, caracteres.lower()))

print(f'Total de caracteres: {len(caracteres)}.')
print(f'Total de linhas: {len(linhas)}.')
print(f'Total de palavras: {len(palavras)}.')
print(f'Ocorrência de cada letra: {contador}.')
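
The version above reads the file three separate times, once per statistic; a minimal single-pass sketch of the same counts (reusing the `pasta` path and `alfabeto` list defined above) could be:

with open(pasta, 'r', encoding='UTF-8') as arquivo:
    caracteres = arquivo.read()

linhas = caracteres.splitlines()
palavras = caracteres.split()
contador = Counter(filter(lambda x: x in alfabeto, caracteres.lower()))
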
 def synchronizeRefs( self, ob_id=None, clients=False, unify_ids=False):
   _globals.writeBlock(self,'[synchronizeRefs]')
   
   # Extend object-tree.
   def extendObjectTree(home, home_path):
     message = ''
     if home not in homes:
       homes.append( home)
       home_ob = self
       for home_id in home_path:
         if home_ob is not None:
           home_ob = getattr( home_ob, home_id, None)
       if home_ob is not None:
         t1 = time.time()
         map( lambda x: operator.setitem(obs, x.base_url(), x), _globals.objectTree( home_ob))
         message += '[INFO] Load object-tree for '+home+' (in '+str(int((time.time()-t1)*100.0)/100.0)+' secs.)<br/>'
       else:
         message += '[ERROR] Can\'t load object-tree for '+home+': not found!<br/>'
       _globals.writeBlock(self,'[synchronizeRefs]: '+message)
     return message
   
   # Handle internal references.
   def handleInternalRefs(k,v):
     message = ''
     sp = '{$'
     l = v.split(sp)
     if len(l) > 1:
       m = [l[0]]
       for i in l[1:]:
         ref = i[:i.find('}')]
         if ref.startswith('__') and ref.endswith('__'):
           ref = ref[2:-2]
         if len( ref.split('@')) == 1:
           home_path = [ob.getHome().id]
           home = home_path[-1]
         else:
           home_path = ref.split('@')[0].split('/')
           home = home_path[-1]
         id = ref.split('@')[-1].split('/')[-1]
         if len( id) == 0:
           id = 'content'
         
         # Extend object-tree.
         message += extendObjectTree(home, home_path)
         
         f = filter( lambda x: x.find('/%s/content'%home) >= 0 and x.endswith('/%s'%id), obs.keys())
         if len( f) == 0:
           ref = '__%s__'%ref
         else:
           if len( f) > 1:
             if ref.find('@') > 0:
               ref = ref[ ref.find('@')+1:]
             g = filter( lambda x: x.find('/%s/content'%home) >= 0 and x.endswith('/%s'%ref), obs.keys())
             if len( g) == 1:
               f = g
             else:
                message += '[WARNING] %s: Ambiguous reference ref=%s in f=%s'%(ob.absolute_url(),ref,str(f))
           else:
             target = obs[f[0]]
             ref = ob.getRefObjPath( target)[2:-1]
             if ob.version_live_id == obj_vers.id:
               target_ref = target.getRefObjPath( ob)
               target_ref_by = getattr( target, 'ref_by', [])
               if target_ref not in target_ref_by:
                 setattr( target, 'ref_by', target_ref_by + [ target_ref])
         if ref.startswith('__') and ref.endswith('__'):
           message += '<a href="%s/manage_main" target="_blank">%s(%s)%s=%s</a><br/>'%(ob.absolute_url(),ob.absolute_url(),ob.meta_type,k,ref)
         m.append(ref+i[i.find('}'):])
       v = sp.join(m)
     return v, message
   
   # Handle relative references.
   def handleRelativeRefs(k,v):
     message = ''
     for sp in ['href="./','src="./']:
       l = v.split(sp)
       if len(l) > 1:
         m = [l[0]]
         for i in l[1:]:
           if i.find('"') > 0:
             ref = i[:i.find('"')]
             if ref.endswith('/'):
               ref = ref[:-1]
             decl_id = ref.split('/')[-1]
             if getattr(ob.getHome(),decl_id,None) is None: # must not exist as Zope resource
               filtered_did = filter(lambda x: x['decl_id']==decl_id,did)
               if len(filtered_did) == 1: # simplest case: decl_id is unique!
                 found = filtered_did[0]
                 req = REQUEST={'lang':found['lang']}
                 target_url = found['abs_url']
                 target_ref = obs[target_url].getDeclUrl(REQUEST=req)
                 ob_ref = ob.getSelf(ob.PAGES).getDeclUrl(REQUEST=req)
                 ref = self.getRelativeUrl(ob_ref,target_ref)
                 i = ref + i[i.find('"'):]
           m.append(i)
         v = sp.join(m)
     return v, message
   
   # Initialize.
   message = ''
   t0 = time.time()
   obs = {}
   clients = clients or (not self.getPortalMaster() and not self.getPortalClients())
   
   # Initialize object-tree.
   map( lambda x: operator.setitem(obs, x.base_url(), x), _globals.objectTree( self, clients))
   homes = obs.keys()
   homes = map( lambda x: x[:x.find('/content')], homes)
   homes = map( lambda x: x[x.rfind('/')+1:], homes)
   homes = dict.fromkeys(homes).keys()
   message += 'Load object-tree ['+str(len(obs.keys()))+ '] for '+str(homes)+' (in '+str(int((time.time()-t0)*100.0)/100.0)+' secs.)<br/>'
   _globals.writeBlock(self,'[synchronizeRefs]: '+message)
   
   abs_urls = obs.keys()
   abs_urls.sort()
   
   did = []
   if self.getConfProperty('ZMS.pathhandler',0) != 0:
     for x in obs.keys():
       ob = obs[x]
       for lang in self.getLangIds():
         did.append({'decl_id':ob.getDeclId(REQUEST={'lang':lang}),'lang':lang,'abs_url':x})
   
   # Unify object-ids.
   if unify_ids:
     did = {}
     map( lambda x: operator.setitem( did, x.id, did.get(x.id,0)+1), obs.values())
     for id in filter( lambda x: did.get(x) > 1 and x[-1] in ['0','1','2','3','4','5','6','7','8','9'], did.keys()):
       prefix = None
       keys = map( lambda x: (x.find('/content'),x), filter( lambda x: x.endswith('/'+id), obs.keys()))
       keys.sort()
       keys = map( lambda x: x[1], keys)
       for key in keys:
         ob = obs[key]
         if prefix is None:
           prefix = _globals.id_prefix( id)
           message += '[INFO] %s: Keep unique object-id \'%s\'<br/>'%(key,id)
         else:
           new_id = self.getNewId(prefix)
           try:
             ob.getParentNode().manage_renameObject( id=id, new_id=new_id)
             obs[ ob.base_url()] = ob
             message += '[INFO] %s: Rename to unique object-id \'%s\'<br/>'%(key,new_id)
           except:
             message += _globals.writeError( ob, '%s: Can\'t rename to unique object-id \'%s\'<br/>'%(key,new_id))
   
   # Clear 'ref_by' (reference-by) attributes.
   for x in filter( lambda x: hasattr( obs[x], 'ref_by'), abs_urls):
     if clients or True:
       try:
         setattr( obs[x], 'ref_by', [])
       except: pass
     else:
       try:
         ref_by = getattr( obs[x], 'ref_by')
         ref_by = filter( lambda x: x.find('@')<0, ref_by)
         setattr( obs[x], 'ref_by', ref_by)
       except: pass
   
   langs = self.getLangIds()
   for abs_url in abs_urls:
     ob = obs[ abs_url]
     
     # Process recordset.
     if ob.meta_id!='ZMSLinkElement' and ob.getType()=='ZMSRecordSet':
       key = ob.getMetaobjAttrIds(ob.meta_id)[0]
       obj_attr = ob.getObjAttr(key)
       for lang in langs:
         for obj_vers in ob.getObjVersions():
           v = _objattrs.getobjattr(ob,obj_vers,obj_attr,lang)
           c = 0
           for r in v:
             for k in r.keys():
               v = r[k]
               o = v
               if type(v) is str:
                 v, m = handleInternalRefs('%s.%s[%i]'%(key,k,c),v)
                 message += m
                 v, m = handleRelativeRefs('%s.%s[%i]'%(key,k,c),v)
                 message += m
                 if v != o:
                   r[k] = v
             c += 1
     
     # Process object.
     else:
       for key in ob.getObjAttrs().keys():
         obj_attr = ob.getObjAttr(key)
         datatype = obj_attr['datatype_key']
         if datatype in _globals.DT_STRINGS:
           for lang in langs:
             for obj_vers in ob.getObjVersions():
               v = _objattrs.getobjattr(ob,obj_vers,obj_attr,lang)
               o = v
               if type(v) is str:
                 v, m = handleInternalRefs(key,v)
                 message += m
                 v, m = handleRelativeRefs(key,v)
                 message += m
                 if v != o:
                   _objattrs.setobjattr(ob,obj_vers,obj_attr,v,lang)
   
   message += ' (in '+str(int((time.time()-t0)*100.0)/100.0)+' secs.)'
   _globals.writeBlock(self,'[synchronizeRefs]: '+message)
   
   # Return with desired object.
   if ob_id is not None:
     if type( ob_id) is str:
       home = ob_id.split('@')[0]
       id = ob_id.split('@')[1]
       f = filter( lambda x: x.find('/%s/content'%home) > 0 and x.endswith('/%s'%id), abs_urls)
       if len( f) > 0:
         return obs[f[0]]
     return None
   
   # Return with message.
   else:
     return message
Exemple #51
0
def get_sentences():
    with open('../../../data/nlp.txt','r') as f:
        data = list(filter(lambda x:len(x)>0, split_pattern.sub(r'\1\n\2',f.read()).split('\n')))
        return data
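
Note that split_pattern is defined elsewhere in the original file. As an illustration only, a pattern of roughly this shape (an assumption: splitting on sentence-ending punctuation followed by a capital letter) makes the snippet runnable:

import re

split_pattern = re.compile(r'([.;:?!])\s+([A-Z])')
text = "Natural language processing is a field of computer science. It is concerned with the interactions between computers and human language."
sentences = list(filter(lambda x: len(x) > 0,
                        split_pattern.sub(r'\1\n\2', text).split('\n')))
print(sentences)  # two sentences, split at '. It'
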
Exemple #52
0
    return fmt


# templates directory.
_dir     = 'res/tex'
_tpl_dir = os.path.join(os.path.dirname(__file__), _dir)

# shared templates need to be injected into the target template during __init__().

# shared templates directory.
_shd     = os.path.join(_tpl_dir, 'shared')
# list of files in the res/tex/shared folder
_shd_tpl = [os.path.join(_shd, f) for f in os.listdir(_shd)]
# list of .tex files in the res/tex/shared folder
_shd_tpl = filter(lambda f: f.endswith('.tex'), _shd_tpl)
# dictionary mapping each template include to its contents.
_shd_tpl = {r'\input{%s}' % f[f.find(_dir):]: open(f).read() for f in _shd_tpl}


class Template:
    def __init__(self, name):
        self.title   = name
        # load the target template.
        self.content = open(os.path.join(_tpl_dir, name + '.tex')).read()
        # inject the shared templates into the target template.
        self.content = reduce(lambda x, y : x.replace(y, _shd_tpl[y]),
                              _shd_tpl, self.content)

    def render(self, ctx):
        ctx['@HOJE'] = date_format('dd de MM de aaaa', date.today())
Exemple #53
0
def find_launch_configs(client, module):
    name_regex = module.params.get('name_regex')
    sort_order = module.params.get('sort_order')
    limit = module.params.get('limit')

    paginator = client.get_paginator('describe_launch_configurations')

    response_iterator = paginator.paginate(PaginationConfig={
        'MaxItems': 1000,
        'PageSize': 100
    })

    results = []

    for response in response_iterator:
        response['LaunchConfigurations'] = filter(
            lambda lc: re.compile(name_regex).match(lc[
                'LaunchConfigurationName']), response['LaunchConfigurations'])

        for lc in response['LaunchConfigurations']:
            data = {
                'name':
                lc['LaunchConfigurationName'],
                'arn':
                lc['LaunchConfigurationARN'],
                'created_time':
                lc['CreatedTime'],
                'user_data':
                lc['UserData'],
                'instance_type':
                lc['InstanceType'],
                'image_id':
                lc['ImageId'],
                'ebs_optimized':
                lc['EbsOptimized'],
                'instance_monitoring':
                lc['InstanceMonitoring'],
                'classic_link_vpc_security_groups':
                lc['ClassicLinkVPCSecurityGroups'],
                'block_device_mappings':
                lc['BlockDeviceMappings'],
                'keyname':
                lc['KeyName'],
                'security_groups':
                lc['SecurityGroups'],
                'kernel_id':
                lc['KernelId'],
                'ram_disk_id':
                lc['RamdiskId'],
                'associate_public_address':
                lc.get('AssociatePublicIpAddress', False),
            }

            results.append(data)

    results.sort(key=lambda e: e['name'], reverse=(sort_order == 'descending'))

    if limit:
        results = results[:int(limit)]

    module.exit_json(changed=False, results=results)
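
One small note on the snippet above: re.compile(name_regex) is evaluated inside the lambda for every launch configuration, so the pattern is recompiled per item. A hedged sketch of the same filtering with the pattern compiled once (same field names as above):

name_pattern = re.compile(name_regex)
response['LaunchConfigurations'] = list(filter(
    lambda lc: name_pattern.match(lc['LaunchConfigurationName']),
    response['LaunchConfigurations']))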
Exemple #54
0
        with args.input:
            for line in args.input:
                if line.find("Building... ") >= 0:
                    # Find example name (everything after last /)
                    example = line[line.find(" ") + 1:-1]
                elif line.startswith("Sketch uses"):
                    # Find number of bytes of flash
                    matcher = bytes_extractor.search(line)
                    program = matcher.group(1)
                elif line.startswith("Global variables use"):
                    # Find number of bytes of SRAM
                    matcher = bytes_extractor.search(line)
                    data = matcher.group(1)
                    # Write new line to output
                    args.output.write("%s\t%s\t%s\n" %
                                      (example, program, data))


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='XXXXXXXX')
    parser.add_argument('input',
                        nargs='?',
                        type=argparse.FileType('r'),
                        default=sys.stdin)
    parser.add_argument('output',
                        nargs='?',
                        type=argparse.FileType('w'),
                        default=sys.stdout)
    args = parser.parse_args()
    filter(args)
    if response.status_code != 200:
        print("Failed to get list from server")
        continue
    lines = []
    for line in response.iter_lines():
        line = line.strip()
        if line.startswith(b"#") == False:
            lines.append(str(line, 'utf-8'))

    #print("%d remaining after filtering out comments" %(len(lines)))
    for line in lines:
        words = line.split()
        if (len(words) >= 2):
            content.append(words[1])

content = list(filter(remove_invalid, content))
c1 = len(content)
print("Got %d hostnames" % (c1))
content.sort()
content = list(dict.fromkeys(content))
c2 = len(content)
print("Removed %d duplicates" % (c1 - c2))
with open("whitelist.txt") as f:
    whitelist = f.read().splitlines()
content = list(filter(apply_whitelist, content))
c3 = len(content)
print("Removed %d items whitelisted" % (c2 - c3))

with open(outfile, "w") as o:
    for hostname in content:
        try:
Exemple #56
0
    host = socket.gethostname()
    port1 = 8000
    port2 = 8888
    s.connect((host, port1))
    tm = s.recv(1024)
    print "Connection Time:  ", tm
    request = "null"

    break_flag = 0
    m_flag = 0
    content = ""
    u = udp()

    while request != "exit":
        request = raw_input('$> ')
        request_command = filter(None, request.split(' ', 10))
        s.sendall(request)
        content = ""
        list1 = []
        break_flag = 0
        m_flag = 0
        m = re.search("(udp.*)", request)
        if not m:
            while 1:
                data = s.recv(1024)

                if request_command[0] == "FileDownload":
                    m = re.search("(<NULL>.*)", data)
                    if m:
                        list1 = filter(None, data.split('<NULL>', 10))
                        initial_checksum = list1[0]
Exemple #57
0
 def test_long_names(self):
     self.assertEqual(type(filter(lambda x: x>2,[2,5])),type(long_names))
    def _ignore(self, event):
        """Returns True if this event should be ignored."""

        debug.println(debug.LEVEL_INFO, '')
        msg = 'EVENT MANAGER: %s for %s in %s (%s, %s, %s)' % \
              (event.type, event.source, event.host_application,
               event.detail1,event.detail2, event.any_data)
        debug.println(debug.LEVEL_INFO, msg, True)

        if not self._active:
            msg = 'EVENT MANAGER: Ignoring because event manager is not active'
            debug.println(debug.LEVEL_INFO, msg, True)
            return True

        if list(filter(event.type.startswith, self._ignoredEvents)):
            msg = 'EVENT MANAGER: Ignoring because event type is ignored'
            debug.println(debug.LEVEL_INFO, msg, True)
            return True

        if event.type.startswith('window'):
            msg = 'EVENT MANAGER: Not ignoring because event type is never ignored'
            debug.println(debug.LEVEL_INFO, msg, True)
            return False

        script = orca_state.activeScript
        if event.type.startswith('object:children-changed:add'):
            if not script:
                msg = 'EVENT MANAGER: Ignoring because there is no active script'
                debug.println(debug.LEVEL_INFO, msg, True)
                return True
            if script.app != event.host_application:
                msg = 'EVENT MANAGER: Ignoring because event is not from active app'
                debug.println(debug.LEVEL_INFO, msg, True)
                return True

        # This should ultimately be changed as there are valid reasons
        # to handle these events at the application level.
        if event.type.startswith('object:children-changed:remove') \
           and event.source != self._desktop:
            msg = 'EVENT MANAGER: Ignoring because event type is ignored'
            debug.println(debug.LEVEL_INFO, msg, True)
            return True

        if event.type.startswith('object:text-changed') and event.type.endswith('system'):
            # We should also get children-changed events telling us the same thing.
            # Getting a bunch of both can result in a flood that grinds us to a halt.
            if event.any_data == self.EMBEDDED_OBJECT_CHARACTER:
                msg = 'EVENT MANAGER: Ignoring because changed text is embedded object'
                debug.println(debug.LEVEL_INFO, msg, True)
                return True

        try:
            # TODO - JD: For now we won't ask for the name. Simply asking for the name should
            # not break anything, and should be a reliable way to quickly identify defunct
            # objects. But apparently the mere act of asking for the name causes Orca to stop
            # presenting Eclipse (and possibly other) applications. This might be an AT-SPI2
            # issue, but until we know for certain....
            #name = event.source.name
            state = event.source.getState()
            role = event.source.getRole()
        except:
            msg = 'ERROR: Event is from potentially-defunct source'
            debug.println(debug.LEVEL_INFO, msg, True)
            return True
        if state.contains(pyatspi.STATE_DEFUNCT):
            msg = 'ERROR: Event is from defunct source'
            debug.println(debug.LEVEL_INFO, msg, True)
            return True

        if event.type.startswith('object:property-change:accessible-name'):
            if role in [pyatspi.ROLE_CANVAS,
                        pyatspi.ROLE_ICON,
                        pyatspi.ROLE_TABLE_ROW,  # Thunderbird spam
                        pyatspi.ROLE_TABLE_CELL, # Thunderbird spam
                        pyatspi.ROLE_MENU_ITEM]:
                msg = 'EVENT MANAGER: Ignoring event type due to role'
                debug.println(debug.LEVEL_INFO, msg, True)
                return True
        elif event.type.startswith('object:property-change:accessible-value'):
            if role == pyatspi.ROLE_SPLIT_PANE and not state.contains(pyatspi.STATE_FOCUSED):
                msg = 'EVENT MANAGER: Ignoring event type due to role and state'
                debug.println(debug.LEVEL_INFO, msg, True)
                return True
        elif event.type.startswith('object:state-changed:sensitive'):
            if role in [pyatspi.ROLE_MENU_ITEM,
                        pyatspi.ROLE_FILLER,
                        pyatspi.ROLE_CHECK_MENU_ITEM,
                        pyatspi.ROLE_RADIO_MENU_ITEM]:
                msg = 'EVENT MANAGER: Ignoring event type due to role'
                debug.println(debug.LEVEL_INFO, msg, True)
                return True
        elif event.type.startswith('object:state-changed:showing'):
            if role not in [pyatspi.ROLE_ALERT,
                            pyatspi.ROLE_ANIMATION,
                            pyatspi.ROLE_INFO_BAR,
                            pyatspi.ROLE_MENU,
                            pyatspi.ROLE_NOTIFICATION,
                            pyatspi.ROLE_PANEL,
                            pyatspi.ROLE_STATUS_BAR,
                            pyatspi.ROLE_TOOL_TIP]:
                msg = 'EVENT MANAGER: Ignoring event type due to role'
                debug.println(debug.LEVEL_INFO, msg, True)
                return True
        elif event.type.startswith('object:selection-changed'):
            if event.source in self._parentsOfDefunctDescendants:
                msg = 'EVENT MANAGER: Ignoring event from parent of defunct descendants'
                debug.println(debug.LEVEL_INFO, msg, True)
                return True

            try:
                _name = event.source.name
            except:
                msg = 'EVENT MANAGER: Ignoring event from dead source'
                debug.println(debug.LEVEL_INFO, msg, True)
                return True

        if event.type.startswith('object:children-changed:add') \
           or event.type.startswith('object:active-descendant-changed'):
            if role in [pyatspi.ROLE_MENU,
                        pyatspi.ROLE_LAYERED_PANE,
                        pyatspi.ROLE_MENU_ITEM]:
                msg = 'EVENT MANAGER: Ignoring event type due to role'
                debug.println(debug.LEVEL_INFO, msg, True)
                return True
            if not event.any_data:
                msg = 'ERROR: Event any_data lacks child/descendant'
                debug.println(debug.LEVEL_INFO, msg, True)
                return True
            try:
                childState = event.any_data.getState()
                childRole = event.any_data.getRole()
            except:
                msg = 'ERROR: Event any_data contains potentially-defunct child/descendant'
                debug.println(debug.LEVEL_INFO, msg, True)
                return True

            if childState.contains(pyatspi.STATE_DEFUNCT):
                if state.contains(pyatspi.STATE_MANAGES_DESCENDANTS) \
                   and event.source not in self._parentsOfDefunctDescendants:
                    self._parentsOfDefunctDescendants.append(event.source)

                msg = 'ERROR: Event any_data contains defunct child/descendant'
                debug.println(debug.LEVEL_INFO, msg, True)
                return True

            if event.source in self._parentsOfDefunctDescendants:
                self._parentsOfDefunctDescendants.remove(event.source)

            # This should be safe. We do not have a reason to present a newly-added,
            # but not focused image. We do not need to update live regions for images.
            # This is very likely a completely and utterly useless event for us. The
            # reason for ignoring it here rather than quickly processing it is the
            # potential for event floods like we're seeing from matrix.org.
            if childRole == pyatspi.ROLE_IMAGE:
                msg = 'EVENT MANAGER: Ignoring event type due to role'
                debug.println(debug.LEVEL_INFO, msg, True)
                return True

        msg = 'EVENT MANAGER: Not ignoring due to lack of cause'
        debug.println(debug.LEVEL_INFO, msg, True)
        return False
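# --- Editor's sketch (not part of the original example) ---
# The notable filter() use above passes a bound method as the predicate:
# list(filter(event.type.startswith, self._ignoredEvents)) is non-empty as soon
# as the event type starts with any ignored prefix. A stand-alone version with
# hypothetical prefixes:
ignored_prefixes = ['object:bounds-changed', 'object:state-changed:defunct']
event_type = 'object:state-changed:defunct'
is_ignored = bool(list(filter(event_type.startswith, ignored_prefixes)))
# is_ignored == True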
Exemple #59
0
## Be careful: if you reuse answers from previous problems, use the LISTs you generated, so that all your tests still pass and you avoid confusion!
names_and_productivities = []
for every in range(len(programmers)):
    names_and_productivities.append((programmers[every].name, prod_list[every]))


## [PROBLEM 8]
print("\n\n***** Problem 8 *****")
# Use the Python filter function to select the subset of programmers who have names with 5 or more characters (i.e. ["Albert", "Dinesh", "Euijin"]).
# Your result should be a filter object that points to Student instances. Save that filter iterator in a variable called long_names.



## Then write code to cast the value of long_names to a list and save it in the variable long_names_list. 
long_names = filter(lambda x: len(x.name) >= 5, programmers)
long_names_list = list(long_names)
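# --- Editor's note (not part of the original example) ---
# In Python 3, long_names above is a lazy filter object: it can be iterated only
# once, which is why the problem asks you to cast it to a list. A minimal sketch
# with hypothetical Student-like objects:
class _Student:
    def __init__(self, name):
        self.name = name

programmers_demo = [_Student('Albert'), _Student('Mai'), _Student('Dinesh')]
long_demo = filter(lambda s: len(s.name) >= 5, programmers_demo)
first_pass = [s.name for s in long_demo]   # ['Albert', 'Dinesh']
second_pass = [s.name for s in long_demo]  # [] -- the iterator is already exhausted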



## [PROBLEM 9]
print("\n\n***** Problem 9 *****")

# Use a list comprehension to generate a LIST of just the names of those Student instances whose name is longer than their seniority (i.e., ["Albert", "Mai", "Dinesh", "Euijin"]). 
# Assign it to a variable called names_with_not_too_much_seniority.

## Note that you can use another list you have already created for this problem.
names_with_not_too_much_seniority = [student.name for student in programmers if len(student.name) >= student.years_UM]


Exemple #60
0
def run_kernel(kernel_file_path, kernel_name,
               gsize, lsize,
               platform_id, device_id,
               samples,
               instcounts, timeit,
               verbose):
    '''
    The hostcode wrapper function
    Essentially, it is nothing more than an OpenCL template hostcode,
    but it is the heart of oclude
    '''

    interact = Interactor(__file__.split(os.sep)[-1])
    interact.set_verbosity(verbose)

    ### step 1: get OpenCL platform, device and context, ###
    ### build the kernel program and create a queue      ###
    platform = cl.get_platforms()[platform_id]
    device = platform.get_devices()[device_id]

    # check if the extension needed
    # for the ulong hidden counters exists in selected device
    if instcounts and 'cl_khr_int64_base_atomics' not in device.get_info(cl.device_info.EXTENSIONS):
        interact('WARNING: Selected device does not support the `cl_khr_int64_base_atomics` OpenCL extension!')
        interact('         This means that instructions will not get correctly reported if they are too many!')

    interact('Using the following device:')
    interact('Platform:\t' + platform.name)
    interact('Device:\t' + device.name)
    interact('Version:\t' + device.version.strip())

    context = cl.Context([device])
    with open(kernel_file_path, 'r') as kernel_file:
        kernel_source = '#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable\n' + kernel_file.read()
    program = cl.Program(context, kernel_source).build()

    if timeit:
        queue = cl.CommandQueue(context, properties=cl.command_queue_properties.PROFILING_ENABLE)
    else:
        queue = cl.CommandQueue(context)

    ### step 2: get kernel arg info ###
    interact(f'Kernel name: {kernel_name}')

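    # Single-element unpacking from a filter object: the line below raises
    # ValueError if no kernel (or more than one) in the program matches kernel_name.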
    [kernel] = filter(lambda k: k.function_name == kernel_name, program.all_kernels())
    nargs = kernel.get_info(cl.kernel_info.NUM_ARGS)

    args = []

    for idx in range(nargs):
        kernel_arg_name = kernel.get_arg_info(idx, cl.kernel_arg_info.NAME)
        is_oclude_hidden_buffer = kernel_arg_name in [hidden_counter_name_local, hidden_counter_name_global]
        if not is_oclude_hidden_buffer:
            interact(f'Kernel arg {idx + 1}: ', nl=False)
        kernel_arg_type_name = kernel.get_arg_info(idx, cl.kernel_arg_info.TYPE_NAME)
        kernel_arg_address_qualifier = cl.kernel_arg_address_qualifier.to_string(
            kernel.get_arg_info(idx, cl.kernel_arg_info.ADDRESS_QUALIFIER)
        ).lower()
        if not is_oclude_hidden_buffer:
            interact(f'{kernel_arg_name} ({kernel_arg_type_name}, {kernel_arg_address_qualifier})', prompt=False)
        args.append((kernel_arg_name, kernel_arg_type_name, kernel_arg_address_qualifier))

    ### step 3: collect arg types ###
    arg_types = {}
    parser = None
    ast = None
    typedefs = {}
    structs = {}

    for kernel_arg_name, kernel_arg_type_name, _ in args:

        argtype_base = kernel_arg_type_name.split('*')[0]

        try:
            # it is a normal OpenCL type
            arg_types[kernel_arg_name] = eval('cltypes.' + argtype_base)

        except AttributeError:
            # it is a struct (lazy evaluation of structs)
            if parser is None:
                parser = OpenCLCParser()
                cmdout, _ = interact.run_command(None, preprocessor, kernel_file_path)
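                # filter() keeps only non-blank lines that are not preprocessor
                # directives ('#...') before handing the source to the C parser.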
                kernel_source = '\n'.join(filter(lambda line : line.strip() and not line.startswith('#'), cmdout.splitlines()))
                ast = parser.parse(kernel_source)

                for ext in ast.ext:

                    ### typedefs ###
                    if isinstance(ext, Typedef):
                        if isinstance(ext.type.type, Struct):
                            # typedefed struct (new)
                            if ext.type.type.decls is not None:
                                typedefs[ext.name] = create_struct_type(device, ext.name, ext.type.type)
                            # typedefed struct (already seen it)
                            else:
                                previous_name = 'struct ' + ext.type.type.name
                                new_name = ext.name
                                typedefs[new_name] = structs[previous_name]
                        # simple typedef (not a struct)
                        else:
                            previous_name = ' '.join(ext.type.type.names)
                            new_name = ext.name
                            typedefs[new_name] = ext.type

                    ### struct declarations ###
                    elif isinstance(ext, Decl) and isinstance(ext.type, Struct):
                        name = 'struct ' + ext.type.name
                        structs[name] = create_struct_type(device, ext.type.name, ext.type)

            try:
                arg_types[kernel_arg_name] = structs[argtype_base]
            except KeyError:
                arg_types[kernel_arg_name] = typedefs[argtype_base]

    ### run the kernel as many times as requested by the user ###
    interact(f'About to execute kernel with Global NDRange = {gsize}' + (f' and Local NDRange = {lsize}' if lsize else ''))
    interact(f'Number of executions (a.k.a. samples) to perform: {max(samples, 1)}')

    n_executions = trange(samples, unit=' kernel executions') if samples > 1 else range(1)
    results = []

    for _ in n_executions:

        ### step 4: create argument buffers ###
        (
            arg_bufs,
            which_are_scalar,
            hidden_global_hostbuf,
            hidden_global_buf
        ) = init_kernel_arguments(context, args, arg_types, gsize)

        ### step 5: set kernel arguments and run it! ###
        kernel.set_scalar_arg_dtypes(which_are_scalar)

        if timeit:
            time_start = time()
            time_finish = None

        if lsize:
            event = kernel(queue, (gsize,), (lsize,), *arg_bufs)
        else:
            event = kernel(queue, (gsize,), None, *arg_bufs)

        if timeit:
            event.wait()
            time_finish = time()

        queue.flush()
        queue.finish()

        ### step 6: read back the results and report them if requested ###
        this_run_results = {}

        if instcounts:
            if not samples > 1:
                interact('Collecting instruction counts...')
            global_counter = np.empty_like(hidden_global_hostbuf)
            cl.enqueue_copy(queue, global_counter, hidden_global_buf)
            this_run_results['instcounts'] = dict(zip(llvm_instructions, global_counter.tolist()))

        if timeit:
            if not samples > 1:
                interact('Collecting time profiling info...')
            hostcode_time_elapsed = (time_finish - time_start) * 1000
            device_time_elapsed = (event.profile.end - event.profile.start) * 1e-6
            this_run_results['timeit'] = {
                'hostcode': hostcode_time_elapsed,
                'device':   device_time_elapsed,
                'transfer': hostcode_time_elapsed - device_time_elapsed
            }

        if this_run_results:
            results.append(this_run_results)

    interact('Kernel run' + ('s' if samples > 1 else '') + ' completed successfully')

    return results if results else None
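# --- Editor's usage sketch (not part of the original example) ---
# Hypothetical call illustrating the run_kernel() signature above; the kernel
# file and kernel name are made up, and oclude's helpers (Interactor,
# init_kernel_arguments, ...) plus an actual OpenCL device are required, so the
# call is left commented out.
# results = run_kernel('vadd.cl', 'vadd',
#                      gsize=1024, lsize=None,
#                      platform_id=0, device_id=0,
#                      samples=3,
#                      instcounts=False, timeit=True,
#                      verbose=True)
# Each entry of results is a dict that may contain 'instcounts' and 'timeit' keys.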