Example 1
def parse_atrack_msg(text):
    field_names = [
        # strip padding so lookups like out.get("Report ID") actually match
        name.strip()
        for name in (
            "@P,CRC,L,Seq.ID,UNID,GPS time, RTC time, Position time, Lon, "
            "Lat, Heading, Report ID, Odo, HDOP, DI, Speed, DO, AI, DVID, "
            "1st Temp, 2nd Temp, Text"
        ).split(",")
    ]
    field_vals = text.split(",")
    logger.debug("field_vals: {}".format(field_vals))
    out = OrderedDict(zip(field_names, field_vals))
    out_processed = OrderedDict(
        [
            ("device_id", out.get("UNID", -1)),
            ("driver_id", out.get("DVID", -1)),
            ("report_id", out.get("Report ID", -1)),
            ("timestamp", out.get("Position time", int(time.time()))),
            ("hdop", out.get("HDOP", 990)),
            ("latitude", float(out.get("Lon", 1300000)) / 1e6),
            ("longitude", float(out.get("Lat", 103800000)) / 1e6),
            ("heading", out.get("Heading", -1)),
            ("speed", out.get("Speed", -1)),
            ("odometer", out.get("Odo", -1)),
            ("temperature_1", out.get("1st Temp", -1)),
            ("temperature_2", out.get("2nd Temp", -1)),
        ]
    )
    logger.debug("Parse result:\n{}".format(out_processed))
    return out_processed
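
The zip-based parsing above only works once the padded field names are stripped; a minimal, self-contained check (field names and values invented purely for illustration) shows the pitfall:

from collections import OrderedDict

# without .strip(), the second key would be " lat" and the lookup would miss
names = [n.strip() for n in "id, lat, lon".split(",")]
vals = "42,1.3,103.8".split(",")
record = OrderedDict(zip(names, vals))
assert record.get("lat") == "1.3"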
Example 2
    def get_constraint_updates(self):
        
        updates = OrderedDict()

        ## unit-variance constraint on hidden-unit activations ##
        if self.flags['unit_std']:
            updates[self.Wv] = self.Wv / self.avg_hact_std

        ## clip parameters to maximum values (if applicable)
        for (k, v) in self.clip_max.iteritems():
            assert k in [param.name for param in self.params()]
            param = getattr(self, k)
            updates[param] = T.minimum(param, v)

        ## clip parameters to minimum values (if applicable)
        for (k, v) in self.clip_min.iteritems():
            assert k in [param.name for param in self.params()]
            param = getattr(self, k)
            # T.maximum enforces the floor; T.clip(x, v, param) silently
            # returns the old value whenever param falls below v
            updates[param] = T.maximum(updates.get(param, param), v)
        
        ## constrain lambd to be a scalar
        if self.flags['scalar_lambd']:
            lambd = updates.get(self.lambd, self.lambd)
            updates[self.lambd] = T.mean(lambd) * T.ones_like(lambd)

        return updates
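
To see why `T.maximum` is the right lower clip here, consider a NumPy analogue (NumPy's `clip` composes `minimum(maximum(x, lo), hi)` the same way Theano's `T.clip` does); the values are made up:

import numpy as np

param = np.array([0.5, 2.0])
v = 1.0
# clip(param, v, param) leaves 0.5 below the floor v
print(np.clip(param, v, param))  # [0.5 2. ]
# maximum enforces the floor elementwise
print(np.maximum(param, v))      # [1.  2. ]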
Example 3
 def _get_cpu_topology(self):
     cpu_topology_file = '/proc/cpuinfo'
     # physical processor -> physical core -> logical processing units (threads)
     cpu_topology = OrderedDict()
     if not os.path.exists(cpu_topology_file):
         raise DpdkSetup('File with CPU topology (%s) does not exist.' % cpu_topology_file)
     with open(cpu_topology_file) as f:
         for lcore in f.read().split('\n\n'):
             if not lcore:
                 continue
             lcore_dict = OrderedDict()
             for line in lcore.split('\n'):
                 key, val = line.split(':', 1)
                 lcore_dict[key.strip()] = val.strip()
             if 'processor' not in lcore_dict:
                 continue
             numa = int(lcore_dict.get('physical id', -1))
             if numa not in cpu_topology:
                 cpu_topology[numa] = OrderedDict()
             core = int(lcore_dict.get('core id', lcore_dict['processor']))
             if core not in cpu_topology[numa]:
                 cpu_topology[numa][core] = []
             cpu_topology[numa][core].append(int(lcore_dict['processor']))
     if not cpu_topology:
         raise DpdkSetup('Could not determine CPU topology from %s' % cpu_topology_file)
     return cpu_topology
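
A self-contained sketch of the same per-block parsing, run on a trimmed, hypothetical /proc/cpuinfo blob (two hyperthreads sharing one physical core):

from collections import OrderedDict

sample = ("processor\t: 0\nphysical id\t: 0\ncore id\t: 0\n"
          "\n"
          "processor\t: 1\nphysical id\t: 0\ncore id\t: 0\n")

topology = OrderedDict()
for lcore in sample.split('\n\n'):
    if not lcore.strip():
        continue
    info = OrderedDict(
        (k.strip(), v.strip())
        for k, v in (line.split(':', 1) for line in lcore.strip().split('\n'))
    )
    numa = int(info.get('physical id', -1))
    core = int(info.get('core id', info['processor']))
    topology.setdefault(numa, OrderedDict()).setdefault(core, []).append(int(info['processor']))

print(topology)  # OrderedDict([(0, OrderedDict([(0, [0, 1])]))])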
Example 4
class VoidNode(object):
    def __init__(self, name):
        super(VoidNode, self).__init__()
        self.inputPorts = OrderedDict()
        self.outputPorts = OrderedDict()
        self.initPorts()
        self.name = name

    def initPorts(self):
        pass

    def getInputPort(self, name):
        return self.inputPorts.get(name)

    def getOutputPort(self, name):
        return self.outputPorts.get(name)

    def addInputPort(self, name):
        port = InputPort(name)
        port.owner = self
        self.inputPorts[name] = port

    def addOutputPort(self, name):
        port = OutputPort(name)
        port.owner = self
        self.outputPorts[name] = port

    def evaluate(self):
        logger.debug("Evaluating {}".format(self))
Example 5
def writeTotals(journals, folders):

    # Ordered dict mapping each field to the journal totals in that field
    total_list = OrderedDict()

    for field in journals:
        totals = []
        for journal in field[0]:
            totals.append([journal[0][1], len(journal)])
        total_list[field[1]] = totals

    # One CSV per key listing journal names and their totals
    for key, totals in total_list.items():
        writeto = folders[0]
        make_sure_path_exists(writeto)
        print 'Key: ', key
        with open(writeto + key.split('.')[0] + '.csv', 'w') as outfile:
            for name, count in totals:
                outfile.write('"' + str(name) + '",' + str(count) + '\n')

    # One CSV per key listing row numbers and totals
    for key, totals in total_list.items():
        writeto = folders[1]
        make_sure_path_exists(writeto)
        with open(writeto + key.split('.')[0] + '.csv', 'w') as outfile:
            for i, (_, count) in enumerate(totals):
                outfile.write(str(i + 1) + ',' + str(count) + '\n')
Example 6
    def get_constraint_updates(self):
        constraint_updates = OrderedDict() 

        if self.flags['wv_norm'] == 'unit':
            constraint_updates[self.Wv] = self.Wv / self.norm_wv
        elif self.flags['wv_norm'] == 'max_unit':
            constraint_updates[self.Wv] = self.Wv / self.norm_wv * T.minimum(self.norm_wv, 1.0)

        if self.flags['scalar_lambd']:
            constraint_updates[self.lambd] = T.mean(self.lambd) * T.ones_like(self.lambd)

        ## Enforce sparsity pattern on g if required ##
        if self.sparse_gmask:
            constraint_updates[self.Wg] = self.Wg * self.sparse_gmask.mask.T

        ## clip parameters to maximum values (if applicable)
        for (k, v) in self.clip_max.iteritems():
            assert k in [param.name for param in self.params()]
            # look up pending updates by the shared variable; .get(k, ...)
            # with the string name can never match a variable-keyed dict
            param = getattr(self, k)
            constraint_updates[param] = T.minimum(constraint_updates.get(param, param), v)

        ## clip parameters to minimum values (if applicable)
        for (k, v) in self.clip_min.iteritems():
            assert k in [param.name for param in self.params()]
            param = getattr(self, k)
            constraint_updates[param] = T.maximum(constraint_updates.get(param, param), v)

        return constraint_updates
Example 7
    def get_constraint_updates(self):
        constraint_updates = OrderedDict() 
        if self.flags['scalar_lambd']:
            constraint_updates[self.lambd] = T.mean(self.lambd) * T.ones_like(self.lambd)

        # constraint filters to have unit norm
        if self.flags['wv_norm'] in ('unit', 'max_unit'):
            wv = constraint_updates.get(self.Wv, self.Wv)
            wv_norm = T.sqrt(T.sum(wv**2, axis=0))
            if self.flags['wv_norm'] == 'unit':
                constraint_updates[self.Wv] = wv / wv_norm
            elif self.flags['wv_norm'] == 'max_unit':
                constraint_updates[self.Wv] = wv / wv_norm * T.minimum(wv_norm, 1.0)

        constraint_updates[self.scalar_norms] = T.maximum(1.0, self.scalar_norms)
        ## clip parameters to maximum values (if applicable)
        for (k, v) in self.clip_max.iteritems():
            assert k in [param.name for param in self.params()]
            # look up pending updates by the shared variable; .get(k, ...)
            # with the string name can never match a variable-keyed dict
            param = getattr(self, k)
            constraint_updates[param] = T.minimum(constraint_updates.get(param, param), v)

        ## clip parameters to minimum values (if applicable)
        for (k, v) in self.clip_min.iteritems():
            assert k in [param.name for param in self.params()]
            param = getattr(self, k)
            constraint_updates[param] = T.maximum(constraint_updates.get(param, param), v)

        return constraint_updates
Example 8
    def cell(self, values):
        if not values:
            values = []
        values = list(values)
        self._cell_unvalidated = values

        filtered_areas = OrderedDict()
        filtered_cells = OrderedDict()
        for value in values:
            valid_area = CellAreaLookup.create(**value)
            if valid_area:
                areaid = valid_area.areaid
                existing = filtered_areas.get(areaid)
                # keep the new lookup unless an existing one is strictly better
                if existing is None or not existing.better(valid_area):
                    filtered_areas[areaid] = valid_area
            valid_cell = CellLookup.create(**value)
            if valid_cell:
                cellid = valid_cell.cellid
                existing = filtered_cells.get(cellid)
                if existing is None or not existing.better(valid_cell):
                    filtered_cells[cellid] = valid_cell
        self._cell_area = list(filtered_areas.values())
        self._cell = list(filtered_cells.values())
Example 9
File: api.py Project: eads/elex
    def get_uniques(self, candidate_reporting_units):
        """
        Parses out unique candidates and ballot measures
        from a list of CandidateReportingUnit objects.
        """
        unique_candidates = OrderedDict()
        unique_ballot_measures = OrderedDict()

        for c in candidate_reporting_units:
            if c.is_ballot_measure:
                if not unique_ballot_measures.get(c.candidateid, None):
                    unique_ballot_measures[c.candidateid] = BallotMeasure(
                                                                last=c.last,
                                                                candidateid=c.candidateid,
                                                                polid=c.polid,
                                                                ballotorder=c.ballotorder,
                                                                polnum=c.polnum,
                                                                seatname=c.seatname,
                                                                description=c.description)
            else:
                if not unique_candidates.get(c.candidateid, None):
                    unique_candidates[c.candidateid] = Candidate(
                                                                first=c.first,
                                                                last=c.last,
                                                                candidateid=c.candidateid,
                                                                polid=c.polid,
                                                                ballotorder=c.ballotorder,
                                                                polnum=c.polnum,
                                                                party=c.party)

        candidates = list(unique_candidates.values())
        ballot_measures = list(unique_ballot_measures.values())
        return candidates, ballot_measures
Example 10
def formatted_dict(form):
    '''
    Format request.form for inserting
    data into the Highspeed database.
    '''
    d = OrderedDict(form)
    dd = {}

    step = ['step_' + str(x) for x in range(1, 17)]
    a = ['a' + str(x) for x in range(1, 17)]
    b = ['b' + str(x) for x in range(1, 17)]
    e = ['e' + str(x) for x in range(1, 17)]
    c = ['c' + str(x) for x in range(1, 4)]

    for x, y, w, z in zip(a, b, e, step):
        j = d.get(x, 0)
        k = d.get(y, 0)
        m = d.get(w, 0)

        if j != '' and k != '' and m != '':
            j = float(j)
            k = float(k)
            m = float(m)
        else:
            j = 0
            k = 0
            m = 0

        l = round(k / 60 * j, 2)
        dd[z] = (j, k, l, m)

    dd['info'] = tuple([d.get(n) for n in c])
    return dd
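
A hypothetical form payload exercising formatted_dict; only step 1 is filled in, the remaining steps fall back to zeros:

from collections import OrderedDict

form = OrderedDict([('a1', '2'), ('b1', '30'), ('e1', '1'),
                    ('c1', 'x'), ('c2', 'y'), ('c3', 'z')])
result = formatted_dict(form)
print(result['step_1'])  # (2.0, 30.0, 1.0, 1.0) since 30/60*2 == 1.0
print(result['info'])    # ('x', 'y', 'z')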
Example 11
class ArgsCollection(object):

    def __init__(self, *args, **kwargs):
        self._items = OrderedDict()
        for arg in args:
            k, _, v = arg.partition('=')
            k = k.lstrip('-')
            if not kwargs.get('flatten'):
                self._items.setdefault(k, []).append(v)
            else:
                self._items[k] = v

    def get(self, k, default=None):
        return self._items.get(k, default)

    def items(self):
        return self._items.items()

    def __getattr__(self, k, default=None):
        return self._items.get(k, default)

    def __contains__(self, k):
        return k in self._items

    def __getitem__(self, k):
        return self._items[k]
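
Exercising ArgsCollection with a few GNU-style arguments (invented for illustration); repeated keys accumulate into lists unless flatten is passed:

args = ArgsCollection('--name=foo', '--tag=a', '--tag=b')
print(args.get('name'))   # ['foo']
print(args.get('tag'))    # ['a', 'b']
print('tag' in args)      # True

flat = ArgsCollection('--name=foo', flatten=True)
print(flat['name'])       # 'foo'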
Example 12
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)

        cache_key = 'banners_news_%s' % get_language()
        side_banner = cache.get(cache_key, None)
        if side_banner is None:
            side_banner = list(
                Banner.objects.filter(
                    status=1,
                    location=Banner.BANNER_LOCATIONS.news,
                    show_start__lte=timezone.now(),
                    show_end__gte=timezone.now(),
                    language__in=['', get_language()],
                ).order_by('ordering').values(
                    'id', 'kind', 'banner', 'banner_url', 'competition',
                    'converted', 'show_end', 'show_start', 'url',
                    'height', 'width', 'ordering',
                )
            )
            cache.set(cache_key, side_banner, 60*30)  # Cache for 30 minutes

        picked_banners = []
        if side_banner and len(side_banner) > 1:

            banners = OrderedDict()
            for banner in side_banner:
                banners.setdefault(banner.get('ordering'), []).append(banner)

            for index in list(banners.keys())[:3]:
                b = banners.get(index)
                if len(b) > 1:
                    shuffle(b)
                picked_banners.append(b[0])
        else:
            picked_banners = side_banner

        if picked_banners:
            Banner.objects.filter(id__in=[obj.get('id') for obj in picked_banners]).update(view_count=F('view_count') + 1)

        context.update({
            'side_banner': picked_banners,
        })
        return context
Example 13
def strip_out_section(soup, htmldoc, h_level, strip_heading, index):
	if index not in (0, 1):
		raise Exception("index can only have the following values: 0, 1")

	headings = OrderedDict()
	# obtain current and next heading to strip off unnecessary stuff
	for heading in soup.findAll(h_level):
		headings.update({unicode(heading.text): unicode(heading)})

	cur_head = ""
	next_head = ""
	found_cur_head = False
	for key in headings.keys():
		if found_cur_head:
			next_head = headings.get(key)
			break
		if key == strip_heading:
			cur_head = headings.get(key)
			found_cur_head = True

	# strip off everything other than what we want
	htmldoc = unicode(soup)
	if not cur_head:
		return soup
	elif not next_head:
		htmldoc = htmldoc.split(cur_head)[index]
	else:
		if index == 0:
			htmldoc = htmldoc.split(cur_head)[0] + next_head + htmldoc.split(next_head)[1]
		elif index == 1:
			htmldoc = cur_head + htmldoc.split(cur_head)[1].split(next_head)[0]

	return BeautifulSoup(htmldoc)
Example 14
class Tweak(object):  # {{{

    def __init__(self, name, doc, var_names, defaults, custom):
        translate = _
        self.name = translate(name)
        self.doc = doc.strip()
        self.doc = ' ' + self.doc
        self.var_names = var_names
        if self.var_names:
            self.doc = u"%s: %s\n\n%s"%(_('ID'), self.var_names[0], format_doc(self.doc))
        self.default_values = OrderedDict()
        for x in var_names:
            self.default_values[x] = defaults[x]
        self.custom_values = OrderedDict()
        for x in var_names:
            if x in custom:
                self.custom_values[x] = custom[x]

    def __str__(self):
        ans = ['#: ' + self.name]
        for line in self.doc.splitlines():
            if line:
                ans.append('# ' + line)
        for key, val in iteritems(self.default_values):
            val = self.custom_values.get(key, val)
            ans.append(u'%s = %r'%(key, val))
        ans = '\n'.join(ans)
        if isinstance(ans, unicode_type):
            ans = ans.encode('utf-8')
        return ans

    @property
    def sort_key(self):
        return 0 if self.is_customized else 1

    @property
    def is_customized(self):
        for x, val in iteritems(self.default_values):
            cval = self.custom_values.get(x, val)
            if normalize_tweak(cval) != normalize_tweak(val):
                return True
        return False

    @property
    def edit_text(self):
        from pprint import pformat
        ans = ['# %s'%self.name]
        for x, val in iteritems(self.default_values):
            val = self.custom_values.get(x, val)
            if isinstance(val, (list, tuple, dict, set, frozenset)):
                ans.append(u'%s = %s' % (x, pformat(val)))
            else:
                ans.append(u'%s = %r'%(x, val))
        return '\n\n'.join(ans)

    def restore_to_default(self):
        self.custom_values.clear()

    def update(self, varmap):
        self.custom_values.update(varmap)
Example 15
class Schema:
    def __init__(self):
        self._info = OrderedDict()

    def add_field(self, fldname, fldtype, lentype, nulltype=NULLABLE, fldlength=0):
        self._info[fldname] = FieldInfo(fldname, fldtype, lentype, nulltype, fldlength)

    def add(self, fldname, sch: "Schema"):
        # the annotation must be quoted: Schema is not bound yet while its body executes
        self._info[fldname] = FieldInfo(fldname, sch.fldtype(fldname), sch.lentype(fldname), sch.nulltype(fldname),
                                        sch.fldlength(fldname))

    def add_all(self, sch: "Schema"):
        self._info.update(sch._info)

    def fldtype(self, fldname):
        return self._info.get(fldname).fldtype

    def lentype(self, fldname):
        return self._info.get(fldname).lentype

    def nulltype(self, fldname):
        return self._info.get(fldname).nulltype

    def fldlength(self, fldname):
        return self._info.get(fldname).fldlength

    def has_field(self, fldname):
        return fldname in self._info

    def fields(self):
        return self._info.keys()
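
A sketch of how the schema composes; FieldInfo and NULLABLE are not shown above, so hypothetical stand-ins are assumed (they would have to be defined before the class body runs, since NULLABLE is a default argument):

from collections import namedtuple

FieldInfo = namedtuple('FieldInfo', 'fldname fldtype lentype nulltype fldlength')
NULLABLE = 0

src = Schema()
src.add_field('name', 'varchar', 'var', fldlength=32)

dst = Schema()
dst.add('name', src)        # copy a single field definition
dst.add_all(src)            # merge every field from src
print(list(dst.fields()))   # ['name']
print(dst.fldtype('name'))  # varchar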
Example 16
def _update_rdc_restraints(restraint_list, alpha, timestep, force_dict):
    # split restraints into rdc and non-rdc
    rdc_restraint_list = [r for r in restraint_list if isinstance(r, RdcRestraint)]
    nonrdc_restraint_list = [r for r in restraint_list if not isinstance(r, RdcRestraint)]

    # if we have any rdc restraints
    if rdc_restraint_list:
        rdc_force = force_dict['rdc']
        # make a dictionary based on the experiment index
        expt_dict = OrderedDict()
        for r in rdc_restraint_list:
            # setdefault stores the list in the dict; .get built one and dropped it
            expt_dict.setdefault(r.expt_index, []).append(r)

        # loop over the experiments and update the restraints
        index = 0
        for experiment in expt_dict:
            rests = expt_dict[experiment]
            for r in rests:
                scale = r.scaler(alpha) * r.ramp(timestep)
                rdc_force.updateRdcRestraint(
                    index,
                    r.atom_index_1 - 1,
                    r.atom_index_2 - 1,
                    r.kappa, r.d_obs, r.tolerance,
                    r.force_const * scale, r.weight)
                index = index + 1

    return nonrdc_restraint_list
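
The grouping idiom matters here: dict.setdefault stores the list it returns, while dict.get hands back a throwaway. A minimal illustration with invented experiment indices:

from collections import OrderedDict

grouped = OrderedDict()
for expt, name in [(0, 'r1'), (1, 'r2'), (0, 'r3')]:
    grouped.setdefault(expt, []).append(name)
print(grouped)  # OrderedDict([(0, ['r1', 'r3']), (1, ['r2'])])

broken = OrderedDict()
broken.get(0, []).append('r1')  # appends to a list that is never stored
print(broken)   # OrderedDict()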
Example 17
def _prepareFeatureLangSys(langTag, langSys, table, features, scriptTag, scriptStatus, getStatus):
    # This is a part of prepareFeatures
    printScript, scriptRequired = scriptStatus
    printLang, langRequired = getStatus((langTag, langSys), scriptRequired)
    for featureIdx in langSys.FeatureIndex:
        featureRecord = table.table.FeatureList.FeatureRecord[featureIdx]
        printFeature, featureRequired = getStatus(featureRecord, langRequired)

        featureTag = featureRecord.FeatureTag
        scripts, _ = features.get(featureTag, (None, None))
        if scripts is None:
            scripts = OrderedDict()
            features[featureTag] = (scripts, printFeature)

        languages, _ = scripts.get(scriptTag, (None, None))
        if languages is None:
            languages = OrderedDict()
            scripts[scriptTag] = (languages, printScript)

        lookups, _ = languages.get(langTag, (None, None))
        if lookups is None:
            lookups = []
            languages[langTag] = (lookups, printLang)

        for lookupIdx in featureRecord.Feature.LookupListIndex:
            lookup = table.table.LookupList.Lookup[lookupIdx]
            printLookup, _ = getStatus(lookup, featureRequired)
            lookups.append((lookupIdx, printLookup))
Example 18
def paper_and_author_growth(min_year=1992, max_year=2015):
  graph = cite_graph(GRAPH_CSV)
  year_authors_map = OrderedDict()
  year_papers_map = OrderedDict()
  for _, paper in graph.get_paper_nodes(permitted=THE.permitted).items():
    year = int(paper.year)
    if not (min_year < year <= max_year): continue
    authors = paper.authors.split(",")
    year_authors_map[year] = year_authors_map.get(year, set()).union(authors)
    year_papers_map[year] = year_papers_map.get(year, 0) + 1
  x_axis = []
  papers = []
  authors = []
  seen = set(year_authors_map[sorted(year_authors_map.keys())[0]])
  f = open("figs/v3/%s/paper_author_count.csv" % THE.permitted, "wb")
  f.write("Year, # Papers, # Authors\n")
  for key in sorted(year_authors_map.keys())[1:]:
    x_axis.append(key)
    papers.append(year_papers_map[key])
    new_authors = set(year_authors_map[key]).difference(seen)
    authors.append(len(new_authors))
    seen = seen.union(set(year_authors_map[key]))
    f.write("%d, %d, %d\n" % (key, year_papers_map[key], len(new_authors)))
  plt.plot(x_axis, papers)
  plt.plot(x_axis, authors)
  legends = ['Papers', 'Authors']
  plt.legend(legends, loc='upper left')
  plt.title('Growth of Papers and Authors')
  plt.xlabel("Year")
  plt.ylabel(" Count")
  plt.savefig("figs/v3/%s/paper_author_count.png" % THE.permitted)
  plt.clf()
  f.close()
Example 19
def linear_seq_clusterer(stats, decisions, key="iqrs", delta=0.25):
  point_to_cluster = OrderedDict()
  for stat in stats:
    vals = stat[key]
    clusters = []
    cluster_prev = [vals[0]]
    clusters.append(cluster_prev)
    current_cluster = point_to_cluster.get(0, set())
    current_cluster.add(len(clusters))
    point_to_cluster[0] = current_cluster
    for index, val in enumerate(vals[1:]):
      prev_mean = np.mean(cluster_prev)
      if abs(prev_mean - val) > delta * prev_mean:
        cluster_prev = [val]
        clusters.append(cluster_prev)
      else:
        cluster_prev.append(val)
      current_cluster = point_to_cluster.get(index+1, set())
      current_cluster.add(len(clusters))
      point_to_cluster[index+1] = current_cluster
  columns = ["Cluster ID", "Decision Name"]
  table = PrettyTable(columns)
  prev_val = None
  for key, val in point_to_cluster.items():
    current_val = ",".join(map(str, list(val)))
    if current_val == prev_val:
      row = ["\"", decisions[key].name]
    else:
      row = [current_val, decisions[key].name]
      prev_val = current_val
    table.add_row(row)
  print("\n### Decisions Clustered")
  print("```")
  print(table)
  print("```")
Example 20
class MockQualtrics(object):
    """ Mock object for unit testing code that uses pyqualtrics library

    """
    def __init__(self, user=None, token=None, api_version="2.5"):
        self.user = user
        self.token = token
        self.api_version = api_version
        self.last_error_message = None
        self.last_url = None
        self.json_response = None
        self.response = None  # For debugging purpose
        self.mock_responses = OrderedDict()
        self.mock_responses_labels = OrderedDict()

    def getResponse(self, SurveyID, ResponseID, Labels=None, **kwargs):
        if Labels == "1":
            return self.mock_responses_labels.get(ResponseID, None)
        else:
            return self.mock_responses.get(ResponseID, None)

    def getLegacyResponseData(self, SurveyID, Labels=None, **kwargs):
        if Labels == "1":
            return self.mock_responses_labels
        else:
            return self.mock_responses
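
Using the mock in a test; the survey and response IDs here are invented:

q = MockQualtrics(user="u", token="t")
q.mock_responses["R_1"] = {"Q1": "1"}
q.mock_responses_labels["R_1"] = {"Q1": "Yes"}

print(q.getResponse("SV_1", "R_1"))              # {'Q1': '1'}
print(q.getResponse("SV_1", "R_1", Labels="1"))  # {'Q1': 'Yes'}
print(q.getResponse("SV_1", "R_missing"))        # None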
Example 21
def run():
    """Run the agent for a finite number of trials."""

    # Set up environment and agent
    gammas = [x / 10.0 for x in xrange(0, 10)]
    gamma_to_success_rate = OrderedDict()
    gamma_to_average_reward = OrderedDict()
    # Run a simulation for each sample gamma value to test which
    # choice of gamma results in the most successful agent
    for gamma in gammas:
        # Run 10 trials over each choice of gamma to get average performance metrics
        for trial in xrange(10):
            e = Environment()  # create environment (also adds some dummy traffic)
            a = e.create_agent(LearningAgent, gamma)  # create agent
            e.set_primary_agent(a, enforce_deadline=True)  # set agent to track

            # Now simulate it
            sim = Simulator(e, update_delay=0.0)  # reduce update_delay to speed up simulation
            sim.run(n_trials=50)  # press Esc or close pygame window to quit

            gamma_to_success_rate[a.GAMMA] = gamma_to_success_rate.get(a.GAMMA, 0) + sim.env.successful_trials
            gamma_to_average_reward[a.GAMMA] = (
                gamma_to_average_reward.get(a.GAMMA, 0) + a.get_average_reward_per_action()
            )

    # Average the metrics over the 10 runs for each gamma; float division
    # avoids Python 2 integer truncation of the success counts
    for gamma in gamma_to_average_reward.keys():
        gamma_to_average_reward[gamma] = gamma_to_average_reward[gamma] / 10.0
        gamma_to_success_rate[gamma] = gamma_to_success_rate[gamma] / 10.0
    print gamma_to_average_reward
    print gamma_to_success_rate
Example 22
class Config(object):

    def __init__(self, axes, positions):

        def sort_key(nx):
            name, axis = nx
            try:
                return (0, 'xyz'.index(name))
            except ValueError:
                return (1, name)
                
        self.axes = OrderedDict(sorted(((a.name, a) for a in axes),
                                       key=sort_key))
        self.positions = {pos.name: pos for pos in positions}

    @property
    def x(self):
        return self.axes.get('x')

    @property
    def y(self):
        return self.axes.get('y')

    @property
    def z(self):
        return self.axes.get('z')

    @property
    def home(self):
        return self.positions.get('home')

    @property
    def origin(self):
        return self.positions.get('origin')
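
A sketch of the sort order; the real axis and position classes only need a .name attribute here, so namedtuples stand in:

from collections import namedtuple

Axis = namedtuple('Axis', 'name')
Position = namedtuple('Position', 'name')

cfg = Config([Axis('z'), Axis('a'), Axis('x'), Axis('y')],
             [Position('home'), Position('origin')])
print(list(cfg.axes))  # ['x', 'y', 'z', 'a'] -- x, y, z first, then alphabetical
print(cfg.x)           # Axis(name='x')
print(cfg.home)        # Position(name='home')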
Example 23
def parsepdf(filename):
    fp = open(filename, 'rb')
    parser = PDFParser(fp)
    # Create a PDF document object that stores the document structure.
    # Supply the password for initialization.
    document = PDFDocument(parser)
    # Check if the document allows text extraction. If not, abort.
    if not document.is_extractable:
        raise PDFTextExtractionNotAllowed
    # Create a PDF resource manager object that stores shared resources.
    rsrcmgr = PDFResourceManager()
    laparams = LAParams()
    # Create a PDF device object.
    device = PDFPageAggregator(rsrcmgr, laparams=laparams)
    # Create a PDF interpreter object.
    interpreter = PDFPageInterpreter(rsrcmgr, device)
    # Process each page contained in the document.
    found_randers = False
    found_aarhus = False
    _randers = []
    headings = [u'Ledige lejligheder\n',u'afd. adresse\n',u'rum m2\n',u'leje \n',
                u'a\xb4c varme a\xb4c vand\n',u'indskud\n',u'ledig pr.\n',u'bem\xe6rkning\n'
                ]
    location_map = OrderedDict()
    header_ycord = []
    for page in PDFPage.create_pages(document):
        interpreter.process_page(page)
        layout = device.get_result()

        for obj in layout._objs:
            # print obj
            if isinstance(obj,LTTextBoxHorizontal):
                for o in obj._objs:
                    y0 = o.y0
                    # print o
                    if isinstance(o, LTTextLineHorizontal) and obj.get_text() not in headings:
                        if y0 not in header_ycord:
                            objs = location_map.get(y0, [])
                            string_val = o.get_text().encode('ascii', 'ignore')
                            string_val = string_val.replace('\n', '')
                            objs.append(string_val)
                            location_map[y0] = objs
                    else:
                        if y0 not in header_ycord:
                            header_ycord.append(y0)

    for key in location_map:
        print '**************************'
        print location_map.get(key)
        print '**************************'
    print 'Total Rows = %s' % len(location_map)
Example 24
    class Read(OrgObjPermsMixin, ContactFieldsMixin, ContactBase, SmartReadView):

        def derive_fields(self):
            fields = ['urn', 'region', 'group', 'language', 'last_response']
            if self.object.created_by_id:
                fields.append('created_by')

            # Show values for the visible data fields.
            # Which data fields to show are configured on the Org edit page.
            self.data_fields = [(f.key, f) for f in self.object.org.datafield_set.visible()]
            self.data_fields = OrderedDict(self.data_fields)
            fields.extend(self.data_fields.keys())

            return fields

        def get_last_response(self, obj):
            last_response = obj.responses.order_by('-updated_on').first()
            return last_response.updated_on if last_response else _("Never")

        def lookup_field_label(self, context, field, default=None):
            if field == 'urn':
                scheme = self.object.get_urn()[0]
                return dict(URN_SCHEME_CHOICES)[scheme]
            elif field in self.data_fields:
                return self.data_fields.get(field).display_name
            return super(ContactCRUDL.Read, self).lookup_field_label(context, field, default)

        def lookup_field_value(self, context, obj, field):
            if field in self.data_fields:
                value = self.data_fields.get(field).contactfield_set.filter(contact=obj)
                value = value.first()
                return value.get_value() or "-" if value else "Unknown"
            return super(ContactCRUDL.Read, self).lookup_field_value(context, obj, field)
Example 25
File: omega.py Project: stefco/gwpy
def parse_omega_channel(fobj, section=None):
    """Parse a `Channel` from an Omega-scan configuration file

    Parameters
    ----------
    fobj : `file`
        the open file-like object to parse
    section : `str`
        name of section in which this channel should be recorded

    Returns
    -------
    channel : `Channel`
        the channel as parsed from this `file`
    """
    params = OrderedDict()
    while True:
        line = next(fobj)
        if line == '}\n':
            break
        key, value = line.split(':', 1)
        params[key.strip()] = omega_param(value)
    out = Channel(params.get('channelName'),
                  sample_rate=params.get('sampleFrequency'),
                  frametype=params.get('frameType'),
                  frequency_range=params.get('searchFrequencyRange'))
    out.group = section
    out.params = params
    return out
Example 26
class TransactionSignalsContext:
    """
    Context object that stores handlers and call it after successful pass trough surrounded code block
    with "transaction_signals decorator. Handlers can be unique or standard. Unique handlers are registered
    and executed only once.
    """

    def __init__(self):
        self._unique_handlers = OrderedDict()
        self._handlers = []

    def register(self, handler):
        if getattr(handler, 'is_unique', False):
            if hash(handler) in self._unique_handlers:
                self._unique_handlers.get(hash(handler)).join(handler)
            else:
                self._unique_handlers[hash(handler)] = handler
                self._handlers.append(handler)
        else:
            self._handlers.append(handler)

    def handle_all(self):
        for handler in self._handlers:
            handler()

    def join(self, transaction_signals_context):
        for handler in transaction_signals_context._handlers:
            self.register(handler)
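
A sketch of the unique-handler flow; the handler stub below is hypothetical and only provides what register() relies on (callable, hashable, is_unique, and a join method):

class UniqueHandler(object):
    is_unique = True

    def __init__(self, name):
        self.name = name
        self.joined = 0

    def __hash__(self):
        return hash(self.name)

    def join(self, other):
        self.joined += 1

    def __call__(self):
        print('run %s (absorbed %d duplicates)' % (self.name, self.joined))

ctx = TransactionSignalsContext()
ctx.register(UniqueHandler('send_email'))
ctx.register(UniqueHandler('send_email'))  # same hash: joined, not re-queued
ctx.handle_all()                           # run send_email (absorbed 1 duplicates)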
Example 27
    def _make_haloupdate(self, f, hse, key='', **kwargs):
        distributor = f.grid.distributor
        nb = distributor._obj_neighborhood
        comm = distributor._obj_comm

        fixed = {d: Symbol(name="o%s" % d.root) for d in hse.loc_indices}

        # Only retain the halos required by the Diag scheme
        # Note: `sorted` is only for deterministic code generation
        halos = sorted(i for i in hse.halos if isinstance(i.dim, tuple))

        body = []
        for dims, tosides in halos:
            mapper = OrderedDict(zip(dims, tosides))

            sizes = [f._C_get_field(OWNED, d, s).size for d, s in mapper.items()]

            torank = FieldFromPointer(''.join(i.name[0] for i in mapper.values()), nb)
            ofsg = [fixed.get(d, f._C_get_field(OWNED, d, mapper.get(d)).offset)
                    for d in f.dimensions]

            mapper = OrderedDict(zip(dims, [i.flip() for i in tosides]))
            fromrank = FieldFromPointer(''.join(i.name[0] for i in mapper.values()), nb)
            ofss = [fixed.get(d, f._C_get_field(HALO, d, mapper.get(d)).offset)
                    for d in f.dimensions]

            kwargs['haloid'] = len(body)

            body.append(self._call_sendrecv('sendrecv%s' % key, f, sizes, ofsg, ofss,
                                            fromrank, torank, comm, **kwargs))

        iet = List(body=body)
        parameters = [f, comm, nb] + list(fixed.values())
        return Callable('haloupdate%s' % key, iet, 'void', parameters, ('static',))
Example 28
class PlainTextEvent(Event):
    def __str__(self):
        template = "{type} {name}"
        if self.subclass:
            if self.info:
                template = "{type} {name} {subclass} {info}"
            else:
                template = "{type} {name} {subclass}"
        elif self.info:
            template = "{type} {name} {info}"
        return template.format(type=self.type, name=self.name, info=self.info, subclass=self.subclass)

    def parse(self):
        # content contains key:value pairs for plain text events
        content_dict = utils.EventDict(self.content)
        self.dict.update(content_dict)
        self.dict = OrderedDict(sorted(self.dict.items(), key=lambda t: t[0].lower()))
        self.content = content_dict.content
        # delegate the event to any subscription functions
        self.delegate()

    def delegate(self):
        self.protocol._eventPlainTextDelegator(self, self.name, subclass=self.subclass, content=self.content)

    @property
    def name(self):
        return self.dict.get("Event-Name", "").strip()

    @property
    def info(self):
        return self.dict.get("Event-Info", "").strip()

    @property
    def subclass(self):
        return self.dict.get("Event-Subclass", "").strip()
Example 29
class Question:
    """Class for each question in a questionnaire"""

    __slots__ = ['qid', 'title', 'tags', 'type', 'restriction', 'answers']

    def __init__(self, qid, title=None, tags=None, _type='_def', restriction=None):
        self.qid, self.title, self.tags, self.type, self.restriction = qid, title, tags, _type, restriction
        self.answers = OrderedDict()

    def add_answer(self, aid, score=0, content=None, _type=None):
        self.answers[aid] = Answer(aid, score, content, _type)

    def get_answer_score(self, aid):
        a = self.answers.get(aid, None)
        return a.score if a is not None else 0

    def get_answer_content(self, aid):
        a = self.answers.get(aid, None)
        return a.content if a is not None else aid

    def to_dict(self):
        return {
            'qid': self.qid,
            'title': self.title,
            'tags': self.tags,
            'type': self.type,
            'restriction': self.restriction,
            'answers': self.answers
        }
Example 30
    def get_field_info(self, field):
        """
        Given an instance of a serializer field, return a dictionary
        of metadata about it.
        """
        field_info = OrderedDict()
        serializer = field.parent

        if isinstance(field, serializers.ManyRelatedField):
            field_info['type'] = self.type_lookup[field.child_relation]
        else:
            field_info['type'] = self.type_lookup[field]

        try:
            serializer_model = getattr(serializer.Meta, 'model')
            field_info['relationship_type'] = self.relation_type_lookup[
                getattr(serializer_model, field.field_name)
            ]
        except KeyError:
            pass
        except AttributeError:
            pass
        else:
            field_info['relationship_resource'] = get_related_resource_type(field)

        field_info['required'] = getattr(field, 'required', False)

        attrs = [
            'read_only', 'write_only', 'label', 'help_text',
            'min_length', 'max_length',
            'min_value', 'max_value', 'initial'
        ]

        for attr in attrs:
            value = getattr(field, attr, None)
            if value is not None and value != '':
                field_info[attr] = force_text(value, strings_only=True)

        if getattr(field, 'child', None):
            field_info['child'] = self.get_field_info(field.child)
        elif getattr(field, 'fields', None):
            field_info['children'] = self.get_serializer_info(field)

        if (
            not field_info.get('read_only') and
            not field_info.get('relationship_resource') and
            hasattr(field, 'choices')
        ):
            field_info['choices'] = [
                {
                    'value': choice_value,
                    'display_name': force_text(choice_name, strings_only=True)
                }
                for choice_value, choice_name in field.choices.items()
            ]

        if hasattr(serializer, 'included_serializers') and 'relationship_resource' in field_info:
            field_info['allows_include'] = field.field_name in serializer.included_serializers

        return field_info
Example 31
class WineRegistryKey:
    def __init__(self, key_def=None, path=None):

        self.subkeys = OrderedDict()
        self.metas = OrderedDict()

        if path:
            # Key is created by path, it's a new key
            timestamp = datetime.now().timestamp()
            self.name = path
            self.raw_name = "[{}]".format(path.replace("/", "\\\\"))
            self.raw_timestamp = " ".join(str(timestamp).split("."))

            windows_timestamp = WindowsFileTime.from_unix_timestamp(timestamp)
            self.metas["time"] = windows_timestamp.to_hex()
        else:
            # Existing key loaded from file
            self.raw_name, self.raw_timestamp = re.split(
                re.compile(r"(?<=[^\\]\]) "), key_def, maxsplit=1)
            self.name = self.raw_name.replace("\\\\", "/").strip("[]")

        # Parse timestamp either as int or float
        ts_parts = self.raw_timestamp.strip().split()
        if len(ts_parts) == 1:
            self.timestamp = int(ts_parts[0])
        else:
            self.timestamp = float("{}.{}".format(ts_parts[0], ts_parts[1]))

    def __str__(self):
        return "{0} {1}".format(self.raw_name, self.raw_timestamp)

    def parse(self, line):
        """Parse a registry line, populating meta and subkeys"""
        if len(line) < 4:
            # Line is too short, nothing to parse
            return

        if line.startswith("#"):
            self.add_meta(line)
        elif line.startswith('"'):
            try:
                key, value = re.split(re.compile(r"(?<![^\\]\\\")="),
                                      line,
                                      maxsplit=1)
            except ValueError as ex:
                logger.error("Unable to parse line %s", line)
                logger.exception(ex)
                return
            key = key[1:-1]
            self.subkeys[key] = value
        elif line.startswith("@"):
            key, value = line.split("=", 1)
            self.subkeys["default"] = value

    def add_to_last(self, line):
        try:
            last_subkey = next(reversed(self.subkeys))
        except StopIteration:
            logger.warning("Should this be happening?")
            return
        self.subkeys[last_subkey] += "\n{}".format(line)

    def render(self):
        """Return the content of the key in the wine .reg format"""
        content = self.raw_name + " " + self.raw_timestamp + "\n"
        for key, value in self.metas.items():
            if value is None:
                content += "#{}\n".format(key)
            else:
                content += "#{}={}\n".format(key, value)
        for key, value in self.subkeys.items():
            if key == "default":
                key = "@"
            else:
                key = '"{}"'.format(key)
            content += "{}={}\n".format(key, value)
        return content

    def render_value(self, value):
        if isinstance(value, int):
            return "dword:{:08x}".format(value)
        if isinstance(value, str):
            return '"{}"'.format(value)
        raise NotImplementedError("TODO")

    def decode_unicode(self, string):
        chunks = string.split('\\x')
        out = chunks.pop(0).encode().decode('unicode_escape')
        for chunk in chunks:
            # We have seen files with unicode characters escaped on 1 byte (\xfa),
            # 1.5 bytes (\x444) and 2 bytes (\x00ed), so we try padding with 0, 1
            # and 2 zeros (Python wants the escape sequence to be exactly 4 characters).
            # The UnicodeDecodeError tells us whether a padding worked.
            for i in [0, 1, 2]:
                try:
                    out += '\\u{}{}'.format(
                        '0' * i, chunk).encode().decode('unicode_escape')
                    break
                except UnicodeDecodeError:
                    pass
        return out

    def add_meta(self, meta_line):
        if not meta_line.startswith("#"):
            raise ValueError("Key metas should start with '#'")
        meta_line = meta_line[1:]
        parts = meta_line.split("=")
        if len(parts) == 2:
            key = parts[0]
            value = parts[1]
        elif len(parts) == 1:
            key = parts[0]
            value = None
        else:
            raise ValueError("Invalid meta line '{}'".format(meta_line))
        self.metas[key] = value

    def get_meta(self, name):
        return self.metas.get(name)

    def set_subkey(self, name, value):
        self.subkeys[name] = self.render_value(value)

    def get_subkey(self, name):
        if name not in self.subkeys:
            return None
        value = self.subkeys[name]
        if value.startswith('"') and value.endswith('"'):
            return self.decode_unicode(value[1:-1])
        if value.startswith("dword:"):
            return int(value[6:], 16)
        raise ValueError("Handle %s" % value)
Example 32
class TestParser:
	def __init__(self):
		self.toplevel_ns = Namespace(None, None)
		self.classes = OrderedDict()
		self.namespaces = []
		self.class_names = set()
		self.children = []
	
	def parse(self, data):
		result = toplevel.parseString(data, parseAll=True)
		self.toplevel_ns = Namespace(None, result)
		self.ns = None
		self.walk_namespace(self.toplevel_ns)
		self.resolve_parents()
		return self.toplevel_ns
	
	def walk_namespace(self, ns):
		self.namespaces.append(self.ns)
		if self.ns and ns.name:
			self.ns += "." + ns.name
		else:
			self.ns = ns.name
		for item in ns.members:
			if isinstance(item, Namespace):
				self.walk_namespace(item)
			elif isinstance(item, Class):
				if self.ns:
					item.name = self.ns + "." + item.name
				self.class_names.add(item.name)
				self.classes[item.name] = item
				if item.parent:
					self.children.append((self.ns, item))
		self.ns = self.namespaces.pop()

	def resolve_parents(self):
		for ns, child in self.children:
			ns = ns.split(".") if ns else []
			while True:
				name = ".".join(ns + [child.parent])
				if name in self.class_names:
					child.parent = name
					break
				try:
					ns.pop()
				except IndexError:
					break

	def is_subclass(self, subclass, parent):
		while subclass:
			if subclass.parent == parent:
				return True
			subclass = self.classes.get(subclass.parent)
		return False

	def find_tests(self):
		for klass in self.classes.values():
			if not klass.name.endswith("Test"):
				info("The class %s has been ignored because it lacks the 'Test' suffix." % klass.name)
			elif klass.abstract:
				info("The class %s has been ignored because it is abstract." % klass.name)
			elif klass.access != "public":
				info("The class %s has been ignored because it is not public." % klass.name)
			elif not self.is_subclass(klass, "Drt.TestCase"):
				info("The class %s has been ignored because it is not a Drt.TestCase subclass." % klass.name)
			else:
				methods_found = set()
				base_path = "/" + klass.name.replace(".", "/") + "/"
				for method in self.find_test_methods(klass, methods_found):
					path = base_path + method.name					
					yield (path, klass.name, method.name, method.is_async, method.throws)
	
	def find_test_methods(self, klass, methods_found):
		for method in klass.methods:
			name = method.name
			if name in methods_found:
				pass
			elif not name.startswith("test_"):
				if name not in ("set_up", "tear_down"):
					info("The method %s has been ignored because it lacks the 'test_' prefix." % name)
			elif method.abstract:
				info("The method %s has been ignored because it is abstract." % name)
			elif method.access != "public":
				info("The method %s has been ignored because it is not public." % name)
			elif method.rtype != "void":
				info("The method %s has been ignored because it returns a value." % name)
			else:
				methods_found.add(method.name)
				yield method
		try:
			parent = self.classes[klass.parent]
		except KeyError:
			pass
		else:
			yield from self.find_test_methods(parent, methods_found)
Example 33
    def __call__(path=None,
                 dataset=None,
                 annex=None,
                 untracked='normal',
                 recursive=False,
                 recursion_limit=None,
                 eval_subdataset_state='full',
                 report_filetype='eval'):
        # To the next white knight that comes in to re-implement `status` as a
        # special case of `diff`. There is one fundamental difference between
        # the two commands: `status` can always use the worktree as evident on
        # disk as a constraint (e.g. to figure out which subdataset a path is
        # in); `diff` cannot do that (everything needs to be handled based on a
        # "virtual" representation of a dataset hierarchy).
        # MIH concludes that while `status` can be implemented as a special case
        # of `diff` doing so would complicate and slow down both `diff` and
        # `status`. So while the apparent almost code-duplication between the
        # two commands feels wrong, the benefit is speed. Any future RF should
        # come with evidence that speed does not suffer, and complexity stays
        # on a manageable level
        ds = require_dataset(dataset,
                             check_installed=True,
                             purpose='status reporting')

        paths_by_ds = OrderedDict()
        if path:
            # sort any path argument into the respective subdatasets
            for p in sorted(map(assure_unicode, assure_list(path))):
                # it is important to capture the exact form of the
                # given path argument, before any normalization happens
                # for further decision logic below
                orig_path = str(p)
                p = resolve_path(p, dataset)
                root = get_dataset_root(str(p))
                if root is None:
                    # no root, not possibly underneath the refds
                    yield dict(action='status',
                               path=p,
                               refds=ds.path,
                               status='error',
                               message='path not underneath this dataset',
                               logger=lgr)
                    continue
                else:
                    if dataset and root == str(p) and \
                            not (orig_path.endswith(op.sep) or
                                 orig_path == "."):
                        # the given path is pointing to a dataset
                        # distinguish rsync-link syntax to identify
                        # the dataset as whole (e.g. 'ds') vs its
                        # content (e.g. 'ds/')
                        super_root = get_dataset_root(op.dirname(root))
                        if super_root:
                            # the dataset identified by the path argument
                            # is contained in a superdataset, and no
                            # trailing path separator was found in the
                            # argument -> user wants to address the dataset
                            # as a whole (in the superdataset)
                            root = super_root

                root = ut.Path(root)
                ps = paths_by_ds.get(root, [])
                ps.append(p)
                paths_by_ds[root] = ps
        else:
            paths_by_ds[ds.pathobj] = None

        queried = set()
        content_info_cache = {}
        while paths_by_ds:
            qdspath, qpaths = paths_by_ds.popitem(last=False)
            if qpaths and qdspath in qpaths:
                # this is supposed to be a full query, save some
                # cycles sifting through the actual path arguments
                qpaths = []
            # try to recode the dataset path wrt to the reference
            # dataset
            # the path that it might have been located by could
            # have been a resolved path or another funky thing
            qds_inrefds = path_under_rev_dataset(ds, qdspath)
            if qds_inrefds is None:
                # nothing we support handling any further
                # there is only a single refds
                yield dict(
                    path=str(qdspath),
                    refds=ds.path,
                    action='status',
                    status='error',
                    message=(
                        "dataset containing given paths is not underneath "
                        "the reference dataset %s: %s", ds, qpaths),
                    logger=lgr,
                )
                continue
            elif qds_inrefds != qdspath:
                # the path this dataset was located by is not how it would
                # be referenced underneath the refds (possibly resolved
                # realpath) -> recode all paths to be underneath the refds
                qpaths = [qds_inrefds / p.relative_to(qdspath) for p in qpaths]
                qdspath = qds_inrefds
            if qdspath in queried:
                # do not report on a single dataset twice
                continue
            qds = Dataset(str(qdspath))
            for r in _yield_status(
                    qds, qpaths, annex, untracked, recursion_limit
                    if recursion_limit is not None else -1 if recursive else 0,
                    queried, eval_subdataset_state, report_filetype == 'eval',
                    content_info_cache):
                yield dict(
                    r,
                    refds=ds.path,
                    action='status',
                    status='ok',
                )
Example 34
class Tree_elem(Generic_elem):
    def __init__(self,
                 config_dict,
                 path,
                 name=None,
                 args=[],
                 config_name=None):
        super(Tree_elem, self).__init__(path)
        self.props = OrderedDict()
        self.name = name
        self.config_name = config_name

        dump = False

        if 'soc' in config_dict.keys():
            dump = True

        for key, value in config_dict.items():

            if key == 'includes':
                for inc in value:
                    tree = get_config_tree_from_file(find_config(
                        inc, self.path),
                                                     args=args)
                    self.merge(tree)

            elif key == 'includes_eval':
                config = self
                for inc in value:
                    tree = get_config_tree_from_file(find_config(
                        eval(inc), self.path),
                                                     args=args)
                    self.merge(tree)

            else:

                child_args = []
                for arg in args:
                    if arg[0][0] == '*' or arg[0][0] == key:
                        child_args.append([arg[0][1:], arg[1]])
                    elif (arg[0][0] in ['*', '**']) and (arg[0][1] == key):
                        child_args.append([arg[0][2:], arg[1]])
                    elif arg[0][0] == '**':
                        child_args.append(arg)

                set_prop = False

                for arg in args:
                    if (len(arg[0]) == 1 and arg[0][0] == key) or (
                            len(arg[0]) == 2 and arg[0][0] in ['*', '**']
                            and arg[0][1] == key):
                        self.set_prop(key, Value_elem(arg[1]))
                        set_prop = True

                if not set_prop:
                    self.set_prop(key, self.get_tree(value, args=child_args))

    def dump_doc_internal(self,
                          dump_regs=False,
                          dump_regs_fields=False,
                          header=None):
        regmap_conf = self.props.get('regmap')
        if regmap_conf is not None:
            regmap.Regmap(regmap_conf.get_dict()).dump_memmap(
                dump_regs=dump_regs,
                dump_regs_fields=dump_regs_fields,
                header=header)
        else:
            for elem in self.props.values():
                elem.dump_doc_internal(dump_regs=dump_regs,
                                       dump_regs_fields=dump_regs_fields,
                                       header=header)

    def browse(self, callback, *kargs, **kwargs):
        callback(self, *kargs, **kwargs)
        for key, value in self.props.items():
            value.browse(callback, *kargs, **kwargs)

    def __str__(self):
        return self.get_config_name()

    def dump_help(self, name=None, root=None):
        prop_help = self.props.get('help')
        if prop_help is not None:
            print('')
            print('  ' + name + ' group:')
            for key in prop_help.keys():
                full_name = key
                if name is not None:
                    full_name = '%s/%s' % (name, key)
                print('    %-40s %s' % (full_name, prop_help.get(key)))

        for key, prop in self.props.items():
            full_name = key
            if name is not None:
                full_name = '%s/%s' % (name, key)
            prop.dump_help(name=full_name)

    def get_config_name(self):
        if self.config_name is not None:
            return self.config_name
        else:
            return self.name

    def keys(self):
        return self.props.keys()

    def items(self):
        return self.props.items()

    def get_name(self, nice=False):
        if not nice: return self.name
        return self.name.replace('=', '.').replace(':', '_')

    def set_prop(self, key, value):
        if key in self.props:
            self.props.get(key).merge(value)
        else:
            self.props[key] = value
            self.__dict__[key] = value

    def merge(self, tree):
        for key, value in tree.props.items():
            if self.props.get(key) is not None:
                self.props.get(key).merge(value)
            else:
                self.set_prop(key, value)

    def get_int(self, name):
        value = self.get(name)
        if isinstance(value, str): return int(value, 0)
        else: return value

    def get_bool(self, name):
        value = self.get(name)
        if isinstance(value, str): return value in ("True", "true")
        elif value is None:
            return False
        else:
            return value

    def get_config(self, name):
        return self.get(name, tree=True)

    def get_prop(self, name):
        return self.props.get(name)

    def get(self, name, rec=False, tree=False):

        # TODO: this is kept for compatibility with old sources.
        # Get rid of it as soon as everything is ported to the new flow.
        if name == 'pulpChip': name = 'pulp_chip'
        elif name == 'pulpChipFamily': name = 'pulp_chip_family'
        elif name == 'pulpChipVersion': name = 'pulp_chip_version'
        elif name == 'pulpCompiler': name = 'pulp_compiler'
        elif name == 'pulpRtVersin': name = 'pulp_rt_version'
        elif name == 'pulpCoreArchi': name = 'pe/version'
        elif name == 'pulpCoreFamily': name = 'pe/archi'
        elif name == 'pulpFcCoreArchi': name = 'fc/version'
        elif name == 'pulpFcCoreFamily': name = 'fc/archi'
        elif name == 'stackSize': name = 'stack_size'
        elif name == 'fcStackSize': name = 'fc_stack_size'

        if name is None:
            if tree: return self
            elif rec: return self.get_dict()
            else:
                default = self.props.get('default')
                if default is not None:
                    return default.value
                else:
                    return list(self.props.keys())[0]
        else:
            name_list = name.split('/')
            parent_name = name_list[0]
            elem = self.props.get(parent_name)
            if elem is None:
                for prop in self.props.values():
                    value = prop.get(name, rec, tree)
                    if value is not None: return value
                return None
            else:
                child_name = None if len(name_list) == 1 else '/'.join(
                    name_list[1:])
                return elem.get(child_name, rec, tree)

    def __set(self, name, value, set_first):
        if name is None:
            # In case name is None, it means we are in the item where the property must be set
            # As we are a dictionary, this means removing all items except the one specified
            keys = list(self.props.keys())
            is_set = False
            for prop_name in keys:
                if prop_name != value:
                    del self.props[prop_name]
                    is_set = True
            return is_set
        else:
            # We haven't reached yet the point in the hierarchy where the property must be set
            # Either the property name matches one of the properties and then only this one is set
            # otherwise we propagate to all properties
            name_list = name.split('/')
            parent_name = name_list[0]
            elem = self.props.get(parent_name)
            if elem is None:
                is_set = False

                if set_first and len(name_list) == 1:
                    is_set = True
                    self.set_prop(parent_name, self.get_tree(value))

                else:
                    # Propagate to every child; keep the recursive call first
                    # so `or` short-circuiting cannot skip children once
                    # is_set becomes True.
                    for prop in self.props.values():
                        is_set = prop.set(
                            name, value, set_first=set_first) or is_set

                return is_set
            else:
                child_name = None if len(name_list) == 1 else '/'.join(
                    name_list[1:])
                return elem.set(child_name, value, set_first=set_first)

    def set(self, name, value, set_first=None):
        if set_first is not None:
            self.__set(name, value, set_first=set_first)
        else:
            if not self.__set(name, value, set_first=False):
                self.__set(name, value, set_first=True)

    def get_dict(self, serialize=True):
        result = OrderedDict()
        for key, value in self.props.items():
            result[key] = value.get_dict(serialize=serialize)
        return result

    def dump_to_file(self, file, root=None):
        if root is not None: tree_dict = self.get(root, rec=True)
        else: tree_dict = self.get_dict()
        file.write(json.dumps(tree_dict, indent='  '))

    def get_name_from_items(self, items):
        result = []
        for item in items:
            value = self.get(item)
            if value is None: continue
            result.append("%s=%s" % (item, value))
        return ":".join(result)
Example no. 35
class Vexrc(object):
    """Parsed representation of a .vexrc config file.
    """
    default_heading = "root"
    default_encoding = "utf-8"

    def __init__(self):
        self.encoding = self.default_encoding
        self.headings = OrderedDict()
        self.headings[self.default_heading] = OrderedDict()
        self.headings['env'] = OrderedDict()

    def __getitem__(self, key):
        return self.headings.get(key)

    @classmethod
    def from_file(cls, path, environ):
        """Make a Vexrc instance from given file in given environ.
        """
        instance = cls()
        instance.read(path, environ)
        return instance

    def read(self, path, environ):
        """Read data from file into this vexrc instance.
        """
        try:
            inp = open(path, 'rb')
        except FileNotFoundError as error:
            if error.errno != 2:
                raise
            return None
        parsing = parse_vexrc(inp, environ)
        for heading, key, value in parsing:
            heading = self.default_heading if heading is None else heading
            if heading not in self.headings:
                self.headings[heading] = OrderedDict()
            self.headings[heading][key] = value
        parsing.close()

    def get_ve_base(self, environ):
        """Find a directory to look for virtualenvs in.
        """
        # set ve_base to a path we can look for virtualenvs:
        # 1. .vexrc
        # 2. WORKON_HOME (as defined for virtualenvwrapper's benefit)
        # 3. $HOME/.virtualenvs
        # (unless we got --path, then we don't need it)
        ve_base_value = self.headings[self.default_heading].get('virtualenvs')
        if ve_base_value:
            ve_base = os.path.expanduser(ve_base_value)
        else:
            ve_base = environ.get('WORKON_HOME', '')
        if not ve_base:
            # On Cygwin os.name == 'posix' and we want $HOME.
            if platform.system() == 'Windows' and os.name == 'nt':
                _win_drive = environ.get('HOMEDRIVE')
                home = environ.get('HOMEPATH', '')
                if home:
                    home = os.path.join(_win_drive, home)
            else:
                home = environ.get('HOME', '')
            if not home:
                home = os.path.expanduser('~')
            if not home:
                return ''
            ve_base = os.path.join(home, '.virtualenvs')
        # pass through invalid paths so messages can be generated
        # if not os.path.exists(ve_base) or os.path.isfile(ve_base):
        # return ''
        return ve_base or ''

    def get_shell(self, environ):
        """Find a command to run.
        """
        command = self.headings[self.default_heading].get('shell')
        if not command and os.name != 'nt':
            command = environ.get('SHELL', '')
        command = shlex.split(command) if command else None
        return command
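
For clarity, the lookup order implemented by get_ve_base above can be reduced to a small standalone function. resolve_ve_base below is a hypothetical simplification (it omits the Windows HOMEDRIVE/HOMEPATH branch), not the actual vex API:

import os

def resolve_ve_base(vexrc_value, environ):
    # 1. explicit 'virtualenvs' entry from .vexrc
    if vexrc_value:
        return os.path.expanduser(vexrc_value)
    # 2. WORKON_HOME (the virtualenvwrapper convention)
    ve_base = environ.get('WORKON_HOME', '')
    if ve_base:
        return ve_base
    # 3. $HOME/.virtualenvs
    home = environ.get('HOME', '') or os.path.expanduser('~')
    return os.path.join(home, '.virtualenvs') if home else ''

assert resolve_ve_base(None, {'WORKON_HOME': '/opt/ve'}) == '/opt/ve'
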
Example no. 36
def overlayFeatureVariations(conditionalSubstitutions):
    """Compute overlaps between all conditional substitutions.

    The `conditionalSubstitutions` argument is a list of (Region, Substitutions)
    tuples.

    A Region is a list of Boxes. A Box is a dict mapping axisTags to
    (minValue, maxValue) tuples. Irrelevant axes may be omitted and they are
    interpreted as extending to the end of the axis in each direction.  A Box represents
    an orthogonal 'rectangular' subset of an N-dimensional design space.
    A Region represents a more complex subset of an N-dimensional design space,
    ie. the union of all the Boxes in the Region.
    For efficiency, Boxes within a Region should ideally not overlap, but
    functionality is not compromised if they do.

    The minimum and maximum values are expressed in normalized coordinates.

    A Substitution is a dict mapping source glyph names to substitute glyph names.

    Returned data is in a similar but different format.  Overlaps of distinct
    substitution Boxes (*not* Regions) are explicitly listed as distinct rules,
    and rules with the same Box are merged.  The more specific rules appear earlier
    in the resulting list.  Moreover, instead of just a dictionary of substitutions,
    a list of dictionaries is returned for substitutions corresponding to each
    unique space, with each dictionary being identical to one of the input
    substitution dictionaries.  These dictionaries are not merged to allow data
    sharing when they are converted into font tables.

    Example:
    >>> condSubst = [
    ...     # A list of (Region, Substitution) tuples.
    ...     ([{"wght": (0.5, 1.0)}], {"dollar": "dollar.rvrn"}),
    ...     ([{"wght": (0.5, 1.0)}], {"dollar": "dollar.rvrn"}),
    ...     ([{"wdth": (0.5, 1.0)}], {"cent": "cent.rvrn"}),
    ... ]
    >>> from pprint import pprint
    >>> pprint(overlayFeatureVariations(condSubst))
    [({'wdth': (0.5, 1.0), 'wght': (0.5, 1.0)},
      [{'dollar': 'dollar.rvrn'}, {'cent': 'cent.rvrn'}]),
     ({'wdth': (0.5, 1.0)}, [{'cent': 'cent.rvrn'}]),
     ({'wght': (0.5, 1.0)}, [{'dollar': 'dollar.rvrn'}])]
    """

    # Merge same-substitution rules, as this creates fewer lookups.
    merged = OrderedDict()
    for value, key in conditionalSubstitutions:
        key = hashdict(key)
        if key in merged:
            merged[key].extend(value)
        else:
            merged[key] = value
    conditionalSubstitutions = [(v, dict(k)) for k, v in merged.items()]
    del merged

    # Merge same-region rules, as this is cheaper.
    # Also convert boxes to hashdict()
    #
    # Reversing is such that earlier entries win in case of conflicting substitution
    # rules for the same region.
    merged = OrderedDict()
    for key, value in reversed(conditionalSubstitutions):
        key = tuple(
            sorted((hashdict(cleanupBox(k)) for k in key),
                   key=lambda d: tuple(sorted(d.items()))))
        if key in merged:
            merged[key].update(value)
        else:
            merged[key] = dict(value)
    conditionalSubstitutions = list(reversed(merged.items()))
    del merged

    # Overlay
    #
    # Rank is the bit-set of the index of all contributing layers.
    initMapInit = ((hashdict(), 0),
                   )  # Initializer representing the entire space
    boxMap = OrderedDict(initMapInit)  # Map from Box to Rank
    for i, (currRegion, _) in enumerate(conditionalSubstitutions):
        newMap = OrderedDict(initMapInit)
        currRank = 1 << i
        for box, rank in boxMap.items():
            for currBox in currRegion:
                intersection, remainder = overlayBox(currBox, box)
                if intersection is not None:
                    intersection = hashdict(intersection)
                    newMap[intersection] = newMap.get(intersection,
                                                      0) | rank | currRank
                if remainder is not None:
                    remainder = hashdict(remainder)
                    newMap[remainder] = newMap.get(remainder, 0) | rank
        boxMap = newMap
    del boxMap[hashdict()]

    # Generate output
    items = []
    for box, rank in sorted(boxMap.items(),
                            key=(lambda BoxAndRank: -popCount(BoxAndRank[1]))):
        substsList = []
        i = 0
        while rank:
            if rank & 1:
                substsList.append(conditionalSubstitutions[i][1])
            rank >>= 1
            i += 1
        items.append((dict(box), substsList))
    return items
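
The overlay loop above relies on two helpers defined elsewhere in fontTools: popCount (the number of set bits in a rank) and hashdict (a dict usable as a dictionary key). Minimal sketches of both, assuming they behave as their names suggest:

def popCount(v):
    # Number of set bits; equivalent to int.bit_count() on Python 3.10+.
    return bin(v).count("1")

class hashdict(dict):
    # A hashable dict keyed by its sorted items. Sketch only: mutating an
    # instance after it has been used as a key breaks the hash invariant.
    def __hash__(self):
        return hash(tuple(sorted(self.items())))

assert popCount(0b1011) == 3
assert hash(hashdict(a=1)) == hash(hashdict(a=1))
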
Example no. 37
class EventListener(object):
    """``Session``, ``Call`` and ``Job`` tracking through a default set of
    event handlers.

    Tracks various session entities by wrapping received event data in local
    ``models`` APIs and/or data structures. Serves as a higher level API on
    top of the underlying event loop.
    """
    def __init__(
            self,
            event_loop,
            call_tracking_header='variable_call_uuid',
            max_limit=float('inf'),
    ):
        """
        :param str call_tracking_header:
            Name of the freeswitch variable (including the 'variable_' prefix)
            to use for associating sessions into tracked calls
            (see `_handle_create`).

            It is common to set this to an Xheader variable if attempting
            to track calls "through" an intermediary device (i.e. the first
            hop receiving requests) such as a B2BUA.

            NOTE: in order for this association mechanism to work the
            intermediary device must be configured to forward the Xheaders
            it receives.
        """
        self.event_loop = event_loop
        self.sessions = OrderedDict()
        self.log = utils.get_logger(utils.pstr(self))
        # store last 1k of each type of failed session
        self.failed_sessions = OrderedDict()
        self.bg_jobs = OrderedDict()
        self.calls = OrderedDict()  # maps aleg uuids to Sessions instances
        self.hangup_causes = Counter()  # record of causes by category
        self.sessions_per_app = Counter()
        self.max_limit = max_limit
        self.call_tracking_header = call_tracking_header
        # state reset
        self.reset()

        # add default handlers
        for evname, cbtype, cb in get_callbacks(self, only='handler'):
            self.event_loop.add_handler(evname, cb)

    def register_job(self, future, **kwargs):
        '''Register for a job to be handled when the appropriate event arrives.
        Once an event corresponding to the job is received, the bgjob event
        handler will 'consume' it and invoke its callback.

        Parameters
        ----------
        future : Future
            the future wrapping the response of an ESLConnection.bgapi call
        kwargs : dict
            same signature as for Job.__init__

        Returns
        -------
        bj : an instance of Job (a background job)
        '''
        bj = Job(future, **kwargs)
        self.bg_jobs[bj.uuid] = bj
        return bj

    def count_jobs(self):
        return len(self.bg_jobs)

    def count_sessions(self):
        return len(self.sessions)

    def count_calls(self):
        '''Count the number of active calls hosted by the slave process
        '''
        return len(self.calls)

    def count_failed(self):
        '''Return the failed session count
        '''
        return sum(self.hangup_causes.values()
                   ) - self.hangup_causes['NORMAL_CLEARING']

    def reset(self):
        '''Clear all internal stats and counters
        '''
        self.log.debug('resetting all stats...')
        self.hangup_causes.clear()
        self.failed_jobs = Counter()
        self.total_answered_sessions = 0

    @handler('CHANNEL_HANGUP')
    @handler('CHANNEL_PARK')
    @handler('CALL_UPDATE')
    def lookup_sess(self, e):
        """The most basic handler template which looks up the locally tracked
        session corresponding to event `e` and updates it with event data
        """
        sess = self.sessions.get(e.get('Unique-ID'), False)
        if sess:
            sess.update(e)
            return True, sess
        return False, None

    def lookup_sess_and_job(self, e):
        """Look up and return the session and any corresponding background job.
        """
        consumed, sess = self.lookup_sess(e)
        if consumed:
            return True, sess, sess.bg_job
        return False, None, None

    @handler('LOG')
    def _handle_log(self, e):
        self.log.info(e.get('Body'))
        return True, None

    @handler('SERVER_DISCONNECTED')
    def _handle_disconnect(self, e):
        """Log disconnects.
        """
        self.log.warning("Received DISCONNECT from server '{}'".format(
            self.host))
        return True, None

    @handler('BACKGROUND_JOB')
    def _handle_bj(self, e):
        '''Handle bjs and report failures.
        If a job is found in the local cache then update the instance
        with event data.
        This handler returns 'None' on error (i.e. failed bj)
        which must be handled by any callbacks.
        '''
        error = False
        consumed = False
        resp = None
        sess = None
        ok = '+OK '
        err = '-ERR'
        job_uuid = e.get('Job-UUID')
        body = e.get('Body')

        # always report errors even for jobs which we aren't tracking
        if err in body:
            resp = body.strip(err).strip()
            error = True
            self.log.debug("job '{}' failed with:\n{}".format(
                job_uuid, str(body)))
        elif ok in body:
            resp = body.strip(ok + '\n')

        job = self.bg_jobs.get(job_uuid, None)
        job_just_created = False
        if not job:
            job = Job(event=e)
            job_just_created = True
        else:
            job.events.update(e)

        # attempt to lookup an associated session
        sess = self.sessions.get(job.sess_uuid or resp, None)

        if error:
            # if the job returned an error, report it and remove the job
            self.log.error("Job '{}' corresponding to session '{}'"
                           " failed with:\n{}".format(job_uuid, job.sess_uuid,
                                                      str(body)))
            job.fail(resp)  # fail the job
            if not job_just_created:
                # always pop failed jobs
                self.bg_jobs.pop(job_uuid)
            # append the id for later lookup and discard?
            self.failed_jobs[resp] += 1
            consumed = True

        else:  # OK case
            if sess:
                # special case: the bg job event returns a known originated
                # session's (i.e. pre-registered) uuid in its body
                if job.sess_uuid:

                    assert str(job.sess_uuid) == str(resp), \
                        ("""Session uuid '{}' <-> BgJob uuid '{}' mismatch!?
                         """.format(job.sess_uuid, resp))

                # reference this job in the corresponding session
                # self.sessions[resp].bg_job = job
                sess.bg_job = job
                self.log.debug("Job '{}' was sucessful".format(job_uuid))
                consumed = True
            else:
                self.log.warning(
                    "No session corresponding to bj '{}'".format(job_uuid))

            # run the job's callback
            job(resp)

        return consumed, sess, job

    @handler('CHANNEL_CREATE')
    @handler('CHANNEL_ORIGINATE')
    def _handle_initial_event(self, e):
        '''Handle channel create events by building local
        `Session` and `Call` objects for state tracking.
        '''
        uuid = e.get('Unique-ID')
        # Record the newly activated session
        # TODO: pass con as weakref?
        con = self.event_loop._con

        # short circuit if we have already allocated a session since FS is
        # indeterminate about which event create|originate will arrive first
        sess = self.sessions.get(uuid)
        if sess:
            return True, sess

        # allocate a session model
        sess = Session(e, event_loop=self.event_loop, uuid=uuid, con=con)
        direction = sess['Call-Direction']
        self.log.debug("{} session created with uuid '{}'".format(
            direction, uuid))
        sess.cid = self.event_loop.get_id(e, 'default')

        # Use our specified "call identification variable" to try and associate
        # sessions into calls. By default the 'variable_call_uuid' channel
        # variable is used for tracking locally bridged calls
        call_uuid = e.get(self.call_tracking_header)  # could be 'None'
        if not call_uuid:
            self.log.warning(
                "Unable to associate {} session '{}' with a call using "
                "variable '{}'".format(direction, sess.uuid,
                                       self.call_tracking_header))
            call_uuid = uuid

        # associate sessions into a call
        # (i.e. set the relevant sessions to reference each other)
        if call_uuid in self.calls:
            call = self.calls[call_uuid]
            self.log.debug("session '{}' is bridged to call '{}'".format(
                uuid, call.uuid))
            # append this session to the call's set
            call.append(sess)

        else:  # this sess is not yet tracked so use its id as the 'call' id
            call = Call(call_uuid, sess)
            self.calls[call_uuid] = call
            self.log.debug("call created for session '{}'".format(call_uuid))
        sess.call = call
        self.sessions[uuid] = sess
        self.sessions_per_app[sess.cid] += 1
        return True, sess

    @handler('CHANNEL_ANSWER')
    def _handle_answer(self, e):
        '''Handle answer events

        Returns
        -------
        sess : session instance corresponding to uuid
        '''
        uuid = e.get('Unique-ID')
        sess = self.sessions.get(uuid, None)
        if sess:
            self.log.debug("answered {} session '{}'".format(
                e.get('Call-Direction'), uuid))
            sess.answered = True
            self.total_answered_sessions += 1
            sess.update(e)
            return True, sess
        else:
            self.log.warning('Skipping answer of {}'.format(uuid))
            return False, None

    @handler('CHANNEL_DESTROY')
    # @handler('CHANNEL_HANGUP_COMPLETE')  # XXX: a race between these two...
    def _handle_destroy(self, e):
        '''Handle channel destroy events.

        Returns
        -------
        sess : session instance corresponding to uuid
        job  : corresponding bj for a session if exists, ow None
        '''
        uuid = e.get('Unique-ID')
        sess = self.sessions.pop(uuid, None)
        direction = sess['Call-Direction'] if sess else 'unknown'
        if not sess:
            return False, None
        sess.update(e)
        sess.hungup = True
        cause = e.get('Hangup-Cause')
        self.hangup_causes[cause] += 1  # count session causes
        self.sessions_per_app[sess.cid] -= 1

        # if possible lookup the relevant call
        call_uuid = e.get(self.call_tracking_header)
        if not call_uuid:
            self.log.warning(
                "handling HANGUP for {} session '{}' which can not be "
                "associated with an active call using {}?".format(
                    direction, sess.uuid, self.call_tracking_header))
            call_uuid = uuid

        # XXX seems like sometimes FS changes the `call_uuid`
        # between create and hangup oddly enough
        call = self.calls.get(call_uuid, sess.call)
        if call:
            if sess in call.sessions:
                self.log.debug("hungup {} session '{}' for Call '{}'".format(
                    direction, uuid, call.uuid))
                call.sessions.remove(sess)
            else:
                # session was somehow tracked by the wrong call
                self.log.err("session '{}' mismatched with call '{}'?".format(
                    sess.uuid, call.uuid))

            # all sessions hungup
            if len(call.sessions) == 0:
                self.log.debug(
                    "all sessions for call '{}' were hung up".format(
                        call_uuid))
                # remove call from our set; keep a reference to the original
                # call so the log message below can still access its uuid
                popped = self.calls.pop(call.uuid, None)
                if popped is None:
                    self.log.warning(
                        "Call with id '{}' containing Session '{}' was "
                        "already removed".format(call.uuid, sess.uuid))
        else:
            # we should never get hangups for calls we never saw created
            self.log.err("no call found for '{}'".format(call_uuid))

        # pop any corresponding job
        job = sess.bg_job
        # may have been popped by the partner
        self.bg_jobs.pop(job.uuid if job else None, None)
        sess.bg_job = None  # deref job - avoid mem leaks

        if not sess.answered or cause != 'NORMAL_CLEARING':
            self.log.debug("'{}' was not successful??".format(sess.uuid))
            self.failed_sessions.setdefault(cause,
                                            deque(maxlen=1000)).append(sess)

        self.log.debug("hungup Session '{}'".format(uuid))

        # hangups are always consumed
        return True, sess, job

    @property
    def host(self):
        return self.event_loop.host

    @property
    def port(self):
        return self.event_loop.port

    def is_alive(self):
        return self.event_loop.is_alive()

    def is_running(self):
        return self.event_loop.is_running()

    def connect(self, **kwargs):
        return self.event_loop.connect(**kwargs)

    def connected(self):
        return self.event_loop.connected()

    def start(self):
        return self.event_loop.start()

    def disconnect(self, **kwargs):
        return self.event_loop.disconnect(**kwargs)

    def unsubscribe(self, evname):
        return self.event_loop.unsubscribe(evname)
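
The @handler(...) decorators above only tag methods with event names; the event loop later discovers them via get_callbacks. A minimal sketch of that tagging pattern (names are illustrative; switchy's real implementation may differ):

import inspect

def handler(event_name):
    # Attach the event name to the function so it can be discovered later.
    def decorator(func):
        names = getattr(func, '_handled_events', set())
        names.add(event_name)
        func._handled_events = names
        return func
    return decorator

def iter_handlers(obj):
    # Yield (event_name, bound_method) pairs for every tagged method.
    for _, method in inspect.getmembers(obj, inspect.ismethod):
        for evname in getattr(method, '_handled_events', ()):
            yield evname, method
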
Example no. 39
class Section:
    """
    This class holds a set of settings.
    """
    @staticmethod
    def __prepare_key(key):
        return str(key).lower().strip()

    def __init__(self, name, defaults=None):
        if defaults is not None and not isinstance(defaults, Section):
            raise TypeError("defaults has to be a Section object or None.")
        if defaults is self:
            raise ValueError("defaults may not be self for non-recursivity.")

        self.name = str(name)
        self.defaults = defaults
        self.contents = OrderedDict()

    def bear_dirs(self):
        bear_dirs = path_list(self.get("bear_dirs", ""))
        for bear_dir in bear_dirs:
            sys.path.append(bear_dir)
        bear_dirs = [os.path.join(bear_dir, "**") for bear_dir in bear_dirs]
        bear_dirs += [
            os.path.join(bear_dir, "**")
            for bear_dir in collect_registered_bears_dirs('coalabears')
        ]
        return bear_dirs

    def is_enabled(self, targets):
        """
        Checks if this section is enabled or, if targets is not empty, if it is
        included in the targets list.

        :param targets: List of target section names, all lower case.
        :return:        True or False
        """
        if len(targets) == 0:
            return bool(self.get("enabled", "true"))

        return self.name.lower() in targets

    def append(self, setting, custom_key=None):
        if not isinstance(setting, Setting):
            raise TypeError
        if custom_key is None:
            key = self.__prepare_key(setting.key)
        else:
            key = self.__prepare_key(custom_key)

        # Setting asserts key != "" for us
        self.contents[key] = setting

    def add_or_create_setting(self,
                              setting,
                              custom_key=None,
                              allow_appending=True):
        """
        Adds the value of the setting to an existing setting if there is
        already a setting  with the key. Otherwise creates a new setting.
        """
        if custom_key is None:
            key = setting.key
        else:
            key = custom_key

        if self.__contains__(key, ignore_defaults=True) and allow_appending:
            val = self[key]
            val.value = str(val.value) + "\n" + setting.value
        else:
            self.append(setting, custom_key=key)

    @enforce_signature
    def __setitem__(self, key: str, value: (str, Setting)):
        """
        Creates a Setting object from the given value if needed and assigns the
        setting to the key:

        >>> section = Section('section_name')
        >>> section['key'] = 'value'
        >>> section['key'].value
        'value'

        :param key:   Argument whose value is to be set
        :param value: The value of the given key
        :return:      Returns nothing.
        """
        if isinstance(value, Setting):
            self.append(value, custom_key=key)
        else:  # It must be a string since signature is enforced
            self.append(Setting(key, value))

    def __iter__(self, ignore_defaults=False):
        joined = self.contents.copy()
        if self.defaults is not None and not ignore_defaults:
            # Since we only return the iterator of joined (which doesn't
            # contain values) it's ok to override values here
            joined.update(self.defaults.contents)

        return iter(joined)

    def __contains__(self, item, ignore_defaults=False):
        try:
            self.__getitem__(item, ignore_defaults)

            return True
        except IndexError:
            return False

    def __getitem__(self, item, ignore_defaults=False):
        key = self.__prepare_key(item)
        if key == "":
            raise IndexError("Empty keys are invalid.")

        res = self.contents.get(key, None)
        if res is not None:
            return res

        if self.defaults is None or ignore_defaults:
            raise IndexError("Required index is unavailable.")

        return self.defaults[key]

    def __str__(self):
        value_list = ", ".join(key + " : " + repr(str(self.contents[key]))
                               for key in self.contents)
        return self.name + " {" + value_list + "}"

    def get(self, key, default="", ignore_defaults=False):
        """
        Retrieves the item without raising an exception. If the item is not
        available an appropriate Setting will be generated from your provided
        default value.

        :param key:             The key of the setting to return.
        :param default:         The default value
        :param ignore_defaults: Whether or not to ignore the default section.
        :return:                The setting.
        """
        try:
            return self.__getitem__(key, ignore_defaults)
        except IndexError:
            return Setting(key, str(default))

    def copy(self):
        """
        :return: a deep copy of this object
        """
        result = copy.copy(self)
        result.contents = copy.deepcopy(self.contents)
        if self.defaults is not None:
            result.defaults = self.defaults.copy()

        return result

    def update(self, other_section, ignore_defaults=False):
        """
        Incorporates all keys and values from the other section into this one.
        Values from the other section override the ones from this one.

        Default values from the other section override the default values from
        this only.

        :param other_section:   Another Section
        :param ignore_defaults: If set to true, do not take default values from
                                other
        :return:                self
        """
        if not isinstance(other_section, Section):
            raise TypeError("other_section has to be a Section")

        self.contents.update(other_section.contents)

        if not ignore_defaults and other_section.defaults is not None:
            if self.defaults is None:
                self.defaults = other_section.defaults.copy()
            else:
                self.defaults.update(other_section.defaults)

        return self

    def update_setting(self, key, new_key=None, new_value=None):
        """
        Updates a setting with new values.
        :param key:       The old key string.
        :param new_key:   The new key string.
        :param new_value: The new value for the setting
        """
        if new_key is not None:
            self.contents[key].key = new_key
            self.contents = update_ordered_dict_key(self.contents, key,
                                                    new_key)
        if new_value is not None:
            if new_key is not None:
                self.contents[new_key].value = new_value
            else:
                self.contents[key].value = new_value

    def delete_setting(self, key):
        """
        Delete a setting
        :param key: The key of the setting to be deleted
        """
        del self.contents[key]
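
update_setting above relies on an update_ordered_dict_key helper imported elsewhere in coala. A plausible sketch, assuming its job is to rename a key while preserving its position in the OrderedDict:

from collections import OrderedDict

def update_ordered_dict_key(dictionary, old_key, new_key):
    # Rebuild the mapping so the renamed key keeps its original slot.
    return OrderedDict(
        (new_key if key == old_key else key, value)
        for key, value in dictionary.items())

d = OrderedDict([('a', 1), ('b', 2), ('c', 3)])
assert list(update_ordered_dict_key(d, 'b', 'B')) == ['a', 'B', 'c']
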
Example no. 40
class LolGraph:

    def __init__(self, directed=True, weighted=True):
        self._index_list = [0]
        self._neighbors_list = []
        self._weights_list = []
        self._map_node_to_number = OrderedDict()
        self._map_number_to_node = OrderedDict()
        self.directed = directed
        self.weighted = weighted

    def is_directed(self):
        return self.directed

    def is_weighted(self):
        return self.weighted

    def number_of_edges(self):
        if self.is_directed():
            return len(self._neighbors_list)
        return len(self._neighbors_list) // 2

    def number_of_nodes(self):
        return len(self._index_list) - 1

    def copy(self):
        new_lol_graph = LolGraph()
        new_lol_graph._index_list = self._index_list.copy()
        new_lol_graph._neighbors_list = self._neighbors_list.copy()
        new_lol_graph._weights_list = self._weights_list.copy()
        new_lol_graph._map_node_to_number = self._map_node_to_number.copy()
        new_lol_graph._map_number_to_node = self._map_number_to_node.copy()
        new_lol_graph.directed = self.directed
        new_lol_graph.weighted = self.weighted
        return new_lol_graph

    # outdegree
    def out_degree(self, node):
        number = self._map_node_to_number[node]
        idx = self._index_list[number]
        idx_end = self._index_list[number + 1]
        if self.is_weighted():
            weights_list = self._weights_list[idx: idx_end]
            return sum(weights_list)
        else:
            return idx_end - idx

    # Iterative Binary Search Function
    # It returns index of x in given array arr if present,
    # else returns -1
    def binary_search(self, arr, x):
        low = 0
        high = len(arr) - 1
        mid = 0

        while low <= high:
            mid = (high + low) // 2

            # Check if x is present at mid
            if arr[mid] < x:
                low = mid + 1

            # If x is greater, ignore left half
            elif arr[mid] > x:
                high = mid - 1

            # If x is smaller, ignore right half
            else:
                return mid

        # If we reach here, then the element was not present
        return -1

    def nodes_binary_search(self, arr, x):
        # num = self._map_node_to_number[x]
        # arr_num = [self._map_node_to_number[node] for node in arr]
        # num_found_index = self.binary_search(arr_num, num)
        # return num_found_index
        if x in arr:
            return arr.index(x)
        return -1

    # # indegree
    # def in_degree(self, node):
    #     sum = 0
    #     for node_from in self.nodes():
    #         if self.is_weighted():
    #             neighbors_list, weights_list = self.neighbors(node_from)
    #         else:
    #             neighbors_list = self.neighbors(node_from)
    #         x = self.nodes_binary_search(neighbors_list, node)
    #         if x != -1:
    #             if self.is_weighted():
    #                 sum += weights_list[x]
    #             else:
    #                 sum += 1
    #     return sum
    #
    # def predecessors(self, node):
    #     nodes_list = []
    #     for node_from in self.nodes():
    #         if self.is_edge_between_nodes(node_from, node):
    #             nodes_list.append(node_from)
    #     return nodes_list

    def nodes(self):
        return list(self._map_node_to_number.keys())

    def edges(self):
        return self.convert_back()

    def is_edge_between_nodes(self, node1, node2):
        number = self._map_node_to_number[node1]
        idx = self._index_list[number]
        idx_end = self._index_list[number + 1]
        return self._map_node_to_number[node2] in self._neighbors_list[idx: idx_end]
        # for neighbor in self._neighbors_list[idx: idx_end]:
        #     if node2 == self._map_number_to_node[neighbor]:
        #         return True
        # return False

    def size(self):
        if self.is_weighted() and not self.is_directed():
            edges = self.edges()
            size = 0
            for edge in edges:
                if edge[0] == edge[1]:
                    size += edge[2]
                else:
                    size += edge[2] / 2
            return size
        if self.is_weighted():
            return sum(self._weights_list)
        if not self.is_directed():
            return len(self._neighbors_list) // 2
        return len(self._neighbors_list)

    def get_edge_data(self, node1, node2, default=None):
        if self.is_weighted():
            number = self._map_node_to_number[node1]
            idx = self._index_list[number]
            idx_end = self._index_list[number + 1]
            node1_neighbors = self._neighbors_list[idx: idx_end]
            node2_index = self.binary_search(node1_neighbors, self._map_node_to_number[node2])
            if node2_index != -1:
            # if self._map_node_to_number[node2] in node1_neighbors:
            #     node2_index = node1_neighbors.index(self._map_node_to_number[node2])
                return {"weight": self._weights_list[idx + node2_index]}
            else:
                if default is not None:
                    return default
                else:
                    return {'weight': 0}
        else:
            return {'weight': 1}

    # input: a list of CSV file names, each containing a weighted edge list such as [[5,1,0.1],[2,3,3],[5,3,0.2],[4,5,9]]
    def convert_with_csv(self, files_name, header=True):
        self._map_node_to_number = OrderedDict()
        graph = []
        for file in files_name:
            with open(file, "r") as csvfile:
                datareader = csv.reader(csvfile)
                if header:
                    next(datareader, None)  # skip the header row
                for edge in datareader:
                    edge[2] = float(edge[2])
                    graph.append(edge)
        self.convert(graph)

    # input: an edge list such as [[5,1,0.1],[2,3,3],[5,3,0.2],[4,5,9]]
    def convert(self, graph):
        free = 0
        '''create dictionary to self._map_node_to_number from edges to our new numbering'''

        for edge in graph:
            if self._map_node_to_number.get(edge[0], None) is None:
                self._map_node_to_number[edge[0]] = free
                free += 1
            if self._map_node_to_number.get(edge[1], None) is None:
                self._map_node_to_number[edge[1]] = free
                free += 1

        '''create the opposite dictionary'''
        self._map_number_to_node = OrderedDict((y, x) for x, y in self._map_node_to_number.items())

        d = OrderedDict()
        '''starting to create the index list. Unordered is important'''
        for idx, edge in enumerate(graph):
            d[self._map_node_to_number[edge[0]]] = d.get(self._map_node_to_number[edge[0]], 0) + 1
            if not self.is_directed():
                if edge[0] != edge[1]:
                    d[self._map_node_to_number[edge[1]]] = d.get(self._map_node_to_number[edge[1]], 0) + 1
            elif self._map_node_to_number[edge[1]] not in d.keys():
                d[self._map_node_to_number[edge[1]]] = 0

        '''transfer the dictionary to list'''
        for j in range(1, len(d.keys()) + 1):
            self._index_list.append(self._index_list[j - 1] + d.get(j - 1, 0))

        '''create the second list'''
        if self.is_directed():
            self._neighbors_list = [-1] * len(graph)
            if self.is_weighted():
                self._weights_list = [0] * len(graph)
        else:
            self._neighbors_list = [-1] * len(graph) * 2
            if self.weighted:
                self._weights_list = [0] * len(graph) * 2

        space = OrderedDict((x, -1) for x in self._map_number_to_node.keys())
        for idx, edge in enumerate(graph):
            left = self._map_node_to_number[edge[0]]
            right = self._map_node_to_number[edge[1]]
            if self.is_weighted():
                weight = float(edge[2])

            if space[left] != -1:
                space[left] += 1
                i = space[left]
            else:
                i = self._index_list[left]
                space[left] = i
            self._neighbors_list[i] = right
            if self.weighted:
                self._weights_list[i] = weight

            if not self.is_directed() and left != right:
                if space[right] != -1:
                    space[right] += 1
                    i = space[right]
                else:
                    i = self._index_list[right]
                    space[right] = i
                self._neighbors_list[i] = left
                if self.is_weighted():
                    self._weights_list[i] = weight
        self._neighbors_list, self._weights_list = self.sort_all(index_list=self._index_list,
                                                                 neighbors_list=self._neighbors_list,
                                                                 weights_list=self._weights_list)

    # convert back to [[5,1,0.1],[2,3,3],[5,3,0.2],[4,5,9]] format using self dicts
    def convert_back(self):
        graph = []
        for number in range(len(self._index_list) - 1):
            node = self._map_number_to_node[number]
            index = self._index_list[number]
            while index < self._index_list[number + 1]:
                to_node = self._map_number_to_node[self._neighbors_list[index]]
                if self.is_weighted():
                    weight = self._weights_list[index]
                    edge = [node, to_node, weight]
                else:
                    edge = [node, to_node]
                graph.append(edge)
                index += 1
        return graph

    # sort the neighbors for each node
    def sort_all(self, index_list=None, neighbors_list=None, weights_list=None):
        for number in range(len(index_list) - 1):
            start = index_list[number]
            end = index_list[number + 1]
            neighbors_list[start: end], weights_list[start: end] = self.sort_neighbors(neighbors_list[start: end], weights_list[start: end])
        return neighbors_list, weights_list

    def sort_neighbors(self, neighbors_list=None, weights_list=None):
        if self.weighted:
            neighbors_weights = {neighbors_list[i]: weights_list[i] for i in range(len(neighbors_list))}
            neighbors_weights = OrderedDict(sorted(neighbors_weights.items()))
            return neighbors_weights.keys(), neighbors_weights.values()
        else:
            return sorted(neighbors_list), weights_list

    # get neighbors of specific node n
    def neighbors(self, node):
        number = self._map_node_to_number[node]
        idx = self._index_list[number]
        idx_end = self._index_list[number+1]
        neighbors_list = [0] * (idx_end-idx)
        if self.is_weighted():
            weights_list = self._weights_list[idx: idx_end]
        for i, neighbor in enumerate(self._neighbors_list[idx: idx_end]):
            neighbors_list[i] = self._map_number_to_node[neighbor]
        if self.is_weighted():
            return neighbors_list, weights_list
        else:
            return neighbors_list

    # get neighbors and weights for every node
    def graph_adjacency(self):
        graph_adjacency_dict = dict()
        for number in range(len(self._index_list) - 1):
            node = self._map_number_to_node[number]
            if node not in graph_adjacency_dict.keys():
                graph_adjacency_dict[node] = dict()

            if self.is_weighted():
                neighbors_list, weights_list = self.neighbors(node)
                for neighbor, weight in zip(neighbors_list, weights_list):
                    graph_adjacency_dict[node][neighbor] = {'weight': weight}
            else:
                neighbors_list = self.neighbors(node)
                for neighbor in neighbors_list:
                    graph_adjacency_dict[node][neighbor] = {'weight': 1}
        return graph_adjacency_dict

    # Add new edges to the graph, but with limitations:
    # for example, if the edge is [w,v] and the graph is directed, w can't be an existing node;
    # if the graph is not directed, w and v can't be existing nodes.
    def add_edges(self, edges):
        last_index = self._index_list[-1]
        index_list = [last_index]
        neighbors_list = []
        weights_list = []

        nodes_amount = len(self._map_node_to_number.keys())
        free = nodes_amount
        map_node_to_number = OrderedDict()

        '''create dictionary to self._map_node_to_number from edges to our new numbering'''
        for edge in edges:
            if (self._map_node_to_number.get(edge[0], None) is not None or
                (not self.directed and self._map_node_to_number.get(edge[1], None) is not None)):
                print("Error: add_edges can't add edges from an existing node")
                return
            if map_node_to_number.get(edge[0], None) is None:
                map_node_to_number[edge[0]] = free
                free += 1
            if map_node_to_number.get(edge[1], None) is None and self._map_node_to_number.get(edge[1], None) is None:
                map_node_to_number[edge[1]] = free
                free += 1
        '''create the opposite dictionary'''
        map_number_to_node = OrderedDict((y, x) for x, y in map_node_to_number.items())

        """update the original dicts"""
        self._map_node_to_number.update(map_node_to_number)
        self._map_number_to_node.update(map_number_to_node)

        d = OrderedDict()
        '''starting to create the index list. Unordered is important'''
        for idx, edge in enumerate(edges):
            d[self._map_node_to_number[edge[0]]] = d.get(self._map_node_to_number[edge[0]], 0) + 1
            if not self.directed:
                d[self._map_node_to_number[edge[1]]] = d.get(self._map_node_to_number[edge[1]], 0) + 1
            elif self._map_node_to_number[edge[1]] not in d.keys() and edge[1] in map_node_to_number.keys():
                d[self._map_node_to_number[edge[1]]] = 0

        '''transfer the dictionary to list'''
        for j in range(1, len(d.keys()) + 1):
            index_list.append(index_list[j - 1] + d.get(nodes_amount + j - 1, 0))

        '''create the second list'''
        if self.is_directed():
            neighbors_list = [-1] * len(edges)
        else:
            neighbors_list = [-1] * len(edges) * 2
        if self.is_weighted():
            weights_list = [0] * len(edges)

        space = OrderedDict((x, -1) for x in self._map_number_to_node.keys())
        for idx, edge in enumerate(edges):
            left = self._map_node_to_number[edge[0]]
            right = self._map_node_to_number[edge[1]]
            if self.is_weighted():
                weight = float(edge[2])

            if space[left] != -1:
                space[left] += 1
                i = space[left]
            else:
                i = index_list[left - nodes_amount] - last_index
                space[left] = i
            neighbors_list[i] = right
            if self.is_weighted():
                weights_list[i] = weight

            if not self.is_directed():
                if space[right] != -1:
                    space[right] += 1
                    i = space[right]
                else:
                    i = index_list[right - len(self._index_list) - 1]
                    space[right] = i
                neighbors_list[i] = left
                if self.is_weighted():
                    weights_list[i] = weight

        """sort the neighbors"""
        neighbors_list, weights_list = self.sort_all(index_list=index_list,
                                                     neighbors_list=neighbors_list,
                                                     weights_list=weights_list)

        """update the original dicts"""
        self._index_list += index_list[1:]
        self._neighbors_list += neighbors_list
        self._weights_list += weights_list


    # swap between two edges, but with limitations.
    # For example, the graph can only be directed.
    # The swap can only be in the form of: from edge [a,b] to edge [a, c].
    def swap_edge(self, edge_to_delete, edge_to_add):
        if not self.directed or edge_to_delete[0] != edge_to_add[0]:
            print("Error: swap_edge can only be used on a directed graph and from the same source node")
            return
        if not self.is_edge_between_nodes(edge_to_delete[0], edge_to_delete[1]):
            print("Error: edge_to_delete was not found")
            return
        number = self._map_node_to_number[edge_to_add[0]]
        to_number = self._map_node_to_number[edge_to_add[1]]
        from_number = self._map_node_to_number[edge_to_delete[1]]
        start_index_of_source = self._index_list[number]
        end_index_of_source = self._index_list[number+1]

        neighbors_list = self._neighbors_list[start_index_of_source: end_index_of_source]
        neighbor_index = self.binary_search(neighbors_list, from_number)
        neighbors_list[neighbor_index] = to_number

        if self.is_weighted():
            weights_list = self._weights_list[start_index_of_source: end_index_of_source]
            weights_list[neighbor_index] = edge_to_add[2]
            neighbors_list, weights_list = self.sort_neighbors(neighbors_list, weights_list)
            self._weights_list[start_index_of_source: end_index_of_source] = weights_list
        else:
            neighbors_list, weights_list = self.sort_neighbors(neighbors_list)
        self._neighbors_list[start_index_of_source: end_index_of_source] = neighbors_list

    # get memory usage of the lol object
    def get_memory(self):
        return sum([sys.getsizeof(var) for var in [self._index_list, self._neighbors_list, self._weights_list,
                                                   self._map_node_to_number, self._map_number_to_node,
                                                   self.directed, self.weighted]])

    def create_sub(self, node):
        # Build a small networkx subgraph around `node`: its neighbors,
        # their neighbors, and closing edges back into the visited set.
        node1 = self._map_node_to_number[node]
        edges = []
        seen = set()
        nodes = self._neighbors_list[self._index_list[node1]: self._index_list[node1 + 1]]
        for n in nodes:
            node22 = self._map_number_to_node[n]
            if (node22, node) not in edges:
                edges.append((node, node22))
                seen.update([node, node22])
            node2 = self._map_node_to_number[node22]
            nodes2 = self._neighbors_list[self._index_list[node2]: self._index_list[node2 + 1]]
            for n2 in nodes2:
                node33 = self._map_number_to_node[n2]
                edges.append((node22, node33))
                seen.update([node22, node33])
                node3 = self._map_node_to_number[node33]
                nodes3 = self._neighbors_list[self._index_list[node3]: self._index_list[node3 + 1]]
                for n3 in nodes3:
                    node4 = self._map_number_to_node[n3]
                    if node4 in seen:
                        edges.append((node4, node33))
                        seen.update([node4, node33])

        return nx.Graph(edges)
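
A short usage sketch of the list-of-lists representation, assuming the LolGraph class exactly as listed above (expected outputs shown in comments):

g = LolGraph(directed=True, weighted=True)
g.convert([[5, 1, 0.1], [2, 3, 3], [5, 3, 0.2], [4, 5, 9]])
print(g.nodes())              # [5, 1, 2, 3, 4]
print(g.neighbors(5))         # ([1, 3], [0.1, 0.2])
print(g.get_edge_data(5, 3))  # {'weight': 0.2}
print(g.number_of_edges())    # 4
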
Example no. 41
class FontConfig(object):
    def __init__(self):
        self._fontconfig = self._load_fontconfig_library()
        self._search_cache = OrderedDict()
        self._cache_size = 20

    def dispose(self):
        while len(self._search_cache) > 0:
            # popitem() returns a (key, value) pair; dispose of the cached pattern
            self._search_cache.popitem()[1].dispose()

        self._fontconfig.FcFini()
        self._fontconfig = None

    def create_search_pattern(self):
        return FontConfigSearchPattern(self._fontconfig)

    def find_font(self, name, size=12, bold=False, italic=False):
        result = self._get_from_search_cache(name, size, bold, italic)
        if result:
            return result

        search_pattern = self.create_search_pattern()
        search_pattern.name = name
        search_pattern.size = size
        search_pattern.bold = bold
        search_pattern.italic = italic

        result = search_pattern.match()
        self._add_to_search_cache(search_pattern, result)
        return result

    def char_index(self, face, character):
        return self._fontconfig.FcFreeTypeCharIndex(byref(face),
                                                    ord(character))

    def _add_to_search_cache(self, search_pattern, result_pattern):
        self._search_cache[(search_pattern.name, search_pattern.size,
                            search_pattern.bold,
                            search_pattern.italic)] = result_pattern
        if len(self._search_cache) > self._cache_size:
            self._search_cache.popitem(last=False)[1].dispose()

    def _get_from_search_cache(self, name, size, bold, italic):
        result = self._search_cache.get((name, size, bold, italic), None)

        if result and result.is_valid:
            return result
        else:
            return None

    @staticmethod
    def _load_fontconfig_library():
        fontconfig = pyglet.lib.load_library('fontconfig')
        fontconfig.FcInit()

        fontconfig.FcPatternBuild.restype = c_void_p
        fontconfig.FcPatternCreate.restype = c_void_p
        fontconfig.FcFontMatch.restype = c_void_p
        fontconfig.FcFreeTypeCharIndex.restype = c_uint

        fontconfig.FcPatternAddDouble.argtypes = [c_void_p, c_char_p, c_double]
        fontconfig.FcPatternAddInteger.argtypes = [c_void_p, c_char_p, c_int]
        fontconfig.FcPatternAddString.argtypes = [c_void_p, c_char_p, c_char_p]
        fontconfig.FcConfigSubstitute.argtypes = [c_void_p, c_void_p, c_int]
        fontconfig.FcDefaultSubstitute.argtypes = [c_void_p]
        fontconfig.FcFontMatch.argtypes = [c_void_p, c_void_p, c_void_p]
        fontconfig.FcPatternDestroy.argtypes = [c_void_p]

        fontconfig.FcPatternGetFTFace.argtypes = [
            c_void_p, c_char_p, c_int, c_void_p
        ]
        fontconfig.FcPatternGet.argtypes = [
            c_void_p, c_char_p, c_int, c_void_p
        ]

        return fontconfig
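A hedged usage sketch (assumes pyglet can load the system fontconfig library; the attributes of the returned match come from FontConfigSearchPattern.match(), which is not shown here):

fc = FontConfig()
match = fc.find_font('DejaVu Sans', size=14, bold=True)
# an identical lookup is served from the cache, which evicts its oldest
# entry once it holds more than 20 results (popitem(last=False))
match_again = fc.find_font('DejaVu Sans', size=14, bold=True)
fc.dispose()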
Esempio n. 42
0
    def convert(self, graph):
        free = 0
        # build self._map_node_to_number: original node labels -> our new numbering

        for edge in graph:
            if self._map_node_to_number.get(edge[0], None) is None:
                self._map_node_to_number[edge[0]] = free
                free += 1
            if self._map_node_to_number.get(edge[1], None) is None:
                self._map_node_to_number[edge[1]] = free
                free += 1

        # build the opposite mapping: number -> original node label
        self._map_number_to_node = OrderedDict((y, x) for x, y in self._map_node_to_number.items())

        d = OrderedDict()
        # start building the index list from per-node degree counts (order matters, hence OrderedDict)
        for idx, edge in enumerate(graph):
            d[self._map_node_to_number[edge[0]]] = d.get(self._map_node_to_number[edge[0]], 0) + 1
            if not self.is_directed():
                if edge[0] != edge[1]:
                    d[self._map_node_to_number[edge[1]]] = d.get(self._map_node_to_number[edge[1]], 0) + 1
            elif self._map_node_to_number[edge[1]] not in d.keys():
                d[self._map_node_to_number[edge[1]]] = 0

        # turn the degree counts into a prefix-sum index list
        for j in range(1, len(d.keys()) + 1):
            self._index_list.append(self._index_list[j - 1] + d.get(j - 1, 0))

        # allocate the neighbors (and, if weighted, the weights) arrays
        if self.is_directed():
            self._neighbors_list = [-1] * len(graph)
            if self.is_weighted():
                self._weights_list = [0] * len(graph)
        else:
            self._neighbors_list = [-1] * len(graph) * 2
            if self.is_weighted():
                self._weights_list = [0] * len(graph) * 2

        space = OrderedDict((x, -1) for x in self._map_number_to_node.keys())
        for idx, edge in enumerate(graph):
            left = self._map_node_to_number[edge[0]]
            right = self._map_node_to_number[edge[1]]
            if self.is_weighted():
                weight = float(edge[2])

            if space[left] != -1:
                space[left] += 1
                i = space[left]
            else:
                i = self._index_list[left]
                space[left] = i
            self._neighbors_list[i] = right
            if self.is_weighted():
                self._weights_list[i] = weight

            if not self.is_directed() and left != right:
                if space[right] != -1:
                    space[right] += 1
                    i = space[right]
                else:
                    i = self._index_list[right]
                    space[right] = i
                self._neighbors_list[i] = left
                if self.is_weighted():
                    self._weights_list[i] = weight
        self._neighbors_list, self._weights_list = self.sort_all(index_list=self._index_list,
                                                                 neighbors_list=self._neighbors_list,
                                                                 weights_list=self._weights_list)
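A standalone illustration of the prefix-sum step above, assuming an undirected triangle graph and the hypothetical numbering 0..2:

from collections import OrderedDict

edges = [(0, 1), (0, 2), (1, 2)]
deg = OrderedDict()
for a, b in edges:
    deg[a] = deg.get(a, 0) + 1
    deg[b] = deg.get(b, 0) + 1     # undirected: count both endpoints

index_list = [0]
for j in range(len(deg)):
    index_list.append(index_list[-1] + deg.get(j, 0))

assert index_list == [0, 2, 4, 6]  # every node of the triangle has degree 2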
Esempio n. 43
0
def make_editor_session(pad,
                        path,
                        is_attachment=None,
                        alt=PRIMARY_ALT,
                        datamodel=None):
    """Creates an editor session for the given path object."""
    if alt != PRIMARY_ALT and not pad.db.config.is_valid_alternative(alt):
        raise BadEdit("Attempted to edit an invalid alternative (%s)" % alt)

    raw_data = pad.db.load_raw_data(path,
                                    cls=OrderedDict,
                                    alt=alt,
                                    fallback=False)
    raw_data_fallback = None
    if alt != PRIMARY_ALT:
        raw_data_fallback = pad.db.load_raw_data(path, cls=OrderedDict)
        all_data = OrderedDict()
        all_data.update(raw_data_fallback or ())
        all_data.update(raw_data or ())
    else:
        all_data = raw_data

    id = posixpath.basename(path)
    if not is_valid_id(id):
        raise BadEdit("Invalid ID")

    record = None
    exists = raw_data is not None or raw_data_fallback is not None
    if raw_data is None:
        raw_data = OrderedDict()

    if is_attachment is None:
        if not exists:
            is_attachment = False
        else:
            is_attachment = bool(all_data.get("_attachment_for"))
    elif bool(all_data.get("_attachment_for")) != is_attachment:
        raise BadEdit("The attachment flag passed is conflicting with the "
                      "record's attachment flag.")

    if exists:
        # XXX: what about changing the datamodel after the fact?
        if datamodel is not None:
            raise BadEdit("When editing an existing record, a datamodel "
                          "must not be provided.")
        datamodel = pad.db.get_datamodel_for_raw_data(all_data, pad)
    else:
        if datamodel is None:
            datamodel = pad.db.get_implied_datamodel(path, is_attachment, pad)
        elif isinstance(datamodel, str):
            datamodel = pad.db.datamodels[datamodel]

    if exists:
        record = pad.instance_from_data(dict(all_data), datamodel)

    for key in implied_keys:
        raw_data.pop(key, None)
        if raw_data_fallback:
            raw_data_fallback.pop(key, None)

    return EditorSession(
        pad,
        id,
        str(path),
        raw_data,
        raw_data_fallback,
        datamodel,
        record,
        exists,
        is_attachment,
        alt,
    )
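The alternative handling above layers the fallback record under the alt-specific one; the same OrderedDict merge in isolation (the field names are made up):

from collections import OrderedDict

raw_data_fallback = OrderedDict([('title', 'Hello'), ('body', 'original')])
raw_data = OrderedDict([('body', 'translated')])   # alt-specific fields

all_data = OrderedDict()
all_data.update(raw_data_fallback or ())
all_data.update(raw_data or ())                    # the alt wins on conflicts

assert all_data == OrderedDict([('title', 'Hello'), ('body', 'translated')])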
Esempio n. 44
0
class Item(CompHashable, Geometry2D):
    """Each item can be registered with multiple scenes. Data is owned by the Item instance,
    plot settings are owned by the scenes.
    """
    def __init__(self, scene=None, name=None, **kwargs):
        self._scenes = OrderedDict()
        self._sceneindex = -1
        self.name = name

        if scene is not None:
            self.register(scene)

        Item._updatedata(self, **kwargs)

    def _updatedata(self, axis0name="Dim0", axis1name="Dim1", **settings):
        self.axis0name = axis0name
        self.axis1name = axis1name
        try:
            if settings:
                self.set_settings(settings)
        except RuntimeError:
            logger.warning("Item settings are not applied (provide a scene)")

    def updatedata(self, **kwargs):
        Item._updatedata(self, **kwargs)

    def register(self, scene):
        scene.register(self)

    def useaxesnames(self):
        self.scene.axlabels = [self.axis0name, self.axis1name]

    def defaultsettings(self):
        return {}

    def set_settings(self, settings):
        settings2 = self.scene.getitemsettings(self)
        for k, v in settings.items():
            settings2[k] = v

    def get_settings(self, keys):
        settings = self.scene.getitemsettings(self)
        return {k: settings[k] for k in keys}

    def set_setting(self, attr, value):
        settings = self.scene.getitemsettings(self)
        settings[attr] = value

    def get_setting(self, key):
        settings = self.scene.getitemsettings(self)
        return settings[key]

    def selectscene(self, s):
        try:
            self._sceneindex = list(self._scenes.keys()).index(s)
        except ValueError:
            raise RuntimeError(
                "This object is not registered with scene {}".format(s))

    @property
    def scene(self):
        if len(self._scenes) == 0:
            raise RuntimeError("This object is not registered with any scene")
        return list(self._scenes.keys())[self._sceneindex]

    def addscene(self, s):
        if s not in self._scenes:
            self._scenes[s] = OrderedDict()

    @property
    def sceneitems(self):
        """My items in the active scene"""
        return self._scenes.get(self.scene, OrderedDict())

    def removefromscene(self):
        """Remove myself from the active scene"""
        items = self.sceneitems
        for item in items:
            if item is not None:
                items[item].remove()
        self._scenes[self.scene] = OrderedDict()

    def refreshscene(self, newitems):
        """Update the active scene with new items"""
        olditems = self.sceneitems

        for name in olditems:
            if name in newitems:
                if newitems[name] != olditems[name]:  # TODO: use "is not"?
                    olditems[name].remove()
            else:
                olditems[name].remove()
        self._scenes[self.scene] = newitems

    def datarange(self, dataaxis):
        raise NotImplementedError("Item is an abstract class")

    @property
    def datalimx(self):
        return self.datarange(self.dataaxisx)

    @property
    def datalimy(self):
        return self.datarange(self.dataaxisy)

    @property
    def transposed(self):
        return self.scene.transposed
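An isolated sketch of the scene bookkeeping above: an OrderedDict keeps registration order stable, so selectscene can resolve a scene to a positional index (the scene names are illustrative):

from collections import OrderedDict

scenes = OrderedDict()                 # scene -> per-scene item dict
for s in ('overview', 'detail'):
    scenes.setdefault(s, OrderedDict())

index = list(scenes.keys()).index('detail')    # mirrors selectscene
assert list(scenes.keys())[index] == 'detail'  # mirrors the scene property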
Esempio n. 45
0
class ThemeFilesFinder(BaseFinder):
    """
    A static files finder that looks in the directory of each theme as
    specified in the source_dir attribute.
    """
    storage_class = ThemeStorage
    source_dir = 'static'

    def __init__(self, *args, **kwargs):
        # The list of themes that are handled
        self.themes = []
        # Mapping of theme names to storage instances
        self.storages = OrderedDict()

        themes = get_themes()
        for theme in themes:
            theme_storage = self.storage_class(
                os.path.join(theme.path, self.source_dir),
                prefix=theme.theme_dir_name,
            )

            self.storages[theme.theme_dir_name] = theme_storage
            if theme.theme_dir_name not in self.themes:
                self.themes.append(theme.theme_dir_name)

        super(ThemeFilesFinder, self).__init__(*args, **kwargs)

    def list(self, ignore_patterns):
        """
        List all files in all theme storages.
        """
        for storage in six.itervalues(self.storages):
            if storage.exists(''):  # check if storage location exists
                for path in utils.get_files(storage, ignore_patterns):
                    yield path, storage

    def find(self, path, all=False):  # pylint: disable=redefined-builtin
        """
        Looks for files in the theme directories.
        """
        matches = []
        theme_dir = path.split("/", 1)[0]

        themes = {t.theme_dir_name: t for t in get_themes()}
        # if the path is prefixed by a theme name, search only that theme's storage; otherwise no match is returned.
        if theme_dir in themes:
            theme = themes[theme_dir]
            path = "/".join(path.split("/")[1:])
            match = self.find_in_theme(theme.theme_dir_name, path)
            if match:
                if not all:
                    return match
                matches.append(match)
        return matches

    def find_in_theme(self, theme, path):
        """
        Find a requested static file in a theme's static locations.
        """
        storage = self.storages.get(theme, None)
        if storage:
            # only try to find a file if the source dir actually exists
            if storage.exists(path):
                matched_path = storage.path(path)
                if matched_path:
                    return matched_path
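A hedged lookup sketch (assumes a configured Django environment with themes installed; 'dark-theme' and the asset path are made up):

finder = ThemeFilesFinder()
# a path prefixed with a theme directory name -> only that theme's storage is searched
match = finder.find('dark-theme/images/logo.png')
matches = finder.find('dark-theme/images/logo.png', all=True)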
Esempio n. 46
0
class RendererSVG(RendererBase):
    def __init__(self, width, height, svgwriter, basename=None, image_dpi=72):
        self.width = width
        self.height = height
        self.writer = XMLWriter(svgwriter)
        self.image_dpi = image_dpi  # actual dpi at which we rasterize stuff

        self._groupd = {}
        self.basename = basename
        self._image_counter = itertools.count()
        self._clipd = OrderedDict()
        self._markers = {}
        self._path_collection_id = 0
        self._hatchd = OrderedDict()
        self._has_gouraud = False
        self._n_gradients = 0
        self._fonts = OrderedDict()
        self.mathtext_parser = MathTextParser('SVG')

        RendererBase.__init__(self)
        self._glyph_map = dict()
        str_height = short_float_fmt(height)
        str_width = short_float_fmt(width)
        svgwriter.write(svgProlog)
        self._start_id = self.writer.start(
            'svg',
            width='%spt' % str_width,
            height='%spt' % str_height,
            viewBox='0 0 %s %s' % (str_width, str_height),
            xmlns="http://www.w3.org/2000/svg",
            version="1.1",
            attrib={'xmlns:xlink': "http://www.w3.org/1999/xlink"})
        self._write_default_style()

    def finalize(self):
        self._write_clips()
        self._write_hatches()
        self.writer.close(self._start_id)
        self.writer.flush()

    def _write_default_style(self):
        writer = self.writer
        default_style = generate_css({
            'stroke-linejoin': 'round',
            'stroke-linecap': 'butt'
        })
        writer.start('defs')
        writer.start('style', type='text/css')
        writer.data('*{%s}\n' % default_style)
        writer.end('style')
        writer.end('defs')

    def _make_id(self, type, content):
        salt = rcParams['svg.hashsalt']
        if salt is None:
            salt = str(uuid.uuid4())
        m = hashlib.md5()
        m.update(salt.encode('utf8'))
        m.update(str(content).encode('utf8'))
        return '%s%s' % (type, m.hexdigest()[:10])

    def _make_flip_transform(self, transform):
        return (transform +
                Affine2D().scale(1.0, -1.0).translate(0.0, self.height))

    def _get_font(self, prop):
        fname = findfont(prop)
        font = get_font(fname)
        font.clear()
        size = prop.get_size_in_points()
        font.set_size(size, 72.0)
        return font

    def _get_hatch(self, gc, rgbFace):
        """
        Create a new hatch pattern
        """
        if rgbFace is not None:
            rgbFace = tuple(rgbFace)
        edge = gc.get_hatch_color()
        if edge is not None:
            edge = tuple(edge)
        dictkey = (gc.get_hatch(), rgbFace, edge)
        oid = self._hatchd.get(dictkey)
        if oid is None:
            oid = self._make_id('h', dictkey)
            self._hatchd[dictkey] = ((gc.get_hatch_path(), rgbFace, edge), oid)
        else:
            _, oid = oid
        return oid

    def _write_hatches(self):
        if not len(self._hatchd):
            return
        HATCH_SIZE = 72
        writer = self.writer
        writer.start('defs')
        for (path, face, stroke), oid in self._hatchd.values():
            writer.start('pattern',
                         id=oid,
                         patternUnits="userSpaceOnUse",
                         x="0",
                         y="0",
                         width=str(HATCH_SIZE),
                         height=str(HATCH_SIZE))
            path_data = self._convert_path(path,
                                           Affine2D().scale(HATCH_SIZE).scale(
                                               1.0,
                                               -1.0).translate(0, HATCH_SIZE),
                                           simplify=False)
            if face is None:
                fill = 'none'
            else:
                fill = rgb2hex(face)
            writer.element('rect',
                           x="0",
                           y="0",
                           width=str(HATCH_SIZE + 1),
                           height=str(HATCH_SIZE + 1),
                           fill=fill)
            writer.element('path',
                           d=path_data,
                           style=generate_css({
                               'fill':
                               rgb2hex(stroke),
                               'stroke':
                               rgb2hex(stroke),
                               'stroke-width':
                               str(rcParams['hatch.linewidth']),
                               'stroke-linecap':
                               'butt',
                               'stroke-linejoin':
                               'miter'
                           }))
            writer.end('pattern')
        writer.end('defs')

    def _get_style_dict(self, gc, rgbFace):
        """Generate a style string from the GraphicsContext and rgbFace."""
        attrib = {}

        forced_alpha = gc.get_forced_alpha()

        if gc.get_hatch() is not None:
            attrib['fill'] = "url(#%s)" % self._get_hatch(gc, rgbFace)
            if (rgbFace is not None and len(rgbFace) == 4 and rgbFace[3] != 1.0
                    and not forced_alpha):
                attrib['fill-opacity'] = short_float_fmt(rgbFace[3])
        else:
            if rgbFace is None:
                attrib['fill'] = 'none'
            else:
                if tuple(rgbFace[:3]) != (0, 0, 0):
                    attrib['fill'] = rgb2hex(rgbFace)
                if (len(rgbFace) == 4 and rgbFace[3] != 1.0
                        and not forced_alpha):
                    attrib['fill-opacity'] = short_float_fmt(rgbFace[3])

        if forced_alpha and gc.get_alpha() != 1.0:
            attrib['opacity'] = short_float_fmt(gc.get_alpha())

        offset, seq = gc.get_dashes()
        if seq is not None:
            attrib['stroke-dasharray'] = ','.join(
                short_float_fmt(val) for val in seq)
            attrib['stroke-dashoffset'] = short_float_fmt(float(offset))

        linewidth = gc.get_linewidth()
        if linewidth:
            rgb = gc.get_rgb()
            attrib['stroke'] = rgb2hex(rgb)
            if not forced_alpha and rgb[3] != 1.0:
                attrib['stroke-opacity'] = short_float_fmt(rgb[3])
            if linewidth != 1.0:
                attrib['stroke-width'] = short_float_fmt(linewidth)
            if gc.get_joinstyle() != 'round':
                attrib['stroke-linejoin'] = gc.get_joinstyle()
            if gc.get_capstyle() != 'butt':
                attrib['stroke-linecap'] = _capstyle_d[gc.get_capstyle()]

        return attrib

    def _get_style(self, gc, rgbFace):
        return generate_css(self._get_style_dict(gc, rgbFace))

    def _get_clip(self, gc):
        cliprect = gc.get_clip_rectangle()
        clippath, clippath_trans = gc.get_clip_path()
        if clippath is not None:
            clippath_trans = self._make_flip_transform(clippath_trans)
            dictkey = (id(clippath), str(clippath_trans))
        elif cliprect is not None:
            x, y, w, h = cliprect.bounds
            y = self.height - (y + h)
            dictkey = (x, y, w, h)
        else:
            return None

        clip = self._clipd.get(dictkey)
        if clip is None:
            oid = self._make_id('p', dictkey)
            if clippath is not None:
                self._clipd[dictkey] = ((clippath, clippath_trans), oid)
            else:
                self._clipd[dictkey] = (dictkey, oid)
        else:
            clip, oid = clip
        return oid

    def _write_clips(self):
        if not len(self._clipd):
            return
        writer = self.writer
        writer.start('defs')
        for clip, oid in self._clipd.values():
            writer.start('clipPath', id=oid)
            if len(clip) == 2:
                clippath, clippath_trans = clip
                path_data = self._convert_path(clippath,
                                               clippath_trans,
                                               simplify=False)
                writer.element('path', d=path_data)
            else:
                x, y, w, h = clip
                writer.element('rect',
                               x=short_float_fmt(x),
                               y=short_float_fmt(y),
                               width=short_float_fmt(w),
                               height=short_float_fmt(h))
            writer.end('clipPath')
        writer.end('defs')

    def open_group(self, s, gid=None):
        # docstring inherited
        if gid:
            self.writer.start('g', id=gid)
        else:
            self._groupd[s] = self._groupd.get(s, 0) + 1
            self.writer.start('g', id="%s_%d" % (s, self._groupd[s]))

    def close_group(self, s):
        # docstring inherited
        self.writer.end('g')

    def option_image_nocomposite(self):
        # docstring inherited
        return not rcParams['image.composite_image']

    def _convert_path(self,
                      path,
                      transform=None,
                      clip=None,
                      simplify=None,
                      sketch=None):
        if clip:
            clip = (0.0, 0.0, self.width, self.height)
        else:
            clip = None
        return _path.convert_to_string(path, transform, clip, simplify, sketch,
                                       6, [b'M', b'L', b'Q', b'C', b'z'],
                                       False).decode('ascii')

    def draw_path(self, gc, path, transform, rgbFace=None):
        # docstring inherited
        trans_and_flip = self._make_flip_transform(transform)
        clip = (rgbFace is None and gc.get_hatch_path() is None)
        simplify = path.should_simplify and clip
        path_data = self._convert_path(path,
                                       trans_and_flip,
                                       clip=clip,
                                       simplify=simplify,
                                       sketch=gc.get_sketch_params())

        attrib = {}
        attrib['style'] = self._get_style(gc, rgbFace)

        clipid = self._get_clip(gc)
        if clipid is not None:
            attrib['clip-path'] = 'url(#%s)' % clipid

        if gc.get_url() is not None:
            self.writer.start('a', {'xlink:href': gc.get_url()})
        self.writer.element('path', d=path_data, attrib=attrib)
        if gc.get_url() is not None:
            self.writer.end('a')

    def draw_markers(self,
                     gc,
                     marker_path,
                     marker_trans,
                     path,
                     trans,
                     rgbFace=None):
        # docstring inherited

        if not len(path.vertices):
            return

        writer = self.writer
        path_data = self._convert_path(marker_path,
                                       marker_trans +
                                       Affine2D().scale(1.0, -1.0),
                                       simplify=False)
        style = self._get_style_dict(gc, rgbFace)
        dictkey = (path_data, generate_css(style))
        oid = self._markers.get(dictkey)
        style = generate_css(
            {k: v
             for k, v in style.items() if k.startswith('stroke')})

        if oid is None:
            oid = self._make_id('m', dictkey)
            writer.start('defs')
            writer.element('path', id=oid, d=path_data, style=style)
            writer.end('defs')
            self._markers[dictkey] = oid

        attrib = {}
        clipid = self._get_clip(gc)
        if clipid is not None:
            attrib['clip-path'] = 'url(#%s)' % clipid
        writer.start('g', attrib=attrib)

        trans_and_flip = self._make_flip_transform(trans)
        attrib = {'xlink:href': '#%s' % oid}
        clip = (0, 0, self.width * 72, self.height * 72)
        for vertices, code in path.iter_segments(trans_and_flip,
                                                 clip=clip,
                                                 simplify=False):
            if len(vertices):
                x, y = vertices[-2:]
                attrib['x'] = short_float_fmt(x)
                attrib['y'] = short_float_fmt(y)
                attrib['style'] = self._get_style(gc, rgbFace)
                writer.element('use', attrib=attrib)
        writer.end('g')

    def draw_path_collection(self, gc, master_transform, paths, all_transforms,
                             offsets, offsetTrans, facecolors, edgecolors,
                             linewidths, linestyles, antialiaseds, urls,
                             offset_position):
        # Is the optimization worth it? Rough calculation:
        # cost of emitting a path in-line is
        #    (len_path + 5) * uses_per_path
        # cost of definition+use is
        #    (len_path + 3) + 9 * uses_per_path
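        #
        # For example, with len_path = 10 and uses_per_path = 3 the in-line
        # route costs (10 + 5) * 3 = 45 while definition+use costs
        # (10 + 3) + 9 * 3 = 40, so <defs>/<use> wins; with a single use
        # (uses_per_path = 1) emitting the path in-line is cheaper.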
        len_path = len(paths[0].vertices) if len(paths) > 0 else 0
        uses_per_path = self._iter_collection_uses_per_path(
            paths, all_transforms, offsets, facecolors, edgecolors)
        should_do_optimization = \
            len_path + 9 * uses_per_path + 3 < (len_path + 5) * uses_per_path
        if not should_do_optimization:
            return RendererBase.draw_path_collection(
                self, gc, master_transform, paths, all_transforms, offsets,
                offsetTrans, facecolors, edgecolors, linewidths, linestyles,
                antialiaseds, urls, offset_position)

        writer = self.writer
        path_codes = []
        writer.start('defs')
        for i, (path, transform) in enumerate(
                self._iter_collection_raw_paths(master_transform, paths,
                                                all_transforms)):
            transform = Affine2D(transform.get_matrix()).scale(1.0, -1.0)
            d = self._convert_path(path, transform, simplify=False)
            oid = 'C%x_%x_%s' % (self._path_collection_id, i,
                                 self._make_id('', d))
            writer.element('path', id=oid, d=d)
            path_codes.append(oid)
        writer.end('defs')

        for xo, yo, path_id, gc0, rgbFace in self._iter_collection(
                gc, master_transform, all_transforms, path_codes, offsets,
                offsetTrans, facecolors, edgecolors, linewidths, linestyles,
                antialiaseds, urls, offset_position):
            clipid = self._get_clip(gc0)
            url = gc0.get_url()
            if url is not None:
                writer.start('a', attrib={'xlink:href': url})
            if clipid is not None:
                writer.start('g', attrib={'clip-path': 'url(#%s)' % clipid})
            attrib = {
                'xlink:href': '#%s' % path_id,
                'x': short_float_fmt(xo),
                'y': short_float_fmt(self.height - yo),
                'style': self._get_style(gc0, rgbFace)
            }
            writer.element('use', attrib=attrib)
            if clipid is not None:
                writer.end('g')
            if url is not None:
                writer.end('a')

        self._path_collection_id += 1

    def draw_gouraud_triangle(self, gc, points, colors, trans):
        # This uses a method described here:
        #
        #   http://www.svgopen.org/2005/papers/Converting3DFaceToSVG/index.html
        #
        # that uses three overlapping linear gradients to simulate a
        # Gouraud triangle.  Each gradient goes from fully opaque in
        # one corner to fully transparent along the opposite edge.
        # The line between the stop points is perpendicular to the
        # opposite edge.  Underlying these three gradients is a solid
        # triangle whose color is the average of all three points.

        writer = self.writer
        if not self._has_gouraud:
            self._has_gouraud = True
            writer.start('filter', id='colorAdd')
            writer.element('feComposite',
                           attrib={'in': 'SourceGraphic'},
                           in2='BackgroundImage',
                           operator='arithmetic',
                           k2="1",
                           k3="1")
            writer.end('filter')
            # feColorMatrix filter to correct opacity
            writer.start('filter', id='colorMat')
            writer.element('feColorMatrix',
                           attrib={'type': 'matrix'},
                           values='1 0 0 0 0 \n0 1 0 0 0 \n0 0 1 0 0' +
                           ' \n1 1 1 1 0 \n0 0 0 0 1 ')
            writer.end('filter')

        avg_color = np.sum(colors[:, :], axis=0) / 3.0
        # Just skip fully-transparent triangles
        if avg_color[-1] == 0.0:
            return

        trans_and_flip = self._make_flip_transform(trans)
        tpoints = trans_and_flip.transform(points)

        writer.start('defs')
        for i in range(3):
            x1, y1 = tpoints[i]
            x2, y2 = tpoints[(i + 1) % 3]
            x3, y3 = tpoints[(i + 2) % 3]
            c = colors[i][:]

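            # Find (xb, yb), the intersection of the line through the opposite
            # edge (p2, p3) with the perpendicular dropped from p1; vertical
            # and horizontal edges are special-cased to avoid dividing by a
            # zero slope.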
            if x2 == x3:
                xb = x2
                yb = y1
            elif y2 == y3:
                xb = x1
                yb = y2
            else:
                m1 = (y2 - y3) / (x2 - x3)
                b1 = y2 - (m1 * x2)
                m2 = -(1.0 / m1)
                b2 = y1 - (m2 * x1)
                xb = (-b1 + b2) / (m1 - m2)
                yb = m2 * xb + b2

            writer.start('linearGradient',
                         id="GR%x_%d" % (self._n_gradients, i),
                         gradientUnits="userSpaceOnUse",
                         x1=short_float_fmt(x1),
                         y1=short_float_fmt(y1),
                         x2=short_float_fmt(xb),
                         y2=short_float_fmt(yb))
            writer.element('stop',
                           offset='1',
                           style=generate_css({
                               'stop-color':
                               rgb2hex(avg_color),
                               'stop-opacity':
                               short_float_fmt(c[-1])
                           }))
            writer.element('stop',
                           offset='0',
                           style=generate_css({
                               'stop-color': rgb2hex(c),
                               'stop-opacity': "0"
                           }))

            writer.end('linearGradient')

        writer.end('defs')

        # triangle formation using "path"
        dpath = "M " + short_float_fmt(x1) + ',' + short_float_fmt(y1)
        dpath += " L " + short_float_fmt(x2) + ',' + short_float_fmt(y2)
        dpath += " " + short_float_fmt(x3) + ',' + short_float_fmt(y3) + " Z"

        writer.element('path',
                       attrib={
                           'd': dpath,
                           'fill': rgb2hex(avg_color),
                           'fill-opacity': '1',
                           'shape-rendering': "crispEdges"
                       })

        writer.start('g',
                     attrib={
                         'stroke': "none",
                         'stroke-width': "0",
                         'shape-rendering': "crispEdges",
                         'filter': "url(#colorMat)"
                     })

        writer.element('path',
                       attrib={
                           'd': dpath,
                           'fill': 'url(#GR%x_0)' % self._n_gradients,
                           'shape-rendering': "crispEdges"
                       })

        writer.element('path',
                       attrib={
                           'd': dpath,
                           'fill': 'url(#GR%x_1)' % self._n_gradients,
                           'filter': 'url(#colorAdd)',
                           'shape-rendering': "crispEdges"
                       })

        writer.element('path',
                       attrib={
                           'd': dpath,
                           'fill': 'url(#GR%x_2)' % self._n_gradients,
                           'filter': 'url(#colorAdd)',
                           'shape-rendering': "crispEdges"
                       })

        writer.end('g')

        self._n_gradients += 1

    def draw_gouraud_triangles(self, gc, triangles_array, colors_array,
                               transform):
        attrib = {}
        clipid = self._get_clip(gc)
        if clipid is not None:
            attrib['clip-path'] = 'url(#%s)' % clipid

        self.writer.start('g', attrib=attrib)

        transform = transform.frozen()
        for tri, col in zip(triangles_array, colors_array):
            self.draw_gouraud_triangle(gc, tri, col, transform)

        self.writer.end('g')

    def option_scale_image(self):
        # docstring inherited
        return True

    def get_image_magnification(self):
        return self.image_dpi / 72.0

    def draw_image(self, gc, x, y, im, transform=None):
        # docstring inherited

        h, w = im.shape[:2]

        if w == 0 or h == 0:
            return

        attrib = {}
        clipid = self._get_clip(gc)
        if clipid is not None:
            # Can't apply clip-path directly to the image because the
            # image has a transformation, which would also be applied
            # to the clip-path
            self.writer.start('g', attrib={'clip-path': 'url(#%s)' % clipid})

        oid = gc.get_gid()
        url = gc.get_url()
        if url is not None:
            self.writer.start('a', attrib={'xlink:href': url})
        if rcParams['svg.image_inline']:
            buf = _png.write_png(im, None)
            oid = oid or self._make_id('image', buf)
            attrib['xlink:href'] = ("data:image/png;base64,\n" +
                                    base64.b64encode(buf).decode('ascii'))
        else:
            if self.basename is None:
                raise ValueError("Cannot save image data to filesystem when "
                                 "writing SVG to an in-memory buffer")
            filename = '{}.image{}.png'.format(self.basename,
                                               next(self._image_counter))
            _log.info('Writing image file for inclusion: %s', filename)
            with open(filename, 'wb') as file:
                _png.write_png(im, file)
            oid = oid or 'Im_' + self._make_id('image', filename)
            attrib['xlink:href'] = filename

        attrib['id'] = oid

        if transform is None:
            w = 72.0 * w / self.image_dpi
            h = 72.0 * h / self.image_dpi

            self.writer.element('image',
                                transform=generate_transform([
                                    ('scale', (1, -1)), ('translate', (0, -h))
                                ]),
                                x=short_float_fmt(x),
                                y=short_float_fmt(-(self.height - y - h)),
                                width=short_float_fmt(w),
                                height=short_float_fmt(h),
                                attrib=attrib)
        else:
            alpha = gc.get_alpha()
            if alpha != 1.0:
                attrib['opacity'] = short_float_fmt(alpha)

            flipped = (Affine2D().scale(1.0 / w, 1.0 / h) + transform +
                       Affine2D().translate(x, y).scale(1.0, -1.0).translate(
                           0.0, self.height))

            attrib['transform'] = generate_transform([('matrix',
                                                       flipped.frozen())])
            self.writer.element('image',
                                width=short_float_fmt(w),
                                height=short_float_fmt(h),
                                attrib=attrib)

        if url is not None:
            self.writer.end('a')
        if clipid is not None:
            self.writer.end('g')

    def _adjust_char_id(self, char_id):
        return char_id.replace("%20", "_")

    def _draw_text_as_path(self, gc, x, y, s, prop, angle, ismath, mtext=None):
        """
        Draw the text by converting it to paths using the textpath module.

        Parameters
        ----------
        prop : `matplotlib.font_manager.FontProperties`
          font property

        s : str
          text to be converted

        ismath : bool
          If True, use mathtext parser. If "TeX", use *usetex* mode.

        """
        writer = self.writer

        writer.comment(s)

        glyph_map = self._glyph_map

        text2path = self._text2path
        color = rgb2hex(gc.get_rgb())
        fontsize = prop.get_size_in_points()

        style = {}
        if color != '#000000':
            style['fill'] = color

        alpha = gc.get_alpha() if gc.get_forced_alpha() else gc.get_rgb()[3]
        if alpha != 1:
            style['opacity'] = short_float_fmt(alpha)

        if not ismath:
            font = text2path._get_font(prop)
            _glyphs = text2path.get_glyphs_with_font(
                font, s, glyph_map=glyph_map, return_new_glyphs_only=True)
            glyph_info, glyph_map_new, rects = _glyphs

            if glyph_map_new:
                writer.start('defs')
                for char_id, glyph_path in glyph_map_new.items():
                    path = Path(*glyph_path)
                    path_data = self._convert_path(path, simplify=False)
                    writer.element('path', id=char_id, d=path_data)
                writer.end('defs')

                glyph_map.update(glyph_map_new)

            attrib = {}
            attrib['style'] = generate_css(style)
            font_scale = fontsize / text2path.FONT_SCALE
            attrib['transform'] = generate_transform([
                ('translate', (x, y)), ('rotate', (-angle, )),
                ('scale', (font_scale, -font_scale))
            ])

            writer.start('g', attrib=attrib)
            for glyph_id, xposition, yposition, scale in glyph_info:
                attrib = {'xlink:href': '#%s' % glyph_id}
                if xposition != 0.0:
                    attrib['x'] = short_float_fmt(xposition)
                if yposition != 0.0:
                    attrib['y'] = short_float_fmt(yposition)
                writer.element('use', attrib=attrib)

            writer.end('g')
        else:
            if ismath == "TeX":
                _glyphs = text2path.get_glyphs_tex(prop,
                                                   s,
                                                   glyph_map=glyph_map,
                                                   return_new_glyphs_only=True)
            else:
                _glyphs = text2path.get_glyphs_mathtext(
                    prop, s, glyph_map=glyph_map, return_new_glyphs_only=True)

            glyph_info, glyph_map_new, rects = _glyphs

            # We store the character glyphs w/o flipping.  Instead, the
            # coordinate will be flipped when these characters are used.
            if glyph_map_new:
                writer.start('defs')
                for char_id, glyph_path in glyph_map_new.items():
                    char_id = self._adjust_char_id(char_id)
                    # Some characters are blank
                    if not len(glyph_path[0]):
                        path_data = ""
                    else:
                        path = Path(*glyph_path)
                        path_data = self._convert_path(path, simplify=False)
                    writer.element('path', id=char_id, d=path_data)
                writer.end('defs')

                glyph_map.update(glyph_map_new)

            attrib = {}
            font_scale = fontsize / text2path.FONT_SCALE
            attrib['style'] = generate_css(style)
            attrib['transform'] = generate_transform([
                ('translate', (x, y)), ('rotate', (-angle, )),
                ('scale', (font_scale, -font_scale))
            ])

            writer.start('g', attrib=attrib)
            for char_id, xposition, yposition, scale in glyph_info:
                char_id = self._adjust_char_id(char_id)

                writer.element('use',
                               transform=generate_transform([
                                   ('translate', (xposition, yposition)),
                                   ('scale', (scale, )),
                               ]),
                               attrib={'xlink:href': '#%s' % char_id})

            for verts, codes in rects:
                path = Path(verts, codes)
                path_data = self._convert_path(path, simplify=False)
                writer.element('path', d=path_data)

            writer.end('g')

    def _draw_text_as_text(self, gc, x, y, s, prop, angle, ismath, mtext=None):
        writer = self.writer

        color = rgb2hex(gc.get_rgb())
        style = {}
        if color != '#000000':
            style['fill'] = color

        alpha = gc.get_alpha() if gc.get_forced_alpha() else gc.get_rgb()[3]
        if alpha != 1:
            style['opacity'] = short_float_fmt(alpha)

        if not ismath:
            font = self._get_font(prop)
            font.set_text(s, 0.0, flags=LOAD_NO_HINTING)

            attrib = {}
            # Must add "px" to workaround a Firefox bug
            style['font-size'] = short_float_fmt(prop.get_size()) + 'px'
            style['font-family'] = str(font.family_name)
            style['font-style'] = prop.get_style().lower()
            style['font-weight'] = str(prop.get_weight()).lower()
            attrib['style'] = generate_css(style)

            if mtext and (angle == 0 or mtext.get_rotation_mode() == "anchor"):
                # If text anchoring can be supported, get the original
                # coordinates and add alignment information.

                # Get anchor coordinates.
                transform = mtext.get_transform()
                ax, ay = transform.transform(mtext.get_unitless_position())
                ay = self.height - ay

                # Don't do vertical anchor alignment. Most applications do not
                # support 'alignment-baseline' yet. Apply the vertical layout
                # to the anchor point manually for now.
                angle_rad = np.deg2rad(angle)
                dir_vert = np.array([np.sin(angle_rad), np.cos(angle_rad)])
                v_offset = np.dot(dir_vert, [(x - ax), (y - ay)])
                ax = ax + v_offset * dir_vert[0]
                ay = ay + v_offset * dir_vert[1]

                ha_mpl_to_svg = {
                    'left': 'start',
                    'right': 'end',
                    'center': 'middle'
                }
                style['text-anchor'] = ha_mpl_to_svg[mtext.get_ha()]

                attrib['x'] = short_float_fmt(ax)
                attrib['y'] = short_float_fmt(ay)
                attrib['style'] = generate_css(style)
                attrib['transform'] = "rotate(%s, %s, %s)" % (short_float_fmt(
                    -angle), short_float_fmt(ax), short_float_fmt(ay))
                writer.element('text', s, attrib=attrib)
            else:
                attrib['transform'] = generate_transform([
                    ('translate', (x, y)), ('rotate', (-angle, ))
                ])

                writer.element('text', s, attrib=attrib)

        else:
            writer.comment(s)

            width, height, descent, svg_elements, used_characters = \
                self.mathtext_parser.parse(s, 72, prop)
            svg_glyphs = svg_elements.svg_glyphs
            svg_rects = svg_elements.svg_rects

            attrib = {}
            attrib['style'] = generate_css(style)
            attrib['transform'] = generate_transform([('translate', (x, y)),
                                                      ('rotate', (-angle, ))])

            # Apply attributes to 'g', not 'text', because we likely have some
            # rectangles as well with the same style and transformation.
            writer.start('g', attrib=attrib)

            writer.start('text')

            # Sort the characters by font, and output one tspan for each.
            spans = OrderedDict()
            for font, fontsize, thetext, new_x, new_y, metrics in svg_glyphs:
                style = generate_css({
                    'font-size': short_float_fmt(fontsize) + 'px',
                    'font-family': font.family_name,
                    'font-style': font.style_name.lower(),
                    'font-weight': font.style_name.lower()
                })
                if thetext == 32:
                    thetext = 0xa0  # non-breaking space
                spans.setdefault(style, []).append((new_x, -new_y, thetext))

            for style, chars in spans.items():
                chars.sort()

                if len({y for x, y, t in chars}) == 1:  # Are all y's the same?
                    ys = str(chars[0][1])
                else:
                    ys = ' '.join(str(c[1]) for c in chars)

                attrib = {
                    'style': style,
                    'x': ' '.join(short_float_fmt(c[0]) for c in chars),
                    'y': ys
                }

                writer.element('tspan',
                               ''.join(chr(c[2]) for c in chars),
                               attrib=attrib)

            writer.end('text')

            if len(svg_rects):
                for x, y, width, height in svg_rects:
                    writer.element('rect',
                                   x=short_float_fmt(x),
                                   y=short_float_fmt(-y + height),
                                   width=short_float_fmt(width),
                                   height=short_float_fmt(height))

            writer.end('g')

    def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!', mtext=None):
        # docstring inherited
        self._draw_text_as_path(gc, x, y, s, prop, angle, ismath="TeX")

    def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
        # docstring inherited

        clipid = self._get_clip(gc)
        if clipid is not None:
            # Cannot apply clip-path directly to the text, because
            # it has a transformation
            self.writer.start('g', attrib={'clip-path': 'url(#%s)' % clipid})

        if gc.get_url() is not None:
            self.writer.start('a', {'xlink:href': gc.get_url()})

        if rcParams['svg.fonttype'] == 'path':
            self._draw_text_as_path(gc, x, y, s, prop, angle, ismath, mtext)
        else:
            self._draw_text_as_text(gc, x, y, s, prop, angle, ismath, mtext)

        if gc.get_url() is not None:
            self.writer.end('a')

        if clipid is not None:
            self.writer.end('g')

    def flipy(self):
        # docstring inherited
        return True

    def get_canvas_width_height(self):
        # docstring inherited
        return self.width, self.height

    def get_text_width_height_descent(self, s, prop, ismath):
        # docstring inherited
        return self._text2path.get_text_width_height_descent(s, prop, ismath)
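A hedged end-to-end sketch: matplotlib picks this renderer itself when the SVG backend writes a figure, so none of the internals above need to be called directly:

import matplotlib
matplotlib.use('svg')                       # headless SVG backend
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0, 1, 4], marker='o')   # markers exercise draw_markers
fig.savefig('out.svg')                      # drives RendererSVG under the hood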
Esempio n. 47
0
def generate_block_shapes(blockable, args, level):
    if not blockable:
        raise ValueError

    mapper = OrderedDict()
    for d in blockable:
        mapper[d] = mapper.get(d.parent, -1) + 1

    # Generate level-0 block shapes
    level_0 = [d for d, v in mapper.items() if v == 0]
    # Max attemptable block shape
    max_bs = tuple((d.step, d.symbolic_size.subs(args)) for d in level_0)
    # Defaults (basic mode)
    ret = [
        tuple((d.step, v) for d in level_0) for v in options['blocksize-l0']
    ]
    # Always try the entire iteration space (degenerate block)
    ret.append(max_bs)
    # More attempts if autotuning in aggressive mode
    if level in ['aggressive', 'max']:
        # Ramp up to larger block shapes
        handle = tuple((i, options['blocksize-l0'][-1]) for i, _ in ret[0])
        for i in range(3):
            new_bs = tuple((b, v * 2) for b, v in handle)
            ret.insert(ret.index(handle) + 1, new_bs)
            handle = new_bs
        handle = []
        # Extended shuffling for the smaller block shapes
        for bs in ret[:4]:
            for i in ret:
                handle.append(bs[:-1] + (i[-1], ))
        # Some more shuffling for all block shapes
        for bs in list(ret):
            ncombs = len(bs)
            for i in range(ncombs):
                for j in combinations(dict(bs), i + 1):
                    handle.append(
                        tuple((b, v * 2 if b in j else v) for b, v in bs))
        ret.extend(handle)
    # Drop block shapes exceeding the iteration space extent
    ret = [i for i in ret if all(dict(i)[k] <= v for k, v in max_bs)]
    # Drop redundant block shapes
    ret = filter_ordered(ret)

    # Generate level-1 block shapes
    level_1 = [d for d, v in mapper.items() if v == 1]
    if level_1:
        assert len(level_1) == len(level_0)
        assert all(d1.parent is d0 for d0, d1 in zip(level_0, level_1))
        for bs in list(ret):
            handle = []
            for v in options['blocksize-l1']:
                # To be a valid block size, it must be smaller than the
                # parent's block size and divide it evenly
                if all(v <= i and i % v == 0 for _, i in bs):
                    ret.append(bs + tuple((d.step, v) for d in level_1))
            ret.remove(bs)

    # Generate level-n (n > 1) block shapes
    # TODO -- currently, there's no Operator producing depth>2 hierarchical blocking,
    # so for simplicity we ignore this for the time being

    # Normalize
    ret = [tuple((k.name, v) for k, v in bs) for bs in ret]

    return ret
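An isolated sketch of the aggressive-mode ramp above, which doubles every block dimension three times (the dimension names are made up):

handle = (('x0_blk_size', 64), ('y0_blk_size', 64))
ramped = []
for _ in range(3):
    handle = tuple((b, v * 2) for b, v in handle)
    ramped.append(handle)

assert ramped[-1] == (('x0_blk_size', 512), ('y0_blk_size', 512))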
Esempio n. 48
0
def decryptbyFA(cyphertext):
    french = {
        'A': 9.42,
        'B': 1.02,
        'C': 2.64,
        'D': 3.39,
        'E': 15.87,
        'F': 0.95,
        'G': 1.04,
        'H': 0.77,
        'I': 8.41,
        'J': 0.89,
        'K': 0.00,
        'L': 5.34,
        'M': 3.24,
        'N': 7.15,
        'O': 5.14,
        'P': 2.86,
        'Q': 1.06,
        'R': 6.46,
        'S': 7.90,
        'T': 7.26,
        'U': 6.24,
        'V': 2.15,
        'W': 0.00,
        'X': 0.30,
        'Y': 0.24,
        'Z': 0.32
    }

    text = cyphertext

    freq = french.copy()

    print(freq)
    for key in freq:
        freq[key] = 0.00

    for char in cyphertext:
        freq[char] = frequence(char, text)

    freq = OrderedDict(sorted(freq.items(), key=lambda t: t[1], reverse=True))

    print(freq)
    for key in freq:
        print(key + ' -> ' + str(freq.get(key) * 100))

    frenchSorted = OrderedDict(
        sorted(french.items(), key=lambda t: t[1], reverse=True))

    possibleKeys = []
    maxKey = getMaxValueKey(french)  # most frequent French letter ('E')
    for i, key in enumerate(freq):
        if i < 3:
            # candidate Caesar shift derived from this high-frequency cipher letter;
            # ((26 + ord(key) - 65) - ord(maxKey) - 65) % 26 reduces to this
            possibleKeys.append((ord(key) - ord(maxKey)) % 26)

    indexKey = -1
    errorMin = 99999999999
    for possibleKey in possibleKeys:
        plainfreq = {}
        for i, key in enumerate(freq):
            plainfreq[chr((ord(key) - 65 + possibleKey) % 26 + 65)] = freq[key]
        errorCompute = compare_dict(frenchSorted, plainfreq)
        if errorCompute < errorMin:
            errorMin = errorCompute
            indexKey = possibleKeys.index(possibleKey)

    code_cesar(cyphertext, possibleKeys[indexKey])
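The snippet relies on helpers that are not shown (frequence, getMaxValueKey, compare_dict, code_cesar); plausible minimal versions follow, purely illustrative and not the original implementations:

def frequence(char, text):
    """Relative frequency of char in text, in [0, 1]."""
    return text.count(char) / len(text) if text else 0.0

def getMaxValueKey(d):
    """Key with the largest value ('E' for the French table)."""
    return max(d, key=d.get)

def compare_dict(reference, candidate):
    """Sum of squared differences; candidate holds fractions while the
    reference table holds percentages, hence the * 100 (an assumption)."""
    return sum((reference[k] - candidate.get(k, 0.0) * 100) ** 2
               for k in reference)

def code_cesar(text, key):
    """Shift every letter by key (mod 26) and print the result."""
    print(''.join(chr((ord(c) - 65 + key) % 26 + 65) if 'A' <= c <= 'Z' else c
                  for c in text))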
Esempio n. 49
0
class SettingsRegistry(object):
    """Registry of all API-configurable settings and categories."""
    def __init__(self, settings=None):
        """
        :param settings: a ``django.conf.LazySettings`` object used to lookup
                         file-based field values (e.g., ``local_settings.py``
                         and ``/etc/tower/conf.d/example.py``).  If unspecified,
                         defaults to ``django.conf.settings``.
        """
        if settings is None:
            from django.conf import settings
        self._registry = OrderedDict()
        self._validate_registry = {}
        self._dependent_settings = {}
        self.settings = settings

    def register(self, setting, **kwargs):
        if setting in self._registry:
            raise ImproperlyConfigured(
                'Setting "{}" is already registered.'.format(setting))
        category = kwargs.setdefault('category', None)
        category_slug = kwargs.setdefault('category_slug',
                                          slugify(category or '') or None)
        if category_slug in {'all', 'changed', 'user-defaults'}:
            raise ImproperlyConfigured(
                '"{}" is a reserved category slug.'.format(category_slug))
        if 'field_class' not in kwargs:
            raise ImproperlyConfigured(
                'Setting must provide a field_class keyword argument.')
        self._registry[setting] = kwargs

        # Normally for read-only/dynamic settings, depends_on will specify other
        # settings whose changes may affect the value of this setting. Store
        # this setting as a dependent for the other settings, so we can know
        # which extra cache keys to clear when a setting changes.
        depends_on = kwargs.setdefault('depends_on', None) or set()
        for depends_on_setting in depends_on:
            dependent_settings = self._dependent_settings.setdefault(
                depends_on_setting, set())
            dependent_settings.add(setting)
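
        # e.g. register('DERIVED_SETTING', depends_on={'BASE_SETTING'}, ...)
        # leaves _dependent_settings == {'BASE_SETTING': {'DERIVED_SETTING'}},
        # so a change to BASE_SETTING also invalidates DERIVED_SETTING's
        # cached value (the setting names here are illustrative).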

    def unregister(self, setting):
        self._registry.pop(setting, None)
        for dependent_settings in self._dependent_settings.values():
            dependent_settings.discard(setting)

    def register_validate(self, category_slug, func):
        self._validate_registry[category_slug] = func

    def unregister_validate(self, category_slug):
        self._validate_registry.pop(category_slug, None)

    def get_dependent_settings(self, setting):
        return self._dependent_settings.get(setting, set())

    def get_registered_categories(self, features_enabled=None):
        categories = {
            'all': _('All'),
            'changed': _('Changed'),
        }
        for setting, kwargs in self._registry.items():
            category_slug = kwargs.get('category_slug', None)
            if category_slug is None or category_slug in categories:
                continue
            if features_enabled is not None:
                feature_required = kwargs.get('feature_required', None)
                if feature_required and feature_required not in features_enabled:
                    continue
            if category_slug == 'user':
                categories['user'] = _('User')
                categories['user-defaults'] = _('User-Defaults')
            else:
                categories[category_slug] = kwargs.get('category',
                                                       None) or category_slug
        return categories

    def get_registered_settings(self,
                                category_slug=None,
                                read_only=None,
                                features_enabled=None,
                                slugs_to_ignore=frozenset()):
        setting_names = []
        if category_slug == 'user-defaults':
            category_slug = 'user'
        if category_slug == 'changed':
            category_slug = 'all'
        for setting, kwargs in self._registry.items():
            if category_slug not in {
                    None, 'all',
                    kwargs.get('category_slug', None)
            }:
                continue
            if kwargs.get('category_slug', None) in slugs_to_ignore:
                continue
            if (read_only in {True, False}
                    and kwargs.get('read_only', False) != read_only
                    and setting not in ('AWX_ISOLATED_PRIVATE_KEY',
                                        'AWX_ISOLATED_PUBLIC_KEY')):
                # Note: Doesn't catch fields that set read_only via __init__;
                # read-only field kwargs should always include read_only=True.
                continue
            if features_enabled is not None:
                feature_required = kwargs.get('feature_required', None)
                if feature_required and feature_required not in features_enabled:
                    continue
            setting_names.append(setting)
        return setting_names

    def get_registered_validate_func(self, category_slug):
        return self._validate_registry.get(category_slug, None)

    def is_setting_encrypted(self, setting):
        return bool(self._registry.get(setting, {}).get('encrypted', False))

    def is_setting_read_only(self, setting):
        return bool(self._registry.get(setting, {}).get('read_only', False))

    def get_setting_category(self, setting):
        return self._registry.get(setting, {}).get('category_slug', None)

    def get_setting_field(self,
                          setting,
                          mixin_class=None,
                          for_user=False,
                          **kwargs):
        from rest_framework.fields import empty
        field_kwargs = {}
        field_kwargs.update(self._registry[setting])
        field_kwargs.update(kwargs)
        field_class = original_field_class = field_kwargs.pop('field_class')
        if mixin_class:
            field_class = type(field_class.__name__,
                               (mixin_class, field_class), {})
        category_slug = field_kwargs.pop('category_slug', None)
        category = field_kwargs.pop('category', None)
        depends_on = frozenset(field_kwargs.pop('depends_on', None) or [])
        placeholder = field_kwargs.pop('placeholder', empty)
        feature_required = field_kwargs.pop('feature_required', empty)
        encrypted = bool(field_kwargs.pop('encrypted', False))
        defined_in_file = bool(field_kwargs.pop('defined_in_file', False))
        if getattr(field_kwargs.get('child', None), 'source',
                   None) is not None:
            field_kwargs['child'].source = None
        field_instance = field_class(**field_kwargs)
        field_instance.category_slug = category_slug
        field_instance.category = category
        field_instance.depends_on = depends_on
        if placeholder is not empty:
            field_instance.placeholder = placeholder
        if feature_required is not empty:
            field_instance.feature_required = feature_required
        field_instance.defined_in_file = defined_in_file
        if field_instance.defined_in_file:
            field_instance.help_text = (str(
                _('This value has been set manually in a settings file.')) +
                                        '\n\n' + str(field_instance.help_text))
        field_instance.encrypted = encrypted
        original_field_instance = field_instance
        if field_class != original_field_class:
            original_field_instance = original_field_class(**field_kwargs)
        if category_slug == 'user' and for_user:
            try:
                field_instance.default = original_field_instance.to_representation(
                    getattr(self.settings, setting))
            except Exception:
                logger.warning(
                    'Unable to retrieve default value for user setting "%s".',
                    setting,
                    exc_info=True)
        elif not field_instance.read_only or field_instance.default is empty or field_instance.defined_in_file:
            try:
                field_instance.default = original_field_instance.to_representation(
                    self.settings._awx_conf_settings._get_default(setting))
            except AttributeError:
                pass
            except Exception:
                logger.warning(
                    'Unable to retrieve default value for setting "%s".',
                    setting,
                    exc_info=True)

        # `PENDO_TRACKING_STATE` is disabled for the open source awx license
        if setting == 'PENDO_TRACKING_STATE' and get_license().get(
                'license_type') == 'open':
            field_instance.read_only = True

        return field_instance
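# Illustrative usage sketch (not part of the original module): registering a
# hypothetical setting and querying the registry. `fields` is Django REST
# Framework's module; the setting name, category and default are made up.
from rest_framework import fields

registry = SettingsRegistry()
registry.register(
    'EXAMPLE_GREETING',
    field_class=fields.CharField,
    category='Example',
    category_slug='example',
    default='hello',
)
assert registry.get_registered_settings(category_slug='example') == \
    ['EXAMPLE_GREETING']
assert not registry.is_setting_encrypted('EXAMPLE_GREETING')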
Esempio n. 50
0
class ApplicationDescription(object):

    GLOBAL_NAMESPACE = '_global'

    def __init__(self, application, namespace=None):
        """
        Constructor.

        :type application: Application
        :type namespace: str
        """
        self._application = application
        self._namespace = namespace
        self._namespaces = OrderedDict()
        self._commands = OrderedDict()
        self._aliases = {}

        self._inspect_application()

    def get_namespaces(self):
        return self._namespaces

    def get_commands(self):
        return self._commands

    def get_command(self, name):
        if name not in self._commands and name not in self._aliases:
            raise ValueError('Command %s does not exist.' % name)

        return self._commands.get(name, self._aliases.get(name))

    def _inspect_application(self):
        namespace = None
        if self._namespace:
            namespace = self._application.find_namespace(self._namespace)

        all = self._application.all(namespace)

        for namespace, commands in self._sort_commands(all):
            names = []

            for name, command in commands:
                if not command.get_name() or command.is_hidden():
                    continue

                if command.get_name() == name:
                    self._commands[name] = command
                else:
                    self._aliases[name] = command

                names.append(name)

            self._namespaces[namespace] = {'id': namespace, 'commands': names}

    def _sort_commands(self, commands):
        """
        Sorts commands in alphabetical order, grouped by namespace

        :param commands: A dict of commands
        :type commands: dict

        :return: A sorted list of commands
        """
        namespaced_commands = {}
        for name, command in commands.items():
            key = self._application.extract_namespace(name, 1)
            if not key:
                key = '_global'

            if key in namespaced_commands:
                namespaced_commands[key][name] = command
            else:
                namespaced_commands[key] = {name: command}

        for namespace, commands in namespaced_commands.items():
            namespaced_commands[namespace] = sorted(commands.items(), key=lambda x: x[0])

        namespaced_commands = sorted(namespaced_commands.items(), key=lambda x: x[0])

        return namespaced_commands
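# Standalone illustration of the grouping performed by _sort_commands above,
# assuming ':' separates namespace from command name (which is what
# Application.extract_namespace is expected to yield at depth 1):
names = ['help', 'db:migrate', 'db:seed', 'cache:clear']
grouped = {}
for name in names:
    key = name.split(':', 1)[0] if ':' in name else '_global'
    grouped.setdefault(key, []).append(name)
print(sorted((ns, sorted(cmds)) for ns, cmds in grouped.items()))
# [('_global', ['help']), ('cache', ['cache:clear']), ('db', ['db:migrate', 'db:seed'])]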
Esempio n. 51
0
class Command(BaseCommand):
    help = str(_('Synchronize the specified object with the Autotask API'))

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # This can be replaced with a single instantiation of an OrderedDict
        # using kwargs in Python 3.6. But we need Python 3.5 compatibility for
        # now.
        # See https://www.python.org/dev/peps/pep-0468/.
        synchronizers = (
            ('status', sync.StatusSynchronizer, _('Status')),
            ('license_type', syncrest.LicenseTypeSynchronizer,
             _('License Type')),
            ('resource', sync.ResourceSynchronizer, _('Resource')),
            ('ticket_secondary_resource',
             sync.TicketSecondaryResourceSynchronizer,
             _('Ticket Secondary Resource')),
            ('priority', sync.PrioritySynchronizer, _('Priority')),
            ('queue', sync.QueueSynchronizer, _('Queue')),
            ('account_type', syncrest.AccountTypeSynchronizer,
             _('Account Type')),
            ('account', sync.AccountSynchronizer, _('Account')),
            ('account_physical_location',
             sync.AccountPhysicalLocationSynchronizer,
             _('Account Physical Location')),
            ('contract', sync.ContractSynchronizer, _('Contract')),
            ('project_status', sync.ProjectStatusSynchronizer,
             _('Project Status')),
            ('project_type', sync.ProjectTypeSynchronizer, _('Project Type')),
            ('project', syncrest.ProjectSynchronizer, _('Project')),
            ('phase', sync.PhaseSynchronizer, _('Phase')),
            ('task_secondary_resource', sync.TaskSecondaryResourceSynchronizer,
             _('Task Secondary Resource')),
            ('task', syncrest.TaskSynchronizer, _('Task')),
            ('display_color', sync.DisplayColorSynchronizer,
             _('Display Color')),
            ('ticket_category', sync.TicketCategorySynchronizer,
             _('Ticket Category')),
            ('source', sync.SourceSynchronizer, _('Source')),
            ('issue_type', sync.IssueTypeSynchronizer, _('Issue Type')),
            ('ticket_type', sync.TicketTypeSynchronizer, _('Ticket Type')),
            ('sub_issue_type', sync.SubIssueTypeSynchronizer,
             _('Sub Issue Type')),
            ('ticket', syncrest.TicketSynchronizer, _('Ticket')),
            ('note_type', sync.NoteTypeSynchronizer, _('Note Type')),
            ('ticket_note', sync.TicketNoteSynchronizer, _('Ticket Note')),
            ('task_note', sync.TaskNoteSynchronizer, _('Task Note')),
            ('task_type_link', syncrest.TaskTypeLinkSynchronizer,
             _('Task Type Link')),
            ('use_type', syncrest.UseTypeSynchronizer, _('Use Type')),
            ('allocation_code', sync.AllocationCodeSynchronizer,
             _('Allocation Code')),
            ('role', syncrest.RoleSynchronizer, _('Role')),
            ('department', syncrest.DepartmentSynchronizer, _('Department')),
            ('time_entry', sync.TimeEntrySynchronizer, _('Time Entry')),
            ('resource_role_department',
             sync.ResourceRoleDepartmentSynchronizer,
             _('Resource Role Department')),
            ('resource_service_desk_role',
             sync.ResourceServiceDeskRoleSynchronizer,
             _('Resource Service Desk Role')),
            ('service_call_status', sync.ServiceCallStatusSynchronizer,
             _('Service Call Status')),
            ('service_call', sync.ServiceCallSynchronizer, _('Service Call')),
            ('service_call_ticket', sync.ServiceCallTicketSynchronizer,
             _('Service Call Ticket')),
            ('service_call_task', sync.ServiceCallTaskSynchronizer,
             _('Service Call Task')),
            ('service_call_ticket_resource',
             sync.ServiceCallTicketResourceSynchronizer,
             _('Service Call Ticket Resource')),
            ('service_call_task_resource',
             sync.ServiceCallTaskResourceSynchronizer,
             _('Service Call Task Resource')),
            ('task_predecessor', sync.TaskPredecessorSynchronizer,
             _('Task Predecessor')),
            ('ticket_udf', sync.TicketUDFSynchronizer, _('Ticket UDF')),
            ('task_udf', sync.TaskUDFSynchronizer, _('Task UDF')),
            ('project_udf', sync.ProjectUDFSynchronizer, _('Project UDF')),
            ('contact', syncrest.ContactSynchronizer, _('Contact')),
        )
        self.synchronizer_map = OrderedDict()
        for name, synchronizer, obj_name in synchronizers:
            self.synchronizer_map[name] = (synchronizer, obj_name)
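        # On Python 3.6+ keyword arguments keep their order (PEP 468), so the
        # two steps above could collapse into a single call, e.g.:
        #   self.synchronizer_map = OrderedDict(
        #       status=(sync.StatusSynchronizer, _('Status')),
        #       ...
        #   )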

    def add_arguments(self, parser):
        parser.add_argument(OPTION_NAME, nargs='?', type=str)
        parser.add_argument('--full',
                            action='store_true',
                            dest='full',
                            default=False)

    def sync_by_class(self, sync_class, obj_name, full_option=False):
        synchronizer = sync_class(full=full_option)

        created_count, updated_count, skipped_count, deleted_count = \
            synchronizer.sync()

        msg = _('{} Sync Summary - Created: {}, Updated: {}, Skipped: {}')
        fmt_msg = msg.format(obj_name, created_count, updated_count,
                             skipped_count)

        if full_option:
            msg = _('{} Sync Summary - Created: {}, Updated: {}, Skipped: {}, '
                    'Deleted: {}')
            fmt_msg = msg.format(obj_name, created_count, updated_count,
                                 skipped_count, deleted_count)

        self.stdout.write(fmt_msg)

    def handle(self, *args, **options):
        sync_classes = []
        autotask_object_arg = options[OPTION_NAME]
        full_option = options.get('full', False)

        if autotask_object_arg:
            object_arg = autotask_object_arg
            sync_tuple = self.synchronizer_map.get(object_arg)

            if sync_tuple:
                sync_classes.append(sync_tuple)
            else:
                msg = _('Invalid AT object {}, '
                        'choose one of the following: \n{}')
                options_txt = ', '.join(self.synchronizer_map.keys())
                msg = msg.format(object_arg, options_txt)
                raise CommandError(msg)
        else:
            sync_classes = self.synchronizer_map.values()

        failed_classes = 0
        error_messages = ''

        for sync_class, obj_name in sync_classes:
            error_msg = None
            try:
                self.sync_by_class(sync_class,
                                   obj_name,
                                   full_option=full_option)

            except AutotaskProcessException as e:
                error_msg = ERROR_MESSAGE_TEMPLATE.format(
                    obj_name, api.parse_autotaskprocessexception(e))

            except AutotaskAPIException as e:
                error_msg = ERROR_MESSAGE_TEMPLATE.format(
                    obj_name, api.parse_autotaskapiexception(e))

            except SAXParseException as e:
                error_msg = 'Failed to connect to Autotask API. ' \
                      'The error was: {}'.format(e)

            except apirest.AutotaskAPIError as e:
                error_msg = ERROR_MESSAGE_TEMPLATE.format(obj_name, e)

            finally:
                if error_msg:
                    self.stderr.write(error_msg)
                    error_messages += '{}\n'.format(error_msg)
                    failed_classes += 1

        if failed_classes > 0:
            msg = '{} class{} failed to sync.\n'.format(
                failed_classes,
                '' if failed_classes == 1 else 'es',
            )
            msg += 'Errors:\n'
            msg += error_messages
            raise CommandError(msg)
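# Hypothetical invocation (the actual command name is whatever file name this
# module is installed under in management/commands/):
#   python manage.py atsync ticket --full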
Esempio n. 52
0
import sys
from collections import OrderedDict

print("Enter Number: ")
count = int(sys.stdin.readline())
print("Enter Data: ")
words = []
word_appearence = OrderedDict()
for i in range(count):
    data = sys.stdin.readline()
    words.append(data.strip('\n'))

print(words)

print("Number of unique words")
unique_words = set(words)
print(len(unique_words))

for word in words:
    word_appearence[word] = word_appearence.get(word, 0) + 1

for key in word_appearence:
    print(word_appearence[key], end=' ')
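# The counting loop above is equivalent to collections.Counter, which also
# preserves first-appearance order on Python 3.7+:
#   word_appearence = Counter(words)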
Esempio n. 53
0
class FtdiEeprom:
    """FTDI EEPROM management
    """

    _PROPS = namedtuple('PROPS', 'size user dynoff chipoff')
    """Properties for each FTDI device release.

       * size is the size in bytes of the EEPROM storage area
       * user is the size in bytes of the user storage area, if any/supported
       * dynoff is the offset in EEPROM of the first bytes to store strings
       * chipoff is the offset in EEPROM of the EEPROM chip type
    """

    _PROPERTIES = {
        0x0200: _PROPS(0, None, 0, None),        # FT232AM
        0x0400: _PROPS(256, 0x14, 0x94, None),   # FT232BM
        0x0500: _PROPS(256, 0x16, 0x96, 0x14),   # FT2232D
        0x0600: _PROPS(128, None, 0x18, None),   # FT232R
        0x0700: _PROPS(256, 0x1A, 0x9A, 0x18),   # FT2232H
        0x0800: _PROPS(256, 0x1A, 0x9A, 0x18),   # FT4232H
        0x0900: _PROPS(256, 0x1A, 0xA0, 0x1e),   # FT232H
        0x1000: _PROPS(1024, 0x1A, 0xA0, None),  # FT230X/FT231X/FT234X
    }
    """EEPROM properties."""


    CBUS = IntEnum('CBus',
                   'TXDEN PWREN TXLED RXLED TXRXLED SLEEP CLK48 CLK24 CLK12 '
                   'CLK6 GPIO BB_WR BB_RD', start=0)
    """Alternate features for legacy FT232R devices."""

    CBUSH = IntEnum('CBusH',
                    'TRISTATE TXLED RXLED TXRXLED PWREN SLEEP DRIVE0 DRIVE1 '
                    'GPIO TXDEN CLK30 CLK15 CLK7_5', start=0)
    """Alternate features for FT232H/FT2232H/FT4232H devices."""

    CBUSX = IntEnum('CBusX',
                    'TRISTATE TXLED RXLED TXRXLED PWREN SLEEP DRIVE0 DRIVE1 '
                    'GPIO TXDEN CLK24 CLK12 CLK6 BAT_DETECT BAT_NDETECT '
                    'I2C_TXE I2C_RXF VBUS_SENSE BB_WR BB_RD TIMESTAMP AWAKE',
                    start=0)
    """Alternate features for FT230X devices."""

    UART_BITS = IntFlag('UartBits', 'TXD RXD RTS CTS DTR DSR DCD RI')
    """Inversion flags for FT232R and FT-X devices."""

    CHANNEL = IntFlag('Channel', 'FIFO OPTO CPU FT128 RS485')
    """Alternate port mode."""

    DRIVE = IntFlag('Drive',
                    'LOW HIGH SLOW_SLEW SCHMITT _10 _20 _40 PWRSAVE_DIS')
    """Driver options for I/O pins."""

    CFG1 = IntFlag('Cfg1', 'CLK_IDLE_STATE DATA_LSB FLOW_CONTROL _08 '
                           'HIGH_CURRENTDRIVE _20 _40 SUSPEND_DBUS7')
    """Configuration bits stored @ 0x01."""

    VAR_STRINGS = ('manufacturer', 'product', 'serial')
    """EEPROM strings with variable length."""

    def __init__(self):
        self.log = getLogger('pyftdi.eeprom')
        self._ftdi = Ftdi()
        self._eeprom = bytearray()
        self._size = 0
        self._dev_ver = 0
        self._valid = False
        self._config = OrderedDict()
        self._dirty = set()
        self._modified = False
        self._chip: Optional[int] = None
        self._mirror = False

    def __getattr__(self, name):
        if name in self._config:
            return self._config[name]
        raise AttributeError('No such attribute: %s' % name)

    @classproperty
    def eeprom_sizes(cls) -> List[int]:
        """Return a list of supported EEPROM sizes.

           :return: the supported EEPROM sizes
        """
        return sorted({p.size for p in cls._PROPERTIES.values() if p.size})

    def open(self, device: Union[str, UsbDevice],
             ignore: bool = False, size: Optional[int] = None,
             model: Optional[str] = None) -> None:
        """Open a new connection to the FTDI USB device.

           :param device: the device URL or a USB device instance.
           :param ignore: whether to ignore existing content
           :param size: a custom EEPROM size
           :param model: the EEPROM model used to specify a custom size
        """
        if self._ftdi.is_connected:
            raise FtdiError('Already open')
        if isinstance(device, str):
            self._ftdi.open_from_url(device)
        else:
            self._ftdi.open_from_device(device)
        if model and not size:
            # 93xxx46/56/66
            mmo = match(r'(?i)^93[a-z]*([456])6.*$', model)
            if not mmo:
                raise ValueError(f'Unknown EEPROM device: {model}')
            mmul = int(mmo.group(1))
            size = 128 << (mmul - 4)
        if size:
            if size not in self.eeprom_sizes:
                raise ValueError(f'Unsupported EEPROM size: {size}')
            self._size = min(size, 256)
        if not ignore:
            self._eeprom = self._read_eeprom()
            if self._valid:
                self._decode_eeprom()

    def close(self) -> None:
        """Close the current connection to the FTDI USB device,
        """
        if self._ftdi.is_connected:
            self._ftdi.close()
            self._eeprom = bytearray()
            self._dev_ver = 0
            self._config.clear()

    def connect(self, ftdi: Ftdi, ignore: bool = False) -> None:
        """Connect a FTDI EEPROM to an existing Ftdi instance.

           :param ftdi: the Ftdi instance to use
           :param ignore: whether to ignore existing content
        """
        self._ftdi = ftdi
        self._eeprom = bytearray()
        self._dev_ver = 0
        self._valid = False
        self._config = OrderedDict()
        self._dirty = set()
        if not ignore:
            self._eeprom = self._read_eeprom()
            if self._valid:
                self._decode_eeprom()

    @property
    def device_version(self) -> int:
        """Report the version of the FTDI device.

           :return: the release
        """
        if not self._dev_ver:
            if not self._ftdi.is_connected:
                raise FtdiError('Not connected')
            self._dev_ver = self._ftdi.device_version
        return self._dev_ver

    @property
    def size(self) -> int:
        """Report the EEPROM size.

           Use the most common (default) EEPROM size if the size is not yet
           known.

           :return: the size in bytes
        """
        if not self._size:
            self._size = self.default_size
        return self._size

    @property
    def default_size(self) -> int:
        """Report the default EEPROM size based on the FTDI type.

           The physical EEPROM size may be greater or lower, depending on the
           actual connected EEPROM device.

           :return: the size in bytes
        """
        if self._chip == 0x46:
            return 0x80  # 93C46
        if self._chip == 0x56:
            return 0x100  # 93C56
        if self._chip == 0x66:
            return 0x100  # 93C66 (512 bytes, only 256 are used)
        try:
            eeprom_size = self._PROPERTIES[self.device_version].size
        except (AttributeError, KeyError) as exc:
            raise FtdiError('No EEPROM') from exc
        return eeprom_size

    @property
    def storage_size(self) -> int:
        """Report the number of EEPROM bytes that can be used for configuration
            storage. The physical EEPROM size may be greater.

            :return: the number of bytes in the eeprom that will be used for
                configuration storage
        """
        eeprom_storage_size = self.size
        if self.is_mirroring_enabled:
            eeprom_storage_size = self.mirror_sector
        return eeprom_storage_size

    @property
    def data(self) -> bytes:
        """Returns the content of the EEPROM.

           :return: the content as bytes.
        """
        self._sync_eeprom()
        return bytes(self._eeprom)

    @property
    def properties(self) -> Set[str]:
        """Returns the supported properties for the current device.

           :return: the supported properties.
        """
        props = set(self._config.keys())
        props -= set(self.VAR_STRINGS)
        return props

    @property
    def is_empty(self) -> bool:
        """Reports whether the EEPROM has been erased, or no EEPROM is
           connected to the FTDI EEPROM port.

           :return: True if no content is detected
        """
        if len(self._eeprom) != self.size:
            return False
        for byte in self._eeprom:
            if byte != 0xFF:
                return False
        return True

    @property
    def cbus_pins(self) -> List[int]:
        """Return the list of CBUS pins configured as GPIO, if any

           :return: list of CBUS pins
        """
        pins = [pin for pin in range(0, 10)
                if self._config.get('cbus_func_%d' % pin, '') == 'GPIO']
        return pins

    @property
    def cbus_mask(self) -> int:
        """Return the bitmask of CBUS pins configured as GPIO.

           The bitmap contains four bits, ordered in natural order.

           :return: CBUS mask
        """
        if self.device_version == 0x900:  # FT232H
            cbus = [5, 6, 8, 9]
        else:
            cbus = list(range(4))
        mask = 0
        for bix, pin in enumerate(cbus):
            if self._config.get('cbus_func_%d' % pin, '') == 'GPIO':
                mask |= 1 << bix
        return mask
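    # e.g. on an FT232H (device version 0x900) where only cbus_func_6 is set
    # to 'GPIO', pin 6 is index 1 of [5, 6, 8, 9], so cbus_mask == 0b0010.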

    @property
    def has_mirroring(self) -> bool:
        """Report whether the device supports EEPROM content duplication
           across its two sectors.

           :return: True if the device supports mirroring
        """
        return (self._PROPERTIES[self.device_version].user and
                self._ftdi.device_version != 0x1000)

    @property
    def mirror_sector(self) -> int:
        """Report start address of the mirror sector in the EEPROM.
           This is only valid if the FTDI is capable of mirroring EEPROM data.

           :return: the start address
        """
        if self.has_mirroring:
            return self.size // 2
        raise FtdiError('EEPROM does not support mirroring')

    @property
    def is_mirroring_enabled(self) -> bool:
        """Check if EEPROM mirroring is currently enabled for this EEPROM.
            See enable_mirroring for more details on EEPROM mirroring
            functionality
        """
        return self.has_mirroring and self._mirror

    def enable_mirroring(self, enable: bool) -> None:
        """Enable EEPROM write mirroring. When enabled, this divides the EEPROM
           into 2 sectors and mirrors configuration data between them.

           For example on a 256 byte EEPROM, two 128 byte 'sectors' will be
           used to store identical data. Configuration properties/strings will
           be written to both of these sectors. For some devices (like the
           4232H), this makes the PyFtdi EEPROM functionally similar to
           FT_PROG.

           Note: Data will only be mirrored if the has_mirroring property
           returns true (after establishing a connection to the ftdi)

           :param enable: enable or disable EEPROM mirroring
        """
        self._mirror = enable

    def save_config(self, file: TextIO) -> None:
        """Save the EEPROM content as an INI stream.

           :param file: output stream
        """
        self._sync_eeprom()
        cfg = ConfigParser()
        cfg.add_section('values')
        for name, value in self._config.items():
            val = str(value)
            if isinstance(value, bool):
                val = val.lower()
            cfg.set('values', name, val)
        cfg.add_section('raw')
        length = 16
        for i in range(0, len(self._eeprom), length):
            chunk = self._eeprom[i:i+length]
            hexa = hexlify(chunk).decode()
            cfg.set('raw', '@%02x' % i, hexa)
        cfg.write(file)
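    # Illustrative shape of the INI stream produced above (values abridged,
    # actual content depends on the device):
    #
    #   [values]
    #   manufacturer = FTDI
    #   product = FT232H
    #   serial = FT1ABCDE
    #
    #   [raw]
    #   @00 = 01040860001407000080fa08000000...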

    def load_config(self, file: TextIO, section: Optional[str] = None) -> None:
        """Load the EEPROM content from an INI stream.

           The ``section`` argument selects which section(s) to load:

           * ``raw`` only loads the raw data (hex bytes) from a previous dump
           * ``values`` only loads the values section, that is the human
             readable configuration.
           * ``all``, which is the default section selection, loads the raw
             section, then overwrites part of it with any configuration value
             from the ``values`` section. This provides a handy way to use an
             existing dump from a valid EEPROM content, while customizing some
             parameters, such as the serial number.

           :param file: input stream
           :param section: which section to load from the ini file
        """
        self._sync_eeprom()
        cfg = ConfigParser()
        cfg.read_file(file)
        loaded = False
        sections = cfg.sections()
        if section not in ('all', None) and section not in sections:
            raise FtdiEepromError(f'No such configuration section {section}')
        sect = 'raw'
        if sect in sections and section in (None, 'all', sect):
            if not cfg.has_section(sect):
                raise FtdiEepromError("No '%s' section in INI file" % sect)
            options = cfg.options(sect)
            try:
                for opt in options:
                    if not opt.startswith('@'):
                        raise ValueError()
                    address = int(opt[1:], 16)
                    hexval = cfg.get(sect, opt).strip()
                    buf = unhexlify(hexval)
                    self._eeprom[address:address+len(buf)] = buf
            except IndexError as exc:
                raise ValueError("Invalid address in '%s' section" %
                                 sect) from exc
            except ValueError as exc:
                raise ValueError("Invalid line in '%s' section" %
                                 sect) from exc
            self._compute_crc(self._eeprom, True)
            if not self._valid:
                raise ValueError('Loaded RAW section is invalid (CRC mismatch)')
            loaded = True
        sect = 'values'
        vmap = {
            'manufacturer': 'manufacturer_name',
            'product': 'product_name',
            'serial': 'serial_number'
        }
        if sect in sections and section in (None, 'all', sect):
            if not cfg.has_section(sect):
                raise FtdiEepromError("No '%s' section in INI file" % sect)
            options = cfg.options(sect)
            for opt in options:
                value = cfg.get(sect, opt).strip()
                if opt in vmap:
                    func = getattr(self, 'set_%s' % vmap[opt])
                    func(value)
                else:
                    self.log.debug('Assigning opt %s = %s', opt, value)
                    try:
                        self.set_property(opt, value)
                    except (TypeError, ValueError, NotImplementedError) as exc:
                        self.log.warning("Ignoring setting '%s': %s", opt, exc)
            loaded = True
        if not loaded:
            raise ValueError('Invalid section: %s' % section)
        self._sync_eeprom()

    def set_serial_number(self, serial: str) -> None:
        """Define a new serial number."""
        self._validate_string(serial)
        self._update_var_string('serial', serial)
        self.set_property('has_serial', True)

    def set_manufacturer_name(self, manufacturer: str) -> None:
        """Define a new manufacturer string."""
        self._validate_string(manufacturer)
        self._update_var_string('manufacturer', manufacturer)

    def set_product_name(self, product: str) -> None:
        """Define a new product name."""
        self._validate_string(product)
        self._update_var_string('product', product)

    def set_property(self, name: str, value: Union[str, int, bool],
                     out: Optional[TextIO] = None) -> None:
        """Change the value of a stored property.

           :see: :py:meth:`properties` for a list of valid property names.
                 Note that for now, only a small subset of properties can be
                 changed.
           :param name: the property to change
           :param value: the new value (supported values depend on property)
           :param out: optional output stream to report hints
        """
        mobj = match(r'cbus_func_(\d)', name)
        if mobj:
            if not isinstance(value, str):
                raise ValueError("'%s' should be specified as a string" % name)
            self._set_cbus_func(int(mobj.group(1)), value, out)
            self._dirty.add(name)
            return
        mobj = match(r'([abcd])bus_(drive|slow_slew|schmitt)', name)
        if mobj:
            self._set_bus_control(mobj.group(1), mobj.group(2), value, out)
            self._dirty.add(name)
            return
        mobj = match(r'group_(\d)_(drive|schmitt|slow_slew)', name)
        if mobj:
            self._set_group(int(mobj.group(1)), mobj.group(2), value, out)
            self._dirty.add(name)
            return
        confs = {
            'remote_wakeup': (0, 5),
            'self_powered': (0, 6),
            'in_isochronous': (2, 0),
            'out_isochronous': (2, 1),
            'suspend_pull_down': (2, 2),
            'has_serial': (2, 3),
        }
        hwords = {
            'vendor_id': 0x02,
            'product_id': 0x04,
            'type': 0x06,
        }
        if self.device_version in (0x0400, 0x0500):
            # Type BM and 2232C/D use 0xc to encode the USB version to expose
            # H device use this location to encode bus/group properties
            hwords['usb_version'] = 0x0c
            confs['use_usb_version'] = (2, 4)
        if name in hwords:
            val = to_int(value)
            if not 0 <= val <= 0xFFFF:
                raise ValueError('Invalid value for %s' % name)
            offset = hwords[name]
            self._eeprom[offset:offset+2] = spack('<H', val)
            if self.is_mirroring_enabled:
                # duplicate in 'sector 2'
                offset2 = self.mirror_sector + offset
                self._eeprom[offset2:offset2+2] = spack('<H', val)
            self._dirty.add(name)
            return
        if name in confs:
            val = to_bool(value, permissive=False, allow_int=True)
            offset, bit = confs[name]
            mask = 1 << bit
            if val:
                idx = 0x08 + offset
                self._eeprom[idx] |= mask
                if self.is_mirroring_enabled:
                    # duplicate in 'sector 2'
                    idx2 = self.mirror_sector + idx
                    self._eeprom[idx2] |= mask
            else:
                idx = 0x0a + offset
                self._eeprom[idx] &= ~mask
                if self.is_mirroring_enabled:
                    # duplicate in 'sector 2'
                    idx2 = self.mirror_sector + idx
                    self._eeprom[idx2] &= ~mask
            self._dirty.add(name)
            return
        if name == 'power_max':
            val = to_int(value) >> 1
            idx = 0x09
            self._eeprom[idx] = val
            if self.is_mirroring_enabled:
                # duplicate in 'sector 2'
                idx2 = self.mirror_sector + idx
                self._eeprom[idx2] = val
            self._dirty.add(name)
            return
        if name.startswith('invert_'):
            if not self.device_version in (0x600, 0x1000):
                raise ValueError('UART control line inversion not available '
                                 'with this device')
            self._set_invert(name[len('invert_'):], value, out)
            self._dirty.add(name)
            return
        if name in self.properties:
            if name not in self._config:
                raise NotImplementedError("change is not supported")
            curval = self._config[name]
            try:
                curtype = type(curval)
                value = curtype(value)
            except (ValueError, TypeError) as exc:
                raise ValueError("cannot be converted to "
                    "the proper type '%s'" % curtype) from exc
            if value != curval:
                raise NotImplementedError("not yet supported")
            # no-op change is silently ignored
            return
        raise ValueError(f"unknown property: {name}")

    def erase(self, erase_byte: Optional[int] = 0xFF) -> None:
        """Erase the whole EEPROM.

            :param erase_byte: Optional erase byte to use. Defaults to 0xFF.
        """
        self._eeprom = bytearray([erase_byte] * self.size)
        self._config.clear()
        self._dirty.add('eeprom')

    def initialize(self) -> None:
        """Initialize the EEPROM with some default sensible values.
        """
        dev_ver = self.device_version
        dev_name = Ftdi.DEVICE_NAMES[dev_ver]
        vid = Ftdi.FTDI_VENDOR
        pid = Ftdi.PRODUCT_IDS[vid][dev_name]
        self.set_manufacturer_name('FTDI')
        self.set_product_name(dev_name.upper())
        sernum = ''.join([chr(randint(ord('A'), ord('Z'))) for _ in range(5)])
        self.set_serial_number('FT%d%s' % (randint(0, 9), sernum))
        self.set_property('vendor_id', vid)
        self.set_property('product_id', pid)
        self.set_property('type', dev_ver)
        self.set_property('power_max', 150)
        self._sync_eeprom()

    def sync(self) -> None:
        """Force re-evaluation of configuration after some changes.

           This API is not useful for regular usage, but might help for testing
           when the EEPROM does not go through a full save/load cycle
        """
        self._sync_eeprom()

    def dump_config(self, file: Optional[BinaryIO] = None) -> None:
        """Dump the configuration to a file.

           :param file: the output file, default to stdout
        """
        if self._dirty:
            self._decode_eeprom()
        for name, value in self._config.items():
            print('%s: %s' % (name, value), file=file or sys.stdout)

    def commit(self, dry_run: bool = True, no_crc: bool = False) -> bool:
        """Commit any changes to the EEPROM.

           :param dry_run: log what should be written, do not actually change
                  the EEPROM content
           :param no_crc: do not compute EEPROM CRC. This should only be used
                  to perform a full erasure of the EEPROM, as an attempt to
                  recover from a corrupted config.

           :return: True if some changes have been committed to the EEPROM
        """
        self._sync_eeprom(no_crc)
        if not self._modified:
            self.log.warning('No change to commit, EEPROM not modified')
            return False
        self._ftdi.overwrite_eeprom(self._eeprom, dry_run=dry_run)
        if not dry_run:
            eeprom = self._read_eeprom()
            if eeprom != self._eeprom:
                pos = 0
                for pos, (old, new) in enumerate(zip(self._eeprom, eeprom)):
                    if old != new:
                        break
                pos &= ~0x1
                raise FtdiEepromError('Write to EEPROM failed @ 0x%02x' % pos)
            self._modified = False
        return dry_run

    def reset_device(self):
        """Execute a USB device reset."""
        self._ftdi.reset(usb_reset=True)

    @classmethod
    def _validate_string(cls, string):
        for invchr in ':/':
            # do not accept characters which are interpreted as URL separators
            if invchr in string:
                raise ValueError("Invalid character '%s' in string" % invchr)

    def _update_var_string(self, name: str, value: str) -> None:
        if name not in self.VAR_STRINGS:
            raise ValueError('%s is not a variable string' % name)
        try:
            if value == self._config[name]:
                return
        except KeyError:
            # not yet defined
            pass
        self._config[name] = value
        self._dirty.add(name)

    def _generate_var_strings(self, fill=True) -> None:
        """
            :param fill: fill the remainder of the space after the var strings
                with 0s
        """
        stream = bytearray()
        dynpos = self._PROPERTIES[self.device_version].dynoff
        if dynpos > self._size:
            # if a custom, small EEPROM device is used
            dynpos = 0x40
        data_pos = dynpos
        # start of var-strings in sector 1 (used for mirrored config)
        s1_vstr_start = data_pos - self.mirror_sector
        tbl_pos = 0x0e
        tbl_sector2_pos = self.mirror_sector + tbl_pos
        for name in self.VAR_STRINGS:
            try:
                ustr = self._config[name].encode('utf-16le')
            except KeyError:
                ustr = b''  # bytes, to match the encoded branch above
            length = len(ustr)+2
            stream.append(length)
            stream.append(0x03)  # string descriptor
            stream.extend(ustr)
            self._eeprom[tbl_pos] = data_pos
            if self.is_mirroring_enabled:
                self._eeprom[tbl_sector2_pos] = data_pos
            tbl_pos += 1
            tbl_sector2_pos += 1
            self._eeprom[tbl_pos] = length
            if self.is_mirroring_enabled:
                self._eeprom[tbl_sector2_pos] = length
            tbl_pos += 1
            tbl_sector2_pos += 1
            data_pos += length
        if self.is_mirroring_enabled:
            self._eeprom[s1_vstr_start:s1_vstr_start+len(stream)] = stream
        self._eeprom[dynpos:dynpos+len(stream)] = stream
        mtp = self._ftdi.device_version == 0x1000
        crc_pos = 0x100 if mtp else self._size
        rem = crc_pos - (dynpos + len(stream))
        if rem < 0:
            oversize = (-rem + 2) // 2
            raise FtdiEepromError(f'Cannot fit strings into EEPROM, '
                                  f'{oversize} oversize characters')
        if fill:
            self._eeprom[dynpos+len(stream):crc_pos] = bytes(rem)
            if self.is_mirroring_enabled:
                crc_s1_pos = self.mirror_sector
                self._eeprom[s1_vstr_start+len(stream):crc_s1_pos] = bytes(rem)

    def _sync_eeprom(self, no_crc: bool = False):
        if not self._dirty:
            self.log.debug('No change detected for EEPROM content')
            return
        if not no_crc:
            if any([x in self._dirty for x in self.VAR_STRINGS]):
                self._generate_var_strings()
                for varstr in self.VAR_STRINGS:
                    self._dirty.discard(varstr)
            self._update_crc()
            self._decode_eeprom()
        self._dirty.clear()
        self._modified = True
        self.log.debug('EEPROM content regenerated (not yet committed)')

    def _compute_crc(self, eeprom: Union[bytes, bytearray], check=False):
        mtp = self._ftdi.device_version == 0x1000
        crc_pos = 0x100 if mtp else len(eeprom)
        crc_size = scalc('<H')
        if not check:
            # compute mode: leave room for the CRC itself; in check mode the
            # stored CRC is included so the resulting checksum should be zero
            crc_pos -= crc_size
        if self.is_mirroring_enabled:
            mirror_s1_crc_pos = self.mirror_sector
            if not check:
                mirror_s1_crc_pos -= crc_size
            # if mirroring, only calculate the crc for the first sector/half
            #   of the eeprom. Data (including this crc) are duplicated in
            #   the second sector/half
            crc = self._ftdi.calc_eeprom_checksum(eeprom[:mirror_s1_crc_pos])
        else:
            crc = self._ftdi.calc_eeprom_checksum(eeprom[:crc_pos])
        if check:
            self._valid = not bool(crc)
            if not self._valid:
                self.log.debug('CRC is now 0x%04x', crc)
            else:
                self.log.debug('CRC OK')
        return crc, crc_pos, crc_size

    def _update_crc(self):
        crc, crc_pos, crc_size = self._compute_crc(
            self._eeprom, False)
        self._eeprom[crc_pos:crc_pos+crc_size] = spack('<H', crc)
        if self.is_mirroring_enabled:
            # if mirroring calculate where the CRC will start in first sector
            crc_s1_start = self.mirror_sector - crc_size
            self._eeprom[crc_s1_start:crc_s1_start+crc_size] = spack('<H', crc)

    def _compute_size(self,
                      eeprom: Union[bytes, bytearray]) -> Tuple[int, bool]:
        """
            :return: Tuple of:
                - int of usable size of the eeprom
                - bool of whether eeprom mirroring was detected or not
        """
        if self._ftdi.is_eeprom_internal:
            return self._ftdi.max_eeprom_size, False
        if all([x == 0xFF for x in eeprom]):
            # erased EEPROM, size is unknown
            return self._ftdi.max_eeprom_size, False
        if eeprom[0:0x80] == eeprom[0x80:0x100]:
            return 0x80, True
        if eeprom[0:0x40] == eeprom[0x40:0x80]:
            return 0x40, True
        return 0x100, False

    def _read_eeprom(self) -> bytes:
        buf = self._ftdi.read_eeprom(0, eeprom_size=self.size)
        eeprom = bytearray(buf)
        size, mirror_detected = self._compute_size(eeprom)
        if size < len(eeprom):
            eeprom = eeprom[:size]
        crc = self._compute_crc(eeprom, True)[0]
        if crc:
            if self.is_empty:
                self.log.info('No EEPROM or EEPROM erased')
            else:
                self.log.error('Invalid CRC or EEPROM content')
        if not self.is_empty and mirror_detected:
            self.log.info("Detected a mirrored eeprom. " +
                "Enabling mirrored writing")
            self._mirror = True
        return eeprom

    def _decode_eeprom(self):
        cfg = self._config
        cfg.clear()
        chipoff = self._PROPERTIES[self.device_version].chipoff
        if chipoff is not None:
            self._chip = Hex2Int(self._eeprom[chipoff])
            cfg['chip'] = self._chip
        cfg['vendor_id'] = Hex4Int(sunpack('<H', self._eeprom[0x02:0x04])[0])
        cfg['product_id'] = Hex4Int(sunpack('<H', self._eeprom[0x04:0x06])[0])
        cfg['type'] = Hex4Int(sunpack('<H', self._eeprom[0x06:0x08])[0])
        power_supply, power_max, conf = sunpack('<3B', self._eeprom[0x08:0x0b])
        cfg['self_powered'] = bool(power_supply & (1 << 6))
        cfg['remote_wakeup'] = bool(power_supply & (1 << 5))
        cfg['power_max'] = power_max << 1
        cfg['has_serial'] = bool(conf & (1 << 3))
        cfg['suspend_pull_down'] = bool(conf & (1 << 2))
        cfg['out_isochronous'] = bool(conf & (1 << 1))
        cfg['in_isochronous'] = bool(conf & (1 << 0))
        cfg['manufacturer'] = self._decode_string(0x0e)
        cfg['product'] = self._decode_string(0x10)
        cfg['serial'] = self._decode_string(0x12)
        if self.device_version in (0x0400, 0x0500):
            cfg['use_usb_version'] = bool(conf & (1 << 3))
            if cfg['use_usb_version']:
                cfg['usb_version'] = \
                    Hex4Int(sunpack('<H', self._eeprom[0x0c:0x0e])[0])
        name = None
        try:
            name = Ftdi.DEVICE_NAMES[cfg['type']].replace('-', '')
            if name.startswith('ft'):
                name = name[2:]
            func = getattr(self, '_decode_%s' % name)
        except (KeyError, AttributeError):
            self.log.warning('No EEPROM decoder for device %s', name or '?')
        else:
            func()

    def _decode_string(self, offset):
        str_offset, str_size = sunpack('<BB', self._eeprom[offset:offset+2])
        if str_size:
            str_offset &= self.size - 1
            str_size -= scalc('<H')
            str_offset += scalc('<H')
            sdata = self._eeprom[str_offset:str_offset+str_size]
            return sdata.decode('utf16', errors='ignore')
        return ''

    def _set_cbus_func(self, cpin: int, value: str,
                       out: Optional[TextIO]) -> None:
        cmap = {0x600: (self.CBUS, 5, 0x14, 4),    # FT232R
                0x900: (self.CBUSH, 10, 0x18, 4),  # FT232H
                0x1000: (self.CBUSX, 4, 0x1A, 8)}  # FT230X/FT231X/FT234X
        try:
            cbus, count, offset, width = cmap[self.device_version]
        except KeyError as exc:
            raise ValueError('This property is not supported on this '
                             'device') from exc
        pin_filter = getattr(self,
                             '_filter_cbus_func_x%x' % self.device_version,
                             None)
        if value == '?' and out:
            items = {item.name for item in cbus}
            if pin_filter:
                items = {val for val in items if pin_filter(cpin, val)}
            print(', '.join(sorted(items)) if items else '(none)', file=out)
            return
        if not 0 <= cpin < count:
            raise ValueError("Unsupported CBUS pin '%d'" % cpin)
        try:
            code = cbus[value.upper()].value
        except KeyError as exc:
            raise ValueError("CBUS pin %d does not have function '%s'" %
                             (cpin, value)) from exc
        if pin_filter and not pin_filter(cpin, value.upper()):
            raise ValueError("Unsupported CBUS function '%s' for pin '%d'" %
                             (value, cpin))
        addr = offset + (cpin*width)//8
        if width == 4:
            bitoff = 4 if cpin & 0x1 else 0
            mask = 0x0F << bitoff
        else:
            bitoff = 0
            mask = 0xFF
        old = self._eeprom[addr]
        self._eeprom[addr] &= ~mask
        self._eeprom[addr] |= code << bitoff
        self.log.debug('Cpin %d, addr 0x%02x, value 0x%02x->0x%02x',
                       cpin, addr, old, self._eeprom[addr])

    @classmethod
    def _filter_cbus_func_x900(cls, cpin: int, value: str):
        if cpin == 7:
            # nothing can be assigned to ACBUS7
            return False
        if value in 'TRISTATE TXLED RXLED TXRXLED PWREN SLEEP DRIVE0'.split():
            # any pin can be assigned these functions
            return True
        if cpin in (5, 6, 8, 9):
            # any function can be assigned to ACBUS5, ACBUS6, ACBUS8, ACBUS9
            return True
        if cpin == 0:
            return value != 'GPIO'
        return False

    @classmethod
    def _filter_cbus_func_x600(cls, cpin: int, value: str):
        if value == 'BB_WR':
            # this signal is only available on CBUS0, CBUS1
            return cpin < 2
        return True

    def _set_bus_control(self, bus: str, control: str,
                         value: Union[str, int, bool],
                         out: Optional[TextIO]) -> None:
        if self.device_version == 0x1000:
            self._set_bus_control_230x(bus, control, value, out)
            return
        # for now, only support FT-X devices
        raise ValueError('Bus control not implemented for this device')

    def _set_group(self, group: int, control: str,
                   value: Union[str, int, bool], out: Optional[TextIO]) \
            -> None:
        if self.device_version in (0x0700, 0x0800, 0x0900):
            self._set_group_x232h(group, control, value, out)
            return
        raise ValueError('Group not implemented for this device')

    def _set_bus_control_230x(self, bus: str, control: str,
                              value: Union[str, int, bool],
                              out: Optional[TextIO]) -> None:
        if bus not in 'cd':
            raise ValueError('Invalid bus: %s' % bus)
        self._set_bus_xprop(0x0c, bus == 'c', control, value, out)

    def _set_group_x232h(self, group: int, control: str, value: str,
                         out: Optional[TextIO]) -> None:
        if self.device_version in (0x0700, 0x800):  # 2232H/4232H
            offset = 0x0c + group//2
            nibble = group & 1
        else:  # 232H
            offset = 0x0c + group
            nibble = 0
        self._set_bus_xprop(offset, nibble, control, value, out)

    def _set_bus_xprop(self, offset: int, high_nibble: bool, control: str,
                       value: Union[str, int, bool], out: Optional[TextIO]) \
            -> None:
        try:
            if control == 'drive':
                candidates = (4, 8, 12, 16)
                if value == '?' and out:
                    print(', '.join([str(v) for v in candidates]), file=out)
                    return
                value = int(value)
                if value not in candidates:
                    raise ValueError('Invalid drive current: %d mA' % value)
                value //= 4
                value -= 1
            elif control in ('slow_slew', 'schmitt'):
                if value == '?' and out:
                    print('off, on', file=out)
                    return
                value = int(to_bool(value))
            else:
                raise ValueError('Unsupported control: %s' % control)
        except (ValueError, TypeError) as exc:
            raise ValueError('Invalid %s value: %s' %
                             (control, value)) from exc
        config = self._eeprom[offset]
        if not high_nibble:
            conf = config & 0x0F
            config &= 0xF0
            cshift = 0
        else:
            conf = config >> 4
            config &= 0x0F
            cshift = 4
        if control == 'drive':
            conf &= 0b1100
            conf |= value
        elif control == 'slow_slew':
            conf &= 0b1011
            conf |= value << 2
        elif control == 'schmitt':
            conf &= 0b0111
            conf |= value << 3
        else:
            raise RuntimeError('Internal error')
        config |= conf << cshift
        self._eeprom[offset] = config

    def _set_invert(self, name, value, out):
        if value == '?' and out:
            print('off, on', file=out)
            return
        if name.upper() not in self.UART_BITS.__members__:
            raise ValueError('Unknown property: %s' % name)
        value = to_bool(value, permissive=False)
        code = getattr(self.UART_BITS, name.upper())
        invert = self._eeprom[0x0B]
        if value:
            invert |= code
        else:
            invert &= ~code
        self._eeprom[0x0B] = invert

    def _decode_x(self):
        # FT-X series
        cfg = self._config
        misc, = sunpack('<H', self._eeprom[0x00:0x02])
        cfg['channel_a_driver'] = 'VCP' if misc & (1 << 7) else 'D2XX'
        for bit in self.UART_BITS:
            value = self._eeprom[0x0B]
            cfg['invert_%s' % self.UART_BITS(bit).name] = bool(value & bit)
        max_drive = self.DRIVE.LOW.value | self.DRIVE.HIGH.value
        value = self._eeprom[0x0c]
        for grp in range(2):
            conf = value & 0xF
            bus = 'c' if grp else 'd'
            cfg['%sbus_drive' % bus] = 4 * (1+(conf & max_drive))
            cfg['%sbus_schmitt' % bus] = bool(conf & self.DRIVE.SCHMITT)
            cfg['%sbus_slow_slew' % bus] = bool(conf & self.DRIVE.SLOW_SLEW)
            value >>= 4
        for bix in range(4):
            value = self._eeprom[0x1A + bix]
            try:
                cfg['cbus_func_%d' % bix] = self.CBUSX(value).name
            except ValueError:
                pass

    def _decode_232h(self):
        cfg = self._config
        cfg0, cfg1 = self._eeprom[0x00], self._eeprom[0x01]
        cfg['channel_a_type'] = cfg0 & 0x0F
        cfg['channel_a_driver'] = 'VCP' if (cfg0 & (1 << 4)) else 'D2XX'
        cfg['clock_polarity'] = 'high' if (cfg1 & self.CFG1.CLK_IDLE_STATE) \
                                else 'low'
        cfg['lsb_data'] = bool(cfg1 & self.CFG1.DATA_LSB)
        cfg['flow_control'] = 'on' if (cfg1 & self.CFG1.FLOW_CONTROL) \
                              else 'off'
        cfg['powersave'] = bool(cfg1 & self.DRIVE.PWRSAVE_DIS)
        max_drive = self.DRIVE.LOW.value | self.DRIVE.HIGH.value
        for grp in range(2):
            conf = self._eeprom[0x0c+grp]
            cfg['group_%d_drive' % grp] = 4 * (1+(conf & max_drive))
            cfg['group_%d_schmitt' % grp] = \
                bool(conf & self.DRIVE.SCHMITT.value)
            cfg['group_%d_slow_slew' % grp] = \
                bool(conf & self.DRIVE.SLOW_SLEW.value)
        for bix in range(5):
            value = self._eeprom[0x18 + bix]
            low, high = value & 0x0F, value >> 4
            try:
                cfg['cbus_func_%d' % ((2*bix)+0)] = self.CBUSH(low).name
            except ValueError:
                pass
            try:
                cfg['cbus_func_%d' % ((2*bix)+1)] = self.CBUSH(high).name
            except ValueError:
                pass

    def _decode_232r(self):
        cfg = self._config
        cfg0 = self._eeprom[0x00]
        cfg['channel_a_driver'] = 'VCP' if (~cfg0 & (1 << 3)) else ''
        cfg['high_current'] = bool(~cfg0 & (1 << 2))
        cfg['external_oscillator'] = cfg0 & 0x02
        value = self._eeprom[0x0B]
        for bit in self.UART_BITS:
            cfg['invert_%s' % self.UART_BITS(bit).name] = bool(value & bit)
        bix = 0
        while True:
            value = self._eeprom[0x14 + bix]
            low, high = value & 0x0F, value >> 4
            try:
                cfg['cbus_func_%d' % ((2*bix)+0)] = self.CBUS(low).name
            except ValueError:
                pass
            if bix == 2:
                break
            try:
                cfg['cbus_func_%d' % ((2*bix)+1)] = self.CBUS(high).name
            except ValueError:
                pass
            bix += 1

    def _decode_2232h(self):
        cfg = self._config
        self._decode_x232h(cfg)
        cfg0, cfg1 = self._eeprom[0x00], self._eeprom[0x01]
        cfg['channel_a_type'] = self.CHANNEL(cfg0 & 0x7).name or 'UART'
        cfg['channel_b_type'] = self.CHANNEL(cfg1 & 0x7).name or 'UART'
        cfg['suspend_dbus7'] = bool(cfg1 & self.CFG1.SUSPEND_DBUS7.value)

    def _decode_4232h(self):
        cfg = self._config
        self._decode_x232h(cfg)
        cfg0, cfg1 = self._eeprom[0x00], self._eeprom[0x01]
        cfg['channel_c_driver'] = 'VCP' if ((cfg0 >> 4) & (1 << 3)) else 'D2XX'
        cfg['channel_d_driver'] = 'VCP' if ((cfg1 >> 4) & (1 << 3)) else 'D2XX'
        conf = self._eeprom[0x0B]
        rs485 = self.CHANNEL.RS485
        for chix in range(4):
            cfg['channel_%x_rs485' % (0xa+chix)] = bool(conf & (rs485 << chix))

    def _decode_x232h(self, cfg):
        # common code for 2232h and 4232h
        cfg0, cfg1 = self._eeprom[0x00], self._eeprom[0x01]
        cfg['channel_a_driver'] = 'VCP' if (cfg0 & (1 << 3)) else 'D2XX'
        cfg['channel_b_driver'] = 'VCP' if (cfg1 & (1 << 3)) else 'D2XX'
        max_drive = self.DRIVE.LOW.value | self.DRIVE.HIGH.value
        for bix in range(4):
            if not bix & 1:
                val = self._eeprom[0x0c + bix//2]
            else:
                val >>= 4
            cfg['group_%d_drive' % bix] = 4 * (1+(val & max_drive))
            cfg['group_%d_schmitt' % bix] = \
                bool(val & self.DRIVE.SCHMITT.value)
            cfg['group_%d_slow_slew' % bix] = \
                bool(val & self.DRIVE.SLOW_SLEW.value)
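
# A standalone sketch of the 4-bit pad-configuration nibble decoded above:
# bits 0-1 select the drive strength (4/8/12/16 mA), bit 2 the slow-slew
# option and bit 3 the Schmitt trigger. The bit layout is inferred from the
# masks used in the methods above, not taken from an FTDI datasheet.
def decode_pad_nibble(conf):
    return {
        'drive': 4 * (1 + (conf & 0x3)),  # 0b00 -> 4 mA ... 0b11 -> 16 mA
        'slow_slew': bool(conf & (1 << 2)),
        'schmitt': bool(conf & (1 << 3)),
    }

# 0b1001: 8 mA drive, normal slew, Schmitt trigger enabled
assert decode_pad_nibble(0b1001) == \
    {'drive': 8, 'slow_slew': False, 'schmitt': True}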
Example no. 54
0
class Reporter(Listener):
    name = "reporter"

    def __init__(self, logger, **kwargs):
        super().__init__(**kwargs)
        self._logger = logger
        self._logs = OrderedDict()
        self._reported = 0
        self._history = []

    def __enter__(self):
        _reporters.append(self)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        _reporters.pop()

    def report(self, values):
        for name, value in values.items():
            if "accuracy" in name:
                accuracy = self._logs.get(name, 0.0)
                if isinstance(value, (tuple, list)) and len(value) == 2:
                    if isinstance(accuracy, float):
                        accuracy = [0, 0]
                    accuracy[0] += value[0]
                    accuracy[1] += value[1]
                else:
                    accuracy += float(value)
                values[name] = accuracy
        self._logs.update(values)
        self._reported += 1

    def get_summary(self):
        summary = OrderedDict()
        for name, value in self._logs.items():
            if "accuracy" in name:
                if isinstance(value, list):
                    correct, total = value[:2]
                    if total == 0:
                        accuracy = float('nan')
                    else:
                        accuracy = correct / total
                else:
                    accuracy = value / self._reported
                summary[name] = accuracy
            else:
                summary[name] = value
        return summary

    def get_history(self):
        return self._history

    def on_train_begin(self, data):
        self._history = []

    def on_epoch_train_begin(self, data):
        self._logs.clear()
        self._reported = 0

    on_epoch_validate_begin = on_epoch_train_begin

    def on_epoch_train_end(self, data):
        self.report({'loss': data['loss']})
        summary = self.get_summary()
        self._output_log("training", summary, data)
        self._history.append({'training': summary, 'validation': None})

    def on_epoch_validate_end(self, data):
        self.report({'loss': data['loss']})
        summary = self.get_summary()
        self._output_log("validation", summary, data)
        self._history[-1]['validation'] = summary

    def _output_log(self, label, summary, data):
        message = "[{}] epoch {} - #samples: {}, loss: {:.8f}".format(
            label, data['epoch'], data['size'], summary['loss'])
        if 'accuracy' in summary:
            message += ", accuracy: {:.8f}".format(summary['accuracy'])
            v = self._logs.get('accuracy', None)
            if isinstance(v, list) and v[1] > 0:
                message += " ({}/{})".format(v[0], v[1])
        self._logger.info(message)
        message = []
        for name, value in summary.items():
            if name == 'loss' or name == 'accuracy':
                continue
            if isinstance(value, float):
                message.append("{}: {:.8f}".format(name, value))
            else:
                message.append("{}: {}".format(name, value))
            if 'accuracy' in name:
                v = self._logs.get(name, None)
                if isinstance(v, list) and v[1] > 0:
                    message[-1] += " ({}/{})".format(v[0], v[1])
        if message:
            self._logger.info(", ".join(message))
Example no. 55
0
class Vocab(object):
    def __init__(self,
                 special=[],
                 min_freq=0,
                 max_size=None,
                 lower_case=True,
                 delimiter=None,
                 vocab_file=None):
        self.counter = Counter()
        self.special = special
        self.min_freq = min_freq
        self.max_size = max_size
        self.lower_case = lower_case
        self.delimiter = delimiter
        self.vocab_file = vocab_file

    def tokenize(self, line, add_eos=False, add_double_eos=False):
        line = line.strip()
        # convert to lower case
        if self.lower_case:
            line = line.lower()

        # '' is checked explicitly (an empty string is falsy, like None):
        # it selects character-level splitting instead of str.split()
        if self.delimiter == '':
            symbols = line
        else:
            symbols = line.split(self.delimiter)

        if add_double_eos:  # lm1b
            return ['<S>'] + symbols + ['<S>']
        elif add_eos:
            return symbols + ['<eos>']
        else:
            return symbols

    def count_file(self, path, verbose=False, add_eos=False):
        if verbose: print('counting file {} ...'.format(path))
        assert os.path.exists(path)

        sents = []
        with open(path, 'r', encoding='utf-8') as f:
            for idx, line in enumerate(f):
                if verbose and idx > 0 and idx % 500000 == 0:
                    print('    line {}'.format(idx))
                symbols = self.tokenize(line, add_eos=add_eos)
                self.counter.update(symbols)
                sents.append(symbols)

        return sents

    def count_sents(self, sents, verbose=False):
        """
            sents : a list of sentences, each a list of tokenized symbols
        """
        if verbose: print('counting {} sents ...'.format(len(sents)))
        for idx, symbols in enumerate(sents):
            if verbose and idx > 0 and idx % 500000 == 0:
                print('    line {}'.format(idx))
            self.counter.update(symbols)

    def _build_from_file(self, vocab_file):
        self.idx2sym = []
        self.sym2idx = OrderedDict()

        with open(vocab_file, 'r', encoding='utf-8') as f:
            for line in f:
                symb = line.strip().split()[0]
                self.add_symbol(symb)
        self.unk_idx = self.sym2idx['<UNK>']

    def build_vocab(self):
        if self.vocab_file:
            print('building vocab from {}'.format(self.vocab_file))
            self._build_from_file(self.vocab_file)
            print('final vocab size {}'.format(len(self)))
        else:
            print('building vocab with min_freq={}, max_size={}'.format(
                self.min_freq, self.max_size))
            self.idx2sym = []
            self.sym2idx = OrderedDict()

            for sym in self.special:
                self.add_special(sym)

            for sym, cnt in self.counter.most_common(self.max_size):
                if cnt < self.min_freq: break
                self.add_symbol(sym)

            print('final vocab size {} from {} unique tokens'.format(
                len(self), len(self.counter)))

    def encode_file(self,
                    path,
                    ordered=False,
                    verbose=False,
                    add_eos=True,
                    add_double_eos=False):
        if verbose: print('encoding file {} ...'.format(path))
        assert os.path.exists(path)
        encoded = []
        with open(path, 'r', encoding='utf-8') as f:
            for idx, line in enumerate(f):
                if verbose and idx > 0 and idx % 500000 == 0:
                    print('    line {}'.format(idx))
                symbols = self.tokenize(line,
                                        add_eos=add_eos,
                                        add_double_eos=add_double_eos)
                encoded.append(self.convert_to_tensor(symbols))

        if ordered:
            encoded = torch.cat(encoded)

        return encoded

    def encode_sents(self, sents, ordered=False, verbose=False):
        if verbose: print('encoding {} sents ...'.format(len(sents)))
        encoded = []
        for idx, symbols in enumerate(sents):
            if verbose and idx > 0 and idx % 500000 == 0:
                print('    line {}'.format(idx))
            encoded.append(self.convert_to_tensor(symbols))

        if ordered:
            encoded = torch.cat(encoded)

        return encoded

    def add_special(self, sym):
        if sym not in self.sym2idx:
            self.idx2sym.append(sym)
            self.sym2idx[sym] = len(self.idx2sym) - 1
            setattr(self, '{}_idx'.format(sym.strip('<>')), self.sym2idx[sym])

    def add_symbol(self, sym):
        if sym not in self.sym2idx:
            self.idx2sym.append(sym)
            self.sym2idx[sym] = len(self.idx2sym) - 1

    def get_sym(self, idx):
        assert 0 <= idx < len(self), 'Index {} out of range'.format(idx)
        return self.idx2sym[idx]

    def get_idx(self, sym):
        if sym in self.sym2idx:
            return self.sym2idx[sym]
        else:
            # print('encounter unk {}'.format(sym))
            assert '<eos>' not in sym
            assert hasattr(self, 'unk_idx')
            return self.sym2idx.get(sym, self.unk_idx)

    def get_symbols(self, indices):
        return [self.get_sym(idx) for idx in indices]

    def get_indices(self, symbols):
        return [self.get_idx(sym) for sym in symbols]

    def convert_to_tensor(self, symbols):
        return torch.LongTensor(self.get_indices(symbols))

    def convert_to_sent(self, indices, exclude=None):
        if exclude is None:
            return ' '.join([self.get_sym(idx) for idx in indices])
        else:
            return ' '.join(
                [self.get_sym(idx) for idx in indices if idx not in exclude])

    def __len__(self):
        return len(self.idx2sym)
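
# A minimal usage sketch for the Vocab class above. count_sents() expects
# pre-tokenized (and, if desired, pre-lowercased) symbols; special symbols
# are assigned the first indices by build_vocab().
vocab = Vocab(special=['<unk>', '<eos>'])
vocab.count_sents([['hello', 'world'], ['hello', 'again']])
vocab.build_vocab()
assert vocab.get_idx('hello') == 2          # '<unk>' is 0, '<eos>' is 1
assert vocab.get_idx('missing') == vocab.unk_idx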
Example no. 56
0
class Dictionary():
    def __init__(self):
        self.__dictionary = OrderedDict()

    def clear(self):
        self.__dictionary.clear()

    def get_json(self):
        return json.dumps(self.__dictionary)

    def set_item(self, key, value):
        self.__dictionary.__setitem__(key, value)
        return self

    def values(self):
        return list(self.__dictionary.values())

    def items(self):
        return self.__dictionary.items()

    def copy(self):
        return self.__dictionary.copy()

    def contains(self, key):
        return key in self.__dictionary

    def get(self, key):
        return self.__dictionary.get(key)

    def pop(self, key, default=None):
        return self.__dictionary.pop(key, default)

    def format(self, *args, **kwargs):
        return self.__dictionary.__format__(*args, **kwargs)

    def ne(self, *args, **kwargs):
        return self.__dictionary.__ne__(*args, **kwargs)

    def repr(self, *args, **kwargs):
        return self.__dictionary.__repr__(*args, **kwargs)

    def ge(self, *args, **kwargs):
        return self.__dictionary.__ge__(*args, **kwargs)

    def __sizeof__(self):
        return self.__dictionary.__sizeof__()

    def setattr(self, *args, **kwargs):
        return self.__dictionary.__setattr__(*args, **kwargs)

    def dir(self):
        return self.__dictionary.__dir__()

    def le(self, *args, **kwargs):
        return self.__dictionary.__le__(*args, **kwargs)

    def delattr(self, *args, **kwargs):
        return self.__dictionary.__delattr__(*args, **kwargs)

    def hash(self, *args, **kwargs):
        return self.__dictionary.__hash__(*args, **kwargs)

    def gt(self, *args, **kwargs):
        return self.__dictionary.__gt__(*args, **kwargs)

    def eq(self, *args, **kwargs):
        return self.__dictionary.__eq__(*args, **kwargs)

    def getattribute(self, *args, **kwargs):
        return self.__dictionary.__getattribute__(*args, **kwargs)

    def str(self, *args, **kwargs):
        return self.__dictionary.__str__(*args, **kwargs)

    def reduce(self, *args, **kwargs):
        return self.__dictionary.__reduce__(*args, **kwargs)

    def reduce_ex(self, *args, **kwargs):
        return self.__dictionary.__reduce_ex__(*args, **kwargs)

    def lt(self, *args, **kwargs):
        return self.__dictionary.__lt__(*args, **kwargs)

    def keys(self):
        return self.get_list_of_key()

    def get_list_of_key(self):
        return list(self.__dictionary.keys())
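
# A short usage sketch for the wrapper above (assumes json and OrderedDict
# are imported at module level): set_item() returns self, so calls chain
# before serializing with get_json().
d = Dictionary().set_item('a', 1).set_item('b', 2)
assert d.contains('a')
assert d.get_json() == '{"a": 1, "b": 2}'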
Example no. 57
0
    def _procure_orderpoint_confirm(self,
                                    use_new_cursor=False,
                                    company_id=False):
        """ Create procurements based on orderpoints.
        :param bool use_new_cursor: if set, use a dedicated cursor and auto-commit after processing
            1000 orderpoints.
            This is appropriate for batch jobs only.
        """
        if company_id and self.env.user.company_id.id != company_id:
            # To ensure that the company_id is taken into account for
            # all the processes triggered by this method
            # i.e. If a PO is generated by the run of the procurements the
            # sequence to use is the one for the specified company not the
            # one of the user's company
            self = self.with_context(company_id=company_id,
                                     force_company=company_id)
        OrderPoint = self.env['stock.warehouse.orderpoint']
        domain = self._get_orderpoint_domain(company_id=company_id)
        orderpoints_noprefetch = OrderPoint.with_context(
            prefetch_fields=False).search(
                domain,
                order=self._procurement_from_orderpoint_get_order()).ids
        while orderpoints_noprefetch:
            if use_new_cursor:
                cr = registry(self._cr.dbname).cursor()
                self = self.with_env(self.env(cr=cr))
            OrderPoint = self.env['stock.warehouse.orderpoint']

            orderpoints = OrderPoint.browse(orderpoints_noprefetch[:1000])
            orderpoints_noprefetch = orderpoints_noprefetch[1000:]

            # Calculate groups that can be executed together
            location_data = OrderedDict()

            def makedefault():
                return {
                    'products': self.env['product.product'],
                    'orderpoints': self.env['stock.warehouse.orderpoint'],
                    'groups': []
                }

            for orderpoint in orderpoints:
                key = self._procurement_from_orderpoint_get_grouping_key(
                    [orderpoint.id])
                if not location_data.get(key):
                    location_data[key] = makedefault()
                location_data[key]['products'] += orderpoint.product_id
                location_data[key]['orderpoints'] += orderpoint
                location_data[key][
                    'groups'] = self._procurement_from_orderpoint_get_groups(
                        [orderpoint.id])

            for location_id, loc_data in location_data.items():
                location_orderpoints = loc_data['orderpoints']
                product_context = dict(
                    self._context,
                    location=location_orderpoints[0].location_id.id)
                subtract_quantity = \
                    location_orderpoints._quantity_in_progress()

                for group in loc_data['groups']:
                    if group.get('from_date'):
                        product_context['from_date'] = group[
                            'from_date'].strftime(
                                DEFAULT_SERVER_DATETIME_FORMAT)
                    if group['to_date']:
                        product_context['to_date'] = group['to_date'].strftime(
                            DEFAULT_SERVER_DATETIME_FORMAT)
                    product_quantity = loc_data['products'].with_context(
                        product_context)._product_available()
                    for orderpoint in location_orderpoints:
                        try:
                            op_product_virtual = product_quantity[
                                orderpoint.product_id.id]['virtual_available']
                            if op_product_virtual is None:
                                continue
                            if float_compare(op_product_virtual,
                                             orderpoint.product_min_qty,
                                             precision_rounding=orderpoint.
                                             product_uom.rounding) <= 0:
                                qty = max(orderpoint.product_min_qty,
                                          orderpoint.product_max_qty
                                          ) - op_product_virtual
                                remainder = (qty % orderpoint.qty_multiple
                                             if orderpoint.qty_multiple > 0
                                             else 0.0)

                                if float_compare(remainder,
                                                 0.0,
                                                 precision_rounding=orderpoint.
                                                 product_uom.rounding) > 0:
                                    qty += orderpoint.qty_multiple - remainder

                                if float_compare(qty,
                                                 0.0,
                                                 precision_rounding=orderpoint.
                                                 product_uom.rounding) < 0:
                                    continue

                                qty -= subtract_quantity[orderpoint.id]
                                qty_rounded = float_round(
                                    qty,
                                    precision_rounding=orderpoint.product_uom.
                                    rounding)
                                if qty_rounded > 0:
                                    values = orderpoint._prepare_procurement_values(
                                        qty_rounded,
                                        **group['procurement_values'])
                                    try:
                                        with self._cr.savepoint():
                                            self.env['procurement.group'].run(
                                                orderpoint.product_id,
                                                qty_rounded,
                                                orderpoint.product_uom,
                                                orderpoint.location_id,
                                                orderpoint.name,
                                                orderpoint.name, values)
                                    except UserError as error:
                                        self.env[
                                            'stock.rule']._log_next_activity(
                                                orderpoint.product_id,
                                                error.name)
                                    self._procurement_from_orderpoint_post_process(
                                        [orderpoint.id])
                                if use_new_cursor:
                                    cr.commit()

                        except OperationalError:
                            if use_new_cursor:
                                orderpoints_noprefetch += [orderpoint.id]
                                cr.rollback()
                                continue
                            else:
                                raise

            try:
                if use_new_cursor:
                    cr.commit()
            except OperationalError:
                if use_new_cursor:
                    cr.rollback()
                    continue
                else:
                    raise

            if use_new_cursor:
                cr.commit()
                cr.close()

        return {}
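
# A standalone sketch of the reorder-quantity arithmetic used above: the
# shortfall up to product_max_qty is rounded up to the next multiple of
# qty_multiple. Plain floats stand in for Odoo's float_compare/float_round
# precision helpers.
def reorder_qty(virtual_available, min_qty, max_qty, qty_multiple):
    if virtual_available > min_qty:
        return 0.0
    qty = max(min_qty, max_qty) - virtual_available
    remainder = qty % qty_multiple if qty_multiple > 0 else 0.0
    if remainder > 0:
        qty += qty_multiple - remainder
    return qty

# 10 units short of a 12-unit maximum, ordered in multiples of 4 -> 12
assert reorder_qty(2, 4, 12, 4) == 12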
Example no. 58
0
class AllureReporter(object):
    def __init__(self):
        self._items = OrderedDict()
        self._orphan_items = []

    def _update_item(self, uuid, **kwargs):
        item = self._items[uuid] if uuid else self._items[next(
            reversed(self._items))]
        for name, value in kwargs.items():
            attr = getattr(item, name)
            if isinstance(attr, list):
                attr.append(value)
            else:
                setattr(item, name, value)

    def _last_executable(self):
        for _uuid in reversed(self._items):
            if isinstance(self._items[_uuid], ExecutableItem):
                return _uuid

    def get_item(self, uuid):
        return self._items.get(uuid)

    def get_last_item(self, item_type=None):
        for _uuid in reversed(self._items):
            if item_type is None:
                return self._items.get(_uuid)
            if type(self._items[_uuid]) is item_type:
                return self._items.get(_uuid)

    def start_group(self, uuid, group):
        self._items[uuid] = group

    def stop_group(self, uuid, **kwargs):
        self._update_item(uuid, **kwargs)
        group = self._items.pop(uuid)
        plugin_manager.hook.report_container(container=group)

    def update_group(self, uuid, **kwargs):
        self._update_item(uuid, **kwargs)

    def start_before_fixture(self, parent_uuid, uuid, fixture):
        self._items.get(parent_uuid).befores.append(fixture)
        self._items[uuid] = fixture

    def stop_before_fixture(self, uuid, **kwargs):
        self._update_item(uuid, **kwargs)
        self._items.pop(uuid)

    def start_after_fixture(self, parent_uuid, uuid, fixture):
        try:
            self._items.get(parent_uuid).afters.append(fixture)
        except AttributeError:
            pass
        self._items[uuid] = fixture

    def stop_after_fixture(self, uuid, **kwargs):
        self._update_item(uuid, **kwargs)
        fixture = self._items.pop(uuid)
        fixture.stop = now()

    def schedule_test(self, uuid, test_case):
        self._items[uuid] = test_case

    def get_test(self, uuid):
        return self.get_item(uuid) if uuid else self.get_last_item(TestResult)

    def close_test(self, uuid):
        test_case = self._items.pop(uuid)
        plugin_manager.hook.report_result(result=test_case)

    def drop_test(self, uuid):
        self._items.pop(uuid)

    def start_step(self, parent_uuid, uuid, step):
        parent_uuid = parent_uuid if parent_uuid else self._last_executable()
        if parent_uuid is None:
            self._orphan_items.append(uuid)
        else:
            self._items[parent_uuid].steps.append(step)
            self._items[uuid] = step

    def stop_step(self, uuid, **kwargs):
        if uuid in self._orphan_items:
            self._orphan_items.remove(uuid)
        else:
            self._update_item(uuid, **kwargs)
            self._items.pop(uuid)

    def _attach(self, uuid, name=None, attachment_type=None, extension=None):
        mime_type = attachment_type
        extension = extension if extension else 'attach'

        if type(attachment_type) is AttachmentType:
            extension = attachment_type.extension
            mime_type = attachment_type.mime_type

        file_name = ATTACHMENT_PATTERN.format(prefix=uuid, ext=extension)
        attachment = Attachment(source=file_name, name=name, type=mime_type)
        last_uuid = self._last_executable()
        self._items[last_uuid].attachments.append(attachment)

        return file_name

    def attach_file(self,
                    uuid,
                    source,
                    name=None,
                    attachment_type=None,
                    extension=None):
        file_name = self._attach(uuid,
                                 name=name,
                                 attachment_type=attachment_type,
                                 extension=extension)
        plugin_manager.hook.report_attached_file(source=source,
                                                 file_name=file_name)

    def attach_data(self,
                    uuid,
                    body,
                    name=None,
                    attachment_type=None,
                    extension=None):
        file_name = self._attach(uuid,
                                 name=name,
                                 attachment_type=attachment_type,
                                 extension=extension)
        plugin_manager.hook.report_attached_data(body=body,
                                                 file_name=file_name)
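
# A minimal sketch of the reversed-OrderedDict lookup used by
# _last_executable() and get_last_item(): insertion order is preserved, so
# reversed() yields the most recently started item first.
from collections import OrderedDict

items = OrderedDict([('uuid-1', 'container'), ('uuid-2', 'test'),
                     ('uuid-3', 'step')])
assert next(reversed(items)) == 'uuid-3'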
Example no. 59
0
class Page:
    "Banana banana"
    meta_schema = {
        Optional('title'): And(str, len),
        Optional('symbols'): Schema([And(str, len)]),
        Optional('private-symbols'): Schema([And(str, len)]),
        Optional('short-description'): And(str, len),
        Optional('description'): And(str, len),
        Optional('render-subpages'): bool,
        Optional('auto-sort'): bool,
        Optional('full-width'): bool,
        Optional('see-also'): And(str, len),
        Optional('extra'): Schema({str: object}),
        Optional('thumbnail'): And(str, len)
    }

    # pylint: disable=too-many-arguments
    def __init__(self,
                 source_file,
                 ast,
                 output_path,
                 project_name,
                 meta=None,
                 raw_contents=None):
        "Banana banana"
        assert source_file
        basename = os.path.basename(source_file)
        name = os.path.splitext(basename)[0]
        ref = os.path.join(output_path,
                           re.sub(r'\W+', '-',
                                  os.path.splitext(basename)[0]))
        pagename = '%s.html' % ref

        self.ast = ast
        self.extension_name = None
        self.source_file = source_file
        self.raw_contents = raw_contents
        self.comment = None
        self.generated = False
        self.pre_sorted = False
        self.output_attrs = None
        self.subpages = OrderedSet()
        self.symbols = []
        self.private_symbols = []
        self.typed_symbols = OrderedDict()
        self.by_parent_symbols = OrderedDict()
        self.is_stale = True
        self.formatted_contents = None
        self.detailed_description = None
        self.build_path = None
        self.project_name = project_name
        self.cached_paths = OrderedSet()

        meta = meta or {}
        self.listed_symbols = []
        self.symbol_names = []
        self.short_description = None
        self.render_subpages = True
        self.title = ''
        self.meta = Schema(Page.meta_schema).validate({})
        self.__update_meta(meta)
        self.__discover_title(meta)
        self.link = Link(pagename, self.title or name, ref)

    def __update_meta(self, meta):
        for key, value in meta.items():
            try:
                self.meta.update(
                    Schema(Page.meta_schema).validate(
                        {key.replace('_', '-').lower(): value}))
            except SchemaError as _:
                warn(
                    'invalid-page-metadata',
                    '%s: Invalid metadata: \n%s, discarding metadata' %
                    (self.source_file, str(_)))

        if not self.meta.get('extra'):
            self.meta['extra'] = defaultdict()

        self.title = meta.get('title', self.title)
        self.thumbnail = meta.get('thumbnail')
        self.listed_symbols = OrderedSet(
            meta.get('symbols') or self.symbol_names)
        self.private_symbols = OrderedSet(
            meta.get('private-symbols') or self.private_symbols)
        self.symbol_names = OrderedSet(
            meta.get('symbols') or self.symbol_names)
        self.short_description = meta.get('short-description',
                                          self.short_description)
        self.render_subpages = meta.get('render-subpages',
                                        self.render_subpages)

    def __getstate__(self):
        return {
            'ast': None,
            'build_path': None,
            'title': self.title,
            'raw_contents': self.raw_contents,
            'short_description': self.short_description,
            'extension_name': self.extension_name,
            'link': self.link,
            'meta': self.meta,
            'source_file': self.source_file,
            'comment': self.comment,
            'generated': self.generated,
            'is_stale': False,
            'formatted_contents': None,
            'detailed_description': None,
            'output_attrs': None,
            'symbols': [],
            'private_symbols': {},
            'typed_symbols': {},
            'by_parent_symbols': {},
            'subpages': self.subpages,
            'listed_symbols': self.listed_symbols,
            'symbol_names': self.symbol_names,
            'project_name': self.project_name,
            'pre_sorted': self.pre_sorted,
            'cached_paths': self.cached_paths,
            'render_subpages': self.render_subpages
        }

    def __repr__(self):
        return "<Page %s>" % self.source_file

    @staticmethod
    def __get_empty_typed_symbols():
        typed_symbols_list = namedtuple('TypedSymbolsList',
                                        ['name', 'symbols'])
        empty_typed_symbols = {}

        for subclass in all_subclasses(Symbol):
            empty_typed_symbols[subclass] = typed_symbols_list(
                subclass.get_plural_name(), [])

        return empty_typed_symbols

    def set_comment(self, comment):
        """
        Sets @comment as main comment for @self.
        """
        if comment:
            self.__update_meta(comment.meta)

        self.comment = comment

    def resolve_symbols(self, tree, database, link_resolver):
        """
        When this method is called, the page's symbol names are queried
        from `database`, and added to lists of actual symbols, sorted
        by symbol class.
        """
        self.typed_symbols = self.__get_empty_typed_symbols()
        all_syms = OrderedSet()
        for sym_name in self.symbol_names:
            sym = database.get_symbol(sym_name)
            self.__query_extra_symbols(sym, all_syms, tree, link_resolver,
                                       database)

        if tree.project.is_toplevel:
            page_path = self.link.ref
        else:
            page_path = self.project_name + '/' + self.link.ref

        if self.meta.get("auto-sort", True):
            all_syms = sorted(all_syms, key=lambda x: x.unique_name)
        for sym in all_syms:
            sym.update_children_comments()
            self.__resolve_symbol(sym, link_resolver, page_path)
            self.symbol_names.add(sym.unique_name)

        # Always put symbols with no parent at the end
        no_parent_syms = self.by_parent_symbols.pop(None, None)
        if no_parent_syms:
            self.by_parent_symbols[None] = no_parent_syms

        for sym_type in [
                ClassSymbol, AliasSymbol, InterfaceSymbol, StructSymbol
        ]:
            syms = self.typed_symbols[sym_type].symbols

            if not syms:
                continue

            if not self.title:
                self.title = syms[0].display_name
            if self.comment is None:
                self.comment = Comment(name=self.source_file)
                self.comment.short_description = syms[
                    0].comment.short_description
                self.comment.title = syms[0].comment.title
            break

    # pylint: disable=no-self-use
    def __fetch_comment(self, sym, database):
        old_comment = sym.comment
        new_comment = database.get_comment(sym.unique_name)
        sym.comment = Comment(sym.unique_name)

        if new_comment:
            sym.comment = new_comment
        elif old_comment:
            if old_comment.filename not in ChangeTracker.all_stale_files:
                sym.comment = old_comment

    def __format_page_comment(self, formatter, link_resolver):
        if not self.comment:
            return

        if self.comment.short_description:
            self.short_description = formatter.format_comment(
                self.comment.short_description, link_resolver).strip()
            if self.short_description.startswith('<p>'):
                self.short_description = self.short_description[3:-4]
        if self.comment.title:
            self.title = formatter.format_comment(self.comment.title,
                                                  link_resolver).strip()
            if self.title.startswith('<p>'):
                self.title = self.title[3:-4]

        if self.title:
            self.formatted_contents += '<h1 id="%s-page">%s</h1>' % (
                id_from_text(self.title), self.title)

        self.formatted_contents += formatter.format_comment(
            self.comment, link_resolver)

    def format(self, formatter, link_resolver, output):
        """
        Banana banana
        """

        if not self.title and self.source_file:
            title = os.path.splitext(self.source_file)[0]
            self.title = os.path.basename(title).replace('-', ' ')

        self.formatted_contents = u''

        self.build_path = os.path.join(formatter.get_output_folder(self),
                                       self.link.ref)

        if self.ast:
            out, diags = cmark.ast_to_html(self.ast, link_resolver)
            for diag in diags:
                warn(diag.code,
                     message=diag.message,
                     filename=self.source_file)

            self.formatted_contents += out

        if not self.formatted_contents:
            self.__format_page_comment(formatter, link_resolver)

        self.output_attrs = defaultdict(lambda: defaultdict(dict))
        formatter.prepare_page_attributes(self)
        self.__format_symbols(formatter, link_resolver)
        self.detailed_description =\
            formatter.format_page(self)[0]

        if output:
            formatter.cache_page(self)

    # pylint: disable=no-self-use
    def get_title(self):
        """
        Banana banana
        """
        return self.title or 'unnamed'

    def __discover_title(self, meta):
        if meta is not None and 'title' in meta:
            self.title = meta['title']
        elif self.ast:
            self.title = cmark.title_from_ast(self.ast)

    def __format_symbols(self, formatter, link_resolver):
        for symbol in self.symbols:
            if symbol is None:
                continue
            debug(
                'Formatting symbol %s in page %s' %
                (symbol.unique_name, self.source_file), 'formatting')
            symbol.detailed_description = formatter.format_symbol(
                symbol, link_resolver)

    def __query_extra_symbols(self, sym, all_syms, tree, link_resolver,
                              database):
        if sym:
            self.__fetch_comment(sym, database)
            new_symbols = sum(tree.resolving_symbol_signal(self, sym), [])
            all_syms.add(sym)

            for symbol in new_symbols:
                self.__query_extra_symbols(symbol, all_syms, tree,
                                           link_resolver, database)

    def __resolve_symbol(self, symbol, link_resolver, page_path):
        symbol.resolve_links(link_resolver)

        symbol.link.ref = "%s#%s" % (page_path, symbol.unique_name)

        for link in symbol.get_extra_links():
            link.ref = "%s#%s" % (page_path, link.id_)

        tsl = self.typed_symbols.get(type(symbol))
        if tsl:
            tsl.symbols.append(symbol)

            by_parent_symbols = self.by_parent_symbols.get(symbol.parent_name)
            if not by_parent_symbols:
                by_parent_symbols = self.__get_empty_typed_symbols()
                # symbols without a parent are keyed under None and moved to
                # the end of by_parent_symbols by resolve_symbols()
                self.by_parent_symbols[symbol.parent_name] = by_parent_symbols
            by_parent_symbols.get(type(symbol)).symbols.append(symbol)

        self.symbols.append(symbol)

        debug(
            'Resolved symbol %s to page %s' %
            (symbol.display_name, self.link.ref), 'resolution')
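
# A small sketch of the page-reference slugging in Page.__init__: runs of
# non-word characters in the source basename collapse to single dashes
# before the '.html' link name is derived. The path is illustrative; note
# the trailing dash left by the closing parenthesis.
import os
import re

basename = os.path.basename('docs/my page (draft).markdown')
ref = re.sub(r'\W+', '-', os.path.splitext(basename)[0])
assert ref == 'my-page-draft-'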
Example no. 60
0
class WineRegistry:
    version_header = "WINE REGISTRY Version "
    relative_to_header = ";; All keys relative to "

    def __init__(self, reg_filename=None):
        self.arch = WINE_DEFAULT_ARCH
        self.version = 2
        self.relative_to = "\\\\User\\\\S-1-5-21-0-0-0-1000"
        self.keys = OrderedDict()
        self.reg_filename = reg_filename
        if reg_filename:
            if not system.path_exists(reg_filename):
                logger.error("Unexisting registry %s", reg_filename)
            self.parse_reg_file(reg_filename)

    @property
    def prefix_path(self):
        """Return the Wine prefix path (where the .reg files are located)"""
        if self.reg_filename:
            return os.path.dirname(self.reg_filename)

    @staticmethod
    def get_raw_registry(reg_filename):
        """Return an array of the unprocessed contents of a registry file"""
        if not system.path_exists(reg_filename):
            return []
        with open(reg_filename, "r") as reg_file:
            try:
                registry_content = reg_file.readlines()
            except Exception:  # pylint: disable=broad-except
                logger.exception(
                    "Failed to read registry %s; please attach this file "
                    "to a bug report", reg_filename)
                registry_content = []
        return registry_content

    def parse_reg_file(self, reg_filename):
        registry_lines = self.get_raw_registry(reg_filename)
        current_key = None
        add_next_to_value = False
        additional_values = []
        for line in registry_lines:
            line = line.rstrip("\n")

            if line.startswith("["):
                current_key = WineRegistryKey(key_def=line)
                self.keys[current_key.name] = current_key
            elif current_key:
                if add_next_to_value:
                    additional_values.append(line)
                else:
                    if additional_values:
                        current_key.add_to_last('\n'.join(additional_values))
                        additional_values = []
                    current_key.parse(line)
                add_next_to_value = line.endswith("\\")
            elif line.startswith(self.version_header):
                self.version = int(line[len(self.version_header):])
            elif line.startswith(self.relative_to_header):
                self.relative_to = line[len(self.relative_to_header):]
            elif line.startswith("#arch"):
                self.arch = line.split("=")[1]

    def render(self):
        content = "{}{}\n".format(self.version_header, self.version)
        content += "{}{}\n\n".format(self.relative_to_header, self.relative_to)
        content += "#arch={}\n".format(self.arch)
        for key in self.keys:
            content += "\n"
            content += self.keys[key].render()
        return content

    def save(self, path=None):
        """Write the registry to a file"""
        if not path:
            path = self.reg_filename
        if not path:
            raise OSError("No filename provided")
        prefix_path = os.path.dirname(path)
        if not os.path.isdir(prefix_path):
            raise OSError("Invalid Wine prefix path %s, make sure to "
                          "create the prefix before saving to a registry" %
                          prefix_path)
        with open(path, "w") as registry_file:
            registry_file.write(self.render())

    def query(self, path, subkey):
        key = self.keys.get(path)
        if key:
            return key.get_subkey(subkey)

    def set_value(self, path, subkey, value):
        key = self.keys.get(path)
        if not key:
            key = WineRegistryKey(path=path)
            self.keys[key.name] = key
        key.set_subkey(subkey, value)

    def clear_key(self, path):
        """Removes all subkeys from a key"""
        key = self.keys.get(path)
        if not key:
            return
        key.subkeys.clear()

    def clear_subkeys(self, path, keys):
        """Remove some subkeys from a key"""
        key = self.keys.get(path)
        if not key:
            return
        for subkey in list(key.subkeys.keys()):
            if subkey not in keys:
                continue
            key.subkeys.pop(subkey)

    def get_unix_path(self, windows_path):
        windows_path = windows_path.replace("\\", "/")
        if not self.prefix_path:
            return
        drives_path = os.path.join(self.prefix_path, "dosdevices")
        if not system.path_exists(drives_path):
            return
        letter, relpath = windows_path.split(":", 1)
        relpath = relpath.strip("/")
        drive_link = os.path.join(drives_path, letter.lower() + ":")
        try:
            drive_path = os.readlink(drive_link)
        except FileNotFoundError:
            logger.error("Unable to read link for %s", drive_link)
            return

        if not os.path.isabs(drive_path):
            drive_path = os.path.join(drives_path, drive_path)
        return os.path.join(drive_path, relpath)
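
# A hedged sketch of the drive-letter translation in get_unix_path(): the
# Windows path is split on the first ':' and the drive letter is resolved
# through the prefix's dosdevices symlinks. Paths below are illustrative;
# os.readlink() is described rather than executed.
import os

windows_path = 'C:\\Program Files\\Game'.replace('\\', '/')
letter, relpath = windows_path.split(':', 1)
drive_link = os.path.join('/home/user/.wine/dosdevices', letter.lower() + ':')
# os.readlink(drive_link) typically yields a relative '../drive_c', which
# resolves against dosdevices to the prefix's drive_c directory:
unix_path = os.path.join('/home/user/.wine/drive_c', relpath.strip('/'))
assert unix_path == '/home/user/.wine/drive_c/Program Files/Game'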