Example #1
def saveAsDot(automata, name, display):
    file = open(name, 'w+')
    intialState = ''
    for i in automata.q0:
        intialState = intialState + i
    data = "digraph \"" + display + "\" {\n\t_nil [style=\"invis\"];\n\t_nil -> " + intialState + " [label=\"\"];\n"
    for final in automata.F:
        finalStates = ''  # reset per final state so names do not accumulate across iterations
        for s in final:
            finalStates = finalStates + s
        data = data + "\t" + finalStates + " [peripheries=2];\n"
    transitions = ""
    for state in sorted(automata.delta.keys()):
        nameState = ''
        for s in state:
            nameState = nameState + s
        targetSimbols = sorted(automata.delta[state].keys())
        targets = automata.delta[state]
        for t in targetSimbols:  # iterate symbols in sorted order for deterministic output
            transitions = transitions + '\t' + nameState + ' -> '
            targetName = ''
            for a in targets[t]:
                targetName = targetName + a
            transitions = transitions + targetName + ' [label=' + t + '];\n'
    data = data + transitions + '}'
    file.write(data)
    file.close()
Example #2
File: flex.py Project: kek-pf-mx/dials
    def sort(self, name, reverse=False, order=None):
        '''
        Sort the reflection table by a key.

        :param name: The name of the column
        :param reverse: Reverse the sort order
        :param order: For multi element items specify order

        '''
        import __builtin__
        if type(self[name]) in [
                vec2_double, vec3_double, mat3_double, int6, miller_index
        ]:
            data = self[name]
            if order is None:
                perm = flex.size_t(
                    __builtin__.sorted(range(len(self)),
                                       key=lambda x: data[x],
                                       reverse=reverse))
            else:
                assert len(order) == len(data[0])

                def compare(x, y):
                    a = tuple(x[i] for i in order)
                    b = tuple(y[i] for i in order)
                    return cmp(a, b)

                perm = flex.size_t(
                    __builtin__.sorted(range(len(self)),
                                       key=lambda x: data[x],
                                       cmp=compare,
                                       reverse=reverse))
        else:
            perm = flex.sort_permutation(self[name], reverse=reverse)
        self.reorder(perm)
Example #3
def saveAsDot(automata, name, display):
    file = open(name,'w+')
    intialState = ''
    for i in automata.q0:
        intialState = intialState + i
    data = "digraph \"" + display + "\" {\n\t_nil [style=\"invis\"];\n\t_nil -> " + intialState + " [label=\"\"];\n"
    for final in automata.F:
        finalStates = ''  # reset per final state so names do not accumulate across iterations
        for s in final:
            finalStates = finalStates + s
        data = data + "\t" + finalStates + " [peripheries=2];\n"
    transitions = ""
    for state in sorted(automata.delta.keys()):
        nameState = ''
        for s in state:
            nameState = nameState + s
        targetSimbols = sorted(automata.delta[state].keys())
        targets = automata.delta[state]
        for t in targetSimbols:  # iterate symbols in sorted order for deterministic output
            transitions = transitions + '\t' + nameState + ' -> '
            targetName = ''
            for a in targets[t]:
                targetName = targetName + a
            transitions = transitions + targetName + ' [label=' + t + '];\n'
    data = data + transitions + '}'
    file.write(data)
    file.close()
Example #4
    def _sorted(self, query, order):
        '''Sort the data programmatically.'''

        if not order: return

        m = re.match(r'(-?)([a-zA-Z_0-9]+)', order)
        desc = m.group(1) == '-'
        prop_name = m.group(2)

        prop = getattr(self.model_type, prop_name)

        if desc:
            return __builtin__.sorted(query, key=lambda x: prop.__get__(x, self.model_type), reverse=True)
        else:
            return __builtin__.sorted(query, key=lambda x: prop.__get__(x, self.model_type))
Example #5
def satisfied_latest(range, versions):
    versions = __builtin__.sorted(versions)
    versions.reverse()
    for v in versions:
        if (satisfied(range, v)):
            return v
    return None
Example #6
def satisfied_latest(range, versions):
    versions = __builtin__.sorted(versions)
    versions.reverse()
    for v in versions:
        if (satisfied(range, v)):
            return v
    return None
Example #7
    def __init__(self,
                 dao,
                 windows,
                 main_pages,
                 vertex_in_question,
                 real_pages,
                 vertex=None,
                 merged=True):
        super(RasterPage, self).__init__(dao, windows, main_pages, real_pages)
        self.is_merged_version = merged
        #holds all the vertexes being recorded for spikes
        if vertex_in_question is None:
            self.vertex_in_question = list()
            self.vertex_in_question.append(vertex)
        else:
            self.vertex_in_question = sorted(vertex_in_question,
                                             key=lambda vertex: vertex.label)

        #creates a collection of offsets for y plot
        self.off_sets = list()
        current_off_set = 0

        self.x_axis_scope = config.get("Visualiser", "raster_plot_x_scope")
        if self.x_axis_scope == "None":
            self.x_axis_scope = dao.run_time
        if self.x_axis_scope is None:
            self.x_axis_scope = 2000.0

        self.do_fading = config.getboolean("Visualiser",
                                           "raster_plot_do_fading")
        self.data_stores = []

        for vertex in self.vertex_in_question:
            label = str(vertex.label)
            self.data_stores.append(label)
            tuple_data = [(-1, -1), (-1, -1)]
            self.data_stores.append(tuple_data)
            self.off_sets.append(current_off_set)
            current_off_set += vertex.atoms + 15

        #records the maximum neuron value
        self.max_y_value = current_off_set

        #set name of page
        self.page = gtk.Frame("raster plot")

        self.figure = None
        self.plot = ScatterplotChart()
        self.axis = None
        self.canvas = None
        self.graphview = None
        if self.is_merged_version:
            label = "merged raster page"
        else:
            label = "Raster page of {}".format(vertex.label)
        main_pages.append_page(self.page, gtk.Label(label))
        #generate plot
        self.generate_plot(0, True)
Example #8
def get_npmjs_time_of(name):
    url = '/'.join([NPMJS.rstrip('/'), name.lstrip('/')])
    try:
        with contextlib.closing(urllib.urlopen(url)) as f:
            data = json.loads(f.read())
        # print(json.dumps(data, indent=3, sort_keys=True))
        return __builtin__.sorted(data['time'].keys())[:-2]
    except __builtin__.ValueError as e:
        print('\n open and parse %s.\n error %s\n' % (url, e), file=sys.stderr)
        sys.exit(1)
Example #9
def get_npmjs_time_of(name):
    url = '/'.join([NPMJS.rstrip('/'), name.lstrip('/')])
    try:
        with contextlib.closing(urllib.urlopen(url)) as f:
            data = json.loads(f.read())
        # print(json.dumps(data, indent=3, sort_keys=True))
        return __builtin__.sorted(data['time'].keys())[:-2]
    except __builtin__.ValueError as e:
        print('\n open and parse %s.\n error %s\n' % (url, e), file=sys.stderr)
        sys.exit(1)
Example #10
File: online.py Project: nxi/gumtree
    def SortAngles(self):
        info = sorted(enumerate(self.Angle), key=lambda item: item[1])

        self.Angle      = [item[1]                  for item in info]
        self.DetCtr     = [self.DetCtr    [item[0]] for item in info]
        self.ErrDetCtr  = [self.ErrDetCtr [item[0]] for item in info]
        self.MonCtr     = [self.MonCtr    [item[0]] for item in info]
        self.TransCtr   = [self.TransCtr  [item[0]] for item in info]
        self.CountTimes = [self.CountTimes[item[0]] for item in info]
        self.Bex        = [self.Bex       [item[0]] for item in info]
        self.TimeStamp  = [self.TimeStamp [item[0]] for item in info]
        self.ActualTime = [self.ActualTime[item[0]] for item in info]
Example #11
    def __init__(self, dao, windows, main_pages, vertex_in_question, real_pages,
                 vertex=None, merged=True):
        super(RasterPage, self).__init__(dao, windows, main_pages, real_pages)
        self.is_merged_version = merged
        #holds all the vertexes being recorded for spikes
        if vertex_in_question is None:
            self.vertex_in_question = list()
            self.vertex_in_question.append(vertex)
        else:
            self.vertex_in_question = sorted(vertex_in_question,
                                             key=lambda vertex: vertex.label)

        #creates a collection of offsets for y plot
        self.off_sets = list()
        current_off_set = 0

        self.x_axis_scope = config.get("Visualiser", "raster_plot_x_scope")
        if self.x_axis_scope == "None":
            self.x_axis_scope = dao.run_time
        if self.x_axis_scope is None:
            self.x_axis_scope = 2000.0
        
        self.do_fading = config.getboolean("Visualiser", "raster_plot_do_fading")
        self.data_stores = []
            
        for vertex in self.vertex_in_question:
            label = str(vertex.label)
            self.data_stores.append(label)
            tuple_data = [(-1, -1), (-1, -1)]
            self.data_stores.append(tuple_data)
            self.off_sets.append(current_off_set)
            current_off_set += vertex.atoms + 15

        #records the maximum neuron value
        self.max_y_value = current_off_set

        #set name of page
        self.page = gtk.Frame("raster plot")

        self.figure = None
        self.plot = ScatterplotChart()
        self.axis = None
        self.canvas = None
        self.graphview = None
        if self.is_merged_version:
            label = "merged raster page"
        else:
            label = "Raster page of {}".format(vertex.label)
        main_pages.append_page(self.page,
                               gtk.Label(label))
        #generate plot
        self.generate_plot(0, True)
Example #12
from numpy import tile
import operator


def classify0(inX, dataSet, labels, k):
    dataSetSize = dataSet.shape[0]
    diffMat = tile(inX, (dataSetSize, 1)) - dataSet
    sqDiffMat = diffMat**2
    sqDistances = sqDiffMat.sum(axis=1)
    distances = sqDistances**0.5               # compute Euclidean distances
    sortedDistIndicies = distances.argsort()
    classCount = {}
    for i in range(k):                         # tally the k nearest neighbours
        voteILabel = labels[sortedDistIndicies[i]]
        classCount[voteILabel] = classCount.get(voteILabel, 0) + 1
    sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)  # sort by vote count
    return sortedClassCount[0][0]
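
A minimal, hypothetical usage sketch for classify0 (the sample points and labels below are made up; it relies on the numpy import shown with the function):

from numpy import array

group = array([[1.0, 1.1], [1.0, 1.0], [0.0, 0.0], [0.0, 0.1]])
labels = ['A', 'A', 'B', 'B']
# the three nearest neighbours of (0, 0) are two 'B' points and one 'A' point
print(classify0([0.0, 0.0], group, labels, 3))  # -> 'B'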
Example #13
File: flex.py Project: dials/dials
  def sort(self, name, reverse=False, order=None):
    '''
    Sort the reflection table by a key.

    :param name: The name of the column
    :param reverse: Reverse the sort order
    :param order: For multi element items specify order

    '''
    import __builtin__
    if type(self[name]) in [
        vec2_double,
        vec3_double,
        mat3_double,
        int6,
        miller_index ]:
      data = self[name]
      if order is None:
        perm = flex.size_t(
          __builtin__.sorted(
            range(len(self)),
            key=lambda x: data[x],
            reverse=reverse))
      else:
        assert len(order) == len(data[0])
        def compare(x, y):
          a = tuple(x[i] for i in order)
          b = tuple(y[i] for i in order)
          return cmp(a, b)
        perm = flex.size_t(
          __builtin__.sorted(
            range(len(self)),
            key=lambda x: data[x],
            cmp=compare,
            reverse=reverse))
    else:
      perm = flex.sort_permutation(self[name], reverse=reverse)
    self.reorder(perm)
Example #14
    def FindZeroAngle(self):
        from __builtin__ import max, min, sorted

        # input
        x = self.Angle
        y = self.DetCtr

        # limits
        y_max = max(y)

        y_low = 0.01 * y_max  # find suitable x range
        x_min = max(x)
        x_max = min(x)
        for xi, yi in zip(x, y):
            if yi > y_low:
                if x_min > xi:
                    x_min = xi
                if x_max < xi:
                    x_max = xi

        # sampling
        x_sam = self.linspace(x_min, x_max, num=500)
        y_sam = self.sample(x, y, x_sam)

        # normalized cross-correlation
        y_cnv = self.normxcorr(y_sam, y_sam)
        x_cnv = self.linspace(x_min, x_max, num=len(y_cnv))

        # find suitable maximum of y_cnv
        yLevel = 0.5 * y_max

        maxima = self.localmaxima(x_cnv, y_cnv)
        maxima = [m for m in maxima if m[1] > 0.0]  # ignore negative matches
        maxima = [m for m in maxima if self.sample(x, y, m[0]) > yLevel
                  ]  # only consider high y values
        maxima = sorted(maxima, key=lambda m: m[1],
                        reverse=True)  # best fit first

        if not maxima:
            self.PeakAng = x[y.index(y_max)]
            self.PeakVal = y_max

        else:
            x_cnv_max, y_cnv_max, i_cnv_max = maxima[0]
            self.PeakAng = self.maximumX(x_cnv, y_cnv, i_cnv_max)
            self.PeakVal = y_max

        print "Peak Angle:", self.PeakAng
        print "I(rock):", self.PeakVal
        return self.PeakAng
Example #15
def sequential(values):
    print values
    def key(value):
        if 'start_time' in value.attrs:
            return value.attrs['start_time']
        elif 'end_time' in value.attrs:
            return value.attrs['end_time']
        else:
            try:
                # fall back to the last component of the object's path name
                return posixpath.split(value.name)[-1]
            except AttributeError:
                return value

    return __builtin__.sorted(values, key=key)
Example #16
    def FindZeroAngle(self):
        from __builtin__ import max, min, sorted
        
        # input
        x = self.Angle
        y = self.DetCtr

        # limits
        y_max = max(y)
        
        y_low = 0.01 * y_max # find suitable x range
        x_min = max(x)
        x_max = min(x)
        for xi, yi in zip(x, y):
            if yi > y_low:
                if x_min > xi:
                    x_min = xi
                if x_max < xi:
                    x_max = xi

        # sampling
        x_sam = self.linspace(x_min, x_max, num=500)
        y_sam = self.sample(x, y, x_sam)
        
        # normalized cross-correlation
        y_cnv = self.normxcorr(y_sam, y_sam)
        x_cnv = self.linspace(x_min, x_max, num=len(y_cnv))
                    
        # find suitable maximum of y_cnv
        yLevel = 0.5 * y_max
                    
        maxima = self.localmaxima(x_cnv, y_cnv)
        maxima = [m for m in maxima if m[1] > 0.0]                    # ignore negative matches
        maxima = [m for m in maxima if self.sample(x, y, m[0]) > yLevel]   # only consider high y values
        maxima = sorted(maxima, key=lambda m: m[1], reverse=True)     # best fit first
        
        if not maxima:
            self.PeakAng = x[y.index(y_max)]
            self.PeakVal = y_max
        
        else:
            x_cnv_max, y_cnv_max, i_cnv_max = maxima[0]
            self.PeakAng = self.maximumX(x_cnv, y_cnv, i_cnv_max)
            self.PeakVal = y_max
        
        print "Peak Angle:", self.PeakAng
        print "I(rock):", self.PeakVal
        return self.PeakAng
Example #17
File: util.py Project: ssorj/boneyard
def print_threads(writer=sys.stdout):
    row = "%-28s  %-36s  %-18s  %-8s  %-8s  %s"

    writer.write(row % ("Class", "Name", "Ident", "Alive", "Daemon", ""))
    writer.write(os.linesep)
    writer.write("-" * 120)
    writer.write(os.linesep)

    for thread in sorted(enumerate_threads()):
        cls = thread.__class__.__name__
        name = thread.name
        ident = thread.ident
        alive = thread.is_alive()
        daemon = thread.daemon
        extra = ""
        #extra = thread._Thread__target

        writer.write(row % (cls, name, ident, alive, daemon, extra))
        writer.write(os.linesep)
Example #18
def bundled_entry(deps, pwd, repo, deps_dict, level):
    re = parse_deps_down_level(deps, deps_dict, level)
    re = __builtin__.sorted(re)
    for n_v in re:
        ind = n_v.rindex('-')
        name, version = n_v[:ind], n_v[ind + 1:]
        nm_path = make_sure_node_module_dir(pwd)
        tgz = download_dep(name, nm_path, repo, version)
        print('%s%s %s@%s' % ('|', '___' * level, name, version), file=sys.stderr)
        open_to_and_clean(tgz, nm_path)

        src = os.path.join(nm_path, 'package')
        dest = os.path.join(nm_path, name)
        shutil.move(src, dest)

        deps = parse_deps_file(os.path.join(dest, 'package.json'))
        if not os.path.exists(os.path.join(dest, 'node_modules')) and deps:
            bundled_entry(deps, dest, repo, deps_dict, level + 1)
    for n_v in re:
        deps_dict.pop(n_v)
Example #19
def bundled_entry(deps, pwd, repo, deps_dict, level):
    re = parse_deps_down_level(deps, deps_dict, level)
    re = __builtin__.sorted(re)
    for n_v in re:
        ind = n_v.rindex('-')
        name, version = n_v[:ind], n_v[ind + 1:]
        nm_path = make_sure_node_module_dir(pwd)
        tgz = download_dep(name, nm_path, repo, version)
        print('%s%s %s@%s' % ('|', '___' * level, name, version),
              file=sys.stderr)
        open_to_and_clean(tgz, nm_path)

        src = os.path.join(nm_path, 'package')
        dest = os.path.join(nm_path, name)
        shutil.move(src, dest)

        deps = parse_deps_file(os.path.join(dest, 'package.json'))
        if not os.path.exists(os.path.join(dest, 'node_modules')) and deps:
            bundled_entry(deps, dest, repo, deps_dict, level + 1)
    for n_v in re:
        deps_dict.pop(n_v)
Example #20
def get_vocab(data):
    vocab = set()
    for analogy in data:
        vocab.update(analogy)
    vocab = sorted(vocab)
    return dict([(a, i) for i, a in enumerate(vocab)]), vocab
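
A small, hypothetical usage sketch for get_vocab (the analogy tuples are made-up data):

analogies = [('king', 'queen', 'man', 'woman'), ('paris', 'france', 'rome', 'italy')]
word_to_index, vocab = get_vocab(analogies)
print(vocab)                  # alphabetically sorted list of every word seen
print(word_to_index['king'])  # position of 'king' in that sorted list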
Example #21
File: flex.py Project: kek-pf-mx/dials
    def match_with_reference(self, other):
        '''
        Match reflections with another set of reflections.

        :param other: The reflection table to match against
        :return: The matches

        '''
        from collections import defaultdict
        import __builtin__
        logger.info("Matching reference spots with predicted reflections")
        logger.info(' %d observed reflections input' % len(other))
        logger.info(' %d reflections predicted' % len(self))

        # Get the miller index, entering flag and turn number for
        # both sets of reflections
        i1 = self['id']
        h1 = self['miller_index']
        e1 = self['entering'].as_int()
        x1, y1, z1 = self['xyzcal.px'].parts()
        p1 = self['panel']

        i2 = other['id']
        h2 = other['miller_index']
        e2 = other['entering'].as_int()
        x2, y2, z2 = other['xyzcal.px'].parts()
        p2 = other['panel']

        class Match(object):
            def __init__(self):
                self.a = []
                self.b = []

        # Create the match lookup
        lookup = defaultdict(Match)
        for i in range(len(self)):
            item = h1[i] + (e1[i], i1[i], p1[i])
            lookup[item].a.append(i)

        # Add matches from input reflections
        for i in range(len(other)):
            item = h2[i] + (e2[i], i2[i], p2[i])
            if item in lookup:
                lookup[item].b.append(i)

        # Create the list of matches
        match1 = []
        match2 = []
        for item, value in lookup.iteritems():
            if len(value.b) == 0:
                continue
            elif len(value.a) == 1 and len(value.b) == 1:
                match1.append(value.a[0])
                match2.append(value.b[0])
            else:
                matched = {}
                for i in value.a:
                    d = []
                    for j in value.b:
                        dx = x1[i] - x2[j]
                        dy = y1[i] - y2[j]
                        dz = z1[i] - z2[j]
                        d.append((i, j, dx**2 + dy**2 + dz**2))
                    i, j, d = __builtin__.min(d, key=lambda x: x[2])
                    if j not in matched:
                        matched[j] = (i, d)
                    elif d < matched[j][1]:
                        matched[j] = (i, d)
                for key1, value1 in matched.iteritems():
                    match1.append(value1[0])
                    match2.append(key1)

        # Select everything which matches
        sind = flex.size_t(match1)
        oind = flex.size_t(match2)

        # Sort by self index
        sort_index = flex.size_t(
            __builtin__.sorted(range(len(sind)), key=lambda x: sind[x]))
        sind = sind.select(sort_index)
        oind = oind.select(sort_index)

        s2 = self.select(sind)
        o2 = other.select(oind)
        h1 = s2['miller_index']
        h2 = o2['miller_index']
        e1 = s2['entering']
        e2 = o2['entering']
        assert (h1 == h2).all_eq(True)
        assert (e1 == e2).all_eq(True)
        x1, y1, z1 = s2['xyzcal.px'].parts()
        x2, y2, z2 = o2['xyzcal.px'].parts()
        distance = flex.sqrt((x1 - x2)**2 + (y1 - y2)**2 + (z1 - z2)**2)
        mask = distance < 2
        logger.info(' %d reflections matched' % len(o2))
        logger.info(' %d reflections accepted' % mask.count(True))
        self.set_flags(sind.select(mask), self.flags.reference_spot)
        self.set_flags(sind.select(o2.get_flags(self.flags.strong)),
                       self.flags.strong)
        self.set_flags(sind.select(o2.get_flags(self.flags.indexed)),
                       self.flags.indexed)
        self.set_flags(
            sind.select(o2.get_flags(self.flags.used_in_refinement)),
            self.flags.used_in_refinement)
        other_matched_indices = oind.select(mask)
        other_unmatched_mask = flex.bool(len(other), True)
        other_unmatched_mask.set_selected(
            other_matched_indices, flex.bool(len(other_matched_indices),
                                             False))
        other_matched = other.select(other_matched_indices)
        other_unmatched = other.select(other_unmatched_mask)
        for key, column in self.select(sind.select(mask)).cols():
            other_matched[key] = column
        mask2 = flex.bool(len(self), False)
        mask2.set_selected(sind.select(mask), True)
        return mask2, other_matched, other_unmatched
Example #22
def saveAsDFA(dfa, name):
    file = open(name, 'w+')

    states = "E: "
    hasError = False
    for state in sorted(dfa.delta.keys()):
        nameState = ''
        if len(dfa.delta.get(state)) != len(dfa.alphabet()):
            hasError = True
        for s in state:
            nameState = nameState + s
        states = states + "\t" + nameState + ','

    if hasError:
        states = states + '\tError,'
    states = states.rsplit(',', 1)[0]
    states = states + '.'

    simbols = "A: "
    for simbol in sorted(dfa.alphabet()):
        simbols = simbols + "\t" + simbol + ','
    simbols = simbols.rsplit(',', 1)[0]
    simbols = simbols + '.'

    transitions = "T:\n"
    for state in sorted(dfa.delta.keys()):
        for simbol in sorted(dfa.alphabet()):
            transitions = transitions + '\t'
            nameState = ''
            for s in state:
                nameState = nameState + s
            transitions = transitions + nameState + '[' + simbol + ']\t := \t{'
            targetSimbols = sorted(dfa.delta[state].keys())
            targets = dfa.delta[state]
            for t in targetSimbols:
                if simbol == t:
                    for s in targets[t]:
                        transitions = transitions + s
                    transitions = transitions + '},\n'
                else:
                    if len(targetSimbols) < 2:
                        transitions = transitions + 'Error},\n'
    transitions = transitions.rsplit(',', 1)[0]
    transitions = transitions + '.'

    stateInitial = "I:\t"
    nameState = ''
    for state in dfa.q0:
        for s in state:
            nameState = nameState + s
    stateInitial = stateInitial + nameState + '.'

    stateFinal = "F:\t"
    nameState = ''
    for state in dfa.F:
        for s in state:
            nameState = nameState + s
    stateFinal = stateFinal + nameState + '.'

    result = states
    result = result + '\n' + simbols
    result = result + '\n' + transitions
    result = result + '\n' + stateInitial
    result = result + '\n' + stateFinal

    file.write(result)
    file.close()
Example #23
def sortBy(sequence, function):
    return __builtin__.sorted(sequence, key=function)
Example #24
def sort(sequence):
    return __builtin__.sorted(sequence)
Example #25
def get_vocab(data):
    vocab = set()
    for analogy in data:
        vocab.update(analogy)
    vocab = sorted(vocab)
    return dict([(a, i) for i, a in enumerate(vocab)]), vocab
Example #26
def sorted(iterable, cmp=None, key=None, reverse=False):
    if cmp is not None:
        key = functools.cmp_to_key(cmp)
    return builtins.sorted(iterable, key=key, reverse=reverse)
Example #27
def saveAsDFA(dfa, name):
    file = open(name, 'w+')

    states = "E: "
    hasError = False
    for state in sorted(dfa.delta.keys()):
        nameState = ''
        if len(dfa.delta.get(state)) != len(dfa.alphabet()):
            hasError = True
        for s in state:
            nameState = nameState + s
        states = states + "\t" + nameState + ','
    
    if hasError:
        states = states + '\tError,'
    states = states.rsplit(',',1)[0]
    states = states + '.'
    
    simbols = "A: "
    for simbol in sorted(dfa.alphabet()):
        simbols = simbols + "\t" + simbol + ','
    simbols = simbols.rsplit(',',1)[0]
    simbols = simbols + '.'       

    transitions = "T:\n"
    for state in sorted(dfa.delta.keys()):
        for simbol in sorted(dfa.alphabet()):
            transitions = transitions + '\t'
            nameState = ''
            for s in state:
                nameState = nameState + s
            transitions = transitions + nameState + '[' + simbol + ']\t := \t{'
            targetSimbols = sorted(dfa.delta[state].keys())
            targets = dfa.delta[state]
            for t in targetSimbols:
                if simbol == t:
                    for s in targets[t]:
                        transitions = transitions + s
                    transitions = transitions + '},\n'
                else:
                    if len(targetSimbols) < 2:
                        transitions = transitions + 'Error},\n'
    transitions = transitions.rsplit(',',1)[0]
    transitions = transitions + '.'

    stateInitial = "I:\t"
    nameState = ''
    for state in dfa.q0:
        for s in state:
            nameState = nameState + s
    stateInitial = stateInitial + nameState + '.'

    stateFinal = "F:\t"
    nameState = ''
    for state in dfa.F:
        for s in state:
            nameState = nameState + s
    stateFinal = stateFinal + nameState + '.'

    result = states
    result = result + '\n' + simbols
    result = result + '\n' + transitions
    result = result + '\n' + stateInitial
    result = result + '\n' + stateFinal

    file.write(result)
    file.close()
Example #28
def sorted(iterable, cmp=None, key=None, reverse=False):
    if cmp is not None:
        key = functools.cmp_to_key(cmp)
    return builtins.sorted(iterable, key=key, reverse=reverse)
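
A brief, hypothetical usage sketch of the cmp-compatible wrapper above (assumes functools and builtins are imported alongside it; the tuples are made-up data):

data = [(2, 'b'), (1, 'c'), (1, 'a')]
# old-style cmp function ordering tuples by their second element
by_second = sorted(data, cmp=lambda x, y: (x[1] > y[1]) - (x[1] < y[1]))
print(by_second)  # [(1, 'a'), (2, 'b'), (1, 'c')]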
Example #29
File: versions.py Project: jol/ipkg
def sorted(versions, parser=parse, reverse=False):
    """Returned sorted ``versions``.
    """
    return __builtin__.sorted(versions, key=parser, cmp=compare,
                              reverse=reverse)