Example #1
0
def reference_number(data):
    """Append the 7-3-1 check digit to *data* and return the result
    formatted as space-separated groups of five characters, with the
    grouping anchored at the right-hand end of the string.
    """
    weights = (7, 3, 1)
    total = sum(int(digit) * weights[pos % 3]
                for pos, digit in enumerate(reversed(data)))
    check_digit = -total % 10
    ref = '%s%d' % (data, check_digit)
    # grouper() chunks the reversed string; un-reverse each chunk and the
    # chunk order so the 5-character groups count from the right.
    chunks = [''.join(reversed(chunk))
              for chunk in grouper(5, reversed(ref), '')]
    return ' '.join(reversed(chunks))
Example #2
0
 def validate_anyOf(self, *args, **kwargs):
     """Validate ``anyOf`` but condense the sub-errors before yielding.

     The stock validator reports one error per failing subschema, which
     is noisy.  This override (Python 2 code: ``iteritems``,
     ``dict.values()[0]``):

     * if every subschema failed with a ``type`` error at this path,
       yields a single virtual ``type`` error listing all accepted types;
     * if exactly one subschema had no ``type`` error, assumes that is
       the intended branch and yields its errors re-rooted at this path;
     * otherwise yields the original composite error unchanged.
     """
     for error in super(SchemaValidator, self).validate_anyOf(*args, **kwargs):
         # Split the suberrors up by which subschema they are from
         subschema_errors = defaultdict(list)
         for sube in error.context:
             subschema_errors[sube.schema_path[0]].append(sube)
         # Find the subschemas that did not have a 'type' error validating the instance at this path
         no_type_errors = dict(subschema_errors)
         valid_types = set()
         for i, errors in subschema_errors.iteritems():
             for e in errors:
                 if e.validator == 'type' and not e.path:
                     # Remove from the no_type_errors dict
                     no_type_errors.pop(i, None)
                     # Add the valid types to the list of all valid types
                     if self.is_type(e.validator_value, 'string'):
                         valid_types.add(e.validator_value)
                     else:
                         valid_types.update(e.validator_value)
         if not no_type_errors:
             # If all of the branches had a 'type' error, create our own virtual type error with all possible types
             for e in self.descend(error.instance, {'type': valid_types}):
                 yield e
         elif len(no_type_errors) == 1:
             # If one of the possible schemas did not have a 'type' error, assume that is the intended one and issue
             # all errors from that subschema
             for e in no_type_errors.values()[0]:
                 e.schema_path.extendleft(reversed(error.schema_path))
                 e.path.extendleft(reversed(error.path))
                 yield e
         else:
             yield error
Example #3
0
    def wrap(f):
        """Wrap *f* so its cffi return value is converted for Python.

        ``ffi.NULL`` becomes ``None`` when *nullable* is set, and the
        return value is converted with ``ffi.string`` when
        *returns_string* is set (both flags, plus ``ffi`` and ``decl``,
        are closed over from the enclosing scope).
        """
        @wraps(f)
        def inner_f(*args):
            val = f(*args)
            if nullable and val == ffi.NULL:
                return None
            elif returns_string:
                return ffi.string(val)
            return val

        # this insanity inserts a formatted argspec string
        # into the function's docstring, so that sphinx
        # gets the right args instead of just the wrapper args
        # NOTE(review): inspect.getargspec is removed in Python 3.11;
        # this code targets older interpreters.
        args, varargs, varkw, defaults = inspect.getargspec(f)
        defaults = () if defaults is None else defaults
        # Quote string defaults so they render as literals in the docs.
        defaults = ["\"{}\"".format(a) if type(a) == str else a for a in defaults]
        # Walk the args right-to-left so defaults (which align with the
        # tail of the argument list) can be matched up by index.
        l = ["{}={}".format(arg, defaults[(idx+1)*-1])
             if len(defaults)-1 >= idx else
             arg for idx, arg in enumerate(reversed(list(args)))]
        if varargs:
            l.append('*' + varargs)
        if varkw:
            l.append('**' + varkw)
        doc = "{}({})\n\nC: ``{}``\n\n{}".format(f.__name__, ', '.join(reversed(l)), decl, f.__doc__)
        inner_f.__doc__ = doc
        return inner_f
Example #4
0
    def run(self):
        """Run the full pitch post-processing pipeline and return the
        cleaned pitch track.

        Several corrections are direction-sensitive, so they are applied
        once forward and once on the reversed track (reverse, correct,
        reverse back).  If ``self.out`` is set the filtered data is also
        written to ``<fname minus last 5 chars>_filtered.json``.
        """

        self.correct_octave_errors_by_chunks()
        self.remove_extreme_values()

        # Jump correction in both directions.
        self.correct_jumps()
        self.pitch = list(reversed(self.pitch))
        self.correct_jumps()
        self.pitch = list(reversed(self.pitch))

        self.filter_noise_region()

        # Octave-error correction in both directions.
        self.correct_oct_error()
        self.pitch = list(reversed(self.pitch))
        self.correct_oct_error()
        self.pitch = list(reversed(self.pitch))

        self.correct_octave_errors_by_chunks()
        self.filter_chunks_by_energy(chunk_limit=60)

        if self.out:
            self.data['pitch'] = self.pitch
            # NOTE(review): assumes self.fname ends with a 5-character
            # suffix such as ".json" -- confirm.
            with open(self.fname[:-5] + "_filtered.json", 'w') as f: json.dump(self.data, f)

        return self.pitch
Example #5
0
def _balance_tags(tags):
  """Throw out any close tags without an open tag.

  If {@code <table>} is used for formatting, embedded HTML shouldn't be able
  to use a mismatched {@code </table>} to break page layout.

  Args:
    tags: The list of all tags in this text.

  Returns:
    A string containing zero or more closed tags that close all elements that
    are opened in tags but not closed.
  """
  # Stack of close-tag strings for every element opened so far.
  pending_closers = []
  for pos, tag in enumerate(tags):
    if tag[1] == '/':
      # Scan the stack from the top down for the matching close tag.
      match = len(pending_closers) - 1
      while match >= 0:
        if pending_closers[match] == tag:
          break
        match -= 1

      if match < 0:
        tags[pos] = ''  # Unmatched close tag: drop it.
      else:
        # Close everything opened since the match, innermost first.
        tags[pos] = ''.join(pending_closers[match:][::-1])
        del pending_closers[match:]

    elif not _HTML5_VOID_ELEMENTS_RE.match(tag):
      pending_closers.append('</' + tag[1:])

  return ''.join(pending_closers[::-1])
Example #6
0
 def filterRevisions(self, revisions, filter=None, max_revs=None):
     """Filter a set of revisions based on any number of filter criteria.
     If specified, filter should be a dict with keys corresponding to
     revision attributes, and values of 1+ strings.

     Yields DevRevision objects, iterating *revisions* in reverse, and
     yields at most *max_revs* of them.  Python 2 code (``iteritems``,
     ``unicode``).
     """
     if not filter:
         if max_revs is None:
             for rev in reversed(revisions):
                 yield DevRevision(rev)
         else:
             for index,rev in enumerate(reversed(revisions)):
                 if index >= max_revs:
                     break
                 yield DevRevision(rev)
     else:
         for index, rev in enumerate(reversed(revisions)):
             if max_revs and index >= max_revs:
                 break
             try:
                 # A revision passes only if it matches every criterion;
                 # DoesNotPassFilter short-circuits to the next revision.
                 for field,acceptable in filter.iteritems():
                     if not hasattr(rev, field):
                         raise DoesNotPassFilter
                     if type(acceptable) in (str, unicode):
                         # Single acceptable value: exact match required.
                         if getattr(rev, field) != acceptable:
                             raise DoesNotPassFilter
                     elif type(acceptable) in (list, tuple, set):
                         # Several acceptable values: membership test.
                         if getattr(rev, field) not in acceptable:
                             raise DoesNotPassFilter
                 yield DevRevision(rev)
             except DoesNotPassFilter:
                 pass
Example #7
0
    def show_registers(self):
        """Print the available registers.

        Lists the 1D histograms (``Experiment.plots``) followed by the
        2D histograms (``Experiment.maps``).  Entries are printed newest
        first with negative indices (-1 is the most recent).
        """
        # The two tables had identical formatting code; it now lives in
        # one helper so the layout cannot drift between them.
        self._show_register_table('1D histograms', Experiment.plots)
        self._show_register_table('2D histograms', Experiment.maps)

    def _show_register_table(self, title, registers):
        """Print one table of *registers* under *title*, newest first."""
        print(title)
        print('{: <3} {: ^40} {: ^5} {: ^8} {: ^8}'.
                format('i', 'Title', 'Bin', 'Norm', 'Active'))
        print('-' * 79)

        # Negative indices count back from the end of the register list.
        i = -1
        for p in reversed(registers):
            print('{: >3} {: <40} {: >5} {: >5.2e} {: >5}'.
                    format(i, p.histogram.title[:40], p.bin_size,
                           p.norm, p.active))
            i -= 1
        print()
Example #8
0
    def execute(self, slot, subindex, roi, result):
        """Fetch the requested ROI from DVID into ``result``.

        Logs an error (but still proceeds) when more than 1e9 voxels are
        requested.  When ``self._transpose_axes`` is set, the roi bounds
        are reversed before the request and the returned array is
        transposed, converting between axis orderings.
        """
        if numpy.prod(roi.stop - roi.start) > 1e9:
            logger.error("Requesting a very large volume from DVID: {}\n"\
                         "Is that really what you meant to do?"
                         .format( roi ))

        # TODO: Modify accessor implementation to accept a pre-allocated array.

# FIXME: Disabled throttling for now.  Need a better heuristic or explicit setting.
#         # For "heavy" requests, we'll use the throttled accessor
#         HEAVY_REQ_SIZE = 256*256*10
#         if numpy.prod(result.shape) > HEAVY_REQ_SIZE:
#             accessor = self._throttled_accessor
#         else:
#             accessor = self._default_accessor

        accessor = self._default_accessor # FIXME (see above)

        if self._transpose_axes:
            roi_start = tuple(reversed(roi.start))
            roi_stop = tuple(reversed(roi.stop))
            result[:] = accessor.get_ndarray(roi_start, roi_stop).transpose()
        else:
            result[:] = accessor.get_ndarray(roi.start, roi.stop)
        return result
def _get_flatmirror_attrdict(cls, add, exclude, bases, dict_factory):
    """Collect the attributes of *cls* and its ancestors into one flat dict.

    Walks the MRO from the root (object) down to *cls*, copying each
    class's ``__dict__`` entries, so attributes defined closer to *cls*
    overwrite inherited ones.  ``__slots__`` names and *exclude* are
    skipped at each level, but every slot name seen is accumulated and
    merged into the resulting ``__slots__`` entry.  Python 2 code
    (``basestring``).

    Args:
      cls: class whose flattened attribute dict is wanted.
      add: optional mapping of extra attributes applied last (may override).
      exclude: iterable of attribute names to omit.
      bases: bases intended for the mirror class; leading bases that are
        real ancestors of *cls* (typically ``object``) are skipped.
      dict_factory: callable producing the result mapping.
    """
    attrdict = dict_factory()
    slotset = set()
    rev_mro = reversed(cls.mro())
    supercls = next(rev_mro)
    for basecls in reversed(bases):
        # skip some redundant base classes (typically: object)
        if issubclass(basecls, supercls):
            supercls = next(rev_mro)
        else:
            break
    while True:
        # Copy this level's attributes, excluding its slots (the slot
        # descriptors must not leak into the flattened dict).
        slots = getattr(supercls, '__slots__', ())
        excl = frozenset(slots).union(exclude)
        slotset.update(slots)
        for attrname, obj in supercls.__dict__.items():
            if attrname not in excl:
                attrdict[attrname] = obj
        if supercls is not cls:
            supercls = next(rev_mro)
        else:
            break
    if add is not None:
        attrdict.update(add)
    # Normalise __slots__: a bare string means a single slot name.
    slots = attrdict.get('__slots__', None)
    if slots is not None:
        if isinstance(slots, basestring):
            slots = (slots,)
        slotset.update(slots)
        attrdict['__slots__'] = tuple(slotset)
    return attrdict
Example #10
0
def list_changes(upstream, feature, paths=[]):
    """Classify the revisions that differ between two branches.

    Revisions are matched by their change id.  Returns a 5-tuple of
    revision sequences (each iterates its branch's revisions in reverse
    of the order ``list_revs`` produced them):
      incoming         -- upstream revs whose change id is not on feature
      outgoing         -- feature revs whose change id is not upstream
      common           -- feature revs whose change id is also upstream
      upstream_unknown -- upstream revs without a change id
      feature_unknown  -- feature revs without a change id

    NOTE(review): the mutable default ``paths=[]`` is only read here, so
    it is harmless, but a tuple default would be safer.  On Python 3 the
    ``filter`` results are single-use iterators, not lists.
    """
    feature_revs = tuple(list_revs(upstream, feature, paths=paths))
    upstream_revs = tuple(list_revs(feature, upstream, paths=paths))

    # change id -> revision lookup tables for each side.
    feature_cids = dict([
        (c.change_id, c) for c in feature_revs if c.change_id is not None ])
    upstream_cids = dict([
        (c.change_id, c) for c in upstream_revs if c.change_id is not None ])

    incoming = filter(
        lambda r: r.change_id and r.change_id not in feature_cids,
        reversed(upstream_revs))
    outgoing = filter(
        lambda r: r.change_id and r.change_id not in upstream_cids,
        reversed(feature_revs))
    common = filter(
        lambda r: r.change_id in upstream_cids,
        reversed(feature_revs))
    upstream_unknown = filter(
        lambda r: r.change_id is None,
        reversed(upstream_revs))
    feature_unknown = filter(
        lambda r: r.change_id is None,
        reversed(feature_revs))

    return incoming, outgoing, common, upstream_unknown, feature_unknown
 def try_to_bump(self, left, right):
     """Try to narrow this block by bumping the child groups around the
     gap between children *left* and *right* closer together.

     Collects the combined right-facing edges of every child up to and
     including *left*, and the left-facing edges of every child back to
     and including *right*, then grows ``adjust`` in edge-width steps
     until ``bump_polygons`` accepts it (or a 1000-unit safety cap is
     hit).  Any recovered slack beyond a 3-edge-width margin is split
     evenly: the block shrinks and the two groups move toward each other.
     """
     left_edge = []
     # Instead of bumping left and right, one has to bump all [..., left] and [right, ...] children.
     # It may be that the child just before the left had a branch that overlaps with the right side branches.
     for child in self.child_blocks:
         left_edge += child.right_edges
         if child is left:
             break
     right_edge = []
     for child in reversed(self.child_blocks):
         right_edge += child.left_edges
         if child is right:
             break
     adjust = prefs.edge_width
     # presumably bump_polygons is falsy while the edges still collide at
     # this adjustment -- confirm against its definition.
     while (not bump_polygons(left_edge, right_edge, adjust)) and adjust < 1000:
         adjust += prefs.edge_width
     # Keep a 3-edge-width margin; only shrink when there is slack left.
     amount = adjust - 3 * prefs.edge_width
     if amount > 0:
         a2 = amount / 2
         self.width -= amount
         self.left += a2
         # Shift the left group right and the right group left, a2 each.
         for child in self.child_blocks:
             child.move(a2, 0)
             if child is left:
                 break
         for child in reversed(self.child_blocks):
             child.move(-a2, 0)
             if child is right:
                 break
Example #12
0
    def new_enum_type(self, name, enumerators, enumvalues, CTypesInt):
        """Build a CTypes-backed enum class called *name*.

        *enumerators* and *enumvalues* are parallel sequences; a reverse
        (value -> name) mapping is kept so a raw value can be rendered
        back as its symbolic name.
        """
        assert isinstance(name, str)
        # Iterating the pairs in reverse means the FIRST enumerator wins
        # whenever several enumerators share the same value.
        reverse_mapping = {value: label
                           for value, label in zip(reversed(enumvalues),
                                                   reversed(enumerators))}
        #
        class CTypesEnum(CTypesInt):
            __slots__ = []
            _reftypename = '%s &' % name

            def _get_own_repr(self):
                v = self._value
                if v in reverse_mapping:
                    return '%d: %s' % (v, reverse_mapping[v])
                return str(v)

            def _to_string(self, maxlen):
                v = self._value
                if v in reverse_mapping:
                    return reverse_mapping[v]
                return str(v)
        #
        CTypesEnum._fix_class()
        return CTypesEnum
Example #13
0
    def _FindNodeValue(self, key):
        """Return the freshest value stored for |key| across self._nodes.

        Memoised per key in |self._value_info| as a pair of
        (found_node_list, checked_node_set), so each node is inspected at
        most once per key across calls.  Returns None when no node holds
        |key|.
        """
        # |found_node_list| will be all the nodes that |key| has been found in.
        # |checked_node_set| are those that have been checked.
        info = self._value_info.get(key)
        if info is None:
            info = ([], set())
            self._value_info[key] = info
        found_node_list, checked_node_set = info

        # Check all the nodes not yet checked for |key|.
        newly_found = []
        for node in reversed(self._nodes):
            if node in checked_node_set:
                # Everything below this node was scanned on a prior call.
                break
            value = node.Get(key)
            if value is not None:
                newly_found.append(node)
            checked_node_set.add(node)

        # The nodes will have been found in reverse stack order. After extending
        # the found nodes, the freshest value will be at the tip of the stack.
        found_node_list.extend(reversed(newly_found))
        if not found_node_list:
            return None

        return found_node_list[-1]._value.get(key)
Example #14
0
def signature_argspec(f):
    """Emulate ``inspect.getargspec`` on top of ``inspect.signature``.

    Returns FakeArgSpec(args, varargs, keywords, defaults): the
    positional parameter names, the ``*``/``**`` parameter names (or
    None), and the tuple of trailing positional defaults (or None).

    Raises TypeError for callables that signature() cannot handle.
    """
    from inspect import signature, Parameter, _empty

    try:
        # NO_ARGSPEC selects the follow_wrapped behaviour needed to
        # mirror getargspec on decorated functions.
        sig = signature(f, follow_wrapped=False) if NO_ARGSPEC else signature(f)
    except ValueError:
        raise TypeError("unsupported callable")

    positional = [name for name, param in sig.parameters.items()
                  if param.kind in (Parameter.POSITIONAL_ONLY,
                                    Parameter.POSITIONAL_OR_KEYWORD)]

    varargs = None
    keywords = None
    for name, param in sig.parameters.items():
        if param.kind == Parameter.VAR_POSITIONAL:
            varargs = name
        elif param.kind == Parameter.VAR_KEYWORD:
            keywords = name

    # Collect the trailing run of positional parameters that carry
    # defaults, walking right-to-left and stopping at the first without.
    trailing = []
    for name in reversed(positional):
        value = sig.parameters[name].default
        if value is _empty:
            break
        trailing.append(value)
    defaults = tuple(reversed(trailing)) if trailing else None

    return FakeArgSpec(positional, varargs, keywords, defaults)
    def _do_get_provider_count_and_objs(self, **kw):
        """Return (count, projects) for the provider listing.

        Only projects that satisfy certain conditions and for which the
        current user holds the required permissions are listed.
        (Docstring translated from Spanish.)
        """
        if TieneAlgunPermiso(tipo = u"Fase", recurso = u"Linea Base"):
            proyectos = DBSession.query(Proyecto).filter(Proyecto.estado != \
                        u"Nuevo").order_by(Proyecto.id).all()

            # Drop projects that do not match the search text in any
            # field.  Iterating with reversed() while removing the
            # current element keeps the remaining (lower) indices valid.
            for proyecto in reversed(proyectos):
                buscado = self.buscado in str(proyecto.nro_fases) or \
                          self.buscado in str(proyecto.fecha_inicio) or \
                          self.buscado in str(proyecto.fecha_fin) or \
                          self.buscado in proyecto.lider.nombre_usuario or \
                          self.buscado in proyecto.nombre or \
                          self.buscado in proyecto.descripcion or \
                          self.buscado in proyecto.estado
                if not buscado: proyectos.remove(proyecto)

            # Drop projects the user lacks per-project permission for, or
            # that fail the self.fase_apta() check.
            for proyecto in reversed(proyectos):
                if not (TieneAlgunPermiso(tipo = u"Fase", recurso = \
                        u"Linea Base", id_proyecto = proyecto.id) \
                        .is_met(request.environ) and self.fase_apta(proyecto)): 
                    proyectos.remove(proyecto)

        else: proyectos = list()
        return len(proyectos), proyectos 
Example #16
0
    def get_tail(self, n=10, raw=True, output=False, include_latest=False):
        """Get the last n lines from the history database.

        Parameters
        ----------
        n : int
          The number of lines to get
        raw, output : bool
          See :meth:`get_range`
        include_latest : bool
          If False (default), n+1 lines are fetched, and the latest one
          is discarded. This is intended to be used where the function
          is called by a user command, which it should not return.

        Returns
        -------
        Tuples as :meth:`get_range`
        """
        self.writeout_cache()
        # Fetch one extra row when the newest entry is to be dropped.
        fetch = n if include_latest else n + 1
        rows = list(self._run_sql("ORDER BY session DESC, line DESC LIMIT ?",
                                  (fetch,), raw=raw, output=output))
        if include_latest:
            return reversed(rows)
        # Rows arrive newest-first; rows[0] is the latest entry to skip.
        return reversed(rows[1:])
	def align(self, dataX, dataXY, reverse=False):
		"""Interpolate *dataX* against the (x, y) table in *dataXY*.

		np.interp requires ascending x coordinates, so when *reverse* is
		set every sequence is flipped before interpolating and the result
		is flipped back.  Stores the result on self.aligned and returns it.
		"""
		if reverse:
			xs = list(reversed(dataX))
			xp = list(reversed(dataXY[0]))
			fp = list(reversed(dataXY[1]))
			self.aligned = list(reversed(np.interp(xs, xp, fp)))
		else:
			self.aligned = list(np.interp(dataX, dataXY[0], dataXY[1]))

		return self.aligned
Example #18
0
 def uniquePathsWithObstacles(self, obstacleGrid):
     """Count the monotone (right/down only) paths from the top-left to
     the bottom-right cell of a grid where 1 marks an obstacle.

     The DP accounts for obstacles while computing their SUCCESSOR
     cells: a blocked neighbour contributes nothing to the current cell.
     """
     rows = len(obstacleGrid)
     cols = len(obstacleGrid[0])
     # No path can terminate on an obstacle.
     if obstacleGrid[rows - 1][cols - 1] == 1:
         return 0

     counts = [[1] * cols for _ in range(rows)]

     # Everything at or below the first obstacle in column 0 is unreachable.
     blocked_row = rows
     for r in range(rows):
         if obstacleGrid[r][0] == 1:
             blocked_row = r
             break
     for r in range(blocked_row, rows):
         counts[r][0] = 0

     # Likewise everything right of the first obstacle in row 0.
     blocked_col = cols
     for c in range(cols):
         if obstacleGrid[0][c] == 1:
             blocked_col = c
             break
     for c in range(blocked_col, cols):
         counts[0][c] = 0

     for r in range(1, rows):
         for c in range(1, cols):
             up_blocked = obstacleGrid[r - 1][c] == 1
             left_blocked = obstacleGrid[r][c - 1] == 1
             if up_blocked and left_blocked:
                 counts[r][c] = 0
             elif up_blocked:
                 counts[r][c] = counts[r][c - 1]
             elif left_blocked:
                 counts[r][c] = counts[r - 1][c]
             else:
                 counts[r][c] = counts[r - 1][c] + counts[r][c - 1]

     return counts[rows - 1][cols - 1]
    def release_linked(self, event):
        """Called when mousebutton released over the linked list.
            Removes items from the first list and adds them to the other
        """
        # print self.curselection()
        ot = self.otherDropList
        #Go backwards so as to not delete wrong item when the list resizes
        l = self.curselection()

        # Collect the selected items and their variables, deleting them
        # from this list back-to-front so earlier indices stay valid.
        items = []
        varl = []
        for d in reversed(l):
            # print d,type(d)
            items.append( self.get(d) )
            varl.append( self.variables[int(d)] )
            self.delete(d)
            self.variables.pop(int(d))

        # print items
        # Insertion point in the target list: the entry nearest to the
        # drop position (y relative to the target widget).
        y = event.y_root-ot.winfo_rooty()
        i = ot.nearest(y)

        for item in reversed(items):
            if ot.nearest(y) == -1:
                # Target list is empty: just append.
                ot.insert("end",item,varl.pop())
                i = ot.nearest(y)
                continue

            # print i
            # NOTE(review): bbox[3] is the entry height; comparing
            # bbox[3]/2.0 directly against y looks like it was meant to
            # be bbox[1] + bbox[3]/2.0 -- confirm before relying on the
            # above/below insertion choice.
            bbox = ot.bbox(i)
            if bbox==None or bbox[3]/2.0 > y: 
                ot.insert(i,item,varl.pop())
            else:
                ot.insert(i+1,item,varl.pop())
            i += 1
Example #20
0
 def instructions_reversed(self):
     """Yield every instruction in the module in reverse order.

     Functions come first (last function first, each yielding its own
     instructions in reverse), followed by the global instructions,
     also reversed.
     """
     # Iterate over snapshots so mutation of the lists while the
     # generator is live does not affect the traversal.
     for func in reversed(list(self.functions)):
         for instruction in func.instructions_reversed():
             yield instruction
     for instruction in reversed(list(self.global_insts)):
         yield instruction
    def test_walk_versions_all_true(self, _migrate_up, _migrate_down):
        """With snake_walk=True and downgrade=True, verify the exact
        sequence of upgrade and downgrade calls made by _walk_versions
        against the mocked migrate helpers."""
        self.REPOSITORY.latest = 20
        self.migration_api.db_version.return_value = self.INIT_VERSION

        self._walk_versions(self.engine, snake_walk=True, downgrade=True)

        versions = range(self.INIT_VERSION + 1, self.REPOSITORY.latest + 1)
        # Expected upgrades: each version twice in order (once with
        # with_data=True), then every version once more in reverse.
        upgraded = []
        for v in versions:
            upgraded.append(mock.call(self.engine, v, with_data=True))
            upgraded.append(mock.call(self.engine, v))
        upgraded.extend(
            [mock.call(self.engine, v) for v in reversed(versions)]
        )
        self.assertEqual(upgraded, self._migrate_up.call_args_list)

        # Expected downgrades: once per version in order (with_data=True),
        # then twice per version in reverse.
        downgraded_1 = [
            mock.call(self.engine, v - 1, with_data=True) for v in versions
        ]
        downgraded_2 = []
        for v in reversed(versions):
            downgraded_2.append(mock.call(self.engine, v - 1))
            downgraded_2.append(mock.call(self.engine, v - 1))
        downgraded = downgraded_1 + downgraded_2
        self.assertEqual(self._migrate_down.call_args_list, downgraded)
Example #22
0
File: key.py Project: JoonyLi/veusz
 def _layout(self, entries, totallines):
     """Layout the items, trying to keep the box as small as possible
     while still filling the columns.

     Returns (layout, (numrows, numcols)).  Python 2 code: note the
     load-bearing integer division in ``totallines / numcols`` and the
     use of ``xrange``.
     """

     maxcols = self.settings.columns
     numcols = min(maxcols, max(len(entries), 1))

     if not entries:
         return (list(), (0, 0))

     # start with evenly-sized rows and expand to fit
     numrows = totallines / numcols
     layout = []

     while not layout:
         # try to do a first cut of the layout, and expand the box until
         # everything fits
         (layout, colstats, newrows) = self._layoutChunk(entries, (0, 0), (numrows, numcols))
         if not layout:
             numrows = newrows

     # ok, we've got a layout where everything fits, now pull items right
     # to fill the remaining columns, if need be
     while colstats[-1] == 0:
         # shift 1 item to the right, up to the first column that has
         # excess items
         meanoccupation = max(1, sum(colstats)/float(numcols))

         # loop until we find a victim item which can be safely moved
         victimcol = numcols
         while True:
             # find the right-most column with excess occupation number
             for i in reversed(xrange(victimcol)):
                 if colstats[i] > meanoccupation:
                     victimcol = i
                     break

             # find the last item in the victim column
             victim = 0
             for i in reversed(xrange(len(layout))):
                 if layout[i][2] == victimcol:
                     victim = i
                     break

             # try to relayout with the victim item shoved to the next column
             (newlayout, newcolstats, newrows) = self._layoutChunk(entries[victim:],
                                                     (0, victimcol+1), (numrows, numcols))
             if newlayout:
                 # the relayout worked, so accept it
                 layout = layout[0:victim] + newlayout
                 colstats[victimcol] -= 1
                 del colstats[victimcol+1:]
                 colstats += newcolstats[victimcol+1:]
                 break

             # if we've run out of potential victims, just return what we have
             if victimcol == 0:
                 return (layout, (numrows, numcols))

     return (layout, (numrows, numcols))
Example #23
0
 def train(self, inp, out, training_weight=1.):
     """One gradient-descent step on a single (inp, out) example.

     The forward pass stores every layer's activation and derivative,
     errors are back-propagated, and the weights/biases are updated in
     place, scaled by the learning rate and *training_weight*.  Returns
     0.5 * the squared residual of the first output row (``tmp[0]``),
     scaled by *training_weight*.
     """
     inp = np.mat(inp).T
     out = np.mat(out).T
     deriv = []
     val = inp
     vals = [val]
     # forward calculation of activations and derivatives
     for weight,bias in self.__weights:
         val = weight*val
         val += bias
         deriv.append(self.__derivative(val))
         vals.append(self.__activation(val))
     deriv = iter(reversed(deriv))
     weights = iter(reversed(self.__weights))
     errs = []
     # Output-layer error: (prediction - target) * f'(z).  Note next()
     # consumes the first derivative before the loop below.
     errs.append(np.multiply(vals[-1]-out, next(deriv)))
     # backwards propagation of errors
     for (w,b),d in zip(weights, deriv):
         errs.append(np.multiply(np.dot(w.T, errs[-1]), d))
     weights = iter(self.__weights)
     # Pair each layer's weights with its input activation and its error
     # (errs was built output-first, hence reversed here).
     for (w,b),v,e in zip(\
             self.__weights,\
             vals, reversed(errs)):
         e *= self.__learning_rate*training_weight
         w -= e*v.T
         b -= e
     tmp = vals[-1]-out
     return np.dot(tmp[0].T,tmp[0])*.5*training_weight
 def setZeroes(self, matrix):
     """
     :type matrix: List[List[int]]
     :rtype: void Do not return anything, modify matrix in-place instead.
     """
     # O(1) extra space: row 0 and column 0 double as the zero markers.
     rows, cols = len(matrix), len(matrix[0])
     first_col_zero = False  # column 0 needs its own flag

     # Pass 1: record every zero in its row/column header cells.
     for r in range(0, rows):
         if matrix[r][0] == 0:
             first_col_zero = True
         for c in range(1, cols):
             if matrix[r][c] == 0:
                 matrix[r][0] = matrix[0][c] = 0

     # Pass 2: sweep bottom-up / right-to-left so the marker row and
     # column are consumed last.
     for r in reversed(range(rows)):
         for c in reversed(range(1, cols)):
             if matrix[r][0] == 0 or matrix[0][c] == 0:
                 matrix[r][c] = 0
         if first_col_zero:
             matrix[r][0] = 0

     return matrix
Example #25
0
 def test_reversed_simple_sequences(self):
     l = range(5)
     rev = reversed(l)
     assert list(rev) == [4, 3, 2, 1, 0]
     assert list(l.__reversed__()) == [4, 3, 2, 1, 0]
     s = "abcd"
     assert list(reversed(s)) == ['d', 'c', 'b', 'a']
Example #26
0
 def scanner_network(self,gateway):
     """Scan the gateway's subnet for live hosts and fill the results table.

     The last octet of *gateway* is replaced by each value in the
     configured ip_range; one worker process per address reports live
     hosts ("ip|mac") into a shared dict, which is then rendered into
     ``self.tables``.  Python 2 code (``xrange``, list-returning
     ``dict.keys()`` passed to ``reversed``).
     """
     scan = ''
     # Rebuild the gateway prefix ("a.b.c.") without the last octet.
     config_gateway = gateway.split('.')
     del config_gateway[-1]
     for i in config_gateway:
         scan += str(i) + '.'
     gateway = scan
     ranger = str(self.ip_range.text()).split('-')
     jobs = []
     manager = Manager()
     on_ips = manager.dict()
     # One worker process per candidate address in the range.
     for n in xrange(int(ranger[0]),int(ranger[1])):
         ip='%s{0}'.format(n)%(gateway)
         p = Process(target=self.working,args=(ip,on_ips))
         jobs.append(p)
         p.start()
     for i in jobs: i.join()
     for i in on_ips.values():
         Headers = []
         # Each shared-dict entry is "ip|mac".
         n = i.split('|')
         self.data['IPaddress'].append(n[0])
         self.data['MacAddress'].append(n[1])
         self.data['Hostname'].append('<unknown>')
         # Repaint the whole table after each discovered host.
         for n, key in enumerate(reversed(self.data.keys())):
             Headers.append(key)
             for m, item in enumerate(self.data[key]):
                 item = QTableWidgetItem(item)
                 item.setTextAlignment(Qt.AlignVCenter | Qt.AlignCenter)
                 self.tables.setItem(m, n, item)
     Headers = []
     for key in reversed(self.data.keys()):
         Headers.append(key)
     self.tables.setHorizontalHeaderLabels(Headers)
    def test_create_changes_overridden_project(self):
        """create_changes must honour a split_file hook that rewrites the
        project / repository / codebase of every change."""
        def custom_split_file(path):
            # Delegate to the stock splitter, then stamp the overrides.
            f = split_file(path)
            if f:
                f["project"] = "overridden-project"
                f["repository"] = "overridden-repository"
                f["codebase"] = "overridden-codebase"
            return f

        base = ("file:///home/warner/stuff/Projects/Buildbot/trees/" +
                "svnpoller/_trial_temp/test_vc/repositories/SVN-Repository/sample")
        s = self.attachSVNPoller(base, split_file=custom_split_file)
        s._prefix = "sample"

        # Fake log entries keyed by revision number 1..6.
        logentries = dict(
            zip(range(1, 7), reversed(make_logentry_elements(6))))
        changes = s.create_changes(reversed([logentries[3], logentries[2]]))
        self.assertEqual(len(changes), 2)

        # note that parsing occurs in reverse
        self.assertEqual(changes[0]['branch'], "branch")
        self.assertEqual(changes[0]['revision'], '2')
        self.assertEqual(changes[0]['project'], "overridden-project")
        self.assertEqual(changes[0]['repository'], "overridden-repository")
        self.assertEqual(changes[0]['codebase'], "overridden-codebase")

        self.assertEqual(changes[1]['branch'], "branch")
        self.assertEqual(changes[1]['files'], [u'çmain.c'])
        self.assertEqual(changes[1]['revision'], '3')
        self.assertEqual(changes[1]['project'], "overridden-project")
        self.assertEqual(changes[1]['repository'], "overridden-repository")
        self.assertEqual(changes[1]['codebase'], "overridden-codebase")
Example #28
0
    def _get_descendants_by_speech(self, **kwargs):
        """Return descendants of this node ordered by earliest speech time.

        Two passes over the descendant queryset:
        1. push every node's ``speech_min`` up to each of its ancestors,
           so an ancestor ends up with the earliest time in its subtree;
        2. nodes without a ``speech_min`` inherit the nearest ancestor's.
        Nodes that still have none sort last (max_datetime fallback).
        """
        dqs = self._get_descendants(include_min=True, **kwargs)

        max_datetime = datetime.datetime(datetime.MAXYEAR, 12, 31)

        # id -> node table so ids in d.path can be resolved; self is
        # included since it heads every descendant's path.
        lookup = dict((x.id, x) for x in dqs)
        lookup[self.id] = self
        self.speech_min = max_datetime

        for d in dqs:
            if not d.speech_min:
                continue
            # assumes d.path holds this node's ancestor ids -- TODO confirm
            for parent_id in reversed(d.path):
                parent = lookup[parent_id]
                if not parent.speech_min or d.speech_min < parent.speech_min:
                    parent.speech_min = d.speech_min

        # Set the speech_min to all the earliests
        for d in dqs:
            if d.speech_min:
                continue
            for parent_id in reversed(d.path):
                parent = lookup[parent_id]
                if parent.speech_min:
                    d.speech_min = parent.speech_min
                    break

        return sorted(dqs, key=lambda s: getattr(s, 'speech_min', max_datetime))
Example #29
0
    def backward_pass(self, accum_grad):
        """Back-propagation-through-time pass for the recurrent layer.

        ``accum_grad`` is the gradient of the loss w.r.t. this layer's
        output; it is 3-D with the second dimension being timesteps
        (presumably (batch, timesteps, units) -- confirm).  Accumulates
        gradients for U, V and W, truncating the backward recursion at
        ``self.bptt_trunc`` steps, updates the weights via their
        optimizers, and returns the gradient w.r.t. the layer input
        (same shape as ``accum_grad``).
        """
        _, timesteps, _ = accum_grad.shape

        # Variables where we save the accumulated gradient w.r.t each parameter
        grad_U = np.zeros_like(self.U)
        grad_V = np.zeros_like(self.V)
        grad_W = np.zeros_like(self.W)
        # The gradient w.r.t the layer input.
        # Will be passed on to the previous layer in the network
        accum_grad_next = np.zeros_like(accum_grad)

        # Back Propagation Through Time
        for t in reversed(range(timesteps)):
            # Update gradient w.r.t V at time step t
            grad_V += accum_grad[:, t].T.dot(self.states[:, t])
            # Calculate the gradient w.r.t the state input
            grad_wrt_state = accum_grad[:, t].dot(self.V) * self.activation.gradient(self.state_input[:, t])
            # Gradient w.r.t the layer input
            accum_grad_next[:, t] = grad_wrt_state.dot(self.U)
            # Update gradient w.r.t W and U by backprop. from time step t for at most
            # self.bptt_trunc number of time steps
            for t_ in reversed(np.arange(max(0, t - self.bptt_trunc), t+1)):
                grad_U += grad_wrt_state.T.dot(self.layer_input[:, t_])
                # NOTE(review): at t_ == 0 this indexes states[:, -1];
                # it relies on that slot holding the initial state --
                # confirm.
                grad_W += grad_wrt_state.T.dot(self.states[:, t_-1])
                # Calculate gradient w.r.t previous state
                grad_wrt_state = grad_wrt_state.dot(self.W) * self.activation.gradient(self.state_input[:, t_-1])

        # Update weights
        self.U = self.U_opt.update(self.U, grad_U)
        self.V = self.V_opt.update(self.V, grad_V)
        self.W = self.W_opt.update(self.W, grad_W)

        return accum_grad_next
Example #30
0
def test_dead_default():
	'''
		You may now omit a transition, or even an entire state, from the map. This
		affects every usage of `fsm.map`.
	'''
	# C-style block-comment machine: "/" "*" ... "*" "/"; state 5 is
	# present in `states` but deliberately absent from `map`.
	blockquote = fsm(
		alphabet = {"/", "*", anything_else},
		states = {0, 1, 2, 3, 4, 5},
		initial = 0,
		finals = {4},
		map = {
				0 : {"/" : 1},
				1 : {"*" : 2},
				2 : {"/" : 2, anything_else : 2, "*" : 3},
				3 : {"/" : 4, anything_else : 2, "*" : 3},
		}
	)
	assert blockquote.accepts(["/", "*", "whatever", "*", "/"])
	assert not blockquote.accepts(["*", "*", "whatever", "*", "/"])
	str(blockquote) # test stringification
	# Exercise the operators on a machine with omitted transitions;
	# none of these should raise.
	blockquote | blockquote
	blockquote & blockquote
	blockquote ^ blockquote
	reversed(blockquote)
	assert not blockquote.everythingbut().accepts(["/", "*", "whatever", "*", "/"])
	assert blockquote.everythingbut().accepts(["*"]) # deliberately seek oblivion
	assert blockquote.islive(3)
	assert blockquote.islive(4)
	assert not blockquote.islive(5)
	gen = blockquote.strings()
	assert next(gen) == ["/", "*", "*", "/"]