Example 1
def test_dead_default():
	'''
		You may now omit a transition, or even an entire state, from the map. This
		affects every usage of `fsm.map`.
	'''
	blockquote = fsm(
		alphabet = {"/", "*", anything_else},
		states = {0, 1, 2, 3, 4, 5},
		initial = 0,
		finals = {4},
		map = {
				0 : {"/" : 1},
				1 : {"*" : 2},
				2 : {"/" : 2, anything_else : 2, "*" : 3},
				3 : {"/" : 4, anything_else : 2, "*" : 3},
		}
	)
	assert blockquote.accepts(["/", "*", "whatever", "*", "/"])
	assert not blockquote.accepts(["*", "*", "whatever", "*", "/"])
	str(blockquote) # test stringification
	blockquote | blockquote
	blockquote & blockquote
	blockquote ^ blockquote
	reversed(blockquote)
	assert not blockquote.everythingbut().accepts(["/", "*", "whatever", "*", "/"])
	assert blockquote.everythingbut().accepts(["*"]) # deliberately seek oblivion
	assert blockquote.islive(3)
	assert blockquote.islive(4)
	assert not blockquote.islive(5)
	gen = blockquote.strings()
	assert next(gen) == ["/", "*", "*", "/"]
Example 2
    def backward_pass(self, accum_grad):
        _, timesteps, _ = accum_grad.shape

        # Variables where we save the accumulated gradient w.r.t each parameter
        grad_U = np.zeros_like(self.U)
        grad_V = np.zeros_like(self.V)
        grad_W = np.zeros_like(self.W)
        # The gradient w.r.t the layer input.
        # Will be passed on to the previous layer in the network
        accum_grad_next = np.zeros_like(accum_grad)

        # Back Propagation Through Time
        for t in reversed(range(timesteps)):
            # Update gradient w.r.t V at time step t
            grad_V += accum_grad[:, t].T.dot(self.states[:, t])
            # Calculate the gradient w.r.t the state input
            grad_wrt_state = accum_grad[:, t].dot(self.V) * self.activation.gradient(self.state_input[:, t])
            # Gradient w.r.t the layer input
            accum_grad_next[:, t] = grad_wrt_state.dot(self.U)
            # Update gradient w.r.t W and U by backpropagating from time step t
            # for at most self.bptt_trunc time steps
            for t_ in reversed(np.arange(max(0, t - self.bptt_trunc), t+1)):
                grad_U += grad_wrt_state.T.dot(self.layer_input[:, t_])
                grad_W += grad_wrt_state.T.dot(self.states[:, t_-1])
                # Calculate gradient w.r.t previous state
                grad_wrt_state = grad_wrt_state.dot(self.W) * self.activation.gradient(self.state_input[:, t_-1])

        # Update weights
        self.U = self.U_opt.update(self.U, grad_U)
        self.V = self.V_opt.update(self.V, grad_V)
        self.W = self.W_opt.update(self.W, grad_W)

        return accum_grad_next
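A minimal, self-contained sketch of the truncation pattern above (the values here are illustrative, not from the class): the outer loop walks time in reverse, and for each step the inner loop revisits at most the last few steps.

# Print the window of steps each outer iteration backpropagates through.
timesteps, trunc = 5, 2
for t in reversed(range(timesteps)):
    window = list(reversed(range(max(0, t - trunc), t + 1)))
    print(t, window)
# 4 [4, 3, 2]
# 3 [3, 2, 1]
# 2 [2, 1, 0]
# 1 [1, 0]
# 0 [0]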
Example 3
    def test_create_changes_overridden_project(self):
        def custom_split_file(path):
            f = split_file(path)
            if f:
                f["project"] = "overridden-project"
                f["repository"] = "overridden-repository"
                f["codebase"] = "overridden-codebase"
            return f

        base = ("file:///home/warner/stuff/Projects/Buildbot/trees/" +
                "svnpoller/_trial_temp/test_vc/repositories/SVN-Repository/sample")
        s = self.attachSVNPoller(base, split_file=custom_split_file)
        s._prefix = "sample"

        logentries = dict(
            zip(range(1, 7), reversed(make_logentry_elements(6))))
        changes = s.create_changes(reversed([logentries[3], logentries[2]]))
        self.assertEqual(len(changes), 2)

        # note that parsing occurs in reverse
        self.assertEqual(changes[0]['branch'], "branch")
        self.assertEqual(changes[0]['revision'], '2')
        self.assertEqual(changes[0]['project'], "overridden-project")
        self.assertEqual(changes[0]['repository'], "overridden-repository")
        self.assertEqual(changes[0]['codebase'], "overridden-codebase")

        self.assertEqual(changes[1]['branch'], "branch")
        self.assertEqual(changes[1]['files'], [u'çmain.c'])
        self.assertEqual(changes[1]['revision'], '3')
        self.assertEqual(changes[1]['project'], "overridden-project")
        self.assertEqual(changes[1]['repository'], "overridden-repository")
        self.assertEqual(changes[1]['codebase'], "overridden-codebase")
Example 4
def reference_number(data):
    chk = -sum(int(x) * [7, 3, 1][i % 3]
               for i, x in enumerate(data[::-1])) % 10
    ref = '%s%d' % (data, chk)
    return ' '.join(
        reversed(
            [''.join(reversed(x)) for x in grouper(5, reversed(ref), '')]))
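`grouper` is not defined in the snippet; a plausible stand-in with the (n, iterable, fillvalue) argument order used above is the classic itertools recipe:

from itertools import zip_longest  # izip_longest on Python 2

def grouper(n, iterable, fillvalue=None):
    # grouper(3, 'ABCDEFG', 'x') -> ('A','B','C') ('D','E','F') ('G','x','x')
    args = [iter(iterable)] * n
    return zip_longest(*args, fillvalue=fillvalue)

Under this stand-in, the digits are regrouped in blocks of five counted from the right; e.g. reference_number('12345') yields '1 23453' (check digit 3 appended).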
Example 5
    def wrap(f):
        @wraps(f)
        def inner_f(*args):
            val = f(*args)
            if nullable and val == ffi.NULL:
                return None
            elif returns_string:
                return ffi.string(val)
            return val

        # this insanity inserts a formatted argspec string
        # into the function's docstring, so that sphinx
        # gets the right args instead of just the wrapper args
        args, varargs, varkw, defaults = inspect.getargspec(f)
        defaults = () if defaults is None else defaults
        defaults = ["\"{}\"".format(a) if type(a) == str else a for a in defaults]
        l = ["{}={}".format(arg, defaults[(idx+1)*-1])
             if len(defaults)-1 >= idx else
             arg for idx, arg in enumerate(reversed(list(args)))]
        if varargs:
            l.append('*' + varargs)
        if varkw:
            l.append('**' + varkw)
        doc = "{}({})\n\nC: ``{}``\n\n{}".format(f.__name__, ', '.join(reversed(l)), decl, f.__doc__)
        inner_f.__doc__ = doc
        return inner_f
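The reversed-walk pairing of arguments with defaults is worth seeing in isolation: defaults belong to the *last* parameters, so enumerating args back-to-front lines each default up with its argument (toy values, not from the wrapped functions):

args, defaults = ['a', 'b', 'c'], (1, 2)
paired = ["{}={}".format(arg, defaults[(idx + 1) * -1])
          if len(defaults) - 1 >= idx else arg
          for idx, arg in enumerate(reversed(args))]
print(list(reversed(paired)))  # ['a', 'b=1', 'c=2']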
Example 6
 def test_reversed_simple_sequences(self):
     l = range(5)
     rev = reversed(l)
     assert list(rev) == [4, 3, 2, 1, 0]
     assert list(l.__reversed__()) == [4, 3, 2, 1, 0]
     s = "abcd"
     assert list(reversed(s)) == ['d', 'c', 'b', 'a']
Example 7
 def validate_anyOf(self, *args, **kwargs):
     for error in super(SchemaValidator, self).validate_anyOf(*args, **kwargs):
         # Split the suberrors up by which subschema they are from
         subschema_errors = defaultdict(list)
         for sube in error.context:
             subschema_errors[sube.schema_path[0]].append(sube)
         # Find the subschemas that did not have a 'type' error validating the instance at this path
         no_type_errors = dict(subschema_errors)
         valid_types = set()
         for i, errors in subschema_errors.iteritems():
             for e in errors:
                 if e.validator == 'type' and not e.path:
                     # Remove from the no_type_errors dict
                     no_type_errors.pop(i, None)
                     # Add the valid types to the list of all valid types
                     if self.is_type(e.validator_value, 'string'):
                         valid_types.add(e.validator_value)
                     else:
                         valid_types.update(e.validator_value)
         if not no_type_errors:
             # If all of the branches had a 'type' error, create our own virtual type error with all possible types
             for e in self.descend(error.instance, {'type': valid_types}):
                 yield e
         elif len(no_type_errors) == 1:
             # If one of the possible schemas did not have a 'type' error, assume that is the intended one and issue
             # all errors from that subschema
             for e in no_type_errors.values()[0]:
                 e.schema_path.extendleft(reversed(error.schema_path))
                 e.path.extendleft(reversed(error.path))
                 yield e
         else:
             yield error
Example 8
 def train(self, inp, out, training_weight=1.):
     inp = np.mat(inp).T
     out = np.mat(out).T
     deriv = []
     val = inp
     vals = [val]
     # forward calculation of activations and derivatives
     for weight,bias in self.__weights:
         val = weight*val
         val += bias
         deriv.append(self.__derivative(val))
         vals.append(self.__activation(val))
     deriv = iter(reversed(deriv))
     weights = iter(reversed(self.__weights))
     errs = []
     errs.append(np.multiply(vals[-1]-out, next(deriv)))
     # backwards propagation of errors
     for (w,b),d in zip(weights, deriv):
         errs.append(np.multiply(np.dot(w.T, errs[-1]), d))
     weights = iter(self.__weights)
     for (w,b),v,e in zip(\
             self.__weights,\
             vals, reversed(errs)):
         e *= self.__learning_rate*training_weight
         w -= e*v.T
         b -= e
     tmp = vals[-1]-out
     return np.dot(tmp[0].T,tmp[0])*.5*training_weight
Example 9
def _balance_tags(tags):
  """Throw out any close tags without an open tag.

  If {@code <table>} is used for formatting, embedded HTML shouldn't be able
  to use a mismatched {@code </table>} to break page layout.

  Args:
    tags: The list of all tags in this text.

  Returns:
    A string containing zero or more closed tags that close all elements that
    are opened in tags but not closed.
  """
  open_tags = []
  for i, tag in enumerate(tags):
    if tag[1] == '/':
      index = len(open_tags) - 1
      while index >= 0 and open_tags[index] != tag:
        index -= 1

      if index < 0:
        tags[i] = ''  # Drop close tag.
      else:
        tags[i] = ''.join(reversed(open_tags[index:]))
        del open_tags[index:]

    elif not _HTML5_VOID_ELEMENTS_RE.match(tag):
      open_tags.append('</' + tag[1:])

  return ''.join(reversed(open_tags))
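A hedged usage sketch (the void-element regex here is a stand-in, the real module defines its own, and _balance_tags is assumed to be in scope):

import re
_HTML5_VOID_ELEMENTS_RE = re.compile(r'^<(?:br|hr|img|input)\b', re.IGNORECASE)

tags = ['<b>', '<i>', '</b>', '</table>']
trailing = _balance_tags(tags)
print(tags)      # ['<b>', '<i>', '</i></b>', ''] -- the mismatched </table> is dropped
print(trailing)  # ''  (nothing left open)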
Example 10
    def test_walk_versions_all_true(self, _migrate_up, _migrate_down):
        self.REPOSITORY.latest = 20
        self.migration_api.db_version.return_value = self.INIT_VERSION

        self._walk_versions(self.engine, snake_walk=True, downgrade=True)

        versions = range(self.INIT_VERSION + 1, self.REPOSITORY.latest + 1)
        upgraded = []
        for v in versions:
            upgraded.append(mock.call(self.engine, v, with_data=True))
            upgraded.append(mock.call(self.engine, v))
        upgraded.extend(
            [mock.call(self.engine, v) for v in reversed(versions)]
        )
        self.assertEqual(upgraded, self._migrate_up.call_args_list)

        downgraded_1 = [
            mock.call(self.engine, v - 1, with_data=True) for v in versions
        ]
        downgraded_2 = []
        for v in reversed(versions):
            downgraded_2.append(mock.call(self.engine, v - 1))
            downgraded_2.append(mock.call(self.engine, v - 1))
        downgraded = downgraded_1 + downgraded_2
        self.assertEqual(self._migrate_down.call_args_list, downgraded)
Example 11
    def run(self):

        self.correct_octave_errors_by_chunks()
        self.remove_extreme_values()

        self.correct_jumps()
        self.pitch = list(reversed(self.pitch))
        self.correct_jumps()
        self.pitch = list(reversed(self.pitch))

        self.filter_noise_region()

        self.correct_oct_error()
        self.pitch = list(reversed(self.pitch))
        self.correct_oct_error()
        self.pitch = list(reversed(self.pitch))

        self.correct_octave_errors_by_chunks()
        self.filter_chunks_by_energy(chunk_limit=60)

        if self.out:
            self.data['pitch'] = self.pitch
            with open(self.fname[:-5] + "_filtered.json", 'w') as f:
                json.dump(self.data, f)

        return self.pitch
Example 12
    def release_linked(self, event):
        """Called when mousebutton released over the linked list.
            Removes items from the first list and adds them to the other
        """
        # print self.curselection()
        ot = self.otherDropList
        #Go backwards so as to not delete wrong item when the list resizes
        l = self.curselection()

        items = []
        varl = []
        for d in reversed(l):
            # print d,type(d)
            items.append( self.get(d) )
            varl.append( self.variables[int(d)] )
            self.delete(d)
            self.variables.pop(int(d))

        # print items
        y = event.y_root-ot.winfo_rooty()
        i = ot.nearest(y)

        for item in reversed(items):
            if ot.nearest(y) == -1:
                ot.insert("end",item,varl.pop())
                i = ot.nearest(y)
                continue
            
            # print i
            bbox = ot.bbox(i)
         if bbox is None or bbox[3]/2.0 > y:
                ot.insert(i,item,varl.pop())
            else:
                ot.insert(i+1,item,varl.pop())
            i += 1
Example 13
    def show_registers(self):
        """Print the available registers"""
        i = -1
        print('1D histograms')
        print('{: <3} {: ^40} {: ^5} {: ^8} {: ^8}'.\
                format('i', 'Title', 'Bin', 'Norm', 'Active'))
        print('-' * 79)

        for p in reversed(Experiment.plots):
            print('{: >3} {: <40} {: >5} {: >5.2e} {: >5}'.\
                    format(i, p.histogram.title[:40], p.bin_size,
                           p.norm, p.active))
            i -= 1
        print()

        i = -1
        print('2D histograms')
        print('{: <3} {: ^40} {: ^5} {: ^8} {: ^8}'.\
                format('i', 'Title', 'Bin', 'Norm', 'Active'))
        print('-' * 79)

        for p in reversed(Experiment.maps):
            print('{: >3} {: <40} {: >5} {: >5.2e} {: >5}'.\
                    format(i, p.histogram.title[:40], p.bin_size,
                           p.norm, p.active))
            i -= 1
        print()
Example 14
	def align(self, dataX, dataXY, reverse=False):
		if reverse:
			self.aligned = list(reversed(np.interp(list(reversed(dataX)), list(reversed(dataXY[0])), list(reversed(dataXY[1])))))
		else:
			self.aligned = list(np.interp(dataX, dataXY[0], dataXY[1]))
		
		return self.aligned
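np.interp requires its sample points to be increasing, so the reverse=True branch flips every array before interpolating and flips the result back; a quick check with a decreasing series (toy data):

import numpy as np
xp, fp = [3.0, 2.0, 1.0], [30.0, 20.0, 10.0]   # decreasing sample points
x = [2.5, 1.5]
out = list(reversed(np.interp(list(reversed(x)), list(reversed(xp)), list(reversed(fp)))))
print(out)  # [25.0, 15.0]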
Example 15
 def filterRevisions(self, revisions, filter=None, max_revs=None):
     """Filter a set of revisions based on any number of filter criteria.
     If specified, filter should be a dict with keys corresponding to
     revision attributes, and values of 1+ strings"""
     if not filter:
         if max_revs is None:
             for rev in reversed(revisions):
                 yield DevRevision(rev)
         else:
             for index,rev in enumerate(reversed(revisions)):
                 if index >= max_revs:
                     break
                 yield DevRevision(rev)
     else:
         for index, rev in enumerate(reversed(revisions)):
             if max_revs and index >= max_revs:
                 break
             try:
                 for field,acceptable in filter.iteritems():
                     if not hasattr(rev, field):
                         raise DoesNotPassFilter
                     if type(acceptable) in (str, unicode):
                         if getattr(rev, field) != acceptable:
                             raise DoesNotPassFilter
                     elif type(acceptable) in (list, tuple, set):
                         if getattr(rev, field) not in acceptable:
                             raise DoesNotPassFilter
                 yield DevRevision(rev)
             except DoesNotPassFilter:
                 pass
Example 16
    def _get_descendants_by_speech(self, **kwargs):
        dqs = self._get_descendants(include_min=True, **kwargs)

        max_datetime = datetime.datetime(datetime.MAXYEAR, 12, 31)

        lookup = dict((x.id, x) for x in dqs)
        lookup[self.id] = self
        self.speech_min = max_datetime

        for d in dqs:
            if not d.speech_min:
                continue
            for parent_id in reversed(d.path):
                parent = lookup[parent_id]
                if not parent.speech_min or d.speech_min < parent.speech_min:
                    parent.speech_min = d.speech_min

        # Set the speech_min to all the earliests
        for d in dqs:
            if d.speech_min:
                continue
            for parent_id in reversed(d.path):
                parent = lookup[parent_id]
                if parent.speech_min:
                    d.speech_min = parent.speech_min
                    break

        return sorted(dqs, key=lambda s: getattr(s, 'speech_min', max_datetime))
Example 17
    def _do_get_provider_count_and_objs(self, **kw):
        """
        Used to list only the projects that meet certain conditions,
        according to certain permissions.
        """
        if TieneAlgunPermiso(tipo = u"Fase", recurso = u"Linea Base"):
            proyectos = DBSession.query(Proyecto).filter(Proyecto.estado != \
                        u"Nuevo").order_by(Proyecto.id).all()

            for proyecto in reversed(proyectos):
                buscado = self.buscado in str(proyecto.nro_fases) or \
                          self.buscado in str(proyecto.fecha_inicio) or \
                          self.buscado in str(proyecto.fecha_fin) or \
                          self.buscado in proyecto.lider.nombre_usuario or \
                          self.buscado in proyecto.nombre or \
                          self.buscado in proyecto.descripcion or \
                          self.buscado in proyecto.estado
                if not buscado: proyectos.remove(proyecto)

            for proyecto in reversed(proyectos):
                if not (TieneAlgunPermiso(tipo = u"Fase", recurso = \
                        u"Linea Base", id_proyecto = proyecto.id) \
                        .is_met(request.environ) and self.fase_apta(proyecto)): 
                    proyectos.remove(proyecto)
                
        else: proyectos = list()       
        return len(proyectos), proyectos 
Example 18
 def setZeroes(self, matrix):
     """
     :type matrix: List[List[int]]
     :rtype: void Do not return anything, modify matrix in-place instead.
     """
     # O(1) 176ms
     rows, cols = len(matrix), len(matrix[0])
      col0 = False # whether the first column itself must be zeroed
      # record the zero state in the first row and first column
     for i in range(0, rows):
         if matrix[i][0] == 0:
             col0 = True
         for j in range(1, cols):
             if matrix[i][j] == 0:
                 matrix[i][0] = matrix[0][j] = 0
      # zero out marked rows/columns; iterate in reverse so the marker
      # row (row 0) and marker column are consumed last
     for i in reversed(range(rows)):
         for j in reversed(range(1, cols)):
             if matrix[i][0] == 0 or matrix[0][j] == 0:
                 matrix[i][j] = 0
         if col0: 
             matrix[i][0] = 0
     
     # # O(m+n) 216ms
     # m, n = [], []
     # for i in range(len(matrix)):
     #     for j in range(len(matrix[i])):
     #         if matrix[i][j] == 0:
     #             m.append(i)
     #             n.append(j)
     # for i in range(len(matrix)):
     #     for j in range(len(matrix[i])):
     #         if i in m or j in n:
     #             matrix[i][j] = 0
     return matrix
Example 19
 def scanner_network(self,gateway):
     scan = ''
     config_gateway = gateway.split('.')
     del config_gateway[-1]
     for i in config_gateway:
         scan += str(i) + '.'
     gateway = scan
     ranger = str(self.ip_range.text()).split('-')
     jobs = []
     manager = Manager()
     on_ips = manager.dict()
     for n in xrange(int(ranger[0]),int(ranger[1])):
         ip='%s{0}'.format(n)%(gateway)
         p = Process(target=self.working,args=(ip,on_ips))
         jobs.append(p)
         p.start()
     for i in jobs: i.join()
     for i in on_ips.values():
         Headers = []
         n = i.split('|')
         self.data['IPaddress'].append(n[0])
         self.data['MacAddress'].append(n[1])
         self.data['Hostname'].append('<unknown>')
         for n, key in enumerate(reversed(self.data.keys())):
             Headers.append(key)
             for m, item in enumerate(self.data[key]):
                 item = QTableWidgetItem(item)
                 item.setTextAlignment(Qt.AlignVCenter | Qt.AlignCenter)
                 self.tables.setItem(m, n, item)
     Headers = []
     for key in reversed(self.data.keys()):
         Headers.append(key)
     self.tables.setHorizontalHeaderLabels(Headers)
Example 20
 def instructions_reversed(self):
     """Iterate in reverse order over all instructions in the module."""
     for function in reversed(self.functions[:]):
         for inst in function.instructions_reversed():
             yield inst
     for inst in reversed(self.global_insts[:]):
         yield inst
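The [:] slices hand reversed() a snapshot, so callers may delete functions or instructions while consuming the generator; a minimal illustration of the pattern:

items = [1, 2, 3]
for x in reversed(items[:]):  # iterate over a copy
    items.remove(x)           # mutating the original is now safe
print(items)  # []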
Example 21
File: key.py Project: JoonyLi/veusz
 def _layout(self, entries, totallines):
     """Layout the items, trying to keep the box as small as possible
     while still filling the columns"""
     
     maxcols = self.settings.columns
     numcols = min(maxcols, max(len(entries), 1))
     
     if not entries:
         return (list(), (0, 0))
     
     # start with evenly-sized rows and expand to fit
     numrows = totallines / numcols
     layout = []
     
     while not layout:
         # try to do a first cut of the layout, and expand the box until
         # everything fits
         (layout, colstats, newrows) = self._layoutChunk(entries, (0, 0), (numrows, numcols))
         if not layout:
             numrows = newrows
         
     # ok, we've got a layout where everything fits, now pull items right
     # to fill the remaining columns, if need be
     while colstats[-1] == 0:
         # shift 1 item to the right, up to the first column that has
         # excess items
         meanoccupation = max(1, sum(colstats)/float(numcols))
         
         # loop until we find a victim item which can be safely moved
         victimcol = numcols
         while True:
             # find the right-most column with excess occupation number
             for i in reversed(xrange(victimcol)):
                 if colstats[i] > meanoccupation:
                     victimcol = i
                     break
             
             # find the last item in the victim column
             victim = 0
             for i in reversed(xrange(len(layout))):
                 if layout[i][2] == victimcol:
                     victim = i
                     break
             
             # try to relayout with the victim item shoved to the next column
             (newlayout, newcolstats, newrows) = self._layoutChunk(entries[victim:],
                                                     (0, victimcol+1), (numrows, numcols))
             if newlayout:
                 # the relayout worked, so accept it
                 layout = layout[0:victim] + newlayout
                 colstats[victimcol] -= 1
                 del colstats[victimcol+1:]
                 colstats += newcolstats[victimcol+1:]
                 break
             
             # if we've run out of potential victims, just return what we have
             if victimcol == 0:
                 return (layout, (numrows, numcols))
     
     return (layout, (numrows, numcols))
Example 22
    def get_tail(self, n=10, raw=True, output=False, include_latest=False):
        """Get the last n lines from the history database.

        Parameters
        ----------
        n : int
          The number of lines to get
        raw, output : bool
          See :meth:`get_range`
        include_latest : bool
          If False (default), n+1 lines are fetched, and the latest one
          is discarded. This is intended for when the function is called
          by a user command, so that the invoking command itself is not
          returned.

        Returns
        -------
        Tuples as :meth:`get_range`
        """
        self.writeout_cache()
        if not include_latest:
            n += 1
        cur = self._run_sql("ORDER BY session DESC, line DESC LIMIT ?", (n,), raw=raw, output=output)
        if not include_latest:
            return reversed(list(cur)[1:])
        return reversed(list(cur))
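The fetch-n+1-and-drop-the-newest trick in isolation: the SQL above returns rows newest-first, so slicing off the first row and reversing yields the last n lines oldest-first (toy rows):

rows = ['cmd5', 'cmd4', 'cmd3']     # newest-first, n+1 rows
print(list(reversed(rows[1:])))     # ['cmd3', 'cmd4'] -- latest discarded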
Example 23
    def execute(self, slot, subindex, roi, result):
        if numpy.prod(roi.stop - roi.start) > 1e9:
            logger.error("Requesting a very large volume from DVID: {}\n"\
                         "Is that really what you meant to do?"
                         .format( roi ))
            
        # TODO: Modify accessor implementation to accept a pre-allocated array.

# FIXME: Disabled throttling for now.  Need a better heuristic or explicit setting.
#         # For "heavy" requests, we'll use the throttled accessor
#         HEAVY_REQ_SIZE = 256*256*10
#         if numpy.prod(result.shape) > HEAVY_REQ_SIZE:
#             accessor = self._throttled_accessor
#         else:
#             accessor = self._default_accessor

        accessor = self._default_accessor # FIXME (see above)
        
        if self._transpose_axes:
            roi_start = tuple(reversed(roi.start))
            roi_stop = tuple(reversed(roi.stop))
            result[:] = accessor.get_ndarray(roi_start, roi_stop).transpose()
        else:
            result[:] = accessor.get_ndarray(roi.start, roi.stop)
        return result
Example 24
def list_changes(upstream, feature, paths=[]):
    feature_revs = tuple(list_revs(upstream, feature, paths=paths))
    upstream_revs = tuple(list_revs(feature, upstream, paths=paths))

    feature_cids = dict([
        (c.change_id, c) for c in feature_revs if c.change_id is not None ])
    upstream_cids = dict([
        (c.change_id, c) for c in upstream_revs if c.change_id is not None ])

    incoming = filter(
        lambda r: r.change_id and r.change_id not in feature_cids,
        reversed(upstream_revs))
    outgoing = filter(
        lambda r: r.change_id and r.change_id not in upstream_cids,
        reversed(feature_revs))
    common = filter(
        lambda r: r.change_id in upstream_cids,
        reversed(feature_revs))
    upstream_unknown = filter(
        lambda r: r.change_id is None,
        reversed(upstream_revs))
    feature_unknown = filter(
        lambda r: r.change_id is None,
        reversed(feature_revs))

    return incoming, outgoing, common, upstream_unknown, feature_unknown
Example 25
 def try_to_bump(self, left, right):
     left_edge = []
     # Instead of bumping left and right, one has to bump all [..., left] and [right, ...] children.
     # It may be that the child just before the left had a branch that overlaps with the right side branches.
     for child in self.child_blocks:
         left_edge += child.right_edges
         if child is left:
             break
     right_edge = []
     for child in reversed(self.child_blocks):
         right_edge += child.left_edges
         if child is right:
             break
     adjust = prefs.edge_width
     while (not bump_polygons(left_edge, right_edge, adjust)) and adjust < 1000:
         adjust += prefs.edge_width
     amount = adjust - 3 * prefs.edge_width
     if amount > 0:
         a2 = amount / 2
         self.width -= amount
         self.left += a2
         for child in self.child_blocks:
             child.move(a2, 0)
             if child is left:
                 break
         for child in reversed(self.child_blocks):
             child.move(-a2, 0)
             if child is right:
                 break
Example 26
    def _FindNodeValue(self, key):
        # |found_node_list| will be all the nodes that |key| has been found in.
        # |checked_node_set| are those that have been checked.
        info = self._value_info.get(key)
        if info is None:
            info = ([], set())
            self._value_info[key] = info
        found_node_list, checked_node_set = info

        # Check all the nodes not yet checked for |key|.
        newly_found = []
        for node in reversed(self._nodes):
            if node in checked_node_set:
                break
            value = node.Get(key)
            if value is not None:
                newly_found.append(node)
            checked_node_set.add(node)

        # The nodes will have been found in reverse stack order. After extending
        # the found nodes, the freshest value will be at the tip of the stack.
        found_node_list.extend(reversed(newly_found))
        if not found_node_list:
            return None

        return found_node_list[-1]._value.get(key)
Example 27
    def new_enum_type(self, name, enumerators, enumvalues, CTypesInt):
        assert isinstance(name, str)
        reverse_mapping = dict(zip(reversed(enumvalues),
                                   reversed(enumerators)))
        #
        class CTypesEnum(CTypesInt):
            __slots__ = []
            _reftypename = '%s &' % name

            def _get_own_repr(self):
                value = self._value
                try:
                    return '%d: %s' % (value, reverse_mapping[value])
                except KeyError:
                    return str(value)

            def _to_string(self, maxlen):
                value = self._value
                try:
                    return reverse_mapping[value]
                except KeyError:
                    return str(value)
        #
        CTypesEnum._fix_class()
        return CTypesEnum
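Reversing both sequences before zipping decides which name wins when enumerators share a value: the first-declared name is inserted last and therefore overwrites any alias (toy enum):

enumerators = ('RED', 'CRIMSON', 'BLUE')
enumvalues = (0, 0, 1)
print(dict(zip(reversed(enumvalues), reversed(enumerators))))
# {1: 'BLUE', 0: 'RED'} -- a plain zip would map 0 to 'CRIMSON'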
Example 28
def _get_flatmirror_attrdict(cls, add, exclude, bases, dict_factory):
    attrdict = dict_factory()
    slotset = set()
    rev_mro = reversed(cls.mro())
    supercls = next(rev_mro)
    for basecls in reversed(bases):
        # skip some redundant base classes (typically: object)
        if issubclass(basecls, supercls):
            supercls = next(rev_mro)
        else:
            break
    while True:
        slots = getattr(supercls, '__slots__', ())
        excl = frozenset(slots).union(exclude)
        slotset.update(slots)
        for attrname, obj in supercls.__dict__.items():
            if attrname not in excl:
                attrdict[attrname] = obj
        if supercls is not cls:
            supercls = next(rev_mro)
        else:
            break
    if add is not None:
        attrdict.update(add)
    slots = attrdict.get('__slots__', None)
    if slots is not None:
        if isinstance(slots, basestring):
            slots = (slots,)
        slotset.update(slots)
        attrdict['__slots__'] = tuple(slotset)
    return attrdict
Example 29
def signature_argspec(f):
    from inspect import signature, Parameter, _empty

    try:
        if NO_ARGSPEC:
            sig = signature(f, follow_wrapped=False)
        else:
            sig = signature(f)
    except ValueError:
        raise TypeError("unsupported callable")
    args = list(
        k for k, v in sig.parameters.items() if v.kind in (Parameter.POSITIONAL_ONLY, Parameter.POSITIONAL_OR_KEYWORD)
    )
    varargs = None
    keywords = None
    for k, v in sig.parameters.items():
        if v.kind == Parameter.VAR_POSITIONAL:
            varargs = k
        elif v.kind == Parameter.VAR_KEYWORD:
            keywords = k
    defaults = []
    for a in reversed(args):
        default = sig.parameters[a].default
        if default is _empty:
            break
        else:
            defaults.append(default)
    if defaults:
        defaults = tuple(reversed(defaults))
    else:
        defaults = None
    return FakeArgSpec(args, varargs, keywords, defaults)
Example 30
 def uniquePathsWithObstacles(self, obstacleGrid):
     row = len(obstacleGrid)
     col = len(obstacleGrid[0])
     if obstacleGrid[row-1][col-1] == 1:
         return 0
     
     path = [[1 for x in xrange(col)] for x in xrange(row)]
     _i, _j = row, col
     
     for i in reversed(xrange(row)):
         if obstacleGrid[i][0] == 1:
             _i = i
     while _i < row:
         path[_i][0] = 0
         _i += 1
         
     for j in reversed(xrange(col)):
         if obstacleGrid[0][j] == 1:
             _j = j
     while _j < col:
         path[0][_j] = 0
         _j += 1
         
     for i in xrange(1, row):
         for j in xrange(1, col):
             if obstacleGrid[i-1][j] == 1 and obstacleGrid[i][j-1] == 1:
                 path[i][j] = 0
             elif obstacleGrid[i-1][j] == 1:
                 path[i][j] = path[i][j-1]
             elif obstacleGrid[i][j-1] == 1:
                 path[i][j] = path[i-1][j]
             else:
                 path[i][j] = path[i-1][j] + path[i][j-1]
                 
     return path[row-1][col-1]
Example 31
def _merge_minidump_response(data, response):
    data['platform'] = 'native'
    if response.get('crashed') is not None:
        data['level'] = 'fatal' if response['crashed'] else 'info'

    validate_and_set_timestamp(data, response.get('timestamp'))

    if response.get('system_info'):
        _merge_system_info(data, response['system_info'])

    sdk_info = get_sdk_from_event(data)

    images = []
    set_path(data, 'debug_meta', 'images', value=images)

    for complete_image in response['modules']:
        image = {}
        _merge_image(
            image, complete_image, sdk_info,
            lambda e: write_error(e, data)
        )
        images.append(image)

    # Extract the crash reason and infos
    data_exception = get_path(data, 'exception', 'values', 0)
    exc_value = (
        'Assertion Error: %s' % response.get('assertion')
        if response.get('assertion')
        else 'Fatal Error: %s' % response.get('crash_reason')
    )
    data_exception['value'] = exc_value
    data_exception['type'] = response.get('crash_reason')

    data_threads = []
    if response['stacktraces']:
        data['threads'] = {'values': data_threads}
    else:
        error = SymbolicationFailed(message='minidump has no thread list',
                                    type=EventError.NATIVE_SYMBOLICATOR_FAILED)
        write_error(error, data)

    for complete_stacktrace in response['stacktraces']:
        is_requesting = complete_stacktrace.get('is_requesting')
        thread_id = complete_stacktrace.get('thread_id')

        data_thread = {
            'id': thread_id,
            'crashed': is_requesting,
        }
        data_threads.append(data_thread)

        if is_requesting:
            data_exception['thread_id'] = thread_id
            data_stacktrace = data_exception.setdefault('stacktrace', {})
            data_stacktrace['frames'] = []
        else:
            data_thread['stacktrace'] = data_stacktrace = {'frames': []}

        if complete_stacktrace.get('registers'):
            data_stacktrace['registers'] = complete_stacktrace['registers']

        for complete_frame in reversed(complete_stacktrace['frames']):
            new_frame = {}
            _merge_frame(new_frame, complete_frame)
            data_stacktrace['frames'].append(new_frame)
Example 32
# -*- coding: utf-8 -*-
from pylab import *
from random import *

cases = 100
max_n = 8

inp = open("poly.in", "w")
outp = open("poly.out", "w")
inp.write("%d\n" % cases)
for cc in xrange(cases):
    n = randint(2,max_n)
    xs = array([(random()-0.5)*10 for i in xrange(n)])*(random()+1)
    coefs = poly(xs)
    inp.write("%d\n%s\n" % (n, " ".join(["%.12lf" % c for c in reversed(coefs)])))
    outp.write("%s\n" % " ".join(["%.12lf" % s for s in sorted(xs)]))
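poly() returns coefficients highest-degree first, so the reversed() writes them constant-term first; for instance, for roots {1, 2}:

import numpy as np
coefs = np.poly([1.0, 2.0])   # x^2 - 3x + 2 -> [ 1., -3., 2.]
print(list(reversed(coefs)))  # [2.0, -3.0, 1.0]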

Example 33
# Name: Hunter
# Date: 6/13/2017
"""
proj04

Asks the user for a string and prints out whether or not the string is a palindrome.

"""

user_input1 = raw_input(
    "Enter a word, and I will tell you if it is a palindrome! : ")
print user_input1

reverse = reversed(user_input1)

stringList = []
for letter in user_input1:
    stringList.append(letter)

print stringList

print user_input1

if user_input1 == ''.join(reverse):
    print "This word is a palindrome!"
else:
    print "This word is not a palindrome :("
Example 34
 def backward(self,e_y):
   self.e_ys[-1] = e_y
   for (i, l) in reversed(list(enumerate(self.layers))):
     self.e_ys[i] = l.backward(self.e_ys[i+1])
   return self.e_ys[0]
Example 35
# -*- coding: utf-8 -*-
# generated from catkin/cmake/template/__init__.py.in
# keep symbol table as clean as possible by deleting all unnecessary symbols

from os import path as os_path
from sys import path as sys_path

from pkgutil import extend_path

__extended_path = "/home/vincent/catkin_ws/src/navigation/base_local_planner/src".split(
    ";")
for p in reversed(__extended_path):
    sys_path.insert(0, p)
    del p
del sys_path

__path__ = extend_path(__path__, __name__)
del extend_path

__execfiles = []
for p in __extended_path:
    src_init_file = os_path.join(p, __name__ + '.py')
    if os_path.isfile(src_init_file):
        __execfiles.append(src_init_file)
    else:
        src_init_file = os_path.join(p, __name__, '__init__.py')
        if os_path.isfile(src_init_file):
            __execfiles.append(src_init_file)
    del src_init_file
    del p
del os_path
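Inserting at index 0 while iterating in reverse keeps the extended paths in their original order, ahead of whatever was already on the path; the pattern in miniature:

paths = ['a', 'b', 'c']
search = ['z']
for p in reversed(paths):
    search.insert(0, p)
print(search)  # ['a', 'b', 'c', 'z']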
Example 36
def addToken(conllu, sent_id, option, token_id, conllu_completo="", new_tokens=[], mergeSentencesId="", form=False):

    if form:
        if not os.path.isfile("../cgi-bin/tokenization.json"):
            tokenization = {}
            with open("../cgi-bin/tokenization.json", "w") as f:
                json.dump(tokenization, f)

        with open("../cgi-bin/tokenization.json") as f:
            tokenization = json.load(f)

    if not isinstance(conllu, estrutura_ud.Corpus):
        corpus = estrutura_ud.Corpus(recursivo=False, any_of_keywords=[re.escape("# sent_id = " + sent_id + "\n"), re.escape("# sent_id = " + mergeSentencesId + "\n")])
        corpus.load(conllu if not conllu_completo else conllu_completo)
    else:
        corpus = conllu

    if token_id == "left":
        token_id = corpus.sentences[sent_id].tokens[0].id
    elif token_id == "right":
        token_id = str(int(corpus.sentences[sent_id].tokens[-1].id)+1)

    if option in ["add", "addContraction"]:
        
        if not new_tokens:
            if not mergeSentencesId:
                novo_token = estrutura_ud.Token()
                novo_token.build("_\t_\t_\t_\t_\t_\t0\t_\t_\t_")
                new_tokens.append(novo_token)
            else:
                new_tokens = corpus.sentences[mergeSentencesId].tokens
        else:
            novo_token = estrutura_ud.Token()
            novo_token.build(new_tokens[0])
            new_tokens = [novo_token]

        last_id = ""
        for novo_token in reversed(new_tokens):
            if option == "add":
                novo_token.id = token_id if not '-' in novo_token.id else str(int(token_id)) + "-" + str(int(token_id)+int(novo_token.id.split("-")[1])-int(novo_token.id.split("-")[0]))
            elif option == "addContraction":
                novo_token.id = token_id + "-" + token_id
            if mergeSentencesId:
                if not last_id:
                    last_id = corpus.sentences[sent_id].tokens[-1].id
                if token_id == str(int(last_id)+1) and not '-' in novo_token.id:
                    novo_token.dephead = str(int(novo_token.dephead) + int(last_id))
            if not token_id in corpus.sentences[sent_id].map_token_id:
                corpus.sentences[sent_id].tokens.append(novo_token)
                corpus.sentences[sent_id].map_token_id[token_id] = len(corpus.sentences[sent_id].tokens) - 1
            else:
                corpus.sentences[sent_id].tokens.insert(corpus.sentences[sent_id].map_token_id[token_id], novo_token)
            if option == "add":
                for t, token in enumerate(corpus.sentences[sent_id].tokens):
                    if not '-' in novo_token.id:
                        if t > corpus.sentences[sent_id].map_token_id[token_id]:
                            token.id = str(int(token.id)+1) if not '-' in token.id else str(int(token.id.split("-")[0])+1) + "-" + str(int(token.id.split("-")[1])+1)
                            corpus.sentences[sent_id].map_token_id[token.id] = t
                for t, token in enumerate(corpus.sentences[sent_id].tokens):
                    if not mergeSentencesId and token.dephead not in ["0", "_"] and token.dephead in corpus.sentences[sent_id].map_token_id and token_id in corpus.sentences[sent_id].map_token_id and corpus.sentences[sent_id].map_token_id[token.dephead] >= corpus.sentences[sent_id].map_token_id[token_id]:
                        token.dephead = str(int(token.dephead)+1)

            if form:
                if not conllu in tokenization:
                    tokenization[conllu] = {}
                if not sent_id in tokenization[conllu]:
                    tokenization[conllu][sent_id] = []
                tokenization[conllu][sent_id].append({'option': option, 'token_id': token_id, 'new_token': [novo_token.to_str()]})

        if mergeSentencesId and token_id != str(int(last_id)+1):
            for t, token in enumerate(corpus.sentences[sent_id].tokens):
                if token.dephead not in ["0", "_"] and t > int(corpus.sentences[sent_id].map_token_id[new_tokens[-1].id]):
                    token.dephead = str(int(token.dephead) + int(new_tokens[-1].id))
                    
        if mergeSentencesId:
            if token_id == corpus.sentences[sent_id].tokens[0].id:
                corpus.sentences[sent_id].metadados['text'] = corpus.sentences[mergeSentencesId].text + ' ' + corpus.sentences[sent_id].text
            else:
                corpus.sentences[sent_id].metadados['text'] += ' ' + corpus.sentences[mergeSentencesId].text
            corpus.sentences.pop(mergeSentencesId)

    elif option in ["rm"]:
        if not '-' in token_id:
            for t, token in enumerate(corpus.sentences[sent_id].tokens):
                    if token_id in corpus.sentences[sent_id].map_token_id and t > corpus.sentences[sent_id].map_token_id[token_id]:
                        token.id = str(int(token.id)-1) if not '-' in token.id else str(int(token.id.split("-")[0])-1) + "-" + str(int(token.id.split("-")[1])-1)
                    if token.dephead not in ["_", "0"]:
                        if token.dephead in corpus.sentences[sent_id].map_token_id and token_id in corpus.sentences[sent_id].map_token_id and corpus.sentences[sent_id].map_token_id[token.dephead] > corpus.sentences[sent_id].map_token_id[token_id]:
                            token.dephead = str(int(token.dephead)-1)
        corpus.sentences[sent_id].tokens = [x for t, x in enumerate(corpus.sentences[sent_id].tokens) if t != corpus.sentences[sent_id].map_token_id[token_id]]

        if form:
            if not conllu in tokenization:
                tokenization[conllu] = {}
            if not sent_id in tokenization[conllu]:
                tokenization[conllu][sent_id] = []
            tokenization[conllu][sent_id].append({'option': option, 'token_id': token_id})

    if form:
        with open("../cgi-bin/tokenization.json", "w") as f:
            json.dump(tokenization, f)
        corpus.save(conllu + "_tokenization" if not conllu_completo else conllu_completo + "_tokenization")
        os.remove(conllu if not conllu_completo else conllu_completo)
        os.rename(conllu + "_tokenization" if not conllu_completo else conllu_completo + "_tokenization", conllu if not conllu_completo else conllu_completo)
    else:
        return corpus
Example 37
def process_payload(data):
    project = Project.objects.get_from_cache(id=data['project'])
    task_id_cache_key = task_id_cache_key_for_event(data)

    symbolicator = Symbolicator(
        project=project,
        task_id_cache_key=task_id_cache_key
    )

    stacktrace_infos = [
        stacktrace
        for stacktrace in find_stacktraces_in_data(data)
        if any(is_native_platform(x) for x in stacktrace.platforms)
    ]

    stacktraces = [
        {
            'registers': sinfo.stacktrace.get('registers') or {},
            'frames': [
                f for f in reversed(sinfo.stacktrace.get('frames') or ())
                if _handles_frame(data, f)
            ]
        }
        for sinfo in stacktrace_infos
    ]

    if not any(stacktrace['frames'] for stacktrace in stacktraces):
        return

    modules = native_images_from_data(data)
    signal = signal_from_data(data)

    response = symbolicator.process_payload(
        stacktraces=stacktraces,
        modules=modules,
        signal=signal,
    )

    if not _handle_response_status(data, response):
        return data

    assert len(modules) == len(response['modules']), (modules, response)

    sdk_info = get_sdk_from_event(data)

    for raw_image, complete_image in zip(modules, response['modules']):
        _merge_image(
            raw_image,
            complete_image,
            sdk_info,
            lambda e: write_error(
                e,
                data))

    assert len(stacktraces) == len(response['stacktraces']), (stacktraces, response)

    for sinfo, complete_stacktrace in zip(stacktrace_infos, response['stacktraces']):
        complete_frames_by_idx = {}
        for complete_frame in complete_stacktrace.get('frames') or ():
            complete_frames_by_idx \
                .setdefault(complete_frame['original_index'], []) \
                .append(complete_frame)

        new_frames = []
        native_frames_idx = 0

        for raw_frame in reversed(sinfo.stacktrace['frames']):
            if not _handles_frame(data, raw_frame):
                new_frames.append(raw_frame)
                continue

            for complete_frame in complete_frames_by_idx.get(native_frames_idx) or ():
                merged_frame = dict(raw_frame)
                _merge_frame(merged_frame, complete_frame)
                if merged_frame.get('package'):
                    raw_frame['package'] = merged_frame['package']
                new_frames.append(merged_frame)

            native_frames_idx += 1

        if sinfo.container is not None and native_frames_idx > 0:
            sinfo.container['raw_stacktrace'] = {
                'frames': list(sinfo.stacktrace['frames']),
                'registers': sinfo.stacktrace.get('registers')
            }

        new_frames.reverse()
        sinfo.stacktrace['frames'] = new_frames

    return data
Example 38
def s(to, data):

    computers = reversed(to.split('to'))
    reversed_way = '{}to{}'.format(*computers)

    res = get_connect

    for i, com in enumerate(pravila[to]):
        if i / 2 == 1 or i == 0:
            com.isOpen()
            com.write(res)
            com.close()
        if i / 2 != 1 and i != 0:
            com.isOpen()
            response = com.read(16)
            res = response
            com.close()
        print(com.port)

    for i, com in enumerate(pravila[reversed_way]):
        if i / 2 == 1 or i == 0:
            com.isOpen()
            com.write(res)
            com.close()
        if i / 2 != 1 and i != 0:
            com.isOpen()
            response = com.read(16)
            res = response
            com.close()

    if res == get_connect:
        print('Connection opened', res)
    time.sleep(5)

    res_data = b''
    rdata = data

    for i, com in enumerate(pravila[to]):

        if i / 2 == 1 or i == 0:
            com.isOpen()
            com.open()
            com.write(rdata)
            com.close()

        if i / 2 != 1 and i != 0:
            com.isOpen()
            com.open()
            print(com.in_waiting)
            s_data = com.read(16)
            res_data = s_data
            print(res_data, com.port)
            com.close()

    if res_data == data:
        print('Data transferred:', res_data)
    time.sleep(5)
    computers = reversed(to.split('to'))
    reversed_way = '{}to{}'.format(*computers)

    res = get_connect

    for i, com in enumerate(pravila[to]):
        if i / 2 == 1 or i == 0:
            com.open()
            com.write(res)
            print('Packet sent')
            com.close()
        if i / 2 != 1 and i != 0:
            com.open()
            response = com.read(16)
            res = response
            print(response, com.port)
            com.close()

    for i, com in enumerate(pravila[reversed_way]):
        if i / 2 == 1 or i == 0:
            com.open()
            com.write(res)
            com.close()
        if i / 2 != 1 and i != 0:
            com.open()
            response = com.read(16)
            res = response
            com.close()

    if res == get_connect:
        print('Connection closed', res)
Example 39
 def scan_fxt(self, fxtid):
     order = reversed(self.obj_order[fxtid])
     for k in order:
         yield k, self.registry[fxtid][k]
Example 40
def unregister():
    for cls in reversed(classes):
        bpy.utils.unregister_class(cls)
    del bpy.types.Scene.point_cloud
    bpy.types.SpaceView3D.draw_handler_remove(draw_handler['cloud'], 'WINDOW')
Example 41
def reverseAxes(img):
    '''Reverse the axes of numpy array `img`.'''
    return np.transpose(img,list(reversed(range(img.ndim))))
Example 42
data = [111, 444, 555, 888, 412, 647, 951,
        789, 654, 258, 5, 4, 234,
        543, 345, 123235245, 213,
        435, 2334, 345
        ]

min_valid_amount = 200
max_valid_amount = 400

for index in range(len(data) - 1, -1, -1):
    if data[index] > max_valid_amount or data[index] < min_valid_amount:
        print(index, data)
        del data[index]
print(data)

# another backwards/reverse way

top_index = len(data) - 1
for index, number in enumerate(reversed(data)):
    if number < min_valid_amount or number > max_valid_amount:
        print(top_index - index, number)
        del data[top_index - index]
print(data)
Example 43
import EasyFiles as ef 

# Given: A DNA string s of length at most 1000 bp.
# Return: The reverse complement sc of s.
complement = {'A':'T','C':'G','G':'C','T':'A'}
input = ef.getInput() # get the input list
DNA = input[0]
reverse = ''
for bp in reversed(DNA):
	bpComplement = complement[bp]
	reverse+=bpComplement

ef.writeOutput( reverse ) 
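A quick check of the loop with a fixed string (EasyFiles is the author's I/O helper; this sketch bypasses it):

complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}
DNA = "AAAACCCGGT"
reverse = ''.join(complement[bp] for bp in reversed(DNA))
print(reverse)  # ACCGGGTTTT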
Example 44
    def krit_descending(self):
        mass = list(reversed(Sort.t_sort(self.mass.copy())))
        print(self.__print_matrix(mass))
        print('T = {}'.format(mass))

        self.__krit(mass)
Example 45
def all_shortest_paths(G, source, target, weight=None, method='dijkstra'):
    """Compute all shortest paths in the graph.

    Parameters
    ----------
    G : NetworkX graph

    source : node
       Starting node for path.

    target : node
       Ending node for path.

    weight : None or string, optional (default = None)
       If None, every edge has weight/distance/cost 1.
       If a string, use this edge attribute as the edge weight.
       Any edge attribute not present defaults to 1.

    method : string, optional (default = 'dijkstra')
       The algorithm to use to compute the path lengths.
       Supported options: 'dijkstra', 'bellman-ford'.
       Other inputs produce a ValueError.
       If `weight` is None, unweighted graph methods are used, and this
       suggestion is ignored.

    Returns
    -------
    paths : generator of lists
        A generator of all paths between source and target.

    Raises
    ------
    ValueError
        If `method` is not among the supported options.

    NetworkXNoPath
        If `target` cannot be reached from `source`.

    Examples
    --------
    >>> G = nx.Graph()
    >>> nx.add_path(G, [0, 1, 2])
    >>> nx.add_path(G, [0, 10, 2])
    >>> print([p for p in nx.all_shortest_paths(G, source=0, target=2)])
    [[0, 1, 2], [0, 10, 2]]

    Notes
    -----
    There may be many shortest paths between the source and target.

    See Also
    --------
    shortest_path()
    single_source_shortest_path()
    all_pairs_shortest_path()
    """
    method = 'unweighted' if weight is None else method
    if method == 'unweighted':
        pred = nx.predecessor(G, source)
    elif method == 'dijkstra':
        pred, dist = nx.dijkstra_predecessor_and_distance(G,
                                                          source,
                                                          weight=weight)
    elif method == 'bellman-ford':
        pred, dist = nx.bellman_ford_predecessor_and_distance(G,
                                                              source,
                                                              weight=weight)
    else:
        raise ValueError('method not supported: {}'.format(method))

    if target not in pred:
        raise nx.NetworkXNoPath('Target {} cannot be reached '
                                'from Source {}'.format(target, source))

    stack = [[target, 0]]
    top = 0
    while top >= 0:
        node, i = stack[top]
        if node == source:
            yield [p for p, n in reversed(stack[:top + 1])]
        if len(pred[node]) > i:
            top += 1
            if top == len(stack):
                stack.append([pred[node][i], 0])
            else:
                stack[top] = [pred[node][i], 0]
        else:
            stack[top - 1][1] += 1
            top -= 1
Example 46
def draw_snake(window,color, snake , size):
    for x,y in snake[:-1]:
        pygame.draw.rect(window,WHITE,[x, y, size, size])
    # only the head (the last segment) is drawn green; break exits after it
    for x,y in reversed(snake):
        pygame.draw.rect(window,GREEN,[x, y, size, size])
        break
Example 47
from match import *

for dir in [1, 2, 3]:
    print "*" * 100
    print "Working on set" + str(dir) + " images"
    for i in [11, 10, 9, 8, 7, 6, 5, 4]:
        s1 = HarrisExtractor(8, 4, 10**i, 1, dir)
        s2 = HarrisExtractor(8, 4, 10**i, 3, dir)
        y = matcher(s1, s2)
        print "\n"
        print "*" * 60
        print "Using a threshold of 10^" + str(
            i) + " for filteirng Harris Points"
        print "Number of Harris Points in 1st ", len(s1.harris_Points)
        print "Number of Harris Points in 2nd ", len(s2.harris_Points)
        for j in reversed([
                1, 1.05, 1.1, 1.15, 1.2, 1.25, 1.3, 1.35, 1.4, 1.45, 1.5, 1.6,
                1.7
        ]):
            print "\n"
            print "Using a threshold of " + str(j) + " for ratio of 1st to 2nd"
            y.match_point_by_point(j)
            y.compare_Image(j)
            print "Number of matching points found", len(y.matching_pairs)
Example 48
def shortest_path(G, source=None, target=None, weight=None, method='dijkstra'):
    """Compute shortest paths in the graph.

    Parameters
    ----------
    G : NetworkX graph

    source : node, optional
        Starting node for path. If not specified, compute shortest
        paths for each possible starting node.

    target : node, optional
        Ending node for path. If not specified, compute shortest
        paths to all possible nodes.

    weight : None or string, optional (default = None)
        If None, every edge has weight/distance/cost 1.
        If a string, use this edge attribute as the edge weight.
        Any edge attribute not present defaults to 1.

    method : string, optional (default = 'dijkstra')
        The algorithm to use to compute the path.
        Supported options: 'dijkstra', 'bellman-ford'.
        Other inputs produce a ValueError.
        If `weight` is None, unweighted graph methods are used, and this
        suggestion is ignored.

    Returns
    -------
    path: list or dictionary
        All returned paths include both the source and target in the path.

        If the source and target are both specified, return a single list
        of nodes in a shortest path from the source to the target.

        If only the source is specified, return a dictionary keyed by
        targets with a list of nodes in a shortest path from the source
        to one of the targets.

        If only the target is specified, return a dictionary keyed by
        sources with a list of nodes in a shortest path from one of the
        sources to the target.

        If neither the source nor target are specified return a dictionary
        of dictionaries with path[source][target]=[list of nodes in path].

    Raises
    ------
    NodeNotFound
        If `source` is not in `G`.

    ValueError
        If `method` is not among the supported options.

    Examples
    --------
    >>> G = nx.path_graph(5)
    >>> print(nx.shortest_path(G, source=0, target=4))
    [0, 1, 2, 3, 4]
    >>> p = nx.shortest_path(G, source=0) # target not specified
    >>> p[4]
    [0, 1, 2, 3, 4]
    >>> p = nx.shortest_path(G, target=4) # source not specified
    >>> p[0]
    [0, 1, 2, 3, 4]
    >>> p = nx.shortest_path(G) # source, target not specified
    >>> p[0][4]
    [0, 1, 2, 3, 4]

    Notes
    -----
    There may be more than one shortest path between a source and target.
    This returns only one of them.

    See Also
    --------
    all_pairs_shortest_path()
    all_pairs_dijkstra_path()
    all_pairs_bellman_ford_path()
    single_source_shortest_path()
    single_source_dijkstra_path()
    single_source_bellman_ford_path()
    """
    if method not in ('dijkstra', 'bellman-ford'):
        # so we don't need to check in each branch later
        raise ValueError('method not supported: {}'.format(method))
    method = 'unweighted' if weight is None else method
    if source is None:
        if target is None:
            # Find paths between all pairs.
            if method == 'unweighted':
                paths = dict(nx.all_pairs_shortest_path(G))
            elif method == 'dijkstra':
                paths = dict(nx.all_pairs_dijkstra_path(G, weight=weight))
            else:  # method == 'bellman-ford':
                paths = dict(nx.all_pairs_bellman_ford_path(G, weight=weight))
        else:
            # Find paths from all nodes co-accessible to the target.
            with nx.utils.reversed(G):
                if method == 'unweighted':
                    paths = nx.single_source_shortest_path(G, target)
                elif method == 'dijkstra':
                    paths = nx.single_source_dijkstra_path(G,
                                                           target,
                                                           weight=weight)
                else:  # method == 'bellman-ford':
                    paths = nx.single_source_bellman_ford_path(G,
                                                               target,
                                                               weight=weight)
                # Now flip the paths so they go from a source to the target.
                for target in paths:
                    paths[target] = list(reversed(paths[target]))
    else:
        if target is None:
            # Find paths to all nodes accessible from the source.
            if method == 'unweighted':
                paths = nx.single_source_shortest_path(G, source)
            elif method == 'dijkstra':
                paths = nx.single_source_dijkstra_path(G,
                                                       source,
                                                       weight=weight)
            else:  # method == 'bellman-ford':
                paths = nx.single_source_bellman_ford_path(G,
                                                           source,
                                                           weight=weight)
        else:
            # Find shortest source-target path.
            if method == 'unweighted':
                paths = nx.bidirectional_shortest_path(G, source, target)
            elif method == 'dijkstra':
                paths = nx.dijkstra_path(G, source, target, weight)
            else:  # method == 'bellman-ford':
                paths = nx.bellman_ford_path(G, source, target, weight)
    return paths
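# A small usage sketch (assumes networkx is importable as nx; the graph and
# weights are illustrative): with `weight` set, the two-hop route 0->1->2
# (cost 2) beats the direct 0->2 edge (cost 10); with weight=None the
# method argument is ignored and the fewest-hops path wins.
import networkx as nx

G = nx.DiGraph()
G.add_edge(0, 1, w=1)
G.add_edge(1, 2, w=1)
G.add_edge(0, 2, w=10)
assert shortest_path(G, 0, 2, weight='w', method='dijkstra') == [0, 1, 2]
assert shortest_path(G, 0, 2, weight='w', method='bellman-ford') == [0, 1, 2]
assert shortest_path(G, 0, 2) == [0, 2]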
Esempio n. 49
0
    def _predict(self, seq, runtime=True):
        softmax_list = []
        aux_softmax_list = []
        x_list = []
        import sys
        for entry in seq:
            word = entry.word
            char_emb, _ = self.character_network.compute_embeddings(
                word, runtime=runtime)
            if sys.version_info[0] == 2:
                word_emb, found = self.embeddings.get_word_embeddings(
                    word.decode('utf-8'))
            else:
                word_emb, found = self.embeddings.get_word_embeddings(word)
            if not found:
                word_emb = self.unknown_word_embedding[0]
            else:
                word_emb = dy.inputVector(word_emb)
            if sys.version_info[0] == 2:
                holistic_word = word.decode('utf-8').lower()
            else:
                holistic_word = word.lower()
            if holistic_word in self.encodings.word2int:
                hol_emb = self.holistic_word_embedding[
                    self.encodings.word2int[holistic_word]]
            else:
                hol_emb = self.holistic_word_embedding[
                    self.encodings.word2int['<UNK>']]
            proj_emb = self.emb_proj_w.expr(update=True) * word_emb
            proj_hol = self.hol_proj_w.expr(update=True) * hol_emb
            proj_char = self.char_proj_w.expr(update=True) * char_emb
            # x_list.append(dy.tanh(proj_char + proj_emb + proj_hol))

            if runtime:
                x_list.append(dy.tanh(proj_char + proj_emb + proj_hol))
            else:
                p1 = random.random()
                p2 = random.random()
                p3 = random.random()
                m1 = 1
                m2 = 1
                m3 = 1
                if p1 < self.config.input_dropout_prob:
                    m1 = 0
                if p2 < self.config.input_dropout_prob:
                    m2 = 0
                if p3 < self.config.input_dropout_prob:
                    m3 = 0

                scale = 1.0
                if m1 + m2 + m3 > 0:
                    scale = float(3) / (m1 + m2 + m3)
                m1 = dy.scalarInput(m1)
                m2 = dy.scalarInput(m2)
                m3 = dy.scalarInput(m3)
                scale = dy.scalarInput(scale)
                x_list.append(
                    dy.tanh((proj_char * m1 + proj_emb * m2 + proj_hol * m3) *
                            scale))

        # BDLSTM
        rnn_outputs = []
        for fw, bw, dropout in zip(self.bdrnn_fw, self.bdrnn_bw,
                                   self.config.layer_dropouts):
            if not runtime:
                fw.set_dropouts(0, dropout)
                bw.set_dropouts(0, dropout)
            else:
                fw.set_dropouts(0, 0)
                bw.set_dropouts(0, 0)
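            # Run the backward LSTM over the reversed inputs, then flip its
            # outputs so they align index-by-index with fw_list.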
            fw_list = fw.initial_state().transduce(x_list)
            bw_list = list(
                reversed(bw.initial_state().transduce(reversed(x_list))))
            x_list = [
                dy.concatenate([x_fw, x_bw])
                for x_fw, x_bw in zip(fw_list, bw_list)
            ]
            # if runtime:
            #    x_out = x_list
            # else:
            #    x_out = [dy.dropout(x, dropout) for x in x_list]
            rnn_outputs.append(x_list)

        # SOFTMAX
        mlp_output = []
        for x in rnn_outputs[-1]:
            pre_softmax = []
            for iMLP in range(3):
                mlp_w = self.mlps[iMLP][0]
                mlp_b = self.mlps[iMLP][1]
                inp = x
                for w, b, drop, in zip(mlp_w, mlp_b,
                                       self.config.presoftmax_mlp_dropouts):
                    inp = dy.tanh(
                        w.expr(update=True) * inp + b.expr(update=True))
                    if not runtime:
                        inp = dy.dropout(inp, drop)
                pre_softmax.append(inp)
            mlp_output.append(pre_softmax)

        for softmax_inp, aux_softmax_inp in zip(
                mlp_output, rnn_outputs[self.config.aux_softmax_layer - 1]):
            softmax_list.append([
                dy.softmax(
                    self.softmax_upos_w.expr(update=True) * softmax_inp[0] +
                    self.softmax_upos_b.expr(update=True)),
                dy.softmax(
                    self.softmax_xpos_w.expr(update=True) * softmax_inp[1] +
                    self.softmax_xpos_b.expr(update=True)),
                dy.softmax(
                    self.softmax_attrs_w.expr(update=True) * softmax_inp[2] +
                    self.softmax_attrs_b.expr(update=True))
            ])
            aux_softmax_list.append([
                dy.softmax(
                    self.aux_softmax_upos_w.expr(update=True) *
                    aux_softmax_inp +
                    self.aux_softmax_upos_b.expr(update=True)),
                dy.softmax(
                    self.aux_softmax_xpos_w.expr(update=True) *
                    aux_softmax_inp +
                    self.aux_softmax_xpos_b.expr(update=True)),
                dy.softmax(
                    self.aux_softmax_attrs_w.expr(update=True) *
                    aux_softmax_inp +
                    self.aux_softmax_attrs_b.expr(update=True))
            ])

        return softmax_list, aux_softmax_list
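# A minimal standalone sketch (plain Python, hypothetical helper) of the
# per-channel input dropout used above: each projected embedding is dropped
# independently with probability p and the survivors rescaled by
# n_channels / n_kept, preserving the expected magnitude of the sum
# (inverted dropout applied per input channel rather than per unit).
import random

def channel_dropout(channels, p):
    masks = [0 if random.random() < p else 1 for _ in channels]
    kept = sum(masks)
    scale = len(channels) / float(kept) if kept else 1.0
    return [c * m * scale for c, m in zip(channels, masks)]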
Esempio n. 50
0
def _recursive_remove_items_from_tree(item):
    for index in reversed(range(item.childCount())):
        _recursive_remove_items_from_tree(item.child(index))
    topic_name = item.data(0, Qt.UserRole)
    del self._tree_items[topic_name]
Esempio n. 51
0
    def __init__(self, configuration, tc):
        """
        Initialization, creation of the "Tile Visualization" window and creation of the tile
        display in that window.

        :param configuration: object containing parameters set by the user
        :param tc: TileConstructor object with information on the tessellation
        """

        # Initialize instance variables
        self.m_diameter = tc.m_diameter
        self.phase_angle = tc.phase_angle
        self.configuration = configuration
        self.tc = tc
        # The "active tile" is colored blue. It is just being processed. Unprocessed tiles are
        # colored red, processed ones light-blue.
        self.active_tile = None

        # Get the size of the window from the configuration object and create the figure.
        figsize = ((self.configuration.conf.getfloat("Tile Visualization", "figsize horizontal")),
                   (self.configuration.conf.getfloat("Tile Visualization", "figsize vertical")))
        # self.fig = plt.figure(figsize=figsize, frameon=False)
        self.fig = plt.figure(figsize=figsize, facecolor=(0., 0., 0., 1))

        # Switch on interactive mode.
        plt.ion()
        # Restore the window location from coordinates stored in the configuration
        # during a previous run.
        self.mngr = plt.get_current_fig_manager()
        (x0, y0, width, height) = self.mngr.window.geometry().getRect()
        # look up stored position of upper left window corner:
        x0 = self.configuration.conf.getint('Hidden Parameters', 'tile window x0')
        y0 = self.configuration.conf.getint('Hidden Parameters', 'tile window y0')
        # move the tile visualization window to the stored position:
        self.mngr.window.setGeometry(x0, y0, width, height)

        self.ax = self.fig.add_subplot(111, facecolor='black')
        # Set the coordinate range in x and y. Coordinates are in radians from now on.
        fig_half_width = self.m_diameter / 2. + 0.0003 + self.tc.ol_outer
        plt.axis(([-fig_half_width, fig_half_width, -fig_half_width, fig_half_width]))
        plt.tick_params(axis='both', which='both', bottom=False, top=False, labelbottom=False,
                        left=False, labelleft=False)

        # Compute and show the current outline of the moon phase.
        polygon_array = np.array(self.__MoonPhase__())
        moon_outline = (Polygon(polygon_array, closed=True, color='#FFD700', alpha=1.))

        self.ax.add_patch(moon_outline)

        # Draw all tiles. Their color is red (unprocessed).
        self.tiles = []
        label_fontsize = self.configuration.conf.getint("Tile Visualization", "label fontsize")
        label_shift = self.configuration.conf.getfloat("Tile Visualization", "label shift")
        for count, t in enumerate(self.tc.list_of_tiles_sorted):
            rectangle = Rectangle((t['x_left'], t['y_bottom']), self.tc.im_w, self.tc.im_h,
                                  color='red', alpha=0.5)
            self.tiles.append(rectangle)
            x_text_pos_col = (t['x_right'] - float(t['column_index'] + 1) / float(
                t['column_total'] + 1) * self.tc.im_w)
            x_text_pos = (label_shift * x_text_pos_col + (1. - label_shift) * t['x_center'])
            plt.text(x_text_pos, t['y_center'], str(count), horizontalalignment='center',
                     verticalalignment='center', fontsize=label_fontsize)

        # Add the tiles in reversed order.
        for t in reversed(self.tiles):
            self.ax.add_patch(t)

        # Initialize the RectangleSelector with which patches of tiles can be selected later.
        toggle_selector.RS = RectangleSelector(self.ax, self.line_select_callback, drawtype='box',
                                               useblit=True, button=[1, 3],
                                               # don't use middle button
                                               minspanx=0, minspany=0, spancoords='pixels')
        plt.connect('key_press_event', toggle_selector)

        # Initialize instance variables.
        self.select_rect_x_min = None
        self.select_rect_x_max = None
        self.select_rect_y_min = None
        self.select_rect_y_max = None

        # Initialize mouse coordinates for rectangle selector.
        self.x1 = -2.
        self.x2 = -2.
        self.y1 = -2.
        self.y2 = -2.
        self.selection_rectangle = None
        self.reset_selection_rectangle()

        self.fig.canvas.set_window_title("MoonPanoramaMaker: Tile Arrangement "
                                         "in normalized orientation (see user "
                                         "guide)")
        plt.tight_layout()
        self.fig.canvas.manager.show()
Esempio n. 52
0
    def sync(self, entrega_dir: pathlib.Path, rama: str, *, target_subdir: str = None):
        """Importa una entrega a los repositorios de alumnes.

        Args:
          entrega_dir: ruta en repo externo con los archivos actualizados.
          rama: rama en la que actualizar la entrega.
          target_subdir: directorio que se debe actuaizar dentro el repositorio.
              Si no se especifica, se usa el nombre de la rama (usar la cadena
              vacía para actualizar el toplevel).

        Raises:
          github.UnknownObjectException si el repositorio no existe.
          github.GithubException si se recibió algún otro error de la API.
        """
        if target_subdir is None:
            target_subdir = rama

        gh = github.Github(GITHUB_TOKEN)
        repo = self.gh_repo or gh.get_repo(self.repo_full)
        gitref = repo.get_git_ref(f"heads/{rama}")
        ghuser = random.choice(self.github_users)  # ¯\_(ツ)_/¯ Only entregas knows.
        prefix_re = re.compile(re.escape(target_subdir.rstrip("/") + "/"))

        # Current state of the repo.
        cur_sha = gitref.object.sha
        # NOTE: since we only work in one subdirectory, the recursive lookup
        # could be limited to that directory (if we handled very large repos).
        cur_tree = repo.get_git_tree(cur_sha, recursive=True)
        cur_commit = repo.get_git_commit(cur_sha)

        # Tree of the submission on master, used to handle deletions.
        baseref = repo.get_git_ref(f"heads/{repo.default_branch}")
        base_tree = repo.get_git_tree(baseref.object.sha, recursive=True)

        # Examine the submissions repo to obtain the commits to apply.
        entrega_repo = git.Repo(entrega_dir, search_parent_directories=True)
        entrega_relpath = entrega_dir.relative_to(entrega_repo.working_dir).as_posix()
        pending_commits = []
        cur_commit_date = cur_commit.author.date

        # Dates from the API always come in UTC, but PyGithub does not attach
        # a timezone, so they would be interpreted in the local time zone by
        # default. See https://github.com/PyGithub/PyGithub/pull/704.
        cur_commit_date = cur_commit_date.replace(tzinfo=timezone.utc)

        for commit in entrega_repo.iter_commits(paths=[entrega_relpath]):
            if commit.authored_date > cur_commit_date.timestamp():
                pending_commits.append(commit)

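        # iter_commits() yields newest first, so replay the pending commits
        # in reverse (oldest first) on top of the current branch head.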
        for commit in reversed(pending_commits):
            entrega_tree = commit.tree.join(entrega_relpath)
            tree_contents = tree_to_github(entrega_tree, target_subdir, repo)
            entrega_files = set(tree_contents.keys())
            tree_elements = list(tree_contents.values())
            tree_elements.extend(
                deleted_files(entrega_files, cur_tree, prefix_re, base_tree)
            )
            author_date = datetime.fromtimestamp(commit.authored_date).astimezone()
            author_info = github.InputGitAuthor(
                ghuser, f"{ghuser}@users.noreply.github.com", author_date.isoformat()
            )
            cur_tree = repo.create_git_tree(tree_elements, cur_tree)
            cur_commit = repo.create_git_commit(
                commit.message, cur_tree, [cur_commit], author_info
            )
            # The tree must be fetched recursively to get the contents of the
            # submission subdirectory.
            cur_tree = repo.get_git_tree(cur_tree.sha, recursive=True)

        gitref.edit(cur_commit.sha)
Esempio n. 53
0
def create_feature_extractor(
    model: nn.Module,
    return_nodes: Optional[Union[List[str], Dict[str, str]]] = None,
    train_return_nodes: Optional[Union[List[str], Dict[str, str]]] = None,
    eval_return_nodes: Optional[Union[List[str], Dict[str, str]]] = None,
    tracer_kwargs: Dict = {},
    suppress_diff_warning: bool = False,
) -> fx.GraphModule:
    """
    Creates a new graph module that returns intermediate nodes from a given
    model as dictionary with user specified keys as strings, and the requested
    outputs as values. This is achieved by re-writing the computation graph of
    the model via FX to return the desired nodes as outputs. All unused nodes
    are removed, together with their corresponding parameters.

    Desired output nodes must be specified as a ``.`` separated
    path walking the module hierarchy from top level module down to leaf
    operation or leaf module. For more details on the node naming conventions
    used here, please see the :ref:`relevant subheading <about-node-names>`
    in the `documentation <https://pytorch.org/vision/stable/feature_extraction.html>`_.

    Not all models will be FX traceable, although with some massaging they can
    be made to cooperate. Here's a (not exhaustive) list of tips:

        - If you don't need to trace through a particular, problematic
          sub-module, turn it into a "leaf module" by passing a list of
          ``leaf_modules`` as one of the ``tracer_kwargs`` (see example below).
          It will not be traced through, but rather, the resulting graph will
          hold a reference to that module's forward method.
        - Likewise, you may turn functions into leaf functions by passing a
          list of ``autowrap_functions`` as one of the ``tracer_kwargs`` (see
          example below).
        - Some inbuilt Python functions can be problematic. For instance,
          ``int`` will raise an error during tracing. You may wrap them in your
          own function and then pass that in ``autowrap_functions`` as one of
          the ``tracer_kwargs``.

    For further information on FX see the
    `torch.fx documentation <https://pytorch.org/docs/stable/fx.html>`_.

    Args:
        model (nn.Module): model on which we will extract the features
        return_nodes (list or dict, optional): either a ``List`` or a ``Dict``
            containing the names (or partial names - see note above)
            of the nodes for which the activations will be returned. If it is
            a ``Dict``, the keys are the node names, and the values
            are the user-specified keys for the graph module's returned
            dictionary. If it is a ``List``, it is treated as a ``Dict`` mapping
            node specification strings directly to output names. In the case
            that ``train_return_nodes`` and ``eval_return_nodes`` are specified,
            this should not be specified.
        train_return_nodes (list or dict, optional): similar to
            ``return_nodes``. This can be used if the return nodes
            for train mode are different than those from eval mode.
            If this is specified, ``eval_return_nodes`` must also be specified,
            and ``return_nodes`` should not be specified.
        eval_return_nodes (list or dict, optional): similar to
            ``return_nodes``. This can be used if the return nodes
            for train mode are different than those from eval mode.
            If this is specified, ``train_return_nodes`` must also be specified,
            and `return_nodes` should not be specified.
        tracer_kwargs (dict, optional): a dictionary of keyword arguments for
            ``NodePathTracer`` (which passes them onto its parent class
            `torch.fx.Tracer <https://pytorch.org/docs/stable/fx.html#torch.fx.Tracer>`_).
        suppress_diff_warning (bool, optional): whether to suppress a warning
            when there are discrepancies between the train and eval version of
            the graph. Defaults to False.

    Examples::

        >>> # Feature extraction with resnet
        >>> model = torchvision.models.resnet18()
        >>> # extract layer1 and layer3, giving as names `feat1` and feat2`
        >>> model = create_feature_extractor(
        >>>     model, {'layer1': 'feat1', 'layer3': 'feat2'})
        >>> out = model(torch.rand(1, 3, 224, 224))
        >>> print([(k, v.shape) for k, v in out.items()])
        >>>     [('feat1', torch.Size([1, 64, 56, 56])),
        >>>      ('feat2', torch.Size([1, 256, 14, 14]))]

        >>> # Specifying leaf modules and leaf functions
        >>> def leaf_function(x):
        >>>     # This would raise a TypeError if traced through
        >>>     return int(x)
        >>>
        >>> class LeafModule(torch.nn.Module):
        >>>     def forward(self, x):
        >>>         # This would raise a TypeError if traced through
        >>>         int(x.shape[0])
        >>>         return torch.nn.functional.relu(x + 4)
        >>>
        >>> class MyModule(torch.nn.Module):
        >>>     def __init__(self):
        >>>         super().__init__()
        >>>         self.conv = torch.nn.Conv2d(3, 1, 3)
        >>>         self.leaf_module = LeafModule()
        >>>
        >>>     def forward(self, x):
        >>>         leaf_function(x.shape[0])
        >>>         x = self.conv(x)
        >>>         return self.leaf_module(x)
        >>>
        >>> model = create_feature_extractor(
        >>>     MyModule(), return_nodes=['leaf_module'],
        >>>     tracer_kwargs={'leaf_modules': [LeafModule],
        >>>                    'autowrap_functions': [leaf_function]})

    """
    is_training = model.training

    assert any(arg is not None for arg in [return_nodes, train_return_nodes, eval_return_nodes]), (
        "Either `return_nodes` or `train_return_nodes` and "
        "`eval_return_nodes` together should be specified"
    )

    assert not ((train_return_nodes is None) ^ (eval_return_nodes is None)), (
        "If any of `train_return_nodes` and `eval_return_nodes` are " "specified, then both should be specified"
    )

    assert (return_nodes is None) ^ (train_return_nodes is None), (
        "If `train_return_nodes` and `eval_return_nodes` are specified, "
        "then `return_nodes` should not be specified, and vice versa"
    )

    # Put *_return_nodes into Dict[str, str] format
    def to_strdict(n) -> Dict[str, str]:
        if isinstance(n, list):
            return {str(i): str(i) for i in n}
        return {str(k): str(v) for k, v in n.items()}

    if train_return_nodes is None:
        return_nodes = to_strdict(return_nodes)
        train_return_nodes = deepcopy(return_nodes)
        eval_return_nodes = deepcopy(return_nodes)
    else:
        train_return_nodes = to_strdict(train_return_nodes)
        eval_return_nodes = to_strdict(eval_return_nodes)

    # Repeat the tracing and graph rewriting for train and eval mode
    tracers = {}
    graphs = {}
    mode_return_nodes: Dict[str, Dict[str, str]] = {"train": train_return_nodes, "eval": eval_return_nodes}
    for mode in ["train", "eval"]:
        if mode == "train":
            model.train()
        elif mode == "eval":
            model.eval()

        # Instantiate our NodePathTracer and use that to trace the model
        tracer = NodePathTracer(**tracer_kwargs)
        graph = tracer.trace(model)

        name = model.__class__.__name__ if isinstance(model, nn.Module) else model.__name__
        graph_module = fx.GraphModule(tracer.root, graph, name)

        available_nodes = list(tracer.node_to_qualname.values())
        # FIXME We don't know if we should expect this to happen
        assert len(set(available_nodes)) == len(
            available_nodes
        ), "There are duplicate nodes! Please raise an issue https://github.com/pytorch/vision/issues"
        # Check that all outputs in return_nodes are present in the model
        for query in mode_return_nodes[mode].keys():
            # To check if a query is available we need to check that at least
            # one of the available names starts with it up to a .
            if not any([re.match(rf"^{query}(\.|$)", n) is not None for n in available_nodes]):
                raise ValueError(
                    f"node: '{query}' is not present in model. Hint: use "
                    "`get_graph_node_names` to make sure the "
                    "`return_nodes` you specified are present. It may even "
                    "be that you need to specify `train_return_nodes` and "
                    "`eval_return_nodes` separately."
                )

        # Remove existing output nodes (train mode)
        orig_output_nodes = []
        for n in reversed(graph_module.graph.nodes):
            if n.op == "output":
                orig_output_nodes.append(n)
        assert len(orig_output_nodes)
        for n in orig_output_nodes:
            graph_module.graph.erase_node(n)

        # Find nodes corresponding to return_nodes and make them into output_nodes
        nodes = [n for n in graph_module.graph.nodes]
        output_nodes = OrderedDict()
        for n in reversed(nodes):
            module_qualname = tracer.node_to_qualname.get(n)
            if module_qualname is None:
                # NOTE - Known cases where this happens:
                # - Node representing creation of a tensor constant - probably
                #   not interesting as a return node
                # - When packing outputs into a named tuple like in InceptionV3
                continue
            for query in mode_return_nodes[mode]:
                depth = query.count(".")
                if ".".join(module_qualname.split(".")[: depth + 1]) == query:
                    output_nodes[mode_return_nodes[mode][query]] = n
                    mode_return_nodes[mode].pop(query)
                    break
        output_nodes = OrderedDict(reversed(list(output_nodes.items())))

        # And add them in the end of the graph
        with graph_module.graph.inserting_after(nodes[-1]):
            graph_module.graph.output(output_nodes)

        # Remove unused modules / parameters
        graph_module.graph.eliminate_dead_code()
        graph_module.recompile()

        # Keep track of the tracer and graph so we can choose the main one
        tracers[mode] = tracer
        graphs[mode] = graph

    # Warn user if there are any discrepancies between the graphs of the
    # train and eval modes
    if not suppress_diff_warning:
        _warn_graph_differences(tracers["train"], tracers["eval"])

    # Build the final graph module
    graph_module = DualGraphModule(model, graphs["train"], graphs["eval"], class_name=name)

    # Restore original training mode
    model.train(is_training)
    graph_module.train(is_training)

    return graph_module
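# Companion note (a sketch, assuming the torchvision feature_extraction
# module this function ships in): get_graph_node_names lists the node names
# per mode, which is the easiest way to pick valid `return_nodes` queries.
import torchvision
from torchvision.models.feature_extraction import get_graph_node_names

model = torchvision.models.resnet18()
train_nodes, eval_nodes = get_graph_node_names(model)
# Any entry here, or a `.`-prefix of one (e.g. 'layer1'), is a valid query.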
Esempio n. 54
0
def polynomial(z, coefficients): # Horner method
    t = complex(0, 0)
    for c in reversed(coefficients):
        t = t * z + c
    return t
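# Worked check (coefficients are taken in ascending order of degree, as the
# reversed() traversal implies): 1 + 2z + 3z^2 at z = 2 is 3*4 + 2*2 + 1 = 17.
assert polynomial(2, [1, 2, 3]) == 17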
Esempio n. 55
0
def backward_phase(augmented):
    for i in reversed(range(1, augmented.shape[0])):
        sliced = augmented[:i + 1, :]
        rr_up(sliced[:, i:])
        augmented[:i + 1, :] = sliced
        print_mat(augmented)
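# Hedged reading (rr_up and print_mat are helpers defined outside this
# excerpt): the loop walks the rows bottom-up and clears the entries above
# each pivot, i.e. the back-substitution half of Gauss-Jordan elimination
# on the augmented matrix.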
Esempio n. 56
0
def loopingFunction(a=1 * 2):
    c = []
    f = [c, a]

    for a in range(6 or 8):
        for b in range(8):
            if a == b:
                c.append((a, b, True))
            elif a < b:
                c.append((b, a, False))
            else:
                c.append((a, b, False))

            if a != b:
                z = 1
            else:
                z = 0

            if z == 0:
                continue

            if z == 1 and b == 6:
                break

            if a == b:
                z = 0

    print c

    f = 1

    while f < (10 or 8):
        m = 1
        f += 1

    print "m=", m

    x = [u for u in range(8)]

    x = [(u, v) for (u, v) in zip(range(8), reversed(range(8)))]
    print x

    x = [(u if u % 2 == 0 else 0) for u in range(10)]
    print x

    x = [(u if u % 2 == 0 else 0) for u in (a if cond() else range(9))]
    print x

    y = [[3 + (l if l else -1) for l in [m, m + 1]]
         for m in [f for f in range(2)]]
    print "f=", f
    print "y=", y

    if x:
        l = "YES"
    else:
        l = "NO"

    if x:
        l = "yes"
    else:
        if True:
            l = "no"

    print "Triple and chain"

    if m and l and f:
        print "OK"

    print "Triple or chain"
    if m or l or f:
        print "Okey"

    print "Nested if not chain"
    if not m:
        if not l:
            print "ok"

    print "Braced if not chain with 'or'"
    if not (m or l):
        print "oki"

    print "Braced if not chain with 'and'"
    if not (m and l):
        print "oki"

    d = 1
    print "Nested if chain with outer else"
    if a:
        if b or c:
            if d:
                print "inside nest"

    else:
        print "outer else"

    print x

    while False:
        pass
    else:
        print "Executed else branch for False condition while loop"

    while True:
        break
    else:
        print "Executed else branch for True condition while loop"

    for x in range(7):
        pass
    else:
        print "Executed else branch for no break for loop"

    for x in range(7):
        break
    else:
        print "Executed else branch despite break in for loop"

    x = iter(range(5))

    while next(x):
        pass
    else:
        print "Executed else branch of while loop without break"
Esempio n. 57
0
def generate_sigstruct(attr, args, mrenclave):
    '''Generate Sigstruct.

    field format: (offset, type, value)
    ''' # pylint: disable=too-many-locals

    fields = {
        'header': (offs.SGX_ARCH_ENCLAVE_CSS_HEADER,
                   "<4L", 0x00000006, 0x000000e1, 0x00010000, 0x00000000),
        'module_vendor': (offs.SGX_ARCH_ENCLAVE_CSS_MODULE_VENDOR, "<L", 0x00000000),
        'date': (offs.SGX_ARCH_ENCLAVE_CSS_DATE, "<HBB", attr['year'], attr['month'], attr['day']),
        'header2': (offs.SGX_ARCH_ENCLAVE_CSS_HEADER2,
                    "<4L", 0x00000101, 0x00000060, 0x00000060, 0x00000001),
        'hw_version': (offs.SGX_ARCH_ENCLAVE_CSS_HW_VERSION, "<L", 0x00000000),
        'misc_select': (offs.SGX_ARCH_ENCLAVE_CSS_MISC_SELECT, "4s", attr['misc_select']),
        'misc_mask': (offs.SGX_ARCH_ENCLAVE_CSS_MISC_MASK, "4s", attr['misc_select']),
        'attributes': (offs.SGX_ARCH_ENCLAVE_CSS_ATTRIBUTES, "8s8s", attr['flags'], attr['xfrms']),
        'attribute_mask': (offs.SGX_ARCH_ENCLAVE_CSS_ATTRIBUTE_MASK,
                           "8s8s", attr['flags'], attr['xfrms']),
        'enclave_hash': (offs.SGX_ARCH_ENCLAVE_CSS_ENCLAVE_HASH, "32s", mrenclave),
        'isv_prod_id': (offs.SGX_ARCH_ENCLAVE_CSS_ISV_PROD_ID, "<H", attr['isv_prod_id']),
        'isv_svn': (offs.SGX_ARCH_ENCLAVE_CSS_ISV_SVN, "<H", attr['isv_svn']),
    }

    sign_buffer = bytearray(128 + 128)

    for field in fields.values():
        if field[0] >= offs.SGX_ARCH_ENCLAVE_CSS_MISC_SELECT:
            struct.pack_into(field[1], sign_buffer,
                             field[0] - offs.SGX_ARCH_ENCLAVE_CSS_MISC_SELECT + 128,
                             *field[2:])
        else:
            struct.pack_into(field[1], sign_buffer, field[0], *field[2:])

    proc = subprocess.Popen(
        ['openssl', 'rsa', '-modulus', '-in', args['key'], '-noout'],
        stdout=subprocess.PIPE)
    modulus_out, _ = proc.communicate()
    modulus = bytes.fromhex(modulus_out[8:8+offs.SE_KEY_SIZE*2].decode())
    modulus = bytes(reversed(modulus))

    proc = subprocess.Popen(
        ['openssl', 'sha256', '-binary', '-sign', args['key']],
        stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    signature, _ = proc.communicate(sign_buffer)
    signature = signature[::-1]

    modulus_int = int.from_bytes(modulus, byteorder='little')
    signature_int = int.from_bytes(signature, byteorder='little')

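    # Q1 = s^2 // m and Q2 = ((s^2 mod m) * s) // m are the precomputed
    # quotients the SGX SIGSTRUCT carries so signature verification can
    # avoid big-integer division.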
    tmp1 = signature_int * signature_int
    q1_int = tmp1 // modulus_int
    tmp2 = tmp1 % modulus_int
    q2_int = tmp2 * signature_int // modulus_int

    q1 = q1_int.to_bytes(384, byteorder='little') # pylint: disable=invalid-name
    q2 = q2_int.to_bytes(384, byteorder='little') # pylint: disable=invalid-name

    fields.update({
        'modulus': (offs.SGX_ARCH_ENCLAVE_CSS_MODULUS, "384s", modulus),
        'exponent': (offs.SGX_ARCH_ENCLAVE_CSS_EXPONENT, "<L", 3),
        'signature': (offs.SGX_ARCH_ENCLAVE_CSS_SIGNATURE, "384s", signature),

        'q1': (offs.SGX_ARCH_ENCLAVE_CSS_Q1, "384s", q1),
        'q2': (offs.SGX_ARCH_ENCLAVE_CSS_Q2, "384s", q2),
    })

    buffer = bytearray(offs.SGX_ARCH_ENCLAVE_CSS_SIZE)

    for field in fields.values():
        struct.pack_into(field[1], buffer, field[0], *field[2:])

    return buffer
        print("Unable to find last downloaded tweet id or reply id for user",
              user_id)

    return tweet


nextID = sys.argv[1]
while nextID:
    tweet = find_tweet(nextID, tweetsDB)
    if tweet is None:
        # can also be triggered if database is incomplete - make sure you have downloaded all tweets needed
        tweets.append("---tweet has been deleted---")
        nextID = ''

    else:
        tweets.append(tweet)
        if "in_reply_to_status_id" in tweet.keys():
            try:
                nextID = tweet["in_reply_to_status_id"]["$numberLong"]

            except (KeyError, TypeError):
                nextID = tweet["in_reply_to_status_id"]

        else:
            # this is root
            nextID = ''

# we print ids from root to leaf
for i in reversed(tweets):
    print(i)
Esempio n. 59
0
    def _handle_reunion_packet(self, packet: Packet):
        # TODO: don't accept reunion from disconnected peers and their children
        parser = ReunionParser(packet)

        if not parser.is_valid():
            print("Ignoring invalid reunion packet")
            return

        neighbor = parser.entries[-1]

        if not self.is_neighbour(neighbor):
            print("Ignoring reunion packet received from non neighbor")
            return

        if parser.request_type == Packet.REQUEST:
            for address in parser.entries:
                node = self.graph.find_node(address)
                if node:
                    node.update_last_seen()

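            # Echo the recorded path back in reverse so the response
            # retraces the request's route toward its origin.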
            resp_packet = PacketFactory.new_reunion_packet(Packet.RESPONSE, self.address, list(reversed(parser.entries)))
            self.send_packet(neighbor, resp_packet)

        else:
            print("Ignoring reunion response packet")
Esempio n. 60
0
    """
    EXPECTING ALTERNATING LIST OF operands AND operators
    """
    if isinstance(elements, basestring):
        try:
            return {"id": int(elements)}
        except Exception, e:
            Log.error("problem", e)
    if isinstance(elements, list) and len(elements) == 1:
        if isinstance(elements[0], basestring):
            return {"id": int(elements[0])}
        return elements[0]
    if isinstance(elements, Mapping):
        return elements

    for i in reversed(range(1, len(elements), 2)):
        if elements[i] == ":":
            return _parse_key(
                elements[:i - 1]
                + [{"id": int(elements[i - 1]), "source": _parse_key(elements[i + 1]), "type": "aggregation"}]
                + elements[i + 2:]
            )
    for i in range(1, len(elements), 2):
        if elements[i] == ".":
            return _parse_key(
                elements[:i - 1]
                + [{"id": int(elements[i + 1]), "source": _parse_key(elements[i - 1]), "type": "join"}]
                + elements[i + 2:]
            )
    Log.error("Do not know how to parse")
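# Illustrative trace (inferred from the recursion above, not from the
# original documentation): a "." joins the operand on its right to the
# parsed key on its left, so
#   _parse_key(["1", ".", "2"]) == {"id": 2, "source": {"id": 1}, "type": "join"}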


def etl2key(etl):
    if etl.source:
        if etl.source.type:
            if etl.type == etl.source.type:
                if etl.type == "join":
                    return etl2key(etl.source) + "." + unicode(etl.id)
                else: