Example #1
 def __init__(self, lhs, rhs):
     if not isinstance(lhs, LinSum):
         lhs = LinSum(lhs)
     if not isinstance(rhs, LinSum):
         rhs = LinSum(rhs)
     self.lhs = lhs
     self.rhs = rhs
Example #2
 def coerce(cls, value):
     if isinstance(value, cls):
         return value
     elif isinstance(value, Number):
         return cls([value])
     else:
         raise TypeError(type(value))
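A note on the coerce pattern above: it reads as a classmethod (the cls parameter), dispatching on isinstance to either pass an existing instance through or wrap a plain number. A minimal self-contained sketch, assuming a @classmethod decorator and the stdlib Number ABC:

from numbers import Number

class LinSum:
    def __init__(self, terms):
        self.terms = list(terms)

    @classmethod
    def coerce(cls, value):
        if isinstance(value, cls):
            return value          # already a LinSum; pass through
        elif isinstance(value, Number):
            return cls([value])   # wrap a bare number
        else:
            raise TypeError(type(value))

s = LinSum.coerce(3)            # wrapped
assert LinSum.coerce(s) is s    # existing instance returned unchanged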
Example #3
    def get_calling_nodes(self):
        from jedi.evaluate.dynamic import MergedExecutedParams
        old_arguments_list = []
        arguments = self

        while arguments not in old_arguments_list:
            if not isinstance(arguments, TreeArguments):
                break

            old_arguments_list.append(arguments)
            for name, default, star_count in reversed(list(arguments.as_tree_tuple_objects())):
                if not star_count or not isinstance(name, tree.Name):
                    continue

                names = self._evaluator.goto(arguments.context, name)
                if len(names) != 1:
                    break
                if not isinstance(names[0], ParamName):
                    break
                param = names[0].get_param()
                if isinstance(param, MergedExecutedParams):
                    # For dynamic searches we don't even want to see errors.
                    return []
                if not isinstance(param, ExecutedParam):
                    break
                if param.var_args is None:
                    break
                arguments = param.var_args
                break

        return [arguments.argument_node or arguments.trailer]
Example #4
    def testBasic(self):
        """
        Test basic operations.

        Setup an XMPPClientFactory, which sets up an XMPPAuthenticator, and let
        it produce a protocol instance. Then inspect the instance variables of
        the authenticator and XML stream objects.
        """
        self.client_jid = jid.JID('[email protected]/resource')

        # Get an XmlStream instance. Note that it gets initialized with the
        # XMPPAuthenticator (that has its associateWithXmlStream called) that
        # is in turn initialized with the arguments to the factory.
        xs = client.XMPPClientFactory(self.client_jid,
                                      'secret').buildProtocol(None)

        # test authenticator's instance variables
        self.assertEqual('example.com', xs.authenticator.otherHost)
        self.assertEqual(self.client_jid, xs.authenticator.jid)
        self.assertEqual('secret', xs.authenticator.password)

        # test list of initializers
        version, tls, sasl, bind, session = xs.initializers

        self.assert_(isinstance(tls, xmlstream.TLSInitiatingInitializer))
        self.assert_(isinstance(sasl, SASLInitiatingInitializer))
        self.assert_(isinstance(bind, client.BindInitializer))
        self.assert_(isinstance(session, client.SessionInitializer))

        self.assertFalse(tls.required)
        self.assertTrue(sasl.required)
        self.assertFalse(bind.required)
        self.assertFalse(session.required)
Example #5
def plot_helper_figure_assert(args, ISPIN):
    if ISPIN == 2:
        assert args.figure is None or (isinstance(args.figure, list) and len(args.figure) == 2), \
            'The number of figures should be 2!'
    elif ISPIN == 1:
        assert args.figure is None or (isinstance(args.figure, list) and len(args.figure) == 1), \
            'The number of figures should be 1!'
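A quick illustration of the contract above (SimpleNamespace stands in for an argparse result; the file names are hypothetical):

from types import SimpleNamespace

args = SimpleNamespace(figure=['spin_up.png', 'spin_dn.png'])
plot_helper_figure_assert(args, ISPIN=2)   # passes: two figures for spin-polarized data
plot_helper_figure_assert(SimpleNamespace(figure=None), ISPIN=1)   # None is always accepted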
Example #6
def warns(expected_warning, *args, **kwargs):
    """Assert that code raises a particular class of warning.

    Specifically, the input ``expected_warning`` can be a warning class or
    tuple of warning classes, and the code must issue that warning
    (if a single class) or one of those warnings (if a tuple).

    This helper produces a list of ``warnings.WarningMessage`` objects,
    one for each warning raised.

    This function can be used as a context manager, or any of the other ways
    ``pytest.raises`` can be used::

        >>> with warns(RuntimeWarning):
        ...    warnings.warn("my warning", RuntimeWarning)
    """
    wcheck = WarningsChecker(expected_warning)
    if not args:
        return wcheck
    elif isinstance(args[0], str):
        code, = args
        assert isinstance(code, str)
        frame = sys._getframe(1)
        loc = frame.f_locals.copy()
        loc.update(kwargs)

        with wcheck:
            code = py.code.Source(code).compile()
            py.builtin.exec_(code, frame.f_globals, loc)
    else:
        func = args[0]
        with wcheck:
            return func(*args[1:], **kwargs)
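The docstring above covers the context-manager form; the final branch also accepts a callable plus its arguments, mirroring pytest.raises. Illustrative usage, assuming warns is in scope:

import warnings

with warns(UserWarning):
    warnings.warn("boo", UserWarning)

# callable form: the function is invoked inside the checker
warns(UserWarning, lambda: warnings.warn("boo", UserWarning))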
Example #7
 def notify(self, sender, event=None):
     signal, data = event
     if signal == 'world_changed':
         world, old_object, new_object = data
         if isinstance(new_object.data, pd.DataFrame):
             self.refresh()
     elif signal == 'world_object_removed':
         world, old_object = data
         if isinstance(old_object.data, pd.DataFrame):
             figure = plt.figure(self._figures[old_object.name])
             figure.clf()
             del self._figures[old_object.name]
             self.refresh()
     elif signal == 'world_object_changed':
         world, old_object, world_object = data
         if isinstance(world_object.data, pd.DataFrame):
             self.refresh_world_object(world_object)
     elif signal == 'world_object_item_changed':
         world, world_object, item, old, new = data
         if isinstance(world_object.data, pd.DataFrame):
             # self.refresh_manager(world_object)
             if item == 'attribute':
                 self.update_dataframe_figure(world_object, new)
     elif signal == 'world_sync':
         self.refresh()
Example #8
 def __init__(self, expire=2, *args, **kwarg):
     dargs = [arg for arg in args if isinstance(arg, collections.Iterable)]
     if isinstance(expire, collections.Iterable):
         dargs.insert(0, expire)
         expire = 2
     super(TokenCache, self).__init__(*dargs, **kwarg)
     self.expire = expire
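The shuffle above lets callers pass an initializing iterable either before or instead of expire. A sketch of the resulting call patterns (assuming TokenCache subclasses dict; note collections.Iterable moved to collections.abc in Python 3.3+):

cache = TokenCache()                    # expire defaults to 2
cache = TokenCache(10)                  # expire=10, no initial data
cache = TokenCache([('k', 'v')])        # iterable slides into dict(...), expire stays 2
cache = TokenCache(10, [('k', 'v')])    # both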
Example #9
    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result
Example #10
def showCumulOverlap(mode, modes, *args, **kwargs):
    """Show cumulative overlap using :func:`~matplotlib.pyplot.plot`.

    :arg mode: a single mode/vector
    :type mode: :class:`.Mode`, :class:`.Vector`
    :arg modes: multiple modes
    :type modes: :class:`.ModeSet`, :class:`.ANM`, :class:`.GNM`, :class:`.PCA`
    """

    import matplotlib.pyplot as plt
    if not isinstance(mode, (Mode, Vector)):
        raise TypeError('mode must be Mode or Vector, not {0}'
                        .format(type(mode)))
    if not isinstance(modes, (NMA, ModeSet)):
        raise TypeError('modes must be NMA or ModeSet, not {0}'
                        .format(type(modes)))
    cumov = (calcOverlap(mode, modes) ** 2).cumsum() ** 0.5
    if isinstance(modes, NMA):
        arange = np.arange(0.5, len(modes)+0.5)
    else:
        arange = modes.getIndices() + 0.5
    show = plt.plot(arange, cumov, *args, **kwargs)
    plt.title('Cumulative overlap with {0}'.format(str(mode)))
    plt.xlabel('{0} mode index'.format(modes))
    plt.ylabel('Cumulative overlap')
    plt.axis((arange[0]-0.5, arange[-1]+0.5, 0, 1))
    if SETTINGS['auto_show']:
        showFigure()
    return show
Example #11
  def testCreateRetrieveUpdateDelete(self):
    if not conf.options.get_value('runlive') == 'true':
      return

    # Either load the recording or prepare to make a live request.
    conf.configure_cache(self.client, 'testCreateRetrieveUpdateDelete')

    customer_id = self.client.RetrieveCustomerId().GetCustomerId()
    rnd_number = random.randrange(0, 100001)
    org_unit_name = 'test_org_unit_name%s' % (rnd_number)
    org_unit_description = 'test_org_unit_description%s' % (rnd_number)
    org_unit_path = org_unit_name

    new_entry = self.client.CreateOrgUnit(customer_id, org_unit_name,
                                          parent_org_unit_path='/',
                                          description=org_unit_description,
                                          block_inheritance=False)
    self.assert_(isinstance(new_entry,
                            gdata.apps.organization.data.OrgUnitEntry))
    self.assertEquals(new_entry.org_unit_path, org_unit_path)

    entry = self.client.RetrieveOrgUnit(customer_id, org_unit_path)
    self.assert_(isinstance(entry,
                            gdata.apps.organization.data.OrgUnitEntry))
    self.assertEquals(entry.org_unit_name, org_unit_name)
    self.assertEquals(entry.org_unit_description, org_unit_description)
    self.assertEquals(entry.parent_org_unit_path, '')
    self.assertEquals(entry.org_unit_path, org_unit_path)
    self.assertEquals(entry.org_unit_block_inheritance, 'false')

    self.client.DeleteOrgUnit(customer_id, org_unit_name)
Example #12
def showCumulFractVars(modes, *args, **kwargs):
    """Show fraction of variances of *modes* using :func:`~matplotlib.pyplot.
    plot`.  Note that mode indices are incremented by 1.  See also
    :func:`.showFractVars` function."""

    import matplotlib.pyplot as plt
    if not isinstance(modes, (Mode, NMA, ModeSet)):
        raise TypeError('modes must be a Mode, NMA, or ModeSet instance, '
                        'not {0}'.format(type(modes)))
    if isinstance(modes, Mode):
        indices = modes.getIndices() + 0.5
        modes = [modes]
    elif isinstance(modes, ModeSet):
        indices = modes.getIndices() + 0.5
    else:
        indices = np.arange(len(modes)) + 0.5

    fracts = calcFractVariance(modes).cumsum()
    show = plt.plot(indices, fracts, *args, **kwargs)
    axis = list(plt.axis())
    axis[0] = 0.5
    axis[2] = 0
    axis[3] = 1
    plt.axis(axis)
    plt.xlabel('Mode index')
    plt.ylabel('Fraction of variance')
    if SETTINGS['auto_show']:
        showFigure()
    return show
Example #13
def showOverlap(mode, modes, *args, **kwargs):
    """Show overlap :func:`~matplotlib.pyplot.bar`.

    :arg mode: a single mode/vector
    :type mode: :class:`.Mode`, :class:`.Vector`
    :arg modes: multiple modes
    :type modes: :class:`.ModeSet`, :class:`.ANM`, :class:`.GNM`, :class:`.PCA`
    """

    import matplotlib.pyplot as plt
    if not isinstance(mode, (Mode, Vector)):
        raise TypeError('mode must be Mode or Vector, not {0}'
                        .format(type(mode)))
    if not isinstance(modes, (NMA, ModeSet)):
        raise TypeError('modes must be NMA or ModeSet, not {0}'
                        .format(type(modes)))
    overlap = abs(calcOverlap(mode, modes))
    if isinstance(modes, NMA):
        arange = np.arange(0.5, len(modes)+0.5)
    else:
        arange = modes.getIndices() + 0.5
    show = plt.bar(arange, overlap, *args, **kwargs)
    plt.title('Overlap with {0}'.format(str(mode)))
    plt.xlabel('{0} mode index'.format(modes))
    plt.ylabel('Overlap')
    if SETTINGS['auto_show']:
        showFigure()
    return show
Example #14
 def _print_Assignment(self, expr):
     from sympy.functions.elementary.piecewise import Piecewise
     from sympy.matrices.expressions.matexpr import MatrixSymbol
     from sympy.tensor.indexed import IndexedBase
     lhs = expr.lhs
     rhs = expr.rhs
     # We special case assignments that take multiple lines
     if isinstance(expr.rhs, Piecewise):
         # Here we modify Piecewise so each expression is now
         # an Assignment, and then continue on the print.
         expressions = []
         conditions = []
         for (e, c) in rhs.args:
             expressions.append(Assignment(lhs, e))
             conditions.append(c)
         temp = Piecewise(*zip(expressions, conditions))
         return self._print(temp)
     elif isinstance(lhs, MatrixSymbol):
         # Here we form an Assignment for each element in the array,
         # printing each one.
         lines = []
         for (i, j) in self._traverse_matrix_indices(lhs):
             temp = Assignment(lhs[i, j], rhs[i, j])
             code0 = self._print(temp)
             lines.append(code0)
         return "\n".join(lines)
     elif self._settings.get("contract", False) and (lhs.has(IndexedBase) or
             rhs.has(IndexedBase)):
         # Here we check if there is looping to be done, and if so
         # print the required loops.
         return self._doprint_loops(rhs, lhs)
     else:
         lhs_code = self._print(lhs)
         rhs_code = self._print(rhs)
         return self._get_statement("%s = %s" % (lhs_code, rhs_code))
Example #15
 def GetRowLabelValue(self, row):
     label = self.df.index[row]
     if isinstance(label, datetime):
         return label.strftime("%Y-%m-%d %H:%M:%S")
     elif isinstance(label, date):
         return label.strftime("%Y-%m-%d")
     return str(label)
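Order matters in the chain above: datetime is a subclass of date, so testing date first would swallow datetimes and silently drop the time component. A quick check:

from datetime import date, datetime

isinstance(datetime.now(), date)    # True -- datetime subclasses date
isinstance(date.today(), datetime)  # False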
Example #16
def prim_ne(args):
    a0, a1 = args
    assert isinstance(a0, Integer)
    assert isinstance(a1, Integer)
    if a0.value != a1.value:
        return w_true
    return w_false
Example #17
    def __init__(self, data=None):
        """

        """
        if isinstance(data, (str, unicode)) and os.path.isfile(data):
            self.data = fabio.open(data).data.astype("float32")
        elif isinstance(data, fabio.fabioimage.fabioimage):
            self.data = data.data.astype("float32")
        else:
            try:
                self.data = data.astype("float32")
            except Exception as error:
                logger.error("Unable to understand this type of data %s: %s", data, error)
        self._bilin = Bilinear(self.data)
        self._blured_data = None
        self._median_data = None
        self._labeled_massif = None
        self._number_massif = None
        self._valley_size = None
        self._binned_data = None
        self.binning = None  # Binning is 2-list usually
        self._sem = threading.Semaphore()
        self._sem_label = threading.Semaphore()
        self._sem_binning = threading.Semaphore()
        self._sem_median = threading.Semaphore()
Example #18
	def _load(self, src, text_src):
		if isinstance(src, PredictionResult):
			result = src  # already a PredictionResult; use it directly
		elif isinstance(src, str):
			result = PredictionResult()
			result.load(src)
		else:
			raise Exception('"result" should be PredictionResult or string.')
	
		if not result.analyzable():
			raise ValueError('The given result is not analyzable.')
	
		# +++ Need to move to another place.			   
		#if self.model._hashcode != result.model_id:
		#	sys.stderr.write('Warning: model ID is different from that in the predicted result. Do you use a different model to analyze?\n')
	
		if text_src is None:
			self.filepath = result.text_src
		else:
			self.filepath = text_src
		self.extra_svm_files = result.extra_svm_files
		predicted_y = result.predicted_y
		self.acc = result.get_accuracy()
		decvals = result.decvals
		true_y = result.true_y
				   
		self.insts, self.true_labels, self.predict_labels = [], set(), set()
		for idx in range(len(true_y)):
			self.insts += [TextInstance(idx, true_y = true_y[idx], predicted_y = predicted_y[idx], decvals = list(decvals[idx]))]
			self.true_labels.add(true_y[idx])
			self.predict_labels.add(predicted_y[idx])
Example #19
def main_loop(expr):
    function = None
    todo = None
    i=0
    while True:
        #print "meh"
        jitdriver.jit_merge_point(function=function, todo=todo, expr=expr)
        if isinstance(expr, Substitution):
            expr = expr.apply()
        if isinstance(expr, Value) and todo is None:
            break
        #print expr, todo
        #import pdb; pdb.set_trace()
        print repr(expr)
        expr, todo = expr.step(todo)
        i=i+1
        print i
        function = None
        if isinstance(expr, Substitution):
            recursive = expr.recursive
            #print recursive
            function = expr.rhs
            #print repr(function)
            #print function.name
            if recursive:
                #print "can enter jit", function, expr
                jitdriver.can_enter_jit(function=function, todo=todo, expr=expr)
    return expr
Example #20
 def __init__(self, padding):
     self.padding = padding
     if isinstance(padding[0], int):
         self.padding = ((padding[0], padding[0]), padding[1])
     if isinstance(padding[1], int):
         self.padding = (self.padding[0], (padding[1], padding[1]))
     self.padding_value = 0
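The constructor above normalizes int-style padding into a pair of (before, after) tuples per axis. A quick trace, assuming the __init__ belongs to a class named, say, Pad2D (hypothetical):

class Pad2D:
    def __init__(self, padding):
        self.padding = padding
        if isinstance(padding[0], int):
            self.padding = ((padding[0], padding[0]), padding[1])
        if isinstance(padding[1], int):
            self.padding = (self.padding[0], (padding[1], padding[1]))
        self.padding_value = 0

assert Pad2D((1, 2)).padding == ((1, 1), (2, 2))
assert Pad2D(((0, 1), 2)).padding == ((0, 1), (2, 2))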
Example #21
def args_to_matching_eager(l, ctx, default_dtype=None):
  """Convert sequence `l` to eager same-type Tensors."""
  EagerTensor = ops.EagerTensor  # pylint: disable=invalid-name
  if all(isinstance(x, EagerTensor) for x in l):
    return l[0].dtype, l
  # TODO(josh11b): Could we do a better job if we also passed in the
  # allowed dtypes when that was known?

  # Is some input already a Tensor with a dtype?
  dtype = None
  for t in l:
    if isinstance(t, EagerTensor):
      dtype = t.dtype
      break

  internal_convert_to_tensor = ops.internal_convert_to_tensor
  if dtype is None:
    # Infer a dtype based on the first value, and use that dtype for the
    # remaining values.
    ret = []
    for t in l:
      ret.append(internal_convert_to_tensor(
          t, dtype, preferred_dtype=default_dtype, ctx=ctx))
      if dtype is None:
        dtype = ret[-1].dtype
  else:
    ret = [internal_convert_to_tensor(t, dtype, ctx=ctx) for t in l]

  return dtype, ret
Example #22
    def union(self, other):
        """
        Specialized union for TimedeltaIndex objects. If combining
        overlapping ranges with the same DateOffset, this will be much
        faster than Index.union.

        Parameters
        ----------
        other : TimedeltaIndex or array-like

        Returns
        -------
        y : Index or TimedeltaIndex
        """
        self._assert_can_do_setop(other)
        if not isinstance(other, TimedeltaIndex):
            try:
                other = TimedeltaIndex(other)
            except (TypeError, ValueError):
                pass
        this, other = self, other

        if this._can_fast_union(other):
            return this._fast_union(other)
        else:
            result = Index.union(this, other)
            if isinstance(result, TimedeltaIndex):
                if result.freq is None:
                    result.freq = to_offset(result.inferred_freq)
            return result
Example #23
    def _filter_constraints(self, constraints):
        """Separate the constraints by type.

        Parameters
        ----------
        constraints : list
            A list of constraints.

        Returns
        -------
        dict
            A map of type key to an ordered set of constraints.
        """
        constr_map = {s.EQ: [],
                      s.LEQ: [],
                      s.SOC: [],
                      s.SOC_EW: [],
                      s.SDP: [],
                      s.EXP: []}
        for c in constraints:
            if isinstance(c, lo.LinEqConstr):
                constr_map[s.EQ].append(c)
            elif isinstance(c, lo.LinLeqConstr):
                constr_map[s.LEQ].append(c)
            elif isinstance(c, SOC):
                constr_map[s.SOC].append(c)
            elif isinstance(c, SDP):
                constr_map[s.SDP].append(c)
            elif isinstance(c, ExpCone):
                constr_map[s.EXP].append(c)
        return constr_map
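One caveat with isinstance chains like this: a subclass matches its parent's branch first, so the order of the elifs matters when the constraint classes are related. A minimal illustration (toy classes, not the cvxpy ones):

class SOC: pass
class SOC_EW(SOC): pass

c = SOC_EW()
if isinstance(c, SOC):        # matches first, even though c is a SOC_EW
    bucket = 'SOC'
elif isinstance(c, SOC_EW):   # unreachable in this order
    bucket = 'SOC_EW'
assert bucket == 'SOC'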
Example #24
def _find_built_slot_operator(dep_struct):
	for x in dep_struct:
		if isinstance(x, list):
			for atom in _find_built_slot_operator(x):
				yield atom
		elif isinstance(x, Atom) and x.slot_operator_built:
			yield x
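On Python 3.3+ the recursive inner loop can be collapsed with yield from, with identical behavior:

def _find_built_slot_operator(dep_struct):
	for x in dep_struct:
		if isinstance(x, list):
			yield from _find_built_slot_operator(x)
		elif isinstance(x, Atom) and x.slot_operator_built:
			yield x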
Example #25
    def __init__(self, parent, name=None):
        QTreeWidgetItem.__init__(self)
        
        self.filterList = ('mmp', 'MMP')
        
        if isinstance(parent, QListView):
            self.p = None
            if name:
                self.f = name
            else:
                self.f = '/'
        else:
            self.p = parent
            self.f = name
            
        self.readable = QDir(self.fullName()).isReadable()

        if not self.readable:
            self.setIcon(0, folderLockedIcon)
        else:
            self.setIcon(0, folderClosedIcon)
        if name is not None:
            self.setText(1, name)
        if isinstance(parent, QTreeWidget):
            parent.addTopLevelItem(self)
        else:
            parent.addChild(self)
Example #26
def serialize(value, prefix=None, idx=None):

    serialized = {}

    if isinstance(value, dict):
        for k, v in value.items():
            if isinstance(v, (dict, list, tuple)):
                serialized.update(serialize(v, k))
            else:
                key = ''.join([
                    prefix or '',
                    '[%s]' % k if prefix is not None else k,
                    '[%s]' % idx if idx is not None else '',
                ])
                serialized.update({key: get_as_str(v)})

    elif isinstance(value, (list, tuple)):
        for i, v in enumerate(value):
            serialized.update(serialize(v, prefix, i))

    else:
        if prefix is not None and idx is not None:
            key = prefix + '[' + str(idx) + ']'
            serialized.update({key: get_as_str(value)})
        else:
            raise TypeError("only hash or arrays are allowed as value")

    return serialized
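Tracing the function above on a small input shows the bracketed key scheme it produces (assuming get_as_str simply stringifies scalars):

serialize({'name': 'ann', 'tags': ['a', 'b']})
# -> {'name': 'ann', 'tags[0]': 'a', 'tags[1]': 'b'}

serialize('oops')
# -> TypeError: only hash or arrays are allowed as value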
Example #27
 def convert(ctx, x):
     if isinstance(x, (ctx.mpf, ctx.mpc)):
         return x
     if isinstance(x, ctx._constant):
         return +x
     if isinstance(x, complex) or hasattr(x, "_mpc_"):
         re = ctx.convert(x.real)
         im = ctx.convert(x.imag)
         return ctx.mpc(re,im)
     if isinstance(x, basestring):
         v = mpi_from_str(x, ctx.prec)
         return ctx.make_mpf(v)
     if hasattr(x, "_mpi_"):
         a, b = x._mpi_
     else:
         try:
             a, b = x
         except (TypeError, ValueError):
             a = b = x
         if hasattr(a, "_mpi_"):
             a = a._mpi_[0]
         else:
             a = convert_mpf_(a, ctx.prec, round_floor)
         if hasattr(b, "_mpi_"):
             b = b._mpi_[1]
         else:
             b = convert_mpf_(b, ctx.prec, round_ceiling)
     if a == fnan or b == fnan:
         a = fninf
         b = finf
     assert mpf_le(a, b), "endpoints must be properly ordered"
     return ctx.make_mpf((a, b))
Example #28
    def clean(self):
        data = super(WorkflowStateActionDynamicForm, self).clean()

        # Consolidate the dynamic fields into a single JSON field called
        # 'action_data'.
        action_data = {}

        for field_name, field_data in self.schema['fields'].items():
            action_data[field_name] = data.pop(
                field_name, field_data.get('default', None)
            )
            if isinstance(action_data[field_name], QuerySet):
                # Flatten the queryset to a list of ids
                action_data[field_name] = list(
                    action_data[field_name].values_list('id', flat=True)
                )
            elif isinstance(action_data[field_name], Model):
                # Store only the ID of a model instance
                action_data[field_name] = action_data[field_name].pk

        data['action_data'] = action_data
        data = import_string(self.action_path).clean(
            form_data=data, request=self.request
        )
        data['action_data'] = json.dumps(action_data)

        return data
Example #29
 def test_connect_timeout(self):
     try:
         requests.get(TARPIT, timeout=(0.1, None))
         assert False, "The connect() request should time out."
     except ConnectTimeout as e:
         assert isinstance(e, ConnectionError)
         assert isinstance(e, Timeout)
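Both assertions hold because requests.exceptions.ConnectTimeout multiply inherits from ConnectionError and Timeout, so callers can catch it at either level of granularity:

from requests.exceptions import ConnectTimeout, ConnectionError, Timeout

assert issubclass(ConnectTimeout, ConnectionError)
assert issubclass(ConnectTimeout, Timeout)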
Example #30
    def append(self, other):
        """
        Append a collection of Index options together

        Parameters
        ----------
        other : Index or list/tuple of indices

        Returns
        -------
        appended : Index
        """
        name = self.name
        to_concat = [self]

        if isinstance(other, (list, tuple)):
            to_concat = to_concat + list(other)
        else:
            to_concat.append(other)

        for obj in to_concat:
            if isinstance(obj, Index) and obj.name != name:
                name = None
                break

        to_concat = self._ensure_compat_concat(to_concat)
        return Index(_concat._concat_compat(to_concat), name=name)
Example #31
 if val is None:
     val = False
 elif val == 'xtal':
     val = True
 elif val == 'dock':
     val = os.path.abspath(os.path.join(args_in.dock6, \
                           'ancg-'+labels['complex']+'.mol2.gz'))
     if (not nonzero(val)):
         print 'No dock6 output in ' + val
         job_status['no_dock6'] += 1
         passError = True
         continue  # Dock6 files are missing
 elif key == 'frcmodList':
     val = [val]
 # Actual strings to pass
 if isinstance(val, str):
     interactive_to_pass.append("%s = '%s'" % (key, val))
     terminal_to_pass.append("--%s %s" % (key, val))
 elif isinstance(val, bool):
     if val:
         interactive_to_pass.append("%s = %s" % (key, val))
         terminal_to_pass.append("--%s" % (key))
 elif isinstance(val, int):
     interactive_to_pass.append("%s = %d" % (key, val))
     terminal_to_pass.append("--%s %d" % (key, val))
 elif isinstance(val, float):
     interactive_to_pass.append("%s = %.5f" % (key, val))
     terminal_to_pass.append("--%s %.5f" % (key, val))
 elif isinstance(val, list):
     if isinstance(val[0], str):
         interactive_to_pass.append(
Example #32
 def test_storage(self):
     a = np.rec.array(variant_table_data, dtype=variant_table_dtype)
     vt = self.setup_instance(a)
     assert isinstance(vt.values, h5py.Group)
Example #33
 def test_storage(self):
     g = self.setup_instance(np.array(diploid_genotype_ac_data))
     # default is bcolz mem
     assert isinstance(g.values, bcolz.carray)
     assert g.values.rootdir is None, g.values.rootdir
Example #34
 def test_storage(self):
     g = self.setup_instance(np.array(diploid_genotype_data))
     assert isinstance(g.values, h5py.Dataset)
     assert g.values.compression == 'lzf'
Example #35
 def test_storage(self):
     g = self.setup_instance(np.array(diploid_genotype_data))
     assert isinstance(g.values, zarr.Array)
     assert isinstance(g.values.store, zarr.DirectoryStore)
Example #36
 def test_storage(self):
     g = self.setup_instance(np.array(diploid_genotype_data))
     assert isinstance(g.values, bcolz.carray)
     eq('zlib', g.values.cparams.cname)
     eq(1, g.values.cparams.clevel)
Example #37
 def test_storage(self):
     g = self.setup_instance(np.array(diploid_genotype_data))
     assert isinstance(g.values, bcolz.carray)
     assert g.values.rootdir is not None
Example #38
pyarrow_path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                            "pyarrow_files")
sys.path.insert(0, pyarrow_path)

# See https://github.com/ray-project/ray/issues/131.
helpful_message = """

If you are using Anaconda, try fixing this problem by running:

    conda install libgcc
"""

try:
    import pyarrow  # noqa: F401
except ImportError as e:
    if ((hasattr(e, "msg") and isinstance(e.msg, str)
         and ("libstdc++" in e.msg or "CXX" in e.msg))):
        # This code path should be taken with Python 3.
        e.msg += helpful_message
    elif (hasattr(e, "message") and isinstance(e.message, str)
          and ("libstdc++" in e.message or "CXX" in e.message)):
        # This code path should be taken with Python 2.
        condition = (hasattr(e, "args") and isinstance(e.args, tuple)
                     and len(e.args) == 1 and isinstance(e.args[0], str))
        if condition:
            e.args = (e.args[0] + helpful_message, )
        else:
            if not hasattr(e, "args"):
                e.args = ()
            elif not isinstance(e.args, tuple):
                e.args = (e.args, )
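The two branches exist because Python 3's ImportError carries a msg attribute while Python 2 exceptions exposed message; a quick way to see which one is present (module name hypothetical):

try:
    import no_such_module_xyz  # hypothetical, guaranteed to fail
except ImportError as e:
    print(hasattr(e, "msg"))      # True on Python 3
    print(hasattr(e, "message"))  # True on Python 2 only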
Example #39
    def _run_op(self, op):
        if isinstance(op, pipeline_ops_iothub_http.MethodInvokeOperation):
            logger.debug(
                "{}({}): Translating Method Invoke Operation for HTTP.".format(
                    self.name, op.name))
            query_params = "api-version={apiVersion}".format(
                apiVersion=pkg_constant.IOTHUB_API_VERSION)
            # (the path helper accounts for the module id if the target is a module)

            body = json.dumps(op.method_params)
            path = http_path_iothub.get_method_invoke_path(
                op.target_device_id, op.target_module_id)
            # NOTE: we do not add the sas Authorization header here. Instead we add it later on in
            # the HTTPTransportStage
            x_ms_edge_string = "{deviceId}/{moduleId}".format(
                deviceId=self.pipeline_root.pipeline_configuration.device_id,
                moduleId=self.pipeline_root.pipeline_configuration.module_id,
            )  # these are the identifiers of the current module
            user_agent_string = urllib.parse.quote_plus(
                user_agent.get_iothub_user_agent() +
                str(self.pipeline_root.pipeline_configuration.product_info))
            # Method Invoke must be addressed to the gateway hostname because it is an Edge op
            headers = {
                "Host":
                self.pipeline_root.pipeline_configuration.gateway_hostname,
                "Content-Type": "application/json",
                "Content-Length": str(len(str(body))),
                "x-ms-edge-moduleId": x_ms_edge_string,
                "User-Agent": user_agent_string,
            }
            op_waiting_for_response = op

            def on_request_response(op, error):
                logger.debug(
                    "{}({}): Got response for MethodInvokeOperation".format(
                        self.name, op.name))
                error = map_http_error(error=error, http_op=op)
                if not error:
                    op_waiting_for_response.method_response = json.loads(
                        op.response_body)
                op_waiting_for_response.complete(error=error)

            self.send_op_down(
                pipeline_ops_http.HTTPRequestAndResponseOperation(
                    method="POST",
                    path=path,
                    headers=headers,
                    body=body,
                    query_params=query_params,
                    callback=on_request_response,
                ))

        elif isinstance(op, pipeline_ops_iothub_http.GetStorageInfoOperation):
            logger.debug(
                "{}({}): Translating Get Storage Info Operation to HTTP.".
                format(self.name, op.name))
            query_params = "api-version={apiVersion}".format(
                apiVersion=pkg_constant.IOTHUB_API_VERSION)
            path = http_path_iothub.get_storage_info_for_blob_path(
                self.pipeline_root.pipeline_configuration.device_id)
            body = json.dumps({"blobName": op.blob_name})
            user_agent_string = urllib.parse.quote_plus(
                user_agent.get_iothub_user_agent() +
                str(self.pipeline_root.pipeline_configuration.product_info))
            headers = {
                "Host": self.pipeline_root.pipeline_configuration.hostname,
                "Accept": "application/json",
                "Content-Type": "application/json",
                "Content-Length": str(len(str(body))),
                "User-Agent": user_agent_string,
            }

            op_waiting_for_response = op

            def on_request_response(op, error):
                logger.debug(
                    "{}({}): Got response for GetStorageInfoOperation".format(
                        self.name, op.name))
                error = map_http_error(error=error, http_op=op)
                if not error:
                    op_waiting_for_response.storage_info = json.loads(
                        op.response_body)
                op_waiting_for_response.complete(error=error)

            self.send_op_down(
                pipeline_ops_http.HTTPRequestAndResponseOperation(
                    method="POST",
                    path=path,
                    headers=headers,
                    body=body,
                    query_params=query_params,
                    callback=on_request_response,
                ))

        elif isinstance(
                op, pipeline_ops_iothub_http.NotifyBlobUploadStatusOperation):
            logger.debug(
                "{}({}): Translating Notify Blob Upload Status Operation to HTTP.".
                format(self.name, op.name))
            query_params = "api-version={apiVersion}".format(
                apiVersion=pkg_constant.IOTHUB_API_VERSION)
            path = http_path_iothub.get_notify_blob_upload_status_path(
                self.pipeline_root.pipeline_configuration.device_id)
            body = json.dumps({
                "correlationId": op.correlation_id,
                "isSuccess": op.is_success,
                "statusCode": op.request_status_code,
                "statusDescription": op.status_description,
            })
            user_agent_string = urllib.parse.quote_plus(
                user_agent.get_iothub_user_agent() +
                str(self.pipeline_root.pipeline_configuration.product_info))

            # NOTE we do not add the sas Authorization header here. Instead we add it later on in
            # the HTTPTransportStage
            headers = {
                "Host": self.pipeline_root.pipeline_configuration.hostname,
                "Content-Type": "application/json; charset=utf-8",
                "Content-Length": str(len(str(body))),
                "User-Agent": user_agent_string,
            }
            op_waiting_for_response = op

            def on_request_response(op, error):
                logger.debug(
                    "{}({}): Got response for NotifyBlobUploadStatusOperation".format(
                        self.name, op.name))
                error = map_http_error(error=error, http_op=op)
                op_waiting_for_response.complete(error=error)

            self.send_op_down(
                pipeline_ops_http.HTTPRequestAndResponseOperation(
                    method="POST",
                    path=path,
                    headers=headers,
                    body=body,
                    query_params=query_params,
                    callback=on_request_response,
                ))

        else:
            # All other operations get passed down
            self.send_op_down(op)
Example #40
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, GETARPaymentTypewithSuccess):
            return False

        return self.__dict__ == other.__dict__
Example #41
    def delete_all_contexts(
            self,
            request: context.DeleteAllContextsRequest = None,
            *,
            parent: str = None,
            retry: retries.Retry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
    ) -> None:
        r"""Deletes all active contexts in the specified session.

        Args:
            request (:class:`~.context.DeleteAllContextsRequest`):
                The request object. The request message for
                [Contexts.DeleteAllContexts][google.cloud.dialogflow.v2.Contexts.DeleteAllContexts].
            parent (:class:`str`):
                Required. The name of the session to delete all contexts
                from. Format:
                ``projects/<Project ID>/agent/sessions/<Session ID>`` or
                ``projects/<Project ID>/agent/environments/<Environment ID>/users/<User ID>/sessions/<Session ID>``.
                If ``Environment ID`` is not specified we assume default
                'draft' environment. If ``User ID`` is not specified, we
                assume default '-' user.
                This corresponds to the ``parent`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.

            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([parent])
        if request is not None and has_flattened_params:
            raise ValueError("If the `request` argument is set, then none of "
                             "the individual field arguments should be set.")

        # Minor optimization to avoid making a copy if the user passes
        # in a context.DeleteAllContextsRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, context.DeleteAllContextsRequest):
            request = context.DeleteAllContextsRequest(request)

            # If we have keyword arguments corresponding to fields on the
            # request, apply these.

            if parent is not None:
                request.parent = parent

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[
            self._transport.delete_all_contexts]

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (gapic_v1.routing_header.to_grpc_metadata(
            (("parent", request.parent), )), )

        # Send the request.
        rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )
Example #42
def settings():
    '''Read tests/config.yml and return AttrDict simulating Reactor.settings'''
    with open(os.path.join(PWD, 'tests', 'config.yml'), "r") as conf:
        y = yaml.safe_load(conf)
        assert isinstance(y, dict)
        return AttrDict(y)
Example #43
    def __init__(
        self,
        *,
        credentials: Optional[credentials.Credentials] = None,
        transport: Union[str, ContextsTransport, None] = None,
        client_options: Optional[client_options_lib.ClientOptions] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiate the contexts client.

        Args:
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            transport (Union[str, ~.ContextsTransport]): The
                transport to use. If set to None, a transport is chosen
                automatically.
            client_options (client_options_lib.ClientOptions): Custom options for the
                client. It won't take effect if a ``transport`` instance is provided.
                (1) The ``api_endpoint`` property can be used to override the
                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
                environment variable can also be used to override the endpoint:
                "always" (always use the default mTLS endpoint), "never" (always
                use the default regular endpoint) and "auto" (auto switch to the
                default mTLS endpoint if client certificate is present, this is
                the default value). However, the ``api_endpoint`` property takes
                precedence if provided.
                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
                is "true", then the ``client_cert_source`` property can be used
                to provide client certificate for mutual TLS transport. If
                not provided, the default SSL client certificate will be used if
                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
                set, no client certificate will be used.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.

        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        if isinstance(client_options, dict):
            client_options = client_options_lib.from_dict(client_options)
        if client_options is None:
            client_options = client_options_lib.ClientOptions()

        # Create SSL credentials for mutual TLS if needed.
        use_client_cert = bool(
            util.strtobool(
                os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")))

        ssl_credentials = None
        is_mtls = False
        if use_client_cert:
            if client_options.client_cert_source:
                import grpc  # type: ignore

                cert, key = client_options.client_cert_source()
                ssl_credentials = grpc.ssl_channel_credentials(
                    certificate_chain=cert, private_key=key)
                is_mtls = True
            else:
                creds = SslCredentials()
                is_mtls = creds.is_mtls
                ssl_credentials = creds.ssl_credentials if is_mtls else None

        # Figure out which api endpoint to use.
        if client_options.api_endpoint is not None:
            api_endpoint = client_options.api_endpoint
        else:
            use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
            if use_mtls_env == "never":
                api_endpoint = self.DEFAULT_ENDPOINT
            elif use_mtls_env == "always":
                api_endpoint = self.DEFAULT_MTLS_ENDPOINT
            elif use_mtls_env == "auto":
                api_endpoint = (self.DEFAULT_MTLS_ENDPOINT
                                if is_mtls else self.DEFAULT_ENDPOINT)
            else:
                raise MutualTLSChannelError(
                    "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
                )

        # Save or instantiate the transport.
        # Ordinarily, we provide the transport, but allowing a custom transport
        # instance provides an extensibility point for unusual situations.
        if isinstance(transport, ContextsTransport):
            # transport is a ContextsTransport instance.
            if credentials or client_options.credentials_file:
                raise ValueError("When providing a transport instance, "
                                 "provide its credentials directly.")
            if client_options.scopes:
                raise ValueError("When providing a transport instance, "
                                 "provide its scopes directly.")
            self._transport = transport
        else:
            Transport = type(self).get_transport_class(transport)
            self._transport = Transport(
                credentials=credentials,
                credentials_file=client_options.credentials_file,
                host=api_endpoint,
                scopes=client_options.scopes,
                ssl_channel_credentials=ssl_credentials,
                quota_project_id=client_options.quota_project_id,
                client_info=client_info,
            )
Example #44
 def __new__(cls, dtype):
   if isinstance(dtype, str) and dtype == 'bfloat16' or dtype == _bfloat16_dtype:
     if _bfloat16_dtype not in cls._finfo_cache:
       cls._finfo_cache[_bfloat16_dtype] = cls._bfloat16_finfo()
     return cls._finfo_cache[_bfloat16_dtype]
   return super().__new__(cls, dtype)
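The compound condition parses as (isinstance(...) and dtype == 'bfloat16') or dtype == _bfloat16_dtype, because and binds tighter than or; an equivalent form with the precedence made explicit:

if (isinstance(dtype, str) and dtype == 'bfloat16') or dtype == _bfloat16_dtype:
    ...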
Example #45
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ListRulesForbiddenBody):
            return False

        return self.to_dict() == other.to_dict()
Example #46
    def update_context(
            self,
            request: gcd_context.UpdateContextRequest = None,
            *,
            context: gcd_context.Context = None,
            update_mask: field_mask.FieldMask = None,
            retry: retries.Retry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
    ) -> gcd_context.Context:
        r"""Updates the specified context.

        Args:
            request (:class:`~.gcd_context.UpdateContextRequest`):
                The request object. The request message for
                [Contexts.UpdateContext][google.cloud.dialogflow.v2.Contexts.UpdateContext].
            context (:class:`~.gcd_context.Context`):
                Required. The context to update.
                This corresponds to the ``context`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            update_mask (:class:`~.field_mask.FieldMask`):
                Optional. The mask to control which
                fields get updated.
                This corresponds to the ``update_mask`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.

            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.gcd_context.Context:
                Dialogflow contexts are similar to natural language
                context. If a person says to you "they are orange", you
                need context in order to understand what "they" is
                referring to. Similarly, for Dialogflow to handle an
                end-user expression like that, it needs to be provided
                with context in order to correctly match an intent.

                Using contexts, you can control the flow of a
                conversation. You can configure contexts for an intent
                by setting input and output contexts, which are
                identified by string names. When an intent is matched,
                any configured output contexts for that intent become
                active. While any contexts are active, Dialogflow is
                more likely to match intents that are configured with
                input contexts that correspond to the currently active
                contexts.

                For more information about context, see the `Contexts
                guide <https://cloud.google.com/dialogflow/docs/contexts-overview>`__.

        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([context, update_mask])
        if request is not None and has_flattened_params:
            raise ValueError("If the `request` argument is set, then none of "
                             "the individual field arguments should be set.")

        # Minor optimization to avoid making a copy if the user passes
        # in a gcd_context.UpdateContextRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, gcd_context.UpdateContextRequest):
            request = gcd_context.UpdateContextRequest(request)

            # If we have keyword arguments corresponding to fields on the
            # request, apply these.

            if context is not None:
                request.context = context
            if update_mask is not None:
                request.update_mask = update_mask

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.update_context]

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (gapic_v1.routing_header.to_grpc_metadata(
            (("context.name", request.context.name), )), )

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response
Example #47
def main():
    # Output variables
    changed = False
    created_snapshots = []

    # Input
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        tag=dict(required=False, default=AUTOMATION_TAG),
        grace=dict(required=False, default=GRACE_MINUTES, )
    ))
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
    automation_tag = module.params.get('tag', AUTOMATION_TAG)
    grace_minutes = module.params.get('grace', GRACE_MINUTES)
    if grace_minutes.isdigit():
        grace_minutes = int(grace_minutes)
    else:
        module.fail_json(msg='"grace" should be an integer value')


    # Get all the times of the actions we should trigger
    now = datetime.utcnow()
    times = []
    for minute in range(0, grace_minutes):
        action_time = now - timedelta(minutes=minute)
        times.append(action_time)

    # Get all the snapshots and instances with an automation tag
    conn = ec2_connect(module)
    filters = {
        'tag-key': automation_tag
    }
    instances = conn.get_only_instances(filters=filters)
    snapshots = conn.get_all_snapshots(filters=filters)
    skipped_instances = []

    # We use the description to check if a snapshot exists. Make a list for easy access
    snapshot_descriptions = [x.description for x in snapshots]

    snapshot_configs = {}
    for instance in instances:
        # Get the automation tag (it should exist, because we filtered on it)
        automation = json.loads(instance.tags[automation_tag])

        try:
            snapshot_times = automation['sn']
        except KeyError as e:
            skipped_instances.append({ 'instance_id': instance.id, 'reason': 'no sn key'})
            # Go to the next iteration
            continue

        if not isinstance(snapshot_times, list):
            snapshot_times = [snapshot_times]

        make_snapshot = False
        trigger_datetime = None
        for trigger_time in times:
            for snapshot_time in snapshot_times:
                if '%(h)02d%(m)02d' % {'h': trigger_time.hour, 'm': trigger_time.minute} == snapshot_time:
                    make_snapshot = True
                    trigger_datetime = trigger_time
                    break  # Exit snapshot_times loop
                elif '%(h)d%(m)02d' % {'h': trigger_time.hour, 'm': trigger_time.minute} == snapshot_time:
                    make_snapshot = True
                    trigger_datetime = trigger_time
                    break  # Exit snapshot_times loop
            if make_snapshot:
                break  # Exit times loop

        if not make_snapshot:
            skipped_instances.append({ 'instance_id': instance.id, 'reason': 'not the right time'})
            continue  # Try again with the next instance

        for dev, mapping_type in instance.block_device_mapping.items():
            snapshot_config = {
                'instance_id': instance.id,
                'volume_id': mapping_type.volume_id,
                'device': dev,
                'time': trigger_datetime,
            }

            snapshot_configs[mapping_type.volume_id] = snapshot_config

    for volume_id, config in snapshot_configs.items():
        trigger_date_string = config['time'].strftime('%Y-%m-%dT%H:%M')
        instance_id = config['instance_id']
        device = config['device']
        description = 'cat_sn_%(id)s_%(date)s' % {'id': volume_id, 'date': trigger_date_string}

        if description in snapshot_descriptions:
            continue

        snapshot_name = '%(inst)s-%(vol)s-%(date)s' % {
            'inst': instance_id, 'vol': volume_id, 'date': datetime.utcnow().isoformat()
        }

        generated_tag = {'prune': True, 'map': {'i': instance_id, 'd': device, 'v': volume_id}}
        if module.check_mode:
            snapshot_id = None
        else:
            snapshot = conn.create_snapshot(volume_id, description=description)
            conn.create_tags(snapshot.id, {
                'Name': snapshot_name,
                automation_tag: json.dumps(generated_tag)
            })
            snapshot_id = snapshot.id

        changed = True
        created_snapshots.append({'snapshot_id': snapshot_id, 'description': description, 'tag': generated_tag})

    module.exit_json(changed=changed, snapshots=created_snapshots, skipped_instances=skipped_instances)
Example #48
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, ListRulesForbiddenBody):
            return True

        return self.to_dict() != other.to_dict()
Example #49
async def resolver(event: NewMessage.Event) -> None:
    """Resolve an invite link or a username."""
    link = event.matches[0].group(1)
    if not link:
        await event.answer("`Resolved the void.`")
        return
    text = f"`Couldn't resolve:` {link}"
    for link_type, pattern in invite_links.items():
        match = pattern.match(link)
        if match is not None:
            valid = match.group(1)
            if link_type == "private":
                creatorid, cid, _ = utils.resolve_invite_link(valid)
                if not cid:
                    await event.answer(text)
                    return
                try:
                    creator = await client.get_entity(creatorid)
                    creator = await get_chat_link(creator)
                except (TypeError, ValueError):
                    creator = f"`{creatorid}`"
                text = f"**Link:** {link}"
                text += f"\n**Link creator:** {creator}\n**ID:** `{cid}`"
                try:
                    chat = await client.get_entity(cid)
                except (TypeError, ValueError):
                    break

                if isinstance(chat, types.Channel):
                    result = await client(
                        functions.channels.GetFullChannelRequest(channel=chat))
                    text += await misc.resolve_channel(event.client, result)
                elif isinstance(chat, types.Chat):
                    result = await client(
                        functions.messages.GetFullChatRequest(chat_id=chat))
                    text += await misc.resolve_chat(event.client, result)
                break
            else:
                try:
                    chat = await client.get_entity(valid)
                except (TypeError, ValueError):
                    continue

                if isinstance(chat, types.User):
                    text = f"**ID:** `{chat.id}`"
                    if chat.username:
                        text += f"\n**Username:** @{chat.username}"
                    text += f"\n{await get_chat_link(chat)}"

                if isinstance(chat, types.ChatForbidden):
                    text += f"\n`Not allowed to view {chat.title}.`"
                elif isinstance(chat, types.ChatEmpty):
                    text += "\n`The chat is empty.`"
                elif isinstance(chat, types.Chat):
                    text = f"**Chat:** @{valid}"
                    result = await client(
                        functions.messages.GetFullChatRequest(chat_id=chat))
                    text += await misc.resolve_chat(event.client, result)

                if isinstance(chat, types.ChannelForbidden):
                    text += f"\n`Not allowed to view {chat.title}.`"
                elif isinstance(chat, types.Channel):
                    text = f"**Channel:** @{valid}"
                    result = await client(
                        functions.channels.GetFullChannelRequest(channel=chat))
                    text += await misc.resolve_channel(event.client, result)
    await event.answer(text, link_preview=False)
Example #50
0
    def _real_extract(self, url):
        url, data = unsmuggle_url(url)
        headers = std_headers
        if data is not None:
            headers = headers.copy()
            headers.update(data)
        if 'Referer' not in headers:
            headers['Referer'] = url

        # Extract ID from URL
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        orig_url = url
        if mobj.group('pro') or mobj.group('player'):
            url = 'http://player.vimeo.com/video/' + video_id

        # Retrieve video webpage to extract further information
        request = compat_urllib_request.Request(url, None, headers)
        try:
            webpage = self._download_webpage(request, video_id)
        except ExtractorError as ee:
            if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 403:
                errmsg = ee.cause.read()
                if b'Because of its privacy settings, this video cannot be played here' in errmsg:
                    raise ExtractorError(
                        'Cannot download embed-only video without embedding '
                        'URL. Please call ananse with the URL of the page '
                        'that embeds this video.',
                        expected=True)
            raise

        # Now we begin extracting as much information as we can from what we
        # retrieved. First we extract the information common to all extractors,
        # and later we extract those that are Vimeo specific.
        self.report_extraction(video_id)

        # Extract the config JSON
        try:
            try:
                config_url = self._html_search_regex(
                    r' data-config-url="(.+?)"', webpage, 'config URL')
                config_json = self._download_webpage(config_url, video_id)
                config = json.loads(config_json)
            except RegexNotFoundError:
                # For pro videos or player.vimeo.com URLs we try to find out
                # which variable the config dict is assigned to
                m_variable_name = re.search(r'(\w)\.video\.id', webpage)
                if m_variable_name is not None:
                    config_re = r'%s=({[^}].+?});' % re.escape(
                        m_variable_name.group(1))
                else:
                    config_re = [
                        r' = {config:({.+?}),assets:', r'(?:[abc])=({.+?});'
                    ]
                config = self._search_regex(config_re,
                                            webpage,
                                            'info section',
                                            flags=re.DOTALL)
                config = json.loads(config)
        except Exception as e:
            if re.search(
                    'The creator of this video has not given you permission to embed it on this domain.',
                    webpage):
                raise ExtractorError(
                    'The author has restricted the access to this video, try with the "--referer" option'
                )

            if re.search('<form[^>]+?id="pw_form"', webpage) is not None:
                self._verify_video_password(url, video_id, webpage)
                return self._real_extract(url)
            else:
                raise ExtractorError('Unable to extract info section', cause=e)
        else:
            if config.get('view') == 4:
                config = self._verify_player_video_password(url, video_id)

        # Extract title
        video_title = config["video"]["title"]

        # Extract uploader and uploader_id
        video_uploader = config["video"]["owner"]["name"]
        video_uploader_id = config["video"]["owner"]["url"].split(
            '/')[-1] if config["video"]["owner"]["url"] else None

        # Extract video thumbnail
        video_thumbnail = config["video"].get("thumbnail")
        if video_thumbnail is None:
            video_thumbs = config["video"].get("thumbs")
            if video_thumbs and isinstance(video_thumbs, dict):
                _, video_thumbnail = sorted(
                    (int(width if width.isdigit() else 0), t_url)
                    for (width, t_url) in video_thumbs.items())[-1]

        # Extract video description

        video_description = self._html_search_regex(
            r'(?s)<div\s+class="[^"]*description[^"]*"[^>]*>(.*?)</div>',
            webpage,
            'description',
            default=None)
        if not video_description:
            video_description = self._html_search_meta('description',
                                                       webpage,
                                                       default=None)
        if not video_description and mobj.group('pro'):
            orig_webpage = self._download_webpage(
                orig_url,
                video_id,
                note='Downloading webpage for description',
                fatal=False)
            if orig_webpage:
                video_description = self._html_search_meta('description',
                                                           orig_webpage,
                                                           default=None)
        if not video_description and not mobj.group('player'):
            self._downloader.report_warning('Cannot find video description')

        # Extract video duration
        video_duration = int_or_none(config["video"].get("duration"))

        # Extract upload date
        video_upload_date = None
        mobj = re.search(
            r'<meta itemprop="dateCreated" content="(\d{4})-(\d{2})-(\d{2})T',
            webpage)
        if mobj is not None:
            video_upload_date = mobj.group(1) + mobj.group(2) + mobj.group(3)

        try:
            view_count = int(
                self._search_regex(r'UserPlays:(\d+)', webpage, 'view count'))
            like_count = int(
                self._search_regex(r'UserLikes:(\d+)', webpage, 'like count'))
            comment_count = int(
                self._search_regex(r'UserComments:(\d+)', webpage,
                                   'comment count'))
        except RegexNotFoundError:
            # This info is only available in vimeo.com/{id} urls
            view_count = None
            like_count = None
            comment_count = None

        # Vimeo specific: extract request signature and timestamp
        sig = config['request']['signature']
        timestamp = config['request']['timestamp']

        # Vimeo specific: extract video codec and quality information
        # First consider quality, then codecs, then take everything
        codecs = [('vp6', 'flv'), ('vp8', 'flv'), ('h264', 'mp4')]
        files = {'hd': [], 'sd': [], 'other': []}
        config_files = config["video"].get("files") or config["request"].get(
            "files")
        for codec_name, codec_extension in codecs:
            for quality in config_files.get(codec_name, []):
                format_id = '-'.join((codec_name, quality)).lower()
                key = quality if quality in files else 'other'
                video_url = None
                if isinstance(config_files[codec_name], dict):
                    file_info = config_files[codec_name][quality]
                    video_url = file_info.get('url')
                else:
                    file_info = {}
                if video_url is None:
                    video_url = "http://player.vimeo.com/play_redirect?clip_id=%s&sig=%s&time=%s&quality=%s&codecs=%s&type=moogaloop_local&embed_location=" \
                        % (video_id, sig, timestamp, quality, codec_name.upper())

                files[key].append({
                    'ext': codec_extension,
                    'url': video_url,
                    'format_id': format_id,
                    'width': file_info.get('width'),
                    'height': file_info.get('height'),
                })
        formats = []
        for key in ('other', 'sd', 'hd'):
            formats += files[key]
        if len(formats) == 0:
            raise ExtractorError('No known codec found')

        subtitles = {}
        text_tracks = config['request'].get('text_tracks')
        if text_tracks:
            for tt in text_tracks:
                subtitles[tt['lang']] = 'http://vimeo.com' + tt['url']

        video_subtitles = self.extract_subtitles(video_id, subtitles)
        if self._downloader.params.get('listsubtitles', False):
            self._list_available_subtitles(video_id, subtitles)
            return

        return {
            'id': video_id,
            'uploader': video_uploader,
            'uploader_id': video_uploader_id,
            'upload_date': video_upload_date,
            'title': video_title,
            'thumbnail': video_thumbnail,
            'description': video_description,
            'duration': video_duration,
            'formats': formats,
            'webpage_url': url,
            'view_count': view_count,
            'like_count': like_count,
            'comment_count': comment_count,
            'subtitles': video_subtitles,
        }
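The thumbnail fallback above (the "thumbs" branch) works by sorting the width/URL pairs numerically and keeping the URL paired with the largest width. A minimal, standalone sketch of that trick, using a made-up thumbs mapping in place of Vimeo's player config:

# Hypothetical stand-in for config["video"]["thumbs"]: width string -> URL.
thumbs = {
    '640': 'http://example.com/640.jpg',
    '1280': 'http://example.com/1280.jpg',
    'base': 'http://example.com/base.jpg',
}

# Non-numeric keys sort as width 0, so the last tuple holds the widest image.
_, largest_url = sorted(
    (int(width) if width.isdigit() else 0, url)
    for width, url in thumbs.items())[-1]

print(largest_url)  # -> http://example.com/1280.jpg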
Example #51
0
def parse_show_install(data):
    """Helper method to parse the output of the 'show install all impact' or
        'install all' commands.

    Sample Output:

    Installer will perform impact only check. Please wait.

    Verifying image bootflash:/nxos.7.0.3.F2.2.bin for boot variable "nxos".
    [####################] 100% -- SUCCESS

    Verifying image type.
    [####################] 100% -- SUCCESS

    Preparing "bios" version info using image bootflash:/nxos.7.0.3.F2.2.bin.
    [####################] 100% -- SUCCESS

    Preparing "nxos" version info using image bootflash:/nxos.7.0.3.F2.2.bin.
    [####################] 100% -- SUCCESS

    Performing module support checks.
    [####################] 100% -- SUCCESS

    Notifying services about system upgrade.
    [####################] 100% -- SUCCESS



    Compatibility check is done:
    Module  bootable          Impact  Install-type  Reason
    ------  --------  --------------  ------------  ------
         8       yes      disruptive         reset  Incompatible image for ISSU
        21       yes      disruptive         reset  Incompatible image for ISSU


    Images will be upgraded according to following table:
    Module       Image  Running-Version(pri:alt)    New-Version   Upg-Required
    ------  ----------  ----------------------------------------  ------------
         8       lcn9k                7.0(3)F3(2)    7.0(3)F2(2)           yes
         8        bios                     v01.17         v01.17            no
        21       lcn9k                7.0(3)F3(2)    7.0(3)F2(2)           yes
        21        bios                     v01.70         v01.70            no
    """
    if len(data) > 0:
        data = massage_install_data(data)
    ud = {'raw': data}
    ud['processed'] = []
    ud['disruptive'] = False
    ud['upgrade_needed'] = False
    ud['error'] = False
    ud['install_in_progress'] = False
    ud['server_error'] = False
    ud['upgrade_succeeded'] = False
    ud['use_impact_data'] = False

    # Check for server errors
    if isinstance(data, int):
        if data == -1:
            ud['server_error'] = True
        elif data >= 500:
            ud['server_error'] = True
        elif data == -32603:
            ud['server_error'] = True
        return ud
    else:
        ud['list_data'] = data.split('\n')

    for x in ud['list_data']:
        # Check for errors and exit if found.
        if re.search(r'Pre-upgrade check failed', x):
            ud['error'] = True
            break
        if re.search(r'[Ii]nvalid command', x):
            ud['error'] = True
            break
        if re.search(r'No install all data found', x):
            ud['error'] = True
            break

        # Check for potentially transient conditions
        if re.search(r'Another install procedure may be in progress', x):
            ud['install_in_progress'] = True
            break
        if re.search(r'Backend processing error', x):
            ud['server_error'] = True
            break
        if re.search(r'^(-1|5\d\d)$', x):
            ud['server_error'] = True
            break

        # Check for messages indicating a successful upgrade.
        if re.search(r'Finishing the upgrade', x):
            ud['upgrade_succeeded'] = True
            break
        if re.search(r'Install has been successful', x):
            ud['upgrade_succeeded'] = True
            break

        # We get these messages when the upgrade is non-disruptive and we
        # lose the connection during the switchover, but we are far enough
        # along to be confident the upgrade succeeded.
        if re.search(r'timeout trying to send command: install', x):
            ud['upgrade_succeeded'] = True
            ud['use_impact_data'] = True
            break
        if re.search(r'[Cc]onnection failure: timed out', x):
            ud['upgrade_succeeded'] = True
            ud['use_impact_data'] = True
            break

        # Begin normal parsing.
        if re.search(r'----|Module|Images will|Compatibility', x):
            ud['processed'].append(x)
            continue
        # Check to see if upgrade will be disruptive or non-disruptive and
        # build dictionary of individual modules and their status.
        # Sample Line:
        #
        # Module  bootable      Impact  Install-type  Reason
        # ------  --------  ----------  ------------  ------
        #     8        yes  disruptive         reset  Incompatible image
        rd = r'(\d+)\s+(\S+)\s+(disruptive|non-disruptive)\s+(\S+)'
        mo = re.search(rd, x)
        if mo:
            ud['processed'].append(x)
            key = 'm%s' % mo.group(1)
            field = 'disruptive'
            if mo.group(3) == 'non-disruptive':
                ud[key] = {field: False}
            else:
                ud[field] = True
                ud[key] = {field: True}
            field = 'bootable'
            if mo.group(2) == 'yes':
                ud[key].update({field: True})
            else:
                ud[key].update({field: False})
            continue

        # Check to see if switch needs an upgrade and build a dictionary
        # of individual modules and their individual upgrade status.
        # Sample Line:
        #
        # Module  Image  Running-Version(pri:alt)    New-Version  Upg-Required
        # ------  -----  ----------------------------------------  ------------
        # 8       lcn9k                7.0(3)F3(2)    7.0(3)F2(2)           yes
        mo = re.search(r'(\d+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(yes|no)', x)
        if mo:
            ud['processed'].append(x)
            key = 'm%s_%s' % (mo.group(1), mo.group(2))
            field = 'upgrade_needed'
            if mo.group(5) == 'yes':
                ud[field] = True
                ud[key] = {field: True}
            else:
                ud[key] = {field: False}
            continue

    return ud
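A hedged sketch of how a caller might act on the dictionary parse_show_install() returns; the keys are exactly the ones set above, but the decision logic here is an assumption, not part of the original module:

def evaluate_install(ud):
    # Illustrative only: collapse the parsed flags into a coarse verdict.
    if ud['server_error'] or ud['error']:
        return 'failed'
    if ud['install_in_progress']:
        return 'retry'  # transient: another install procedure is running
    if ud['upgrade_succeeded']:
        # use_impact_data marks results reconstructed after a lost connection
        return 'succeeded (from impact data)' if ud['use_impact_data'] else 'succeeded'
    return 'upgrade pending (disruptive)' if ud['disruptive'] else 'no action needed'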
Example #52
0
async def rmbg(event: NewMessage.Event) -> None:
    """Remove the background from an image or sticker."""
    API_KEY = client.config['api_keys'].get('api_key_removebg', False)
    if not API_KEY:
        await event.answer("`You don't have an API key set for remove.bg!`")
        return

    match = event.matches[0].group(1)
    reply = await event.get_reply_message()

    if match and match != '':
        async with aiohttp.ClientSession() as session:
            try:
                async with session.get(match) as response:
                    if not (response.status == 200
                            and response.content_type.startswith('image/')):
                        await event.answer(
                            "`The provided link seems to be invalid.`")
                        return
            except aiohttp.client_exceptions.InvalidURL:
                await event.answer("`Invalid URL provided!`")
                return
            except Exception as e:
                await event.answer(f"`Unknown exception: {e}`")
                return
        media = match
    elif reply and reply.media:
        ext = utils.get_extension(reply.media)
        acceptable = [".jpg", ".png", ".bmp", ".tif", ".webp"]
        if ext not in acceptable:
            await event.answer("`Nice try, fool!`")
            return

        await event.answer("`Downloading media...`")
        media = io.BytesIO()
        await client.download_media(reply, media)
        if ext in [".bmp", ".tif", ".webp"]:
            new_media = io.BytesIO()
            try:
                pilImg = PIL.Image.open(media)
            except OSError as e:
                await event.answer(f'`OSError: {e}`')
                return
            pilImg.save(new_media, format="PNG")
            pilImg.close()
            media.close()
            media = new_media
    else:
        await event.answer("`Reply to a photo or provide a valid link.`")
        return

    response = await client.loop.run_in_executor(
        None, functools.partial(removebg_post, API_KEY,
                                media if isinstance(media, str) else media.getvalue()))
    if not isinstance(media, str):
        media.close()
    if response.status_code == requests.codes.ok:
        await event.delete()
        image = io.BytesIO(response.content)
        image.name = "image.png"
        await event.answer(file=image, force_document=True, reply=True)
        image.close()
    else:
        error = response.json()['errors'][0]
        code = error.get('code', False)
        title = error.get('title', 'No title?')
        body = code + ': ' + title if code else title
        text = f"`[{response.status_code}] {body}`"
        await event.answer(text)
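removebg_post itself is not shown in this example. A minimal sketch of what such a helper might look like, assuming the public remove.bg v1.0 endpoint and the requests library the handler already relies on; the real helper in the source may differ:

def removebg_post(api_key, media):
    # media is either a URL string (link branch above) or raw image bytes.
    data = {'size': 'auto'}
    files = None
    if isinstance(media, str):
        data['image_url'] = media
    else:
        files = {'image_file': media}
    return requests.post('https://api.remove.bg/v1.0/removebg',
                         headers={'X-Api-Key': api_key},
                         data=data, files=files)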
Example #53
0
 def __eq__(self, other):
     if isinstance(other, PymenuAction):
         return self._action == other._action
     return False
Example #54
0
    def upsert(self, token, indicators, flush=False):
        if not UPSERT_MODE:
            return self.create_bulk(token, indicators, flush=flush)

        # Create current index if needed
        index = self._create_index()

        count = 0

        # http://stackoverflow.com/questions/30111258/elasticsearch-in-equivalent-operator-in-elasticsearch

        # aggregate indicators based on dedup criteria
        agg = {}
        for d in sorted(indicators, key=lambda k: k['lasttime'], reverse=True):
            key = []

            for v in UPSERT_MATCH:

                if d.get(v):
                    if isinstance(d[v], basestring):
                        key.append(d[v])
                    elif isinstance(d[v], float) or isinstance(d[v], int):
                        key.append(str(d[v]))
                    elif isinstance(d[v], list):
                        for k in d[v]:
                            key.append(k)

            key = "_".join(key)

            # already seen in batch
            if key in agg:
                # look for older first times
                if d.get('firsttime') < agg[key].get('firsttime'):
                    agg[key]['firsttime'] = d['firsttime']
                    if d.get('count'):
                        agg[key]['count'] = agg[key].get('count') + d.get(
                            'count')

            # haven't yet seen in batch
            else:
                agg[key] = d

        actions = []

        #self.lockm.lock_aquire()
        for d in agg:
            d = agg[d]

            filters = {'limit': 1}
            for x in UPSERT_MATCH:
                if d.get(x):
                    filters[x] = d[x]

            if d.get('tags'):
                filters['tags'] = d['tags']

            if d.get('rdata'):
                filters['rdata'] = d['rdata']

            # search for existing, return latest record
            try:
                # search the current index only
                rv = self.search(token,
                                 filters,
                                 sort='reporttime',
                                 raw=True,
                                 sindex=index)
            except Exception as e:
                logger.error(e)
                raise e

            rv = rv['hits']['hits']

            # Indicator does not exist in results
            if len(rv) == 0:
                if not d.get('count'):
                    d['count'] = 1

                if d.get('group') and type(d['group']) != list:
                    d['group'] = [d['group']]

                expand_ip_idx(d)

                # append create to create set
                if UPSERT_TRACE:
                    logger.debug('upsert: creating new {}'.format(
                        d['indicator']))
                actions.append({
                    '_index': index,
                    '_type': 'indicator',
                    '_source': d,
                })

                count += 1
                continue

            # Indicator exists in results
            else:
                if UPSERT_TRACE:
                    logger.debug('upsert: match indicator {}'.format(
                        rv[0]['_id']))

                # map result
                i = rv[0]

                # skip new indicators that don't have a more recent lasttime
                if not self._is_newer(d, i['_source']):
                    logger.debug('skipping...')
                    continue

                # map existing indicator
                i = i['_source']

                # we're working within the same index
                if rv[0]['_index'] == self._current_index():

                    # update fields
                    i['count'] += 1
                    i['lasttime'] = d['lasttime']
                    i['reporttime'] = d['lasttime']

                    if d.get('message'):
                        if not i.get('message'):
                            i['message'] = []

                        i['message'].append(d['message'])

                    # append update to create set
                    if UPSERT_TRACE:
                        logger.debug(
                            'upsert: updating same index {}, {}'.format(
                                d['indicator'], rv[0]['_id']))
                    actions.append({
                        '_op_type': 'update',
                        '_index': rv[0]['_index'],
                        '_type': 'indicator',
                        '_id': rv[0]['_id'],
                        '_body': {
                            'doc': i
                        }
                    })

                    count += 1
                    continue

                # if we aren't in the same index
                else:

                    # update fields
                    i['count'] = i['count'] + 1
                    i['lasttime'] = d['lasttime']
                    i['reporttime'] = d['lasttime']

                    if d.get('message'):
                        if not i.get('message'):
                            i['message'] = []

                        i['message'].append(d['message'])

                    # append create to create set
                    if UPSERT_TRACE:
                        logger.debug('upsert: updating across index {}'.format(
                            d['indicator']))
                    actions.append({
                        '_index': index,
                        '_type': 'indicator',
                        '_source': i,
                    })

                    # delete the old document
                    if UPSERT_TRACE:
                        logger.debug(
                            'upsert: deleting old index {}, {}'.format(
                                d['indicator'], rv[0]['_id']))

                    actions.append({
                        '_op_type': 'delete',
                        '_index': rv[0]['_index'],
                        '_type': 'indicator',
                        '_id': rv[0]['_id']
                    })

                    count += 1
                    continue

        if len(actions) > 0:
            try:
                helpers.bulk(self.handle, actions)
            except Exception as e:
                #self.lockm.lock_release()
                raise e

        if flush:
            self.flush()

        #self.lockm.lock_release()
        return count
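The dedup step above builds one key per indicator out of whichever UPSERT_MATCH fields it carries. A small standalone illustration of that key building; the UPSERT_MATCH values here are assumed for the demo:

UPSERT_MATCH = ['indicator', 'provider', 'tags']  # assumed; configured elsewhere in the real store

def dedup_key(d):
    key = []
    for v in UPSERT_MATCH:
        if d.get(v):
            if isinstance(d[v], (int, float)):
                key.append(str(d[v]))
            elif isinstance(d[v], list):
                key.extend(d[v])
            else:  # strings (basestring in the Python 2 original)
                key.append(d[v])
    return '_'.join(key)

print(dedup_key({'indicator': 'example.com', 'provider': 'csirtg.io',
                 'tags': ['phishing', 'malware']}))
# -> example.com_csirtg.io_phishing_malware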
Example #55
0
	def deepresolve (obj):
		obj = Lazy.resolve (obj)
		if isinstance(obj, list):
			for i in range(len(obj)):
				obj[i] = Lazy.deepresolve(obj[i])
		return obj
Example #56
0
    def runAlgorithm(algOrName, parameters, onFinish=None, feedback=None, context=None):
        if isinstance(algOrName, QgsProcessingAlgorithm):
            alg = algOrName
        else:
            alg = QgsApplication.processingRegistry().createAlgorithmById(algOrName)

        if feedback is None:
            feedback = QgsProcessingFeedback()

        if alg is None:
            msg = Processing.tr('Error: Algorithm {0} not found\n').format(algOrName)
            feedback.reportError(msg)
            raise QgsProcessingException(msg)

        if context is None:
            context = dataobjects.createContext(feedback)

        if context.feedback() is None:
            context.setFeedback(feedback)

        ok, msg = alg.checkParameterValues(parameters, context)
        if not ok:
            msg = Processing.tr('Unable to execute algorithm\n{0}').format(msg)
            feedback.reportError(msg)
            raise QgsProcessingException(msg)

        if not alg.validateInputCrs(parameters, context):
            feedback.pushInfo(
                Processing.tr('Warning: Not all input layers use the same CRS.\nThis can cause unexpected results.'))

        ret, results = execute(alg, parameters, context, feedback)
        if ret:
            feedback.pushInfo(
                Processing.tr('Results: {}').format(results))

            if onFinish is not None:
                onFinish(alg, context, feedback)
            else:
                # auto convert layer references in results to map layers
                for out in alg.outputDefinitions():
                    if out.name() not in results:
                        continue

                    if isinstance(out, (QgsProcessingOutputVectorLayer, QgsProcessingOutputRasterLayer, QgsProcessingOutputMapLayer)):
                        result = results[out.name()]
                        if not isinstance(result, QgsMapLayer):
                            layer = context.takeResultLayer(result) # transfer layer ownership out of context
                            if layer:
                                results[out.name()] = layer # replace layer string ref with actual layer (+ownership)
                    elif isinstance(out, QgsProcessingOutputMultipleLayers):
                        result = results[out.name()]
                        if result:
                            layers_result = []
                            for l in result:
                                if not isinstance(l, QgsMapLayer):  # check the item, not the list
                                    layer = context.takeResultLayer(l) # transfer layer ownership out of context
                                    if layer:
                                        layers_result.append(layer)
                                    else:
                                        layers_result.append(l)
                                else:
                                    layers_result.append(l)

                            results[out.name()] = layers_result # replace layers strings ref with actual layers (+ownership)

        else:
            msg = Processing.tr("There were errors executing the algorithm.")
            feedback.reportError(msg)
            raise QgsProcessingException(msg)

        if isinstance(feedback, MessageBarProgress):
            feedback.close()
        return results
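A hedged usage sketch for the wrapper above, e.g. from the QGIS Python console. native:buffer and its INPUT/DISTANCE/OUTPUT parameters are standard QGIS, but any registered algorithm id works the same way; the input path is a placeholder:

params = {
    'INPUT': '/path/to/layer.shp',
    'DISTANCE': 100.0,
    'OUTPUT': 'memory:buffered',
}
results = Processing.runAlgorithm('native:buffer', params)
# Output layer references are converted to QgsMapLayer instances (ownership
# transferred out of the processing context) by the loop above.
buffered = results['OUTPUT']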
Example #57
0
 def warn_inplace(exc, nav, repl_pairs, local_opt):
     """failure_callback for NavigatorOptimizer: ignore InconsistencyErrors, print traceback
     """
     if isinstance(exc, InconsistencyError):
         return
     return NavigatorOptimizer.warn(exc, nav, repl_pairs, local_opt)
Example #58
0
 def __init__(self, action):
     assert isinstance(action, int)
     self._action = action
Example #59
0
class NavigatorOptimizer(Optimizer):
    """Abstract class

    """
    @staticmethod
    def warn(exc, nav, repl_pairs, local_opt):
        """failure_callback for NavigatorOptimizer: print traceback
        """
        _logger.error("Optimization failure due to: %s" % str(local_opt))
        _logger.error("TRACEBACK:")
        _logger.error(traceback.format_exc())
        if isinstance(exc, AssertionError) or config.on_opt_error == 'raise':
            raise exc
    @staticmethod
    def warn_inplace(exc, nav, repl_pairs, local_opt):
        """failure_callback for NavigatorOptimizer: ignore InconsistencyErrors, print traceback
        """
        if isinstance(exc, InconsistencyError):
            return
        return NavigatorOptimizer.warn(exc, nav, repl_pairs, local_opt)
    @staticmethod
    def warn_ignore(exc, nav, repl_pairs, local_opt):
        """failure_callback for NavigatorOptimizer: ignore all errors
        """
        pass

    def __init__(self, local_opt, ignore_newtrees = 'auto', failure_callback = None):
        """
        :param local_opt:  a LocalOptimizer to apply over an Env (None is OK too).
        :param ignore_newtrees:
            - True: new subgraphs returned by an optimization are not candidates for optimization
            - False: new subgraphs returned by an optimization are candidates for optimization
            - 'auto': let the local_opt set this parameter via its 'reentrant' attribute.
        :param failure_callback:
            a function that takes (exception, navigator, [(old, new),
            (old, new), ...]) and is called if there's an exception.

            If the trouble is from local_opt.transform(), the new variables will be 'None'.

            If the trouble is from validation (the new types don't match for
            example) then the new variables will be the ones created by
            transform().

            If this parameter is None, then exceptions are not caught here (raised normally).
        """
        self.local_opt = local_opt
        if ignore_newtrees == 'auto':
            self.ignore_newtrees = not getattr(local_opt, 'reentrant', True)
        else:
            self.ignore_newtrees = ignore_newtrees
        self.failure_callback = failure_callback

    def attach_updater(self, env, importer, pruner, chin = None):
        """Install some Env listeners to help the navigator deal with the ignore_trees-related functionality.

        :param importer: function that will be called whenever when optimizations add stuff to the graph.
        :param pruner: function to be called when optimizations remove stuff from graph.
        :param chin: "on change input" called whenever an node's inputs change.

        :returns: The Env plugin that handles the three tasks.  Keep this around so that you can detach later!

        """
        if self.ignore_newtrees:
            importer = None

        if importer is None and pruner is None:
            return None

        class Updater:
            if importer is not None:
                def on_import(self, env, node):
                    importer(node)
            if pruner is not None:
                def on_prune(self, env, node):
                    pruner(node)
            if chin is not None:
                def on_change_input(self, env, node, i, r, new_r):
                    chin(node, i, r, new_r)

        u = Updater()
        env.extend(u)
        return u

    def detach_updater(self, env, u):
        """Undo the work of attach_updater.

        :param u: a return-value of attach_updater

        :returns: None.
        """
        if u is not None:
            env.remove_feature(u)

    def process_node(self, env, node, lopt = None):
        """
        This function will use `lopt` to `transform` the `node`.  The `transform` method will
        return either False or a list of Variables that are intended to replace `node.outputs`.

        If the env accepts the replacement, then the optimization is successful, and this
        function returns True.

        If there are no replacement candidates or the env rejects the replacements, this
        function returns False.

        :param env:  an Env
        :param node: an Apply instance in `env`
        :param lopt: a LocalOptimizer instance that may have a better idea for how to compute
        node's outputs.
        :rtype: Bool
        :returns: True iff the `node`'s outputs were replaced in the `env`.

        """
        lopt = lopt or self.local_opt
        try:
            replacements = lopt.transform(node)
        except Exception as e:
            if self.failure_callback is not None:
                self.failure_callback(e, self, [(x, None) for x in node.outputs], lopt)
                return False
            else:
                raise
        if replacements is False or replacements is None:
            return False
        if not isinstance(replacements, (tuple, list)):
            raise TypeError('Optimizer %s gave wrong type of replacement. Expected list or tuple.' % lopt)
        if len(node.outputs) != len(replacements):
            raise ValueError('Optimizer %s gave wrong number of replacements' % lopt)
        # If an output would be replaced by itself, no need to perform
        # the replacement
        repl_pairs = [(r, rnew) for r, rnew in zip(node.outputs, replacements)
                if rnew is not r]
        if len(repl_pairs) == 0:
            return False
        try:
            env.replace_all_validate(repl_pairs, reason=lopt)
            return True
        except Exception as e:
            # This means the replacements were rejected by the env.
            #
            # This is not supposed to happen.  The default failure_callback will print a
            # traceback as a warning.
            if self.failure_callback is not None:
                self.failure_callback(e, self, repl_pairs, lopt)
                return False
            else:
                raise
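A hedged sketch of wiring a custom failure_callback into NavigatorOptimizer, following the (exception, navigator, repl_pairs, local_opt) contract documented above; my_local_opt stands in for any LocalOptimizer instance:

def log_and_continue(exc, nav, repl_pairs, local_opt):
    # Same contract as NavigatorOptimizer.warn: report the failure, keep going.
    _logger.error("optimization %s failed: %s" % (local_opt, exc))

nav_opt = NavigatorOptimizer(my_local_opt,
                             ignore_newtrees='auto',
                             failure_callback=log_and_continue)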
Example #60
0
	def resolve (obj):
		while isinstance (obj, Lazy):
			obj = obj()
		return obj
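Examples #55 and #60 show only Lazy's static helpers. For context, a minimal stand-in for the Lazy wrapper itself; this is an assumption about its shape, not the class from the original source:

class Lazy(object):
	# Assumed minimal shape: wrap a callable; calling the instance peels one layer.
	def __init__(self, func):
		self.func = func
	def __call__(self):
		return self.func()
	@staticmethod
	def resolve(obj):
		while isinstance(obj, Lazy):
			obj = obj()
		return obj

print(Lazy.resolve(Lazy(lambda: Lazy(lambda: 42))))  # -> 42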