Example 1
def run():
    primes = prime_reader.read_primes_default()
    last_prime = primes.next()

    prime_count = 0
    diag_total = 1
    current = 1
    for ring in eu.numbers(2):
        incr = (ring - 1) * 2

        for corner in xrange(3):
            current = op.iadd(current, incr)
            while current > last_prime:
                last_prime = primes.next()
            if current == last_prime:
                prime_count = op.iadd(prime_count, 1)
            
        current = op.add(current, incr)
        
        diag_total = op.iadd(diag_total, 4)
        
        perc = op.div(float(prime_count), diag_total)

        # print ring, side_length(ring), last_prime, diag_total, perc
        
        if op.lt(perc, 0.1):
            return side_length(ring)
Example 2
 def test_mixed_addition(self):
     self.assertQuantityEqual(1*pq.ft + 1*pq.m, 4.280839895 * pq.ft)
     self.assertQuantityEqual(1*pq.ft + pq.m, 4.280839895 * pq.ft)
     self.assertQuantityEqual(pq.ft + 1*pq.m, 4.280839895 * pq.ft)
     self.assertQuantityEqual(pq.ft + pq.m, 4.280839895 * pq.ft)
     self.assertQuantityEqual(op.iadd(1*pq.ft, 1*pq.m), 4.280839895 * pq.ft)
     self.assertRaises(ValueError, lambda: 10*pq.J + 3*pq.m)
     self.assertRaises(ValueError, lambda: op.iadd(10*pq.J, 3*pq.m))
Example 3
 def test_inplace(self):
     #operator = self.module
     class C(object):
         def __iadd__     (self, other): return "iadd"
         def __iand__     (self, other): return "iand"
         def __ifloordiv__(self, other): return "ifloordiv"
         def __ilshift__  (self, other): return "ilshift"
         def __imod__     (self, other): return "imod"
         def __imul__     (self, other): return "imul"
         def __ior__      (self, other): return "ior"
         def __ipow__     (self, other): return "ipow"
         def __irshift__  (self, other): return "irshift"
         def __isub__     (self, other): return "isub"
         def __itruediv__ (self, other): return "itruediv"
         def __ixor__     (self, other): return "ixor"
         def __getitem__(self, other): return 5  # so that C is a sequence
     c = C()
     self.assertEqual(operator.iadd     (c, 5), "iadd")
     self.assertEqual(operator.iand     (c, 5), "iand")
     self.assertEqual(operator.ifloordiv(c, 5), "ifloordiv")
     self.assertEqual(operator.ilshift  (c, 5), "ilshift")
     self.assertEqual(operator.imod     (c, 5), "imod")
     self.assertEqual(operator.imul     (c, 5), "imul")
     self.assertEqual(operator.ior      (c, 5), "ior")
     self.assertEqual(operator.ipow     (c, 5), "ipow")
     self.assertEqual(operator.irshift  (c, 5), "irshift")
     self.assertEqual(operator.isub     (c, 5), "isub")
     self.assertEqual(operator.itruediv (c, 5), "itruediv")
     self.assertEqual(operator.ixor     (c, 5), "ixor")
     self.assertEqual(operator.iconcat  (c, c), "iadd")
Example 4
 def __iadd__(self,other):
     if not isinstance(other, Unit):
         raise ValueError("Both operands must be of type Unit")
     elif self.unit != other.unit:
         raise ValueError("unit %s is not %s" % (self.unit, other.unit))
     else:
         return Unit(operator.iadd(self.value,other.value), self.unit)
Example 5
def cumsum(num_list):
    """ python cumsum

    References:
        http://stackoverflow.com/questions/9258602/elegant-pythonic-cumsum
    """
    return reduce(lambda acc, itm: operator.iadd(acc, [acc[-1] + itm]), num_list, [0])[1:]
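A quick usage check for this helper (hypothetical session, not from the source project; the function assumes `operator` is imported, and on Python 3 the bare `reduce` must come from functools):

import operator
from functools import reduce  # Python 3; built in on Python 2

print(cumsum([1, 2, 3, 4]))  # -> [1, 3, 6, 10]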
Example 6
def add_factory_6(x):
    """
    >>> add5 = add_factory_6(5)
    >>> add5.next()  # prime the generator before sending
    >>> print add5.send(10)
    15
    """

    yield iadd(x, (yield))
Example 7
 def test_inplace_addition(self, input_tuple, expected):
     self.ureg.autoconvert_offset_to_baseunit = False
     (q1v, q1u), (q2v, q2u) = input_tuple
     # update input tuple with new values to have correct values on failure
     input_tuple = ((np.array([q1v]*2, dtype=np.float), q1u),
                    (np.array([q2v]*2, dtype=np.float), q2u))
     Q_ = self.Q_
     qin1, qin2 = input_tuple
     q1, q2 = Q_(*qin1), Q_(*qin2)
     q1_cp = copy.copy(q1)
     if expected == 'error':
         self.assertRaises(OffsetUnitCalculusError, op.iadd, q1_cp, q2)
     else:
         expected = np.array([expected[0]]*2, dtype=np.float), expected[1]
         self.assertEqual(op.iadd(q1_cp, q2).units, Q_(*expected).units)
         q1_cp = copy.copy(q1)
         self.assertQuantityAlmostEqual(op.iadd(q1_cp, q2), Q_(*expected),
                                        atol=0.01)
Example 8
    def test_inplace(self):
        import operator

        list = []
        assert operator.iadd(list, [1, 2]) is list
        assert list == [1, 2]

        list = [1, 2]
        assert operator.imul(list, 2) is list
        assert list == [1, 2, 1, 2]
Example 9
def test_iadd():
    class C(object):
        def __add__(self, other):
            return '__add__'

        def __iadd__(self, other):
            return '__iadd__'

    c = C()
    # c + 5
    assert operator.add(c, 5) == "__add__"
    assert Operator.add(c, 5) == '__add__'
    # c += 5
    assert operator.iadd(c, 5) == "__iadd__"
    assert Operator.iadd(c, 5) == '__iadd__'

    class C(object):
        def __add__(self, other):
            return '__add__'
    c = C()
    assert operator.iadd(c, 5) == "__add__"
    assert Operator.iadd(c, 5) == '__add__'
Example 10
def isLychrel(num, iter):
    if iter >= 50: return True

    # optimization through hash table
    if mapper.has_key(num) and mapper[num]: return False

    val = int(str(num)[::-1]) + num
    if isPalindrome(val):
        mapper[num] = False
        return False

    recurse = isLychrel(val, operator.iadd(iter, 1))
    mapper[num] = False if recurse else True
    return recurse
Example 11
	def loadFromPCAP(self,file):
		self.clean()
		a = os.popen("tshark -r "+file+" -Tfields -e frame.number -e ip.src \
			-e ip.dst -e frame.len -e tcp.flags -e frame.time_relative -e \
			tcp.analysis.ack_rtt -E header=y -E separator=,")
		for line in a.readlines():
			if(line!=""):
				temp=line.split(',')
				p = Packet(temp[0],temp[1],temp[2],temp[3],temp[4],temp[5],
					temp[6].strip("\n"))
				# if the packet parsed successfully we add it to the list, and if not
				# we increment a failed-packet counter
				if(p.valid()):
					self.packetList.append(p)
				else:
					self.failedPacketAttempts = operator.iadd(\
						self.failedPacketAttempts,1)
Example 12
def main():
    _file = open('poker.txt', 'r')
    count = 0

    for line in _file:
        tokens = line.split()
        first = ''; second = ''
        for num in range(0, 5): first = first + tokens[num] + ' '
        for num in range(5, 10): second = second + tokens[num] + ' '

        first_hand = translateHands(translateCards(first))
        second_hand = translateHands(translateCards(second))

        # print line, compare(first_hand, second_hand)
        if compare(first_hand, second_hand) == 1: count = operator.iadd(count, 1)

    print count
Example 13
def common_subarrays(X, axis=0):
    """
    Find common subarrays of a 2-dimensional array X, where axis is the axis to apply
    the search over. Common subarrays are returned as a dictionary of <subarray, [indices]>
    pairs: each key is a tuple representing the subarray, and each value lists the indices
    of that subarray along the remaining axis of X.
    
    :param :class:`np.ndarray` X: 2d array to check for common subarrays in
    :param int axis: axis to apply subarray detection over.
        When axis is 0, rows are compared; otherwise, columns.
    
    Examples:
    =========

    In a 2d array:    
    >>> import numpy as np
    >>> X = np.zeros((3,6), dtype=bool)
    >>> X[[1,1,1],[0,4,5]] = 1; X[1:,[2,3]] = 1
    >>> X
    array([[False, False, False, False, False, False],
           [ True, False,  True,  True,  True,  True],
           [False, False,  True,  True, False, False]], dtype=bool)
    >>> d = common_subarrays(X,axis=1)
    >>> len(d)
    3
    >>> X[:, d[tuple(X[:,0])]]
    array([[False, False, False],
           [ True,  True,  True],
           [False, False, False]], dtype=bool)
    >>> d[tuple(X[:,4])] == d[tuple(X[:,0])] == [0, 4, 5]
    True
    >>> d[tuple(X[:,1])]
    [1]
    """
    from collections import defaultdict
    from itertools import count
    from operator import iadd
    assert X.ndim == 2 and axis in (0,1), "Only implemented for 2D arrays"
    subarrays = defaultdict(list)
    cnt = count()
    np.apply_along_axis(lambda x: iadd(subarrays[tuple(x)], [cnt.next()]), 1-axis, X)
    return subarrays
Example 14
 def __iadd__(self,other):
     self._check_type(other)
     return Unit(operator.iadd(self.value,other.value), self.unit)
Example 15
    def test_inplace(self):
        class C(object):
            def __iadd__(self, other):
                return "iadd"

            def __iand__(self, other):
                return "iand"

            def __ifloordiv__(self, other):
                return "ifloordiv"

            def __ilshift__(self, other):
                return "ilshift"

            def __imod__(self, other):
                return "imod"

            def __imul__(self, other):
                return "imul"

            def __ior__(self, other):
                return "ior"

            def __ipow__(self, other):
                return "ipow"

            def __irshift__(self, other):
                return "irshift"

            def __isub__(self, other):
                return "isub"

            def __itruediv__(self, other):
                return "itruediv"

            def __ixor__(self, other):
                return "ixor"

            def __getitem__(self, other):
                return 5  # so that C is a sequence

        c = C()
        self.assertEqual(operator.iadd(c, 5), "iadd")
        self.assertEqual(operator.iand(c, 5), "iand")
        self.assertEqual(operator.ifloordiv(c, 5), "ifloordiv")
        self.assertEqual(operator.ilshift(c, 5), "ilshift")
        self.assertEqual(operator.imod(c, 5), "imod")
        self.assertEqual(operator.imul(c, 5), "imul")
        self.assertEqual(operator.ior(c, 5), "ior")
        self.assertEqual(operator.ipow(c, 5), "ipow")
        self.assertEqual(operator.irshift(c, 5), "irshift")
        self.assertEqual(operator.isub(c, 5), "isub")
        self.assertEqual(operator.itruediv(c, 5), "itruediv")
        self.assertEqual(operator.ixor(c, 5), "ixor")
        self.assertEqual(operator.iconcat(c, c), "iadd")
        self.assertEqual(operator.__iadd__(c, 5), "iadd")
        self.assertEqual(operator.__iand__(c, 5), "iand")
        self.assertEqual(operator.__ifloordiv__(c, 5), "ifloordiv")
        self.assertEqual(operator.__ilshift__(c, 5), "ilshift")
        self.assertEqual(operator.__imod__(c, 5), "imod")
        self.assertEqual(operator.__imul__(c, 5), "imul")
        self.assertEqual(operator.__ior__(c, 5), "ior")
        self.assertEqual(operator.__ipow__(c, 5), "ipow")
        self.assertEqual(operator.__irshift__(c, 5), "irshift")
        self.assertEqual(operator.__isub__(c, 5), "isub")
        self.assertEqual(operator.__itruediv__(c, 5), "itruediv")
        self.assertEqual(operator.__ixor__(c, 5), "ixor")
        self.assertEqual(operator.__iconcat__(c, c), "iadd")
Example 16
    def _write(self):
        # NOTE: we do not check that the buffer is of length N here. This means
        # that at the beginning of an episode we will add the initial N-1
        # transitions (of size 1, 2, ...) and at the end of an episode (when
        # called from write_last) we will write the final transitions of size (N,
        # N-1, ...). See the Note in the docstring.

        # Form the n-step transition given the steps.
        observation = self._buffer[0].observation
        action = self._buffer[0].action
        extras = self._buffer[0].extras
        next_observation = self._next_observation

        # Give the same tree structure to the n-step return accumulator,
        # n-step discount accumulator, and self.discount, so that they can be
        # iterated in parallel using tree.map_structure.
        (n_step_return, total_discount,
         self_discount) = tree_utils.broadcast_structures(
             self._buffer[0].reward, self._buffer[0].discount, self._discount)

        # Copy total_discount, so that accumulating into it doesn't affect
        # _buffer[0].discount.
        total_discount = tree.map_structure(np.copy, total_discount)

        # Broadcast n_step_return to have the broadcasted shape of
        # reward * discount. Also copy, to avoid accumulating into
        # _buffer[0].reward.
        n_step_return = tree.map_structure(
            lambda r, d: np.copy(np.broadcast_to(r,
                                                 np.broadcast(r, d).shape)),
            n_step_return, total_discount)

        # NOTE: total_discount will have one less discount applied than there
        # are step.discounts. This is so that when the learner/update uses an
        # additional discount we don't apply it twice. Inside the following
        # loop we will apply this right before summing up the n_step_return.
        for step in itertools.islice(self._buffer, 1, None):
            (step_discount, step_reward,
             total_discount) = tree_utils.broadcast_structures(
                 step.discount, step.reward, total_discount)

            # Equivalent to: `total_discount *= self._discount`.
            tree.map_structure(operator.imul, total_discount, self_discount)

            # Equivalent to: `n_step_return += step.reward * total_discount`.
            tree.map_structure(lambda nsr, sr, td: operator.iadd(nsr, sr * td),
                               n_step_return, step_reward, total_discount)

            # Equivalent to: `total_discount *= step.discount`.
            tree.map_structure(operator.imul, total_discount, step_discount)

        transition = types.Transition(observation=observation,
                                      action=action,
                                      reward=n_step_return,
                                      discount=total_discount,
                                      next_observation=next_observation,
                                      extras=extras)

        # Create a list of steps.
        if self._final_step_placeholder is None:
            # utils.final_step_like is expensive (around 0.085ms) to run every time
            # so we cache its output.
            self._final_step_placeholder = utils.final_step_like(
                self._buffer[0], next_observation)
        final_step: base.Step = self._final_step_placeholder._replace(
            observation=next_observation)
        steps = list(self._buffer) + [final_step]

        # Calculate the priority for this transition.
        table_priorities = utils.calculate_priorities(self._priority_fns,
                                                      steps)

        # Insert the transition into replay along with its priority.
        self._writer.append(transition)
        for table, priority in table_priorities.items():
            self._writer.create_item(table=table,
                                     num_timesteps=1,
                                     priority=priority)
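The map_structure calls above rely on operator.iadd/imul mutating numpy arrays in place; a minimal sketch of just that behaviour (numpy assumed, not part of the original snippet):

import operator
import numpy as np

acc = np.zeros(3)
operator.iadd(acc, np.array([1.0, 2.0, 3.0]))  # calls acc.__iadd__, mutates acc
operator.imul(acc, 0.5)                        # likewise in place
print(acc)  # [0.5 1.  1.5]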
Example 17
## Check whether an object is a mapping type
print operator.isMappingType({1: "one", 2: "two"})
print operator.isMappingType(1)

## Check whether an object is a number type
print operator.isNumberType(1)
print operator.isNumberType(1.000001)

## Check whether an object is a sequence type
print operator.isSequenceType([1, 2, 3])
print operator.isSequenceType((1, 2, 3))

# Same as +=: adds the two values and returns the sum, but an immutable
# first argument is left unchanged; the result comes back as the return value
a = 0
b = operator.iadd(a, 2)
print a
print b

# Same as += on sequences: concatenates the second sequence onto the first;
# a mutable first argument is extended in place and also returned
a = [1, 2]
b = [3, 4]
c = operator.iconcat(a, b)
print c

# Same as -=: subtracts and returns the difference, but an immutable
# first argument is left unchanged; the result comes back as the return value
a = 2
b = operator.isub(a, 1)
print a
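A minimal Python 3 sketch (not from the original page) contrasting the immutable and mutable cases described in the comments above:

import operator

n = 0
print(operator.iadd(n, 2), n)  # 2 0 -> ints are immutable, n is untouched

seq = [1, 2]
out = operator.iadd(seq, [3, 4])
print(out is seq, seq)  # True [1, 2, 3, 4] -> the list was extended in place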
Example 18
    def test_iadd(self):
        import operator

        list = []
        assert operator.iadd(list, [1, 2]) is list
        assert list == [1, 2]
Example 19
 def iadd_usecase(x, y):
     return operator.iadd(x, y)
Example 20
 def recv_data(buffer):
     # cannot use received += buffer with a variable
     # defined in the parent function.
     operator.iadd(received, buffer)
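Why this works: += on a name assigned inside a nested function makes that name local (raising UnboundLocalError without nonlocal), while operator.iadd(received, buffer) mutates the closed-over list without rebinding the name. A self-contained sketch with hypothetical names:

import operator

def make_receiver():
    received = []
    def recv_data(buffer):
        # received += buffer would raise UnboundLocalError here
        operator.iadd(received, buffer)
    return received, recv_data

received, recv_data = make_receiver()
recv_data([1, 2])
print(received)  # [1, 2]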
Example 21

# In[10]:


# Add using operator module
a,b = 1,2.0
print(a+b)
operator.add(a,b)


# In[11]:


a,b = 1, 2.0
a=operator.iadd(a,b)
print (a)


# In[12]:


help(operator.iadd)


# In[14]:


print (a)
print (b)
a += b
Example 22
operator.delitem(a, slice(1, 3))
print(a)


# ------------------------------------------------------------------------------
# In-Place Operators
# In addition to the standard operators, many types of objects support
# "inplace" modification through special operators such as +=.


c = [1, 2, 3]
d = ["a", "b", "c"]
print("c =", c)
print("d =", d)

operator.iadd(c, d)
print("iadd(c, d) => c =", c)


# ------------------------------------------------------------------------------
# Attribute and Item "Getters"
# One of the most unusual features of the operator module is the concept of
# getters. These callable objects are constructed at runtime and retrieve
# attributes of objects or contents from sequences.

getName = operator.attrgetter("__name__")
print([getName(func) for func in (abs, max, min, dict)])

# Item getters work like lambda x,y=5: x[y]

getItem = operator.itemgetter("val")
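The snippet stops here; a plausible continuation showing the item getter in use (assumed, not from the original):

print([getItem(d) for d in ({"val": 1}, {"val": 2})])  # [1, 2]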
Example 23
def detect_types(records, min_conf=0.95, hweight=6, max_iter=100):
    """Detects record types by selecting the first type which reaches the
    minimum confidence level (based on number of hits).

    Args:
        records (Iter[dict]): Rows of data whose keys are the field names.
            E.g., output from any `meza.io` read function.

        min_conf (float): minimum confidence level, a lower value will
            converge faster (default: 0.95)

        hweight (int): weight to give header row, a higher value will
            converge faster (default: 6).

            detect_types(records, 0.9, 3)['count'] == 23
            detect_types(records, 0.9, 4)['count'] == 10
            detect_types(records, 0.9, 5)['count'] == 6
            detect_types(records, 0.95, 5)['count'] == 31
            detect_types(records, 0.95, 6)['count'] == 17
            detect_types(records, 0.95, 7)['count'] == 11

        max_iter (int): maximum number of iterations to perform (default: 100)

    Returns:
        tuple(Iter[dict], dict): Tuple of records and the result

    See also:
        `meza.process.type_cast`
        `meza.process.gen_types`
        `meza.process.gen_confidences`
        `meza.typetools.guess_type_by_field`
        `meza.typetools.guess_type_by_value`

    Examples:
        >>> record = {
        ...     'null': 'None',
        ...     'bool': 'false',
        ...     'int': '1',
        ...     'float': '1.5',
        ...     'text': 'Iñtërnâtiônàližætiøn',
        ...     'date': '5/4/82',
        ...     'time': '2:30',
        ...     'datetime': '5/4/82 2pm',
        ... }
        >>> records = it.repeat(record)
        >>> types = detect_types(records)[1]['types']
        >>> set(t['id'] for t in types) == {
        ...     'int', 'text', 'float', 'datetime', 'bool', 'time',
        ...     'date', 'null'}
        True
        >>> all(t['id'] == t['type'] for t in types)
        True
    """
    records = iter(records)
    tally = {}
    consumed = []

    if hweight < 1:
        raise ValueError("`hweight` must be greater than or equal to 1!")

    if min_conf >= 1:
        raise ValueError("`min_conf must` be less than 1!")

    for record in records:
        if not tally:
            # take a first guess using the header
            ftypes = tt.guess_type_by_field(record.keys())
            tally = {t["id"]: defaultdict(int) for t in ftypes}
            # iadd on an int returns the new value without storing it back into
            # the defaultdict, so the header weight must be assigned explicitly
            for t in ftypes:
                tally[t["id"]][t["type"]] = iadd(tally[t["id"]][t["type"]], hweight)

        # now guess using the values
        for t in tt.guess_type_by_value(record):
            try:
                tally[t["id"]][t["type"]] += 1
            except KeyError:
                tally[t["id"]] = defaultdict(int)
                tally[t["id"]][t["type"]] = 1

        types = list(gen_types(tally))
        confidence = min(gen_confidences(tally, types, hweight))
        consumed.append(record)
        count = len(consumed)

        if (confidence >= min_conf) or count >= max_iter:
            break

    records = it.chain(consumed, records)

    result = {"confidence": confidence, "types": types, "count": count, "accurate": confidence >= min_conf}

    return records, result
Example 24
 def update_event(self, inp=-1):
     self.set_output_val(0, operator.iadd(self.input(0), self.input(1)))
Example 25
def test_bundles(bundles_to_test, resultdir, reset):
    if reset == 'FULL' or reset == 'MODEL':
        for bundle in bundles_to_test:
            h_name = "h-{}".format(bundle.name)
            unit_n = subprocess.check_output([
                "juju status --format oneline | grep {} | cut -d '/' -f 2 | cut -d ':' -f 1"
                .format(h_name)
            ],
                                             shell=True,
                                             universal_newlines=True).rstrip()
            if unit_n:
                subprocess.check_call([
                    'juju', 'ssh', '{}/{}'.format(h_name, unit_n), '-Ct',
                    "if [[ $(juju switch --list) ]]; then echo y | tengu destroy-model {0}; fi"
                    .format(bundle.name[:10])
                ])
    if reset == 'FULL':
        services_to_destroy = [
            "h-{}".format(bundle.name) for bundle in bundles_to_test
        ] + ['rest2jfed']
        if services_to_destroy:
            subprocess.check_call([
                'echo y | tengu destroy-service tenguci {}'.format(
                    " ".join(services_to_destroy))
            ],
                                  shell=True)
        try:
            subprocess.check_call([
                'juju', 'cached-images', 'delete', '--kind', 'lxc', '--series',
                'trusty', '--arch', 'amd64'
            ])
        except subprocess.CalledProcessError:
            pass
    logging.info("testing bundles at \n\t{}\nWriting results to {}".format(
        "\n\t".join([b.dirpath for b in bundles_to_test]), resultdir))
    # Get all charms that have to be pushed
    sojobo_bundle = CharmStoreObject(
        path='{}/../bundles/sojobo/bundle.yaml'.format(JUJU_REPOSITORY))
    init_bundle = CharmStoreObject(
        path=
        '{}/trusty/hauchiwa/files/tengu_management/templates/init-bundle/bundle.yaml'
        .format(JUJU_REPOSITORY))
    charms_to_test = []
    # charms in hauchiwa and init bundle need to be pushed but those bundles don't need to be tested
    for bundle in bundles_to_test + [sojobo_bundle, init_bundle]:
        charms_to_test = charms_to_test + get_charms_from_bundle(
            bundle, namespace_whitelist=["cs:~" + USERNAME])
    charms_to_test = list({v.url: v for v in charms_to_test}.values())

    # Push all charms that will be tested
    logging.info("Pushing the following charms to 'staging': \n\t{}\n".format(
        "\n\t".join([c.url for c in charms_to_test])))
    for charm in charms_to_test:
        charm.push()
    logging.info("Pushed Charms: \n\t{}\n".format("\n\t".join(
        [c.url for c in charms_to_test])))

    # Setup the directory for each bundle
    testdirs = []
    for bundle in bundles_to_test:
        testdirs.append(
            bootstrap_testdir(sojobo_bundle, bundle, init_bundle,
                              charms_to_test))

    logging.info("Pushing the following bundles to 'staging': \n\t{}\n".format(
        "\n\t".join([c.url for c in bundles_to_test])))
    for bundle in bundles_to_test:
        bundle.push()

    # The first hauchiwa can't be created in parallel because bundledeployer might try to deploy
    # rest2jfed while it already exists. (race condition because they ask for permission instead of forgiveness)
    create_hauchiwa(testdirs[0], resultdir)

    with Pool(5) as pool:
        result = pool.starmap(create_hauchiwa, [[testdir, resultdir]
                                                for testdir in testdirs[1:]])
    # Due to a bug, the pool will hang if one of the run_tests functions exits, so we do it here.
    if False in result:
        exit(1)

    sleeptime = 0

    # Run tests (run_tests should throw exception if test fails)
    # This runs in parallel
    logging.info("Running tests in: \n\t{}\n".format("\n\t".join(testdirs)))
    with Pool(5) as pool:
        result = pool.starmap(
            run_tests, [[testdir, resultdir,
                         operator.iadd(sleeptime, 20)]
                        for testdir in testdirs])
    # Due to a bug, the pool will hang if one of the run_tests functions exits, so we do it here.
    if False in result:
        exit(1)

    # If all tests succeed, publish all charms
    logging.info("Publishing charms/bundles: \n\t{}\n".format("\n\t".join(
        [c.url for c in charms_to_test + bundles_to_test])))
    for csobject in charms_to_test + bundles_to_test:
        csobject.publish('stable')
Example 26
#Case 1 - Immutable target-such as String,number and tuples

import operator
a = 5
b = 6
c = 5
d = 6

x = operator.add(a, b)
# printing the modified value
print("Value after adding using normal operator : ", end="")
print(x)
# printing the modified value
print("Value after adding using Inplace operator : ", end="")
y = operator.iadd(c, d)
print(y)
# printing value of first argument
# value is unchanged
print("Value of first argument using normal operator : ", end="")
print(a)

# printing value of first argument
# value is unchanged
print("Value of first argument using Inplace operator : ", end="")
print(c)

# Case 2 - Mutable Target-list,dictionaries

e = [1, 2, 3, 4, 5]
#using add() to add arguments passed
z = operator.add(e, [1, 2, 3])
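The mutable-target case is cut off above; a short continuation consistent with Case 1 (assumed, not from the original):

print(z)  # [1, 2, 3, 4, 5, 1, 2, 3] -> add() built a new list
print(e)  # [1, 2, 3, 4, 5] -> e unchanged

w = operator.iadd(e, [1, 2, 3])
print(w)  # [1, 2, 3, 4, 5, 1, 2, 3]
print(e)  # same list: iadd() extended e in place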
Example 27
import operator
x = "hai"
y = "hello"
x = operator.iadd(x, y)
print(x)
y = "kiran"
z = "kumar"
y = operator.iconcat(y, z)
print(y)
y = "kirankiran"
y += z
print(y)

print('\n\n\n♥')
li = [5, 4, 3, 2, 1]
print('li: ', li)
li2 = li
print('ids are same after assign') if li is li2 else print(
    "not same after assign")
li2.append(0)
print('ids are same after append of li2') if li is li2 else print(
    "not same after append ")
print('li2: ', li2)
print('li ele are also changed since both ids are same')
print('li: ', li)

print('\n\n\n♥')
print("normal addition of lists\n")
li = li + [1, 2, 3, 4]
print('ids are same after li normal add') if li is li2 else print(
    "ids are not same after li normal add; a new li was created")
Example 28
#immutable targets
import operator

x = 5
y = 6
a = 5
b = 6

z = operator.add(a, b)
p = operator.iadd(x, y)

print("value after adding using normal operator: ", end=" ")
print(z)
print("value after adding using inplace operator: ", end=" ")
print(p)
print("value of first argument using normal operator: ", end=" ")
print(a)
print("value of first argument using inplace iperator: ", end=" ")
print(x)

#mutable targets
a = [1, 2, 3, 4, 5]
z = operator.add(a, [1, 2, 3])
print("\nvalue after adding using normal operator: ", end=" ")
print(z)
print("value of first argument using normal operator: ", end=" ")
print(a)

p = operator.iadd(a, [1, 2, 3])
print("value after adding using inplace operator: ", end=" ")
print(p)
Example 31
# Python code to demonstrate the working of
# iadd() and iconcat()

import operator

# using iadd() to add and assign value
x = operator.iadd(2, 3)

# printing the modified value
print("The value after adding and assigning : ")
print(x)

# initializing value
y = "geeks"
z = "forgeeks"

# using iconcat() to concat the sequences
y = operator.iconcat(y, z)

print("The string after concatenation is : ")
print(y)

# Python code to demonstrate the working of
# isub() and imul()

# using isub() to subtract and assign value
x = operator.isub(2, 3)

print("The value after subtracting and assigning : ")
print(x)
Example 32
import operator as op

############ immutable targets
a = 10
b = 10

######### a+b
op.add(a, b)
print(a)

######### a+=b
op.iadd(a, b)
print(a)

########### mutable targets
a = [1, 2, 3, 4]

######### a+b
op.add(a, [5])
print(a)

########## a+=b
op.iadd(a, [6])
print(a)
Example 33
a, b = 1, 2

print("\nOperator para suma \n")
print(operator.add(a, b))  #Suma usando el modulo operator

#Sumando de manera rapida a un variable

print("\nSuma rapida \n")
print(a)
a += 2
print(a)

#Una forma de hacer la suma parecida a " += " con el modulo operator
print("\nSuma rapida con modulo Operator \n")
print(operator.iadd(a, b))

#Exponenciacion

print("\nExponenciacion\n")

#Para elevar una base a un exponente en Python normalmente se hace simplemente asi:

print("Exponenciacion rapida")
print(2**3)

print("Exponenciacion rapida 2")
print(pow(2, 3))

#tambien se puede hacer raices o exponentes fraccionarios
Example 35
def getDigitalSum(num):
    sum = 0
    for digit in str(num): sum = operator.iadd(sum, int(digit))
    return sum
Example 36
@file = operator_2
@author = Liangjisheng
@create_time = 2018/6/5 19:51
"""
import operator as op

# In-place operators: x += y is equivalent to x = iadd(x, y);
# if the result is assigned to another variable, z = iadd(x, y) is equivalent to z = x; z += y
a, b = 3, 4
c = [1, 2]
d = ['a', 'b']
print('a =', a)
print('b =', b)
print('c =', c)
print('d =', d)
a = op.iadd(a, b)
print('a = operator.iadd(a, b) =>', a)
c = op.iconcat(c, d)
print('c = operator.iconcat(c, d) =>', c)
print()

# One of the most distinctive features of the operator module is the concept of getters:
# callables constructed at runtime that fetch attributes of objects or contents of sequences.
# Getters are especially useful with iterators or generator sequences, since they incur far
# less overhead than a lambda or Python function.


# Attribute getter
class MyObj(object):
    def __init__(self, arg):
        super(MyObj, self).__init__()
        self.arg = arg
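The attribute-getter example is truncated; a plausible continuation (assumed, not from the original):

objs = [MyObj(i) for i in range(3)]
getter = op.attrgetter('arg')
print([getter(o) for o in objs])  # [0, 1, 2]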
Example 37
print((b, a)[a < b])
c, d = 20, 30
print((lambda: d, lambda: c)[c < d]())

# Division

a, b = 20, 30
print(b // a)  # Floor division
print(b / a)  # Float division

# any , all

print(all([True, True, False]))
print(any([True, False, True]))

import operator
x = 10
y = 20

z = operator.add(x, y)
print(z)

d = operator.iadd(x, y)
print(d)

a = [1, 2, 3, 4]
z = operator.add(a, [5, 6, 7])
print(z)
print(a)
g = operator.iadd(a, [3, 4, 56, 6])
print(g)
Example 39
def str_sum_g(seq):
    out_str = ''
    for item in seq:
        # str is immutable: iadd returns a new string, so assign it back
        out_str = op.iadd(out_str, item)
    return out_str
Example 40
 __iadd__ method.
"""

import operator

a = 10
b = 15

# inplace in immutable
x = operator.add(a, b)  # normal addition which is similar to x = a+b

print("a =", a, "b =", b, "\noperator.add(a, b)=", x)
print("a =", a, "b =", b)
print("--------------------")
operator.iadd(
    a, b
)  # inplace operator iadd, similar to a = a+b or a += b, but it doesn't rebind the immutable a

print("a =", a, "b =", b)
print("--------------------")
# inplace operator  in mutable objects

li = [12, 15, 25, 85, 98]

print("operator.add",
      operator.add(li, [5, 4, 3]))  # here the original ist is not changed

print("li", li)

# inplace operator which is similar to li = li + [1, 2, 3, 4]
print("operator.iadd", operator.iadd(li, [1, 2, 3, 4]))  # li itself is extended
print(li)
operator.delitem(li, slice(1, 4))
print(li)
print(operator.getitem(li, slice(0, 2)))
s1 = "testing "
s2 = "operator"
print(operator.concat(s1, s2))
if (operator.contains(s1, s2)):
    print("Contains")
else:
    print("It doesn't")
a = 1
b = 0
print(operator.and_(a, b))
print(operator.or_(a, b))
print(operator.invert(a))

x = 10
y = 5
print(operator.iadd(x, y))
print(operator.isub(x, y))
print(operator.iconcat(s1, s2))
print(operator.imul(x, y))
print(operator.itruediv(x, y))
print(operator.imod(x, y))
print(operator.ixor(x, y))
print(operator.ipow(x, y))
print(operator.iand(x, y))
print(operator.ior(x, y))
print(operator.ilshift(x, y))
print(operator.irshift(x, y))
Example 42
def main():
    count = 0
    for i in range(1, threshold):
        if isLychrel(i, 0): count = operator.iadd(count, 1)
    print 'answer:', count
Example 44
def descriptive_analytics(company):

    company = str(company).lower()

    # Query and generate relevant posts and comments
    ## df_comments and df_posts to be sent for modeling sentiment analysis

    query_post = ("SELECT id, title, created_utc, num_comments, score "
                  "FROM `homework2-255022.redditbigdata.posts` "
                  "WHERE LOWER(title) LIKE LOWER('%" + company + "%');")

    job_post = bqclient.query(
        query_post,
        location="US",
    )  # API request - starts the query

    df_post = (job_post.result().to_dataframe())

    comments = []
    query_comments = (
        "SELECT body, link_id, score, created_utc "
        "FROM `homework2-255022.redditbigdata.comments` "
        "WHERE SUBSTR(link_id, STRPOS(link_id, '_') + 1, LENGTH(link_id)) IN ("
        "SELECT id "
        "FROM `homework2-255022.redditbigdata.posts`"
        "WHERE LOWER(title) LIKE LOWER('%" + company + "%'))")

    job_comments = bqclient.query(
        query_comments,
        location="US",
    )  # API request - starts the query

    df_comments = (job_comments.result().to_dataframe())

    comments.append(df_comments)
    df_comments = pd.concat(comments, ignore_index=True)
    df_comments = df_comments[(df_comments.body != '[removed]')
                              & (df_comments.body != '[deleted]')]

    df_post = df_post.drop_duplicates()
    df_comments = df_comments.drop_duplicates()

    # Sentiment Analysis

    df_comments['scaled_score'] = df_comments.apply(lambda x: 1
                                                    if x.score >= 50 else
                                                    (0 if x.score <= -10 else
                                                     (x.score + 10) / 60),
                                                    axis=1)
    scaled_sum = sum(df_comments.scaled_score)
    df_comments['weight'] = df_comments.apply(
        lambda x: x.scaled_score / scaled_sum, axis=1)

    analyzer = SentimentIntensityAnalyzer()

    def vader_score(comment):
        score = analyzer.polarity_scores(comment)["compound"]
        return score

    df_comments['vader'] = df_comments.apply(lambda x: vader_score(x.body),
                                             axis=1)
    df_comments['weighted_score'] = df_comments.apply(lambda x: x.weight *
                                                      ((x.vader * 50) + 50),
                                                      axis=1)
    sentiment_score = sum(df_comments.weighted_score)

    most_positive = []
    most_negative = []
    sorted_df = df_comments.sort_values(by=['vader'], ascending=False)
    for i in range(0, 10):
        most_negative.append([
            sorted_df.iloc[(i + 1) * (-1), 0],
            str(sorted_df.iloc[(i + 1) * (-1), 2]),
            str(sorted_df.iloc[(i + 1) * (-1), 6])
        ])
        most_positive.append([
            sorted_df.iloc[i, 0],
            str(sorted_df.iloc[i, 2]),
            str(sorted_df.iloc[i, 6])
        ])

    # Compute Metrics

    # Remove unnecessary characters
    df_post.title = df_post.title.apply(lambda x: [x.replace("*", "").\
                                                    replace("#", "").\
                                                    replace("-", "")][0])
    df_comments.body = df_comments.body.apply(lambda x: [x.replace("*", "").\
                                                    replace("#", "").\
                                                    replace("-", "")][0])

    # Download and remove set of stop words
    stop_words_set = set(stopwords.words('english'))
    if len(df_post) > 0:
        df_post.title = df_post.title.str.lower().str.split()
    if len(df_comments) > 0:
        df_comments.body = df_comments.body.str.lower().str.split()
    df_post.title = df_post.title.apply(
        lambda x: [item for item in x if item not in stop_words_set])
    df_comments.body = df_comments.body.apply(
        lambda x: [item for item in x if item not in stop_words_set])

    # Compute most common words
    if len(df_post) != 0 and len(df_comments) != 0:
        words = []
        df_post.title.apply(lambda x: iadd(words, x))
        df_comments.body.apply(lambda x: iadd(words, x))
        word_frequency = Counter(words)
    elif len(df_post) == 0:
        words = []
        df_comments.body.apply(lambda x: iadd(words, x))
        word_frequency = Counter(words)
    elif len(df_comments) == 0:
        words = []
        df_post.title.apply(lambda x: iadd(words, x))
        word_frequency = Counter(words)
    else:
        word_frequency = [("None", 0)]

    most_common_words = []
    for i in word_frequency.most_common(15):
        if i[0] != '[removed]' and len(most_common_words) < 10:
            most_common_words.append(i)

    # Compute top 4 metrics
    query_comment_mentions = (
        "SELECT body, link_id, ups, downs, score, created_utc "
        "FROM `homework2-255022.redditbigdata.comments` "
        "WHERE LOWER(body) LIKE LOWER('%" + company + "%')")

    job_comment_mentions = bqclient.query(
        query_comment_mentions,
        location="US",
    )  # API request - starts the query

    df_comment_mentions = job_comment_mentions.result().to_dataframe()
    comment_mentions = len(df_comment_mentions)
    mean_comments = df_post.num_comments.mean()

    # Compute periodical counts
    max_time = 1564617378
    interval = 3600

    week_count_posts = []
    month_count_posts = []
    year_count_posts = []
    for i in range(7, 0, -1):
        tmp = df_post[(df_post.created_utc > max_time - (i * interval * 24))
                      & (df_post.created_utc <= max_time -
                         ((i - 1) * interval * 24))]
        week_count_posts.append(len(tmp))
    for i in range(4, 0, -1):
        tmp = df_post[(df_post.created_utc > max_time -
                       (i * interval * 24 * 7))
                      & (df_post.created_utc <= max_time -
                         ((i - 1) * interval * 24 * 7))]
        month_count_posts.append(len(tmp))
    for i in range(7, 0, -1):
        tmp = df_post[(df_post.created_utc > max_time -
                       (i * interval * 24 * 30))
                      & (df_post.created_utc <= max_time -
                         ((i - 1) * interval * 24 * 30))]
        year_count_posts.append(len(tmp))

    week_count_comments = []
    month_count_comments = []
    year_count_comments = []
    for i in range(7, 0, -1):
        tmp = df_comment_mentions[
            (df_comment_mentions.created_utc > max_time - (i * interval * 24))
            & (df_comment_mentions.created_utc <= max_time -
               ((i - 1) * interval * 24))]
        week_count_comments.append(len(tmp))
    for i in range(4, 0, -1):
        tmp = df_comment_mentions[
            (df_comment_mentions.created_utc > max_time -
             (i * interval * 24 * 7))
            & (df_comment_mentions.created_utc <= max_time -
               ((i - 1) * interval * 24 * 7))]
        month_count_comments.append(len(tmp))
    for i in range(7, 0, -1):
        tmp = df_comment_mentions[
            (df_comment_mentions.created_utc > max_time -
             (i * interval * 24 * 30))
            & (df_comment_mentions.created_utc <= max_time -
               ((i - 1) * interval * 24 * 30))]
        year_count_comments.append(len(tmp))

    week_count = list(map(add, week_count_posts, week_count_comments))
    month_count = list(map(add, month_count_posts, month_count_comments))
    year_count = list(map(add, year_count_posts, year_count_comments))

    post_mentions = len(df_post)
    post_score = df_post.score.sum()

    # Normalize missing metrics before output
    if str(post_score) == "None":
        post_score = "0"
    if str(mean_comments) == "nan":
        mean_comments = "0"
    else:
        mean_comments = round(mean_comments, 2)

    total_mentions = int(post_mentions) + int(comment_mentions)
    # Output results to json
    results = {
        "entity_name": company,
        "total_mentions": "{0:,.0f}".format(total_mentions),
        "post_mentions": "{0:,.0f}".format(post_mentions),
        "post_scores": "{0:,.0f}".format(post_score),
        "comment_mentions": "{0:,.0f}".format(comment_mentions),
        "mean_comments": "{0:,.2f}".format(mean_comments),
        "most_common_words": most_common_words,
        "week_count": week_count,
        "month_count": month_count,
        "year_count": year_count,
        "sentiment_score": sentiment_score,
        "most_positive": most_positive,
        "most_negative": most_negative
    }
    # Output results to json
    print(results)

    return results
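The iadd(words, x) calls above use Series.apply purely for its side effect: the returned Series is discarded and iadd extends the accumulator list in place. A minimal sketch of the pattern (pandas assumed, hypothetical data):

import pandas as pd
from operator import iadd

words = []
s = pd.Series([["a", "b"], ["c"]])
s.apply(lambda x: iadd(words, x))  # extends `words` as a side effect
print(words)  # ['a', 'b', 'c']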
Example 45
# In-place operators change the first argument for mutable targets, e.g. iadd()
# Normal operators don't change it, e.g. add()
import operator

l=[1,2,3,4]
z=operator.add(l,[5,6,7])
print(z,l,end=" ")
p=operator.iadd(l,[5,6,7])
print("\n",p,l,end=" ")
#Immutable Targets
#print(operator.add(3,4))
Example 46
#comparison between add and iadd
import operator

lst_1 = [1, 2, 3, 4]
lst_2 = [90, 98]
lst_3 = operator.add(lst_1, lst_2)
print(lst_3)
print(lst_1)
lst_4 = [5, 6, 7, 8]
lst_5 = [121, 234]
lst_6 = operator.iadd(lst_4, lst_5)
print(lst_6)
print(lst_4)
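For reference, the expected output (fresh interpreter assumed):

# [1, 2, 3, 4, 90, 98]    <- add() builds a new list
# [1, 2, 3, 4]            <- lst_1 is unchanged
# [5, 6, 7, 8, 121, 234]  <- iadd() returns lst_4 itself
# [5, 6, 7, 8, 121, 234]  <- lst_4 was mutated in place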
Example 47
			def interp(ins):
				nonlocal stack, block_stack, ip, env

				if ins.opname == 'LOAD_CONST': push(ins.argval)
				elif ins.opname == 'LOAD_NAME': push(env[ins.argval]) # TODO: use locals or global
				elif ins.opname == 'STORE_NAME': env[ins.argval] = pop()
				elif ins.opname == 'DELETE_NAME': del env[ins.argval]
				elif ins.opname == 'LOAD_GLOBAL': push(env[ins.argval]) # TODO: global env
				elif ins.opname == 'LOAD_FAST': push(localenv[ins.argval])
				elif ins.opname == 'STORE_FAST': localenv[ins.argval] = pop()
				elif ins.opname == 'LOAD_ATTR': push(getattr(pop(), ins.argval))
				elif ins.opname == 'STORE_ATTR': setattr(pop(), ins.argval, pop())
				elif ins.opname == 'CALL_FUNCTION':
					# TODO: handle more than just positional arguments
					argc = ins.argval
					positional = argc & 0xFF
					args = [pop() for _ in range(positional)]
					args.reverse()
					log("args:", args)
					f = pop()
					push(f(*args))
				elif ins.opname == 'MAKE_FUNCTION':
					argc = ins.argval
					positional = argc & 0xFF
					name = pop()
					code = pop()
					default_args = [pop() for _ in range(positional)]
					log("make function:", name, positional, code)
					push(Function(name, positional, code, interpret))
				elif ins.opname == 'POP_TOP': pop()
				elif ins.opname == 'DUP_TOP': push(stack[-1])
				elif ins.opname == 'RETURN_VALUE': raise Return(pop())
				elif ins.opname == 'COMPARE_OP':
					opname = ins.argrepr
					rhs = pop()
					lhs = pop()
					push({'<': operator.lt, '>': operator.gt,
						  '==': operator.eq, '!=': operator.ne,
						  '<=': operator.le, '>=': operator.ge}[opname](lhs, rhs))
				elif ins.opname == 'UNARY_NOT': push(not pop())
				elif ins.opname == 'INPLACE_MULTIPLY': rhs = pop(); push(operator.imul(pop(), rhs))
				elif ins.opname == 'INPLACE_SUBTRACT': rhs = pop(); push(operator.isub(pop(), rhs))
				elif ins.opname == 'INPLACE_ADD': rhs = pop(); push(operator.iadd(pop(), rhs))
				elif ins.opname == 'BINARY_ADD': push(pop() + pop())
				elif ins.opname == 'BINARY_SUBTRACT': rhs = pop(); push(pop() - rhs)
				elif ins.opname == 'BINARY_MULTIPLY': rhs = pop(); push(pop() * rhs)
				elif ins.opname == 'BINARY_MODULO': rhs = pop(); push(pop() % rhs)
				elif ins.opname == 'BINARY_TRUE_DIVIDE': rhs = pop(); push(pop() / rhs)
				elif ins.opname == 'BINARY_OR': rhs = pop(); push(pop() or rhs)
				elif ins.opname == 'BINARY_SUBSCR': i = pop(); push(pop()[i])
				elif ins.opname == 'STORE_SUBSCR': i = pop(); lhs = pop(); lhs[i] = pop()
				elif ins.opname == 'STORE_MAP': k = pop(); v = pop(); stack[-1][k] = v
				elif ins.opname == 'UNPACK_SEQUENCE': stack.extend([x for x in reversed(pop())])
				elif ins.opname == 'LIST_APPEND': v = pop(); stack[-ins.argval].append(v)
				elif ins.opname == 'MAP_ADD': k = pop(); d = stack[-ins.argval-1]; d[k] = pop()
				elif ins.opname == 'BUILD_MAP': push({})
				elif ins.opname == 'BUILD_TUPLE':
					push(tuple(reversed([pop() for _ in range(ins.argval)])))
				elif ins.opname == 'BUILD_LIST':
					push(list(reversed([pop() for _ in range(ins.argval)])))
				elif ins.opname == 'BUILD_SLICE':
					argc = ins.argval
					if argc == 2: # x[i:j]
						i = pop(); push(slice(pop(), i))
					elif argc == 3: # x[i:j:k]
						j = pop(); i = pop(); push(slice(pop(), i, j))
				elif ins.opname == 'SETUP_LOOP':
					# (start, end) indices
					block_stack.append((ip, indices[ins.argval]))
				elif ins.opname == 'POP_BLOCK': block_stack.pop()
				elif ins.opname == 'JUMP_ABSOLUTE':
					log("jmp to {0} ({1})".format(ins.argval, indices[ins.argval]))
					ip = indices[ins.argval]
				elif ins.opname == 'JUMP_FORWARD':
					log("jmp forward to {0} ({1})".format(ins.argval, indices[ins.argval]))
					ip = indices[ins.argval]
				elif ins.opname == 'POP_JUMP_IF_FALSE':
					log("jmpf to {0} ({1})".format(ins.argval, indices[ins.argval]))
					if not pop(): ip = indices[ins.argval]
				elif ins.opname == 'POP_JUMP_IF_TRUE':
					log("jmpt to {0} ({1})".format(ins.argval, indices[ins.argval]))
					if pop(): ip = indices[ins.argval]
				elif ins.opname == 'GET_ITER': push(iter(pop()))
				elif ins.opname == 'FOR_ITER':
					iterator = stack[-1]
					try: push(next(iterator))
					except StopIteration:
						pop()
						ip = indices[ins.argval]
				else:
					raise NotImplementedError("instruction: " + repr(ins))
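For context, the INPLACE_* opcodes handled above come from augmented assignments on CPython 3.10 and earlier (3.11 folded them into BINARY_OP); a quick way to see them, assuming an older interpreter:

import dis

def f(x, y):
    x += y
    return x

dis.dis(f)  # on CPython <= 3.10: LOAD_FAST x, LOAD_FAST y, INPLACE_ADD, STORE_FAST x, ...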
Example 48
 def __iadd__(self, other):
     return operator.iadd(self._wrapped(), other)
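A hypothetical minimal wrapper consistent with this snippet (assumed; the real class is not shown):

import operator

class LazyProxy:
    def __init__(self, factory):
        self._factory = factory

    def _wrapped(self):
        return self._factory()

    def __iadd__(self, other):
        # delegate in-place addition to the freshly resolved target
        return operator.iadd(self._wrapped(), other)

p = LazyProxy(lambda: [1, 2])
p += [3]   # p is rebound to the underlying list [1, 2, 3]
print(p)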
Example 49
  
# importing operator to handle operator operations 
import operator 
  
# Initializing list 
a = [1, 2, 4, 5] 
  
# using add() to add the arguments passed  
z = operator.add(a,[1, 2, 3]) 
  
# printing the modified value 
print ("Value after adding using normal operator : ",end="") 
print (z) 
  
# printing value of first argument 
# value is unchanged 
print ("Value of first argument using normal operator : ",end="") 
print (a) 
  
# using iadd() to add the arguments passed  
# performs a+=[1, 2, 3] 
p = operator.iadd(a,[1, 2, 3]) 
  
# printing the modified value 
print ("Value after adding using Inplace operator : ",end="") 
print (p) 
  
# printing value of first argument 
# value is changed 
print ("Value of first argument using Inplace operator : ",end="") 
print (a) 
Example 50
 def accumulate(x, s, c):
     t = tuple(x)
     col = c.next()
     iadd(s[t], [col])
     return None
Example 52
 def accumulate(x, s, c):
     t = tuple(x)
     col = next(c)
     iadd(s[t], [col])
     return None
Example 53
	Author : Bhavya
	Date : Sept-06-2017
	Purpose : To demonstrate the difference between in-place and standard operators
"""

import operator

#For immutable objects
x = 1
y = 2
a = 1
b = 2

z = operator.add(x, y)  #add using normal operator

c = operator.iadd(a, b)  #add using inplace operator

print "Addition with normal operator", z
print "Addition with inplace operator", c

print "First argument with normal operator", x  #unchanged
print "First argument with inplace operator", a  #unchanged

#For mutable objects

d = [1, 2, 3]
e = operator.add(d, [4, 5, 6])

print "Addition with normal operator", e
print "First argument", d