Example #1
def walk(obj, should_iter=should_iter):

    def _walk(iterator):
        # pro-tip: step *into* this next line to debug generators
        for obj in iterator:
            if should_iter(obj):
                stack.append(iter(obj))
                return
            yield obj
        # iterator is now exhausted
        stack.pop()

    if not should_iter(obj):
        yield iter([obj])
        return

    # our stack of iterators - this is how we walk
    stack = [iter(obj)]

    # keep yielding generators until the stack is empty
    while stack:
        gen = _walk(stack[-1])
        yield gen
        # we expect that the generator will have been
        # exhausted by the time we get here, but we make
        # sure it is because otherwise the walk won't stop!
        for _ in gen:
            pass
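A quick way to exercise walk is to flatten a nested structure. A minimal sketch, assuming a hypothetical should_iter that descends into lists and tuples (the predicate is not shown above, so it is passed explicitly rather than relying on the default):

def should_iter(obj):
    # hypothetical predicate: treat lists and tuples as containers
    return isinstance(obj, (list, tuple))

nested = [1, [2, [3, 4]], 5]
flat = [x for gen in walk(nested, should_iter=should_iter) for x in gen]
print(flat)  # [1, 2, 3, 4, 5]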
Example #2
    def child_params(self):
        """
        :return: parameter values for our nested stack based on our properties
        """
        params = {}
        for pname, pval in iter(self.properties.props.items()):
            if not pval.implemented():
                continue

            val = self.properties[pname]
            if val is not None:
                # take a list and create a CommaDelimitedList
                if pval.type() == properties.Schema.LIST:
                    if len(val) == 0:
                        params[pname] = ""
                    elif isinstance(val[0], dict):
                        flattened = []
                        for (count, item) in enumerate(val):
                            for (ik, iv) in iter(item.items()):
                                mem_str = ".member.%d.%s=%s" % (count, ik, iv)
                                flattened.append(mem_str)
                        params[pname] = ",".join(flattened)
                    else:
                        params[pname] = ",".join(val)
                else:
                    # for MAP, the JSON param takes either a collection or
                    # string, so just pass it on and let the param validate
                    # as appropriate
                    params[pname] = val

        return params
Example #3
    def _read_row_helper(self, chunks, expected_result):
        from google.cloud._testing import _Monkey
        from unit_tests._testing import _FakeStub
        from google.cloud.bigtable import table as MUT

        client = _Client()
        instance = _Instance(self.INSTANCE_NAME, client=client)
        table = self._make_one(self.TABLE_ID, instance)

        # Create request_pb
        request_pb = object()  # Returned by our mock.
        mock_created = []

        def mock_create_row_request(table_name, row_key, filter_):
            mock_created.append((table_name, row_key, filter_))
            return request_pb

        # Create response_iterator
        if chunks is None:
            response_iterator = iter(())  # no responses at all
        else:
            response_pb = _ReadRowsResponsePB(chunks=chunks)
            response_iterator = iter([response_pb])

        # Patch the stub used by the API method.
        client._data_stub = stub = _FakeStub(response_iterator)

        # Perform the method and check the result.
        filter_obj = object()
        with _Monkey(MUT, _create_row_request=mock_create_row_request):
            result = table.read_row(self.ROW_KEY, filter_=filter_obj)

        self.assertEqual(result, expected_result)
        self.assertEqual(stub.method_calls, [("ReadRows", (request_pb,), {})])
        self.assertEqual(mock_created, [(table.name, self.ROW_KEY, filter_obj)])
Example #4
 def _load_kv_partitions(partition):
     """Convert a partition where each row is key/value data."""
     partitionList = list(partition)
     if len(partitionList) > 0:
         return iter([pandas.DataFrame(data=partitionList)])
     else:
         return iter([])
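The iter([...]) return shape matches what Spark's RDD.mapPartitions expects: an iterator of results per partition. A hypothetical usage sketch, assuming a SparkContext sc and pandas importable on the workers:

# one pandas.DataFrame per non-empty partition of key/value rows
rdd = sc.parallelize([("a", 1), ("b", 2), ("c", 3)], 2)
frames = rdd.mapPartitions(_load_kv_partitions).collect()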
Example #5
    def test_mutators_against_iter(self):
        # testing a set modified against an iterator
        o = util.OrderedSet([3, 2, 4, 5])

        eq_(o.difference(iter([3, 4])), util.OrderedSet([2, 5]))
        eq_(o.intersection(iter([3, 4, 6])), util.OrderedSet([3, 4]))
        eq_(o.union(iter([3, 4, 6])), util.OrderedSet([2, 3, 4, 5, 6]))
Example #6
def test_fromdb_mkcursor():

    # initial data
    data = (("a", 1), ("b", 2), ("c", 2.0))
    connection = sqlite3.connect(":memory:")
    c = connection.cursor()
    c.execute("create table foobar (foo, bar)")
    for row in data:
        c.execute("insert into foobar values (?, ?)", row)
    connection.commit()
    c.close()

    # test the function
    mkcursor = lambda: connection.cursor()
    actual = fromdb(mkcursor, "select * from foobar")
    expect = (("foo", "bar"), ("a", 1), ("b", 2), ("c", 2.0))
    ieq(expect, actual)
    ieq(expect, actual)  # verify can iterate twice

    # test iterators are isolated
    i1 = iter(actual)
    i2 = iter(actual)
    eq_(("foo", "bar"), i1.next())
    eq_(("a", 1), i1.next())
    eq_(("foo", "bar"), i2.next())
    eq_(("b", 2), i1.next())
Example #7
def init_word_tables(ftable1, ftable2):
    fileptr = 0
    # unique = 0
    temp_train_emails = train_emails.split("\n")
    # for each training document for spam
    for i in range(0, 350):
        doc = temp_train_emails[i]
        doc = doc.split(" ")
        # skip first entry (because we already know the class)
        iterdoc = iter(doc)
        next(iterdoc)
        for word in iterdoc:
            word = word.split(":")
            # print(word)
            if word[0] not in ftable1:
                ftable1[word[0]] = int(word[1])
            else:
                ftable1[word[0]] = ftable1[word[0]] + int(word[1])

    # do the same for nonspam
    for i in range(350, 700):
        doc = temp_train_emails[i]
        doc = doc.split(" ")
        # skip first entry (because we already know the class)
        iterdoc = iter(doc)
        next(iterdoc)
        for word in iterdoc:
            word = word.split(":")
            # print(word)
            if word[0] not in ftable2:
                # unique = unique+1
                ftable2[word[0]] = int(word[1])
            else:
                ftable2[word[0]] = ftable2[word[0]] + int(word[1])
Example #8
    def test_07_version_requirements(self):
        self.get_depspecs()
        vrc = self.pds.version_requirements

        self.assertEqual(len(list(vrc)), 1)
        self.assertEqual(next(iter(vrc)).version_spec, VersionSpec("1"))
        self.assertEqual(next(iter(vrc)).version_operator.value, VersionOperator(">=").value)
Example #9
def DFS_search(G):
    """
    Generate a sequence of triples (v, w, edgetype) for a DFS of graph G.
    The subsequence for each root of each tree in the DFS forest starts
    with (root, root, forward) and ends with (root, root, reverse).
    Every vertex of G is tried as a root, so all components are searched.
    """
    visited = set()
    initials = G

    for v in initials:
        if v not in visited:
            yield v, v, forward
            visited.add(v)
            stack = [(v, iter(G[v]))]
            while stack:
                parent, children = stack[-1]
                try:
                    child = next(children)
                    if child in visited:
                        yield parent, child, nontree
                    else:
                        yield parent, child, forward
                        visited.add(child)
                        stack.append((child, iter(G[child])))
                except StopIteration:
                    stack.pop()
                    if stack:
                        yield stack[-1][0], parent, reverse

            yield v, v, reverse
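The edge-type values forward, reverse, and nontree are module-level sentinels in the original source. A minimal sketch of driving the generator, with placeholder sentinels and an adjacency-dict graph:

forward, reverse, nontree = "forward", "reverse", "nontree"  # placeholders

G = {0: [1, 2], 1: [2], 2: []}  # vertex -> list of neighbours
for v, w, edgetype in DFS_search(G):
    print(v, w, edgetype)
# 0 0 forward / 0 1 forward / 1 2 forward / 1 2 reverse /
# 0 1 reverse / 0 2 nontree / 0 0 reverse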
Example #10
    def make_barcode(self, tiles, refs=None, overhangs=None, code=('<' + 'rstsro'*5 + 'rstsr>')):
        """Make a single barcode from given tiles according to code. Tokens are:
        r = ref
        t = tile
        o = overhang
        s = spacer (e.g., AA)
        <, > = left- and right-hand sides

        :param tiles:
        :param refs:
        :param overhangs:
        :param code:
        :return:
        """
        if refs is None:
            refs = self.refs.set_index('name').loc[self.ref_order, 'sequence']
        overhangs = self.overhangs if overhangs is None else overhangs

        tokens = {'r': iter(refs),
                  't': iter(tiles),
                  'o': iter([rc(ov.upper()) for ov in overhangs]),
                  's': cycle([spacer]),
                  '<': cycle([rc(LHS)]),
                  '>': cycle([rc(RHS)])}

        return ''.join([rc(next(tokens[c])) for c in code])
Example #11
    def test_overview(self):
        r = self.get_view_response("stats.overview_series", group="day", format="json")
        eq_(r.status_code, 200)
        # These are the dates from the fixtures. The return value will have
        # dates in between filled with zeroes.
        expected_data = [
            {"date": "2009-09-03", "data": {"downloads": 10, "updates": 0}},
            {"date": "2009-08-03", "data": {"downloads": 10, "updates": 0}},
            {"date": "2009-07-03", "data": {"downloads": 10, "updates": 0}},
            {"date": "2009-06-28", "data": {"downloads": 10, "updates": 0}},
            {"date": "2009-06-20", "data": {"downloads": 10, "updates": 0}},
            {"date": "2009-06-12", "data": {"downloads": 10, "updates": 0}},
            {"date": "2009-06-07", "data": {"downloads": 10, "updates": 0}},
            {"date": "2009-06-02", "data": {"downloads": 0, "updates": 1500}},
            {"date": "2009-06-01", "data": {"downloads": 10, "updates": 1000}},
        ]
        actual_data = json.loads(r.content)
        # Make sure they match up at the front and back.
        eq_(actual_data[0]["date"], expected_data[0]["date"])
        eq_(actual_data[-1]["date"], expected_data[-1]["date"])
        end_date = expected_data[-1]["date"]

        expected, actual = iter(expected_data), iter(actual_data)
        next_expected, next_actual = next(expected), next(actual)
        while True:
            if next_expected["date"] == next_actual["date"]:
                # If they match it's a date we have data for.
                self.assertDictEqual(next_expected, next_actual)
                if next_expected["date"] == end_date:
                    break
                next_expected, next_actual = next(expected), next(actual)
            else:
                # Otherwise just check that the data is zeroes.
                self.assertDictEqual(next_actual["data"], {"downloads": 0, "updates": 0})
                next_actual = next(actual)
Example #12
    def test_sync_request_fail_key(self):
        self.test_auth.app = FakeApp(iter([("204 No Content", {}, "")]), sync_key="secret")
        req = self._make_request(
            "/v1/AUTH_cfa/c/o",
            environ={"REQUEST_METHOD": "DELETE"},
            headers={"x-container-sync-key": "wrongsecret", "x-timestamp": "123.456"},
        )
        req.remote_addr = "127.0.0.1"
        resp = req.get_response(self.test_auth)
        self.assertEqual(resp.status_int, 401)

        self.test_auth.app = FakeApp(iter([("204 No Content", {}, "")]), sync_key="othersecret")
        req = self._make_request(
            "/v1/AUTH_cfa/c/o",
            environ={"REQUEST_METHOD": "DELETE"},
            headers={"x-container-sync-key": "secret", "x-timestamp": "123.456"},
        )
        req.remote_addr = "127.0.0.1"
        resp = req.get_response(self.test_auth)
        self.assertEqual(resp.status_int, 401)

        self.test_auth.app = FakeApp(iter([("204 No Content", {}, "")]), sync_key=None)
        req = self._make_request(
            "/v1/AUTH_cfa/c/o",
            environ={"REQUEST_METHOD": "DELETE"},
            headers={"x-container-sync-key": "secret", "x-timestamp": "123.456"},
        )
        req.remote_addr = "127.0.0.1"
        resp = req.get_response(self.test_auth)
        self.assertEqual(resp.status_int, 401)
Example #13
    def setStatus(self, status, elementIDs=None, SubscriptionId=None, WorkflowName=None):
        """
        _setStatus_, throws an exception if no elements are updated

        """
        try:
            if not elementIDs:
                elementIDs = []
            iter(elementIDs)
            if isinstance(elementIDs, str):
                raise TypeError
        except TypeError:
            elementIDs = [elementIDs]

        if status == "Canceled":  # Cancel needs special actions
            return self.cancelWork(elementIDs, SubscriptionId, WorkflowName)

        args = {}
        if SubscriptionId:
            args["SubscriptionId"] = SubscriptionId
        if WorkflowName:
            args["RequestName"] = WorkflowName

        affected = self.backend.getElements(elementIDs=elementIDs, **args)
        if not affected:
            raise WorkQueueNoMatchingElements("No matching elements")

        for x in affected:
            x["Status"] = status
        elements = self.backend.saveElements(*affected)
        if len(affected) != len(elements):
            raise RuntimeError("Some elements not updated, see log for details")

        return elements
Example #14
 def test_no_len_on_dict_iter(self):
     iterable = {1: 2, 3: 4}
     raises(TypeError, len, iter(iterable))
     iterable = {"1": 2, "3": 4}
     raises(TypeError, len, iter(iterable))
     iterable = {}
     raises(TypeError, len, iter(iterable))
Example #15
 def __parse_gccxml_created_file(self, gccxml_file):
     scanner_ = scanner_t(gccxml_file, self.__decl_factory)
     scanner_.read()
     decls = scanner_.declarations()
     types = scanner_.types()
     files = {}
     for file_id, file_path in scanner_.files().items():
         files[file_id] = self.__produce_full_file(file_path)
     linker_ = linker.linker_t(
         decls=decls, types=types, access=scanner_.access(), membership=scanner_.members(), files=files
     )
     for type_ in list(types.values()):
         # I need this copy because the linker internally changes the types collection
         linker_.instance = type_
         apply_visitor(linker_, type_)
     for decl in decls.values():
         linker_.instance = decl
         apply_visitor(linker_, decl)
     bind_aliases(iter(decls.values()))
     # sometimes gccxml reports typedefs defined in no namespace;
     # it happens, for example, in the following situation:
     # template< typename X >
     # void ddd(){ typedef typename X::Y YY; }
     # if this bug shows up again, the right way to fix it may be different
     patcher.fix_calldef_decls(scanner_.calldefs(), scanner_.enums())
     decls = [inst for inst in iter(decls.values()) if isinstance(inst, namespace_t) and not inst.parent]
     return (decls, list(files.values()))
Example #16
 def url_patterns(self):
     patterns = getattr(self.urlconf_module, "urlpatterns", self.urlconf_module)
     try:
         iter(patterns)
     except TypeError:
         raise ImproperlyConfigured("The included urlconf %s doesn't have any patterns in it" % self.urlconf_name)
     return patterns
Example #17
def testGatherData():
    """Test GatherData with generated data"""

    test_vals = norm(loc=4, scale=0.6).rvs(800)

    simple_col = list(map(str, test_vals))  # list, so it can be reused below

    out_data = GatherData(iter(simple_col))
    # need to use "almost_equal" since conterting to and from strings causes
    # some rounding related data-loss
    N.testing.assert_array_almost_equal(out_data, test_vals, decimal=1, err_msg="Did not parse simple file data")

    multi_col = "\tsomejujnk\n".join(simple_col)
    out_data = GatherData(iter(multi_col.split("\n")))
    N.testing.assert_array_almost_equal(out_data, test_vals, decimal=1, err_msg="Did not parse when in first-col")

    mk_str = lambda x: "453.12\t%s\t279\n" % x
    multi_col2 = map(mk_str, simple_col)
    out_data = GatherData(iter(multi_col2), COL=1)
    N.testing.assert_array_almost_equal(out_data, test_vals, decimal=1, err_msg="Did not parse when given COL kwarg")

    out_data = GatherData(iter(simple_col + ["lkdfhsd"]))
    N.testing.assert_array_almost_equal(out_data, test_vals, decimal=1, err_msg="Did not parse when given junk-data")

    nose.tools.assert_raises(ValueError, GatherData, iter(simple_col + ["lkdfhsd"]), SKIP_JUNK=False)
Example #18
def non_string_iterable(obj):
    try:
        iter(obj)
    except TypeError:
        return False
    else:
        return not isinstance(obj, str)
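For instance:

print(non_string_iterable([1, 2, 3]))  # True
print(non_string_iterable("abc"))      # False: iterable, but a string
print(non_string_iterable(42))         # False: not iterable at all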
Example #19
def test_count():
    assert count((1, 2, 3)) == 3
    assert count([]) == 0
    assert count(iter((1, 2, 3, 4))) == 4

    assert count("hello") == 5
    assert count(iter("hello")) == 5
Example #20
 def __reduce__(self):
     # FIXME: This actually does not feel right: we have to use the DataSet
     # method here, although we inherit from the sequential dataset.
     _, _, state, _, _ = DataSet.__reduce__(self)
     creator = self.__class__
     args = self.statedim, self.actiondim
     return creator, args, state, iter([]), iter({})
Example #21
 def loadFromKeyRow(partition):
     pll = list(partition)
     if len(pll) > 0:
         index, data = zip(*pll)
         return iter([pandas.DataFrame(list(data), columns=mycols, index=index)])
     else:
         return iter([])
Example #22
def addChanges(remote, changei, src="git"):
    logging.debug("addChanges %s, %s" % (repr(remote), repr(changei)))

    def addChange(c):
        logging.info("New revision: %s" % c["revision"][:8])
        for key, value in c.items():
            logging.debug("  %s: %s" % (key, value))

        d = remote.callRemote("addChange", c, src=src)
        return d

    finished_d = defer.Deferred()

    def iter():  # note: shadows the builtin iter() inside addChanges
        try:
            c = next(changei)
            d = addChange(c)  # addChange takes one argument; src is passed via callRemote above
            # handle successful completion by re-iterating, but not immediately
            # as that will blow out the Python stack
            def cb(_):
                reactor.callLater(0, iter)

            d.addCallback(cb)
            # and pass errors along to the outer deferred
            d.addErrback(finished_d.errback)
        except StopIteration:
            remote.broker.transport.loseConnection()
            finished_d.callback(None)

    iter()
    return finished_d
Example #23
def complexContractions():
    print([(x, y) for x in range(3) for y in range(5)])

    seq = range(3)
    res = [(i, j, k) for i in iter(seq) for j in iter(seq) for k in iter(seq)]

    print(res)
Example #24
def test_records():
    table = (("foo", "bar"), ("a", 1), ("b", 2), ("c", 3))
    actual = records(table)
    # access items
    it = iter(actual)
    o = next(it)
    eq_("a", o["foo"])
    eq_(1, o["bar"])
    o = next(it)
    eq_("b", o["foo"])
    eq_(2, o["bar"])
    # access attributes
    it = iter(actual)
    o = next(it)
    eq_("a", o.foo)
    eq_(1, o.bar)
    o = next(it)
    eq_("b", o.foo)
    eq_(2, o.bar)
    # access with get() method
    it = iter(actual)
    o = next(it)
    eq_("a", o.get("foo"))
    eq_(1, o.get("bar"))
    eq_(None, o.get("baz"))
    eq_("qux", o.get("baz", default="qux"))
Example #25
def test_eudx():

    # read bvals,gradients and data
    fimg, fbvals, fbvecs = get_data("small_64D")
    bvals = np.load(fbvals)
    gradients = np.load(fbvecs)
    img = ni.load(fimg)
    data = img.get_data()

    print(data.shape)
    gqs = GeneralizedQSampling(data, bvals, gradients)
    ten = Tensor(data, bvals, gradients, thresh=50)
    seed_list = np.dot(np.diag(np.arange(10)), np.ones((10, 3)))
    iT = iter(EuDX(gqs.qa(), gqs.ind(), seeds=seed_list))
    T = []
    for t in iT:
        T.append(t)
    iT2 = iter(EuDX(ten.fa(), ten.ind(), seeds=seed_list))
    T2 = []
    for t in iT2:
        T2.append(t)

    print("length T ", sum([length(t) for t in T]))
    print("length T2", sum([length(t) for t in T2]))

    print(gqs.QA[1, 4, 8, 0])
    print(gqs.QA.ravel()[ndarray_offset(np.array([1, 4, 8, 0]), np.array(gqs.QA.strides), 4, 8)])

    assert_almost_equal(
        gqs.QA[1, 4, 8, 0], gqs.QA.ravel()[ndarray_offset(np.array([1, 4, 8, 0]), np.array(gqs.QA.strides), 4, 8)]
    )

    assert_almost_equal(sum([length(t) for t in T]), 70.999996185302734, places=3)
    assert_almost_equal(sum([length(t) for t in T2]), 56.999997615814209, places=3)
Example #26
def _is_iterable(value):
    try:
        iter(value)
    except TypeError:
        return False
    else:
        return True
Example #27
 def iter(start, end):
     if end - start <= 1:
         return 0
     mid = (start + end) // 2  # integer division keeps mid a valid index
     count = iter(start, mid) + iter(mid, end)
     cache = [0] * (end - start)
     j = k = t = mid
     r = 0
     for i in range(start, mid):
         # j: first index such that sum[j] - sum[i] >= lower
         # k: first index such that sum[k] - sum[i] >  upper
         # this is a merge sort, so no usable value exists after k;
         # t and cache merge the two sorted halves back together
         while j < end and sum[j] - sum[i] < lower:
             j += 1
         while k < end and sum[k] - sum[i] <= upper:
             k += 1
         while t < end and sum[t] < sum[i]:
             cache[r] = sum[t]
             t += 1
             r += 1
         cache[r] = sum[i]
         r += 1
         count += k - j
     for i in range(r):
         sum[start + i] = cache[i]
     return count
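This helper is the merge-sort step of the Count of Range Sum problem: sum holds prefix sums, and lower and upper come from the enclosing scope (note that the helper also shadows the builtin iter). A hypothetical driver, assuming everything lives at module level:

nums = [-2, 5, -1]
lower, upper = -2, 2
sum = [0]  # prefix sums; shadows the builtin sum, as in the original
for n in nums:
    sum.append(sum[-1] + n)
print(iter(0, len(sum)))  # 3 range sums fall within [-2, 2]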
Example #28
 def add(self, entity):
     """Add an entity to the agent"""
     self.log(LOG_DEBUG, "Add entity: %s" % entity)
     entity.validate()  # Fill in defaults etc.
     # Validate in the context of the existing entities for uniqueness
     self.schema.validate_full(chain(iter([entity]), iter(self.entities)))
     self.entities.append(entity)
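A side note: itertools.chain calls iter() on each of its arguments itself, so the explicit wrappers are optional here; chain([entity], self.entities) behaves identically.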
Example #29
def test_iterable(value):
    """Check if it's possible to iterate over an object."""
    try:
        iter(value)
    except TypeError:
        return False
    return True
Example #30
def iterable(obj):
    try:
        iter(obj)
    except TypeError:
        return False

    return True