Example #1
0
def scan_field_id(token, state, stream, lexdata):
    """Recognize a metadata field identifier at *token* and compute the
    scanner's next state.

    Returns a ``(queue, nxt, state)`` tuple: ``queue`` holds the tokens to
    emit, ``nxt`` is the lookahead token for the caller, ``state`` is the
    next scanner state.  Raises ``SyntaxError`` when the candidate is not a
    known field identifier followed by a colon.
    """
    # When a candidate is found, do as follows:
    # - save the candidate
    # - eat any whitespace
    # - if next is colon, candidate is an identifier, emit both
    # identifier and colon
    candidate = token
    token = stream.peek()
    if token.type == "WS":
        # NOTE(review): peek() is called twice rather than consuming the WS
        # token -- this assumes peek() advances an internal lookahead
        # cursor; confirm against the stream implementation.
        token = stream.peek()
    if token.type == "COLON":
        # We do have a identifier, so replace WORD token by the
        # right keyword token
        candidate = _new_token(META_FIELDS_ID[candidate.value], candidate)

    try:
        field_type = FIELD_TYPE[candidate.type]
    except KeyError:
        # Unknown token type: report the offending source line.
        data = lexdata.splitlines()
        msg = ["Error while tokenizing %r (missing colon ?)" %  candidate.value]
        msg += ["    Line %d -> %r" % (candidate.lineno, data[candidate.lineno-1])]
        raise SyntaxError("\n".join(msg))
    try:
        state = _FIELD_TYPE_TO_STATE[field_type]
    except KeyError:
        raise ValueError("Unknown state transition for type %s" % field_type)

    # Emit the (possibly rewritten) identifier plus the token that follows
    # it, and fetch one more token of lookahead for the caller.
    queue = [candidate]
    queue.append(six.advance_iterator(stream))
    nxt = six.advance_iterator(stream)
    return queue, nxt, state
Example #2
0
 def __delitem__(self, key):
     """Delete the item (or attribute) addressed by this slice at *key*."""
     sliced = self._slice.duplicate()
     last_op = sliced._call_stack[-1][0]
     if last_op == _IndexedComponent_slice.slice_info:
         # Root-level slice: locate the single matching member and delete
         # it directly from its owning component.
         assert len(sliced._call_stack) == 1
         _iter = self._get_iter(sliced, key)
         try:
             advance_iterator(_iter)
             del _iter._iter_stack[0].component[_iter.get_last_index()]
             return
         except StopIteration:
             raise KeyError("KeyError: %s" % (key,))
     elif last_op == _IndexedComponent_slice.get_item:
         # Rewrite the trailing get_item into a del_item.
         sliced._call_stack[-1] = (
             _IndexedComponent_slice.del_item,
             sliced._call_stack[-1][1] )
     elif last_op == _IndexedComponent_slice.get_attribute:
         # Rewrite the trailing get_attribute into a del_attribute.
         sliced._call_stack[-1] = (
             _IndexedComponent_slice.del_attribute,
             sliced._call_stack[-1][1] )
     else:
         raise DeveloperError(
             "Unexpected slice _call_stack operation: %s" % last_op)
     # Advancing the iterator once performs the deletion as a side effect.
     try:
         advance_iterator(self._get_iter(sliced, key))
     except StopIteration:
         pass
Example #3
0
    def run_concurrent_tests(self, input_iter):
        """Run tests concurrently.

        Primes one task per worker process, then keeps the task queue
        topped up as results arrive, yielding each result as it comes in.
        Stops feeding new tasks once the input is exhausted or, when
        ``self.stop`` is set, after the first FAIL result.
        """

        it = iter(input_iter)
        numtests = 0
        try:
            # Seed the queue with one task per worker.
            for proc in self.procs:
                self.task_queue.put(advance_iterator(it))
                numtests += 1
        except StopIteration:
            # Fewer tests than workers; skip straight to draining below.
            pass
        else:
            try:
                # Steady state: for every finished test, enqueue one more.
                while numtests:
                    result = self.done_queue.get()
                    yield result
                    numtests -= 1
                    if self.stop and result.status == 'FAIL':
                        break
                    self.task_queue.put(advance_iterator(it))
                    numtests += 1
            except StopIteration:
                # Input exhausted; drain the remaining in-flight tasks.
                pass

        # Tell each worker to shut down once its queue drains.
        for proc in self.procs:
            self.task_queue.put('STOP')

        # Collect results for tasks still in flight.
        for i in range(numtests):
            yield self.done_queue.get()

        for proc in self.procs:
            proc.join()
Example #4
0
 def _iter(self):
     """Merge all recurrence streams (rdate + rrules) into one sorted,
     de-duplicated stream of datetimes, skipping anything produced by the
     exclusion streams (exdate + exrules).  Generator; records the total
     yielded count in ``self._len`` once exhausted."""
     rlist = []
     self._rdate.sort()
     self._genitem(rlist, iter(self._rdate))
     for gen in [iter(x) for x in self._rrule]:
         self._genitem(rlist, gen)
     rlist.sort()
     exlist = []
     self._exdate.sort()
     self._genitem(exlist, iter(self._exdate))
     for gen in [iter(x) for x in self._exrule]:
         self._genitem(exlist, gen)
     exlist.sort()
     lastdt = None
     total = 0
     # Both lists are kept sorted so index 0 is always the next candidate
     # (rlist) / next exclusion (exlist); advancing an item re-sorts.
     while rlist:
         ritem = rlist[0]
         if not lastdt or lastdt != ritem.dt:
             # Discard exclusions that fall before the current candidate.
             while exlist and exlist[0] < ritem:
                 advance_iterator(exlist[0])
                 exlist.sort()
             if not exlist or ritem != exlist[0]:
                 total += 1
                 yield ritem.dt
             lastdt = ritem.dt
         advance_iterator(ritem)
         rlist.sort()
     self._len = total
Example #5
0
 def __setitem__(self, key, val):
     """Assign *val* at every location matched by this slice at *key*."""
     sliced = self._slice.duplicate()
     last_op = sliced._call_stack[-1][0]
     # Item lookups and the root slice_info entry both become set_item;
     # attribute lookups become set_attribute.
     if last_op in (_IndexedComponent_slice.get_item,
                    _IndexedComponent_slice.slice_info):
         new_op = _IndexedComponent_slice.set_item
     elif last_op == _IndexedComponent_slice.get_attribute:
         new_op = _IndexedComponent_slice.set_attribute
     else:
         raise DeveloperError(
             "Unexpected slice _call_stack operation: %s" % last_op)
     sliced._call_stack[-1] = (new_op, sliced._call_stack[-1][1], val)
     # Advancing the iterator once performs the assignment as a side effect.
     try:
         advance_iterator(self._get_iter(sliced, key, get_if_not_present=True))
     except StopIteration:
         pass
Example #6
0
 def test_imap_unordered(self):
     """imap_unordered yields results (and re-raises worker errors) in
     arrival order with a single worker."""
     worker_pool = pool.Pool(1)
     results = worker_pool.imap_unordered(divide_by, [1, 0, 2])
     self.assertEqual(next(results), 1.0)
     self.assertRaises(ZeroDivisionError, next, results)
     self.assertEqual(next(results), 0.5)
     self.assertRaises(StopIteration, next, results)
Example #7
0
 def test_imap_unordered_gc(self):
     """Garbage collection between fetches must not break imap_unordered."""
     results = self.pool.imap_unordered(sqr, range(10))
     fetched = []
     for _ in range(10):
         fetched.append(next(results))
         gc.collect()
     self.assertRaises(StopIteration, next, results)
     self.assertEqual(sorted(fetched), [x * x for x in range(10)])
    def test_iter_one_item(self):
        """iter() must yield exactly one resource for a single-item page."""
        self.r.request = Mock()
        self.r.request.return_value = Mock(), {'meta': {'key': 'foos', 'next_page_url': None}, 'foos': [{'sid': '123'}]}

        item_stream = self.r.iter()
        next(item_stream)

        self.assertRaises(StopIteration, next, item_stream)
    def test_iter_one_item(self):
        """iter() over a single-item page yields once, then StopIteration."""
        self.r.request = Mock()
        self.r.request.return_value = Mock(), {"meta": {"key": "foos", "next_page_url": None}, "foos": [{"sid": "123"}]}

        items = self.r.iter()
        # Consume the only item.
        advance_iterator(items)

        self.assertRaises(StopIteration, advance_iterator, items)
    def testIterOneItem(self):
        """iter() must produce exactly one resource from a one-item payload."""
        self.r.request = Mock()
        self.r.request.return_value = Mock(), {self.r.key: [{'sid': 'foo'}]}

        resource_iter = self.r.iter()
        next(resource_iter)

        self.assertRaises(StopIteration, next, resource_iter)
    def testIterOneItem(self):
        """iter() over a one-item payload yields once, then StopIteration."""
        self.r.request = Mock()
        self.r.request.return_value = Mock(), {self.r.key: [{'sid': 'foo'}]}

        items = self.r.iter()
        # Consume the single item.
        advance_iterator(items)

        with self.assertRaises(StopIteration):
            advance_iterator(items)
Example #12
0
def test_iterator():
    """six.Iterator subclasses that define only __next__ must work with
    six.advance_iterator, including through a subclass override."""
    class BaseIter(six.Iterator):
        def __next__(self):
            return 13
    assert six.advance_iterator(BaseIter()) == 13
    class SubIter(BaseIter):
        def __next__(self):
            return 14
    assert six.advance_iterator(SubIter()) == 14
Example #13
0
 def popitem(self, last=True):
     """Remove and return a ``(key, value)`` pair.

     Pairs are returned in LIFO order when *last* is true, FIFO order
     otherwise.  Raises ``KeyError`` when the dictionary is empty.
     """
     if not self:
         raise KeyError('dictionary is empty')
     # next() is the builtin spelling of six.advance_iterator and works on
     # every supported Python version.
     if last:
         key = next(reversed(self))
     else:
         key = next(iter(self))
     value = self.pop(key)
     return key, value
Example #14
0
def test_iterator():
    """six.advance_iterator must dispatch to a class's __next__, including
    a subclass override."""
    class myiter(six.Iterator):
        def __next__(self):
            return 13
    assert six.advance_iterator(myiter()) == 13
    class myitersub(myiter):
        def __next__(self):
            return 14
    assert six.advance_iterator(myitersub()) == 14
Example #15
0
    def sample(self, cursor):
        """Extract records randomly from the database.
        Continue until the target proportion of the items have been
        extracted, or until `min_items` if this is larger.
        If `max_items` is non-negative, do not extract more than these.

        This function is a generator, yielding items incrementally.

        :param cursor: Cursor to sample
        :type cursor: pymongo.cursor.Cursor
        :return: yields each item
        :rtype: dict
        :raise: ValueError, if max_items is valid and less than `min_items`
                or if target collection is empty
        """
        # NOTE(review): Cursor.count() is deprecated/removed in recent
        # pymongo releases -- confirm the driver version in use.
        count = cursor.count()

        # special case: empty collection
        if count == 0:
            self._empty = True
            raise ValueError("Empty collection")

        # special case: entire collection
        if self.p >= 1 and self.max_items <= 0:
            for item in cursor:
                yield item
            return

        # calculate target number of items to select
        if self.max_items <= 0:
            n_target = max(self.min_items, self.p * count)
        else:
            if self.p <= 0:
                n_target = max(self.min_items, self.max_items)
            else:
                n_target = max(self.min_items,
                               min(self.max_items, self.p * count))
        if n_target == 0:
            raise ValueError("No items requested")

        # select first `n_target` items that pop up with
        # probability self.p
        # This is actually biased to items at the beginning
        # of the file if n_target is smaller than (p * count),
        n = 0
        while n < n_target:
            try:
                item = six.advance_iterator(cursor)
            except StopIteration:
                # need to keep looping through data until
                # we get all our items!
                cursor.rewind()
                item = six.advance_iterator(cursor)
            if self._keep():
                yield item
                n += 1
Example #16
0
    def test_single_message_list_construction(self):
        """An error value built from one message exposes exactly that message."""
        ev = make_error_value_from_msgs(self.error_def, *self.single_message)

        self.assertEqual(self.error_name, ev.name)
        messages = ev.get_field('messages')
        self.assertEqual(len(self.single_message), len(messages))
        message_iter = iter(messages)
        self.assertEqual(_make_struct_value_from_message(self.message1),
                         next(message_iter))
        self.assertRaises(StopIteration, next, message_iter)
Example #17
0
    def sample(self, cursor):
        """Extract records randomly from the database.
        Continue until the target proportion of the items have been
        extracted, or until `min_items` if this is larger.
        If `max_items` is non-negative, do not extract more than these.

        This function is a generator, yielding items incrementally.

        :param cursor: Cursor to sample
        :type cursor: pymongo.cursor.Cursor
        :return: yields each item
        :rtype: dict
        :raise: ValueError, if max_items is valid and less than `min_items`
                or if target collection is empty
        """
        count = cursor.count()

        # special case: empty collection
        if count == 0:
            self._empty = True
            raise ValueError("Empty collection")

        # special case: entire collection
        if self.p >= 1 and self.max_items <= 0:
            for item in cursor:
                yield item
            return

        # calculate target number of items to select
        if self.max_items <= 0:
            n_target = max(self.min_items, self.p * count)
        else:
            if self.p <= 0:
                n_target = max(self.min_items, self.max_items)
            else:
                n_target = max(self.min_items, min(self.max_items, self.p * count))
        if n_target == 0:
            raise ValueError("No items requested")

        # select first `n_target` items that pop up with
        # probability self.p
        # This is actually biased to items at the beginning
        # of the file if n_target is smaller than (p * count),
        n = 0
        while n < n_target:
            try:
                item = six.advance_iterator(cursor)
            except StopIteration:
                # need to keep looping through data until
                # we get all our items!
                # (rewind restarts the cursor from the first document)
                cursor.rewind()
                item = six.advance_iterator(cursor)
            if self._keep():
                yield item
                n += 1
Example #18
0
def _skip_ws(tok, stream, state, internal):
    """Skip WS/NEWLINE tokens; on a newline outside any word grouping,
    switch the scanner back to field-id scanning unless the next line is
    indented.  Returns the ``(token, state)`` pair to resume with."""
    while tok.type in ("NEWLINE", "WS"):
        if tok.type == "NEWLINE" and len(internal.words_stack) == 0:
            nxt = stream.peek()
            if nxt.type == "INDENT":
                # Indented continuation: consume and keep the same state.
                tok = six.advance_iterator(stream)
            else:
                state = "SCANNING_FIELD_ID"
            return tok, state
        tok = six.advance_iterator(stream)
    return tok, state
Example #19
0
def _skip_ws(tok, stream, state, internal):
    """Skip WS/NEWLINE tokens.

    On a NEWLINE outside any word grouping, peek ahead: if the next line
    is not indented, switch the scanner state back to SCANNING_FIELD_ID.
    Returns the (token, state) pair to resume with.
    """
    while tok.type in ["NEWLINE", "WS"]:
        if tok.type == "NEWLINE" and len(internal.words_stack) == 0:
            nxt = stream.peek()
            if not nxt.type == "INDENT":
                state = "SCANNING_FIELD_ID"
            else:
                # Indented continuation: consume and keep scanning.
                tok = six.advance_iterator(stream)
            return tok, state
        tok = six.advance_iterator(stream)
    return tok, state
Example #20
0
 def checkIteratorParallel(self):
     """Two independent iterators over the same storage must see matching
     transaction ids and both exhaust after the stored transactions."""
     self._dostore()
     self._dostore()
     first = self._storage.iterator()
     second = self._storage.iterator()
     for _ in range(2):
         self.assertEqual(next(first).tid, next(second).tid)
     self.assertRaises(StopIteration, next, first)
     self.assertRaises(StopIteration, next, second)
Example #21
0
    def test_imap(self):
        """imap returns squared values lazily, in order, for small and
        large inputs."""
        it = self.pool.imap(sqr, range(10))
        self.assertEqual(list(it), list(map(squared, range(10))))

        for size in (10, 1000):
            it = self.pool.imap(sqr, range(size))
            for i in range(size):
                self.assertEqual(next(it), i * i)
            self.assertRaises(StopIteration, next, it)
Example #22
0
 def checkIteratorParallel(self):
     """Two independent iterators over the same storage must see matching
     transaction ids and both exhaust after the stored transactions."""
     self._dostore()
     self._dostore()
     iter1 = self._storage.iterator()
     iter2 = self._storage.iterator()
     txn_info1 = six.advance_iterator(iter1)
     txn_info2 = six.advance_iterator(iter2)
     # assertEquals is a deprecated alias (removed in Python 3.12).
     self.assertEqual(txn_info1.tid, txn_info2.tid)
     txn_info1 = six.advance_iterator(iter1)
     txn_info2 = six.advance_iterator(iter2)
     self.assertEqual(txn_info1.tid, txn_info2.tid)
     self.assertRaises(StopIteration, next, iter1)
     self.assertRaises(StopIteration, next, iter2)
Example #23
0
def skip_until_eol(stream, t):
    """Consume tokens until the NEWLINE ending the current line; when the
    preceding token shows the line held only a comment, also consume that
    NEWLINE.  Returns the token to resume scanning at."""
    try:
        prev = stream.previous()
    except ValueError:
        # No previous token available (start of stream).
        prev = None
    while t.type != "NEWLINE":
        t = six.advance_iterator(stream)
    # FIXME: ideally, we would like to remove EOL for comments which span the
    # full line, but we need access to the token before the comment delimiter
    # to do so, as we don't want to remove EOL for inline commeng (e.g. 'foo #
    # comment')
    if prev and t.type == "NEWLINE" and prev.type in ('NEWLINE', 'INDENT'):
        t = six.advance_iterator(stream)
    return t
Example #24
0
def skip_until_eol(stream, t):
    """Skip forward to the NEWLINE ending the current line, also consuming
    it when the line contained only a comment."""
    try:
        prev = stream.previous()
    except ValueError:
        prev = None
    while t.type != "NEWLINE":
        t = six.advance_iterator(stream)
    # FIXME: ideally, we would like to remove EOL for comments which span the
    # full line, but we need access to the token before the comment delimiter
    # to do so, as we don't want to remove EOL for inline commeng (e.g. 'foo #
    # comment')
    if prev and prev.type in ('NEWLINE', 'INDENT') and t.type == "NEWLINE":
        t = six.advance_iterator(stream)
    return t
Example #25
0
 def __contains__(self, key):
     """Return True when *key* matches at least one member of the slice."""
     try:
         advance_iterator(self._get_iter(self._slice, key))
         return True
     except (StopIteration, KeyError):
         return False
     except SliceEllipsisLookupError:
         # Slices containing an Ellipsis cannot be looked up directly;
         # fall back to a brute-force (linear time) scan of the members.
         if type(key) is tuple and len(key) == 1:
             key = key[0]
         member_iter = iter(self._slice)
         return any(member_iter.get_last_index_wildcards() == key
                    for _ in member_iter)
Example #26
0
    def checkIteratorGCStorageTPCAborting(self):
        """A tpc_abort must garbage-collect server-side iterators."""
        # The odd little jig we do below arises from the fact that the
        # CS iterator may not be constructed right away if the CS is wrapped.
        # We need to actually do some iteration to get the iterator created.
        # We do a store to make sure the iterator isn't exhausted right away.
        self._dostore()
        six.advance_iterator(self._storage.iterator())

        iid = list(self._storage._iterator_ids)[0]

        t = transaction.Transaction()
        self._storage.tpc_begin(t)
        self._storage.tpc_abort(t)
        # assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(0, len(self._storage._iterator_ids))
        self.assertRaises(KeyError, self._storage._server.iterator_next, iid)
Example #27
0
    def checkIteratorGCStorageTPCAborting(self):
        """Aborting a transaction must dispose of server-side iterators."""
        # The odd little jig we do below arises from the fact that the
        # CS iterator may not be constructed right away if the CS is wrapped.
        # We need to actually do some iteration to get the iterator created.
        # We do a store to make sure the iterator isn't exhausted right away.
        self._dostore()
        six.advance_iterator(self._storage.iterator())

        iid = list(self._storage._iterator_ids)[0]

        t = transaction.Transaction()
        self._storage.tpc_begin(t)
        self._storage.tpc_abort(t)
        # assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(0, len(self._storage._iterator_ids))
        self.assertRaises(KeyError, self._storage._server.iterator_next, iid)
Example #28
0
def test_PushbackAdapter():
    """Pushed-back values must be yielded (LIFO) before the underlying
    iterator resumes; peek must not consume."""
    adapter = PushbackAdapter(iter([1, 2, 3, 4]))
    assert adapter.has_more()
    assert next(adapter) == 1
    adapter.push_back(0)
    assert next(adapter) == 0
    assert next(adapter) == 2
    assert adapter.peek() == 3
    adapter.push_back(10)
    assert adapter.peek() == 10
    adapter.push_back(20)
    assert adapter.peek() == 20
    assert adapter.has_more()
    assert list(adapter) == [20, 10, 3, 4]
    assert not adapter.has_more()
Example #29
0
 def create(cls, doc, sheet_name, root_topic_name):
     """
     Create new sheet. Usually not used directly,
     use ``XMindDocument.create_sheet`` instead.

     Builds a <sheet> element with a generated id, titles it with
     *sheet_name*, and attaches a root <topic> titled *root_topic_name*.
     """
     sheet_tag = doc.create_child(doc.doc_tag,
                                  "sheet",
                                  id=six.advance_iterator(_id_gen))
     sheet = Sheet(doc, sheet_tag)
     sheet.set_title(sheet_name)
     # The root topic lives directly under the sheet element.
     topic_tag = doc.create_child(sheet_tag,
                                  "topic",
                                  id=six.advance_iterator(_id_gen))
     doc.create_child(topic_tag, "title").text = root_topic_name
     return sheet
Example #30
0
    def checkIteratorExhaustionStorage(self):
        """Exhausting a storage iterator disposes its server-side twin."""
        # Test the storage's garbage collection mechanism.
        self._dostore()
        txn_iter = self._storage.iterator()

        # At this point, a wrapping iterator might not have called the CS
        # iterator yet. We'll consume one item to make sure this happens.
        next(txn_iter)
        self.assertEqual(1, len(self._storage._iterator_ids))
        iid = list(self._storage._iterator_ids)[0]
        self.assertEqual([], list(txn_iter))
        self.assertEqual(0, len(self._storage._iterator_ids))

        # The iterator has run through, so the server has already disposed it.
        self.assertRaises(KeyError, self._storage._call, 'iterator_next', iid)
Example #31
0
 def __contains__(self, key):
     """Return True when *key* matches at least one member of the slice."""
     try:
         advance_iterator(self._get_iter(self._slice, key))
         return True
     except (StopIteration, KeyError):
         # No member matched (or lookup failed outright).
         return False
     except SliceEllipsisLookupError:
         # Slices containing an Ellipsis cannot be looked up directly.
         if type(key) is tuple and len(key) == 1:
             key = key[0]
         # Brute force (linear time) lookup
         _iter = iter(self._slice)
         for item in _iter:
             if _iter.get_last_index_wildcards() == key:
                 return True
         return False
Example #32
0
def test_PushbackAdapter():
    """Pushed-back values are yielded LIFO before the wrapped iterator
    resumes; peek() must not consume."""
    it = PushbackAdapter(iter([1, 2, 3, 4]))
    assert it.has_more()
    assert six.advance_iterator(it) == 1
    it.push_back(0)
    assert six.advance_iterator(it) == 0
    assert six.advance_iterator(it) == 2
    assert it.peek() == 3
    it.push_back(10)
    assert it.peek() == 10
    it.push_back(20)
    assert it.peek() == 20
    assert it.has_more()
    # Remaining order: pushed-back values first (LIFO), then the tail.
    assert list(it) == [20, 10, 3, 4]
    assert not it.has_more()
Example #33
0
    def checkIteratorExhaustionStorage(self):
        """Exhausting a storage iterator must dispose it server-side."""
        # Test the storage's garbage collection mechanism.
        self._dostore()
        iterator = self._storage.iterator()

        # At this point, a wrapping iterator might not have called the CS
        # iterator yet. We'll consume one item to make sure this happens.
        six.advance_iterator(iterator)
        # assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(1, len(self._storage._iterator_ids))
        iid = list(self._storage._iterator_ids)[0]
        self.assertEqual([], list(iterator))
        self.assertEqual(0, len(self._storage._iterator_ids))

        # The iterator has run through, so the server has already disposed it.
        self.assertRaises(KeyError, self._storage._call, 'iterator_next', iid)
Example #34
0
    def checkIteratorGCStorageTPCAborting(self):
        """tpc_abort must garbage-collect server-side iterators (GC is
        forced here by resetting the collector's timestamp)."""
        # The odd little jig we do below arises from the fact that the
        # CS iterator may not be constructed right away if the CS is wrapped.
        # We need to actually do some iteration to get the iterator created.
        # We do a store to make sure the iterator isn't exhausted right away.
        self._dostore()
        six.advance_iterator(self._storage.iterator())

        iid = list(self._storage._iterator_ids)[0]

        t = TransactionMetaData()
        # Force the iterator GC to run at the next transaction boundary.
        self._storage._iterators._last_gc = -1
        self._storage.tpc_begin(t)
        self._storage.tpc_abort(t)
        self._assertIteratorIdsEmpty()
        self.assertRaises(KeyError, self._storage._call, 'iterator_next', iid)
Example #35
0
    def iterate(self, max_iter=None):
        """Yields items from the mux, and handles stream exhaustion and
        replacement.

        :param max_iter: maximum number of samples to yield; ``None``
            means unbounded.
        """
        if max_iter is None:
            max_iter = np.inf

        # Calls Streamer's __enter__, which calls activate()
        with self as active_mux:
            # Main sampling loop
            n = 0

            while n < max_iter and active_mux._streamers_available():
                # Pick a stream from the active set
                idx = active_mux._next_sample_index()

                # Can we sample from it?
                try:
                    # Then yield the sample
                    yield six.advance_iterator(active_mux.streams_[idx])

                    # Increment the sample counter
                    n += 1
                    active_mux.stream_counts_[idx] += 1

                except StopIteration:
                    # Oops, this stream is exhausted.

                    # Call child-class exhausted-stream behavior
                    active_mux._on_stream_exhausted(idx)

                    # Setup a new stream for this index
                    active_mux._replace_stream(idx)
Example #36
0
 def _split_with_quotes(self, value):
     """Split a comma-separated *value*, re-joining pieces that were split
     inside a quoted section (single or double quotes).  Raises
     ``ParamSyntaxError`` when a quote is left unterminated."""
     parts = list(csv.reader(six.StringIO(value), escapechar='\\'))[0]
     iter_parts = iter(parts)
     new_parts = []
     for part in iter_parts:
         # A single quote character marks the *start* of a quoted run
         # that csv split apart at the commas.
         if part.count('"') == 1:
             quote_char = '"'
         elif part.count("'") == 1:
             quote_char = "'"
         else:
             new_parts.append(part)
             continue
         # Now that we've found a starting quote char, we
         # need to combine the parts until we encounter an end quote.
         current = part
         chunks = [current.replace(quote_char, '')]
         while True:
             try:
                 current = six.advance_iterator(iter_parts)
             except StopIteration:
                 # Ran out of parts before finding the closing quote.
                 raise ParamSyntaxError(value)
             chunks.append(current.replace(quote_char, ''))
             if quote_char in current:
                 break
         new_chunk = ','.join(chunks)
         new_parts.append(new_chunk)
     return new_parts
Example #37
0
def reduce(function, iterable, initial=None):
    """reduce(function, iterable[, initial]) -> value

    Apply a left-associative dyadic function cumulatively to an iterable
    sequence, returning a single value.  For example,
    ``reduce(lambda x, y: x*y, [1, 2, 3, 4, 5, 6])`` returns 6 factorial.
    The initial value, if present, is placed before the beginning of the
    results of the iterable in the calculation.

    Raises ``TypeError`` when *iterable* is empty and no *initial* value
    is given.  NOTE: ``initial=None`` is indistinguishable from "no
    initial value", so ``None`` itself cannot seed the reduction.
    """
    iterator = iter(iterable)
    if initial is not None:
        value = initial
    else:
        try:
            # next() is the builtin spelling of six.advance_iterator.
            value = next(iterator)
        except StopIteration:
            raise TypeError("reduce() of empty sequence with no initial value")
    # Fold the remaining items left-to-right.  (The redundant op()
    # wrapper around `function` in the original has been removed.)
    for right in iterator:
        value = function(value, right)
    return value
Example #38
0
    def process(self, args):
        """Poll workflow ``args[0]`` until it completes, then download its
        stageout files.

        Raises ``errors.RemoteError`` when the workflow finishes in the
        'error' state; exits the process cleanly on Ctrl-C.
        """
        assert len(args) == 1, "Workflow id should be passed in"
        gen = self.sleepgenerator()
        workflow_id = args[0]
        resp = {}
        status = ''
        while status.lower() not in ('complete', 'error'):
            try:
                resp = self.get_workflow(workflow_id)
                status = resp.get('status', '')
                # Back off between polls according to the sleep generator.
                time.sleep(six.advance_iterator(gen))
            except KeyboardInterrupt:
                print()
                print("Job %s current status is '%s'" % (workflow_id, status))
                sys.exit()
        if status.lower() == 'error':
            # (The unreachable `return` that followed this raise in the
            # original has been removed.)
            raise errors.RemoteError('Error running workflow')

        stageout_dir_uri = resp['json']['jobs'][-1]['stageout']
        if not stageout_dir_uri.endswith('/'):
            stageout_dir_uri += '/'

        dirs, stdout_files, stderr_files, other_files = self.get_listing(stageout_dir_uri)
        self.download_stageout_files(stageout_dir_uri, dirs, stdout_files, stderr_files, other_files)
Example #39
0
 def testDifferentEmb(self):
     """Generated ids must never repeat across emb seeds."""
     # Builtin set replaces the py2-only `sets.Set` (the `sets` module
     # was removed in Python 3).
     seen = set()
     for emb in ["WiCkqHbUtLpLxZkF", "J9nZh0Q7JdMxHTOF", "Ala"]:
         for x in range(0, 2000):
             d = six.advance_iterator(self.id_gen)
             # assertNotIn replaces the deprecated failIf alias
             # (removed in Python 3.12).
             self.assertNotIn(d, seen)
             seen.add(d)
Example #40
0
    def __new__(meta, name, bases, dct):
        """Metaclass hook: collect Widget-subclass class attributes into a
        ``children`` list, stamp the new class with a global creation
        sequence number, and run every ``post_define`` hook found in the
        MRO (base-most first)."""
        if name != 'Widget' and 'children' not in dct:
            new_children = []
            for d, v in list(dct.items()):
                if isinstance(v, type) and \
                   issubclass(v, Widget) and \
                   d not in reserved_names:

                    # Remove from the class dict; remember (class, attr name).
                    new_children.append((v, d))
                    del dct[d]

            children = meta._collect_base_children(bases)
            # Order children by their widget-class creation sequence.
            new_children = sorted(new_children, key=lambda t: t[0]._seq)
            children.extend(
                hasattr(v, 'id') and v or v(id=d) for v, d in new_children
            )
            if children:
                dct['children'] = children

        widget = super(WidgetMeta, meta).__new__(meta, name, bases, dct)

        widget._seq = six.advance_iterator(_widget_seq)
        for w in reversed(widget.__mro__):
            if 'post_define' in w.__dict__:
                w.post_define.__func__(widget)
        return widget
Example #41
0
    def __init__(self,
                 description=Default,
                 default=Default,
                 request_local=Default,
                 attribute=Default,
                 view_name=Default):
        """Record the construction sequence number and which arguments were
        explicitly passed (i.e. not left at the ``Default`` sentinel)."""
        self._seq = six.advance_iterator(_param_seq)

        self.specified = []
        # (attribute name, passed value, fallback when left at Default)
        for name, value, fallback in (
                ('description', description, None),
                ('default', default, Required),
                ('request_local', request_local, True),
                ('attribute', attribute, False),
                ('view_name', view_name, None)):
            if value is Default:
                setattr(self, name, fallback)
            else:
                setattr(self, name, value)
                self.specified.append(name)
0
def to_dataframe(field_names, data):
    """
    Core method used by :func:`~dataframize`.
    Load T4-CSV data into a pandas DataFrame

    ``field_names`` is the list of column names and ``data`` an iterable
    of raw CSV lines.  Returns an empty DataFrame when either input is
    falsy.  Raises :class:`ToDfError` when the input is not T4-compliant
    (no sample-time column, or pandas failed to parse the buffer).
    """
    _df = pd.DataFrame()  # default to be returned if exception is found
    try:
        if field_names and data:  # else return empty dataframe
            # put data in a file object and send it to pd.read_csv()
            fbuffer = cStringIO()
            fbuffer.writelines('{0}\n'.format(line) for line in data)
            fbuffer.seek(0)
            # Multiple columns may have a 'sample time' alike column,
            # only use first (case insensitive search).  Materialize the
            # matches up front: the original code passed a half-consumed
            # generator to DataFrame.drop(), which pandas does not
            # reliably accept as labels.
            time_cols = [s for s in field_names
                         if DATETIME_TAG.upper() in s.upper()]
            if not time_cols:
                # no time column at all: not T4-compliant
                raise ValueError('no sample-time column found')
            index_col = time_cols[:1]
            _df = pd.read_csv(fbuffer,
                              header=None,
                              parse_dates=index_col,
                              index_col=index_col,
                              names=field_names)
            # Remove redundant time columns (if any)
            _df.drop(time_cols[1:], axis=1, inplace=True)
            # Remove duplicate columns to avoid problems with combine_first()
            _df = remove_duplicate_columns(_df)

    except Exception as exc:  # Not T4-compliant!
        # NOTE: StopIteration subclasses Exception, so the original
        # (StopIteration, Exception) tuple was redundant.
        raise ToDfError(exc)

    return _df
Example #43
0
 def _split_with_quotes(self, value):
     """Split a comma-separated string, re-joining quoted segments.

     ``csv`` performs the initial comma split; any segment containing a
     lone single or double quote is then merged with the following
     segments until the matching closing quote appears (quote characters
     are stripped from the result).  Raises ParamSyntaxError when a
     quote is never closed.
     """
     fields = list(csv.reader(six.StringIO(value), escapechar="\\"))[0]
     stream = iter(fields)
     merged = []
     for piece in stream:
         if piece.count('"') == 1:
             quote = '"'
         elif piece.count("'") == 1:
             quote = "'"
         else:
             # no unbalanced quote: pass the segment through untouched
             merged.append(piece)
             continue
         # An opening quote was found: keep consuming segments until
         # one containing the closing quote shows up.
         collected = [piece.replace(quote, "")]
         while True:
             try:
                 segment = six.advance_iterator(stream)
             except StopIteration:
                 # ran out of input with the quote still open
                 raise ParamSyntaxError(value)
             collected.append(segment.replace(quote, ""))
             if quote in segment:
                 break
         merged.append(",".join(collected))
     return merged
Example #44
0
 def next(self):
     """Advance the wrapped generator, caching at most the two newest values."""
     value = six.advance_iterator(self._gen)
     if len(self._cache) == 2:
         # drop the oldest cached value, keep the newer one
         self._cache = [self._cache[1]]
     self._cache.append(value)
     return value
Example #45
0
 def _peek_no_dummy(self):
     """Return the next item without consuming it, caching it on first use."""
     if not self._cache:
         # nothing cached yet: pull one item ahead and remember it
         self._cache = six.advance_iterator(self._it)
     return self._cache
    def __next__(self):
        """Return the next component data whose index matches this slice.

        Scans the underlying component iterator until an index compatible
        with the fixed positions (and the ellipsis, when present) is
        found; the raw iterator's StopIteration propagates up and ends
        this iterator as well.
        """
        while True:
            index = advance_iterator(self.component_iter)

            # Normalize scalar indices into 1-tuples for uniform handling
            _idx = index if type(index) is tuple else (index,)

            # Check arity: with an ellipsis the index only needs enough
            # positions to cover the explicitly fixed ones; without one
            # the count must match exactly.
            if self.ellipsis is None:
                if len(_idx) != self.explicit_index_count:
                    continue
            elif self.explicit_index_count > len(_idx):
                continue

            # Every fixed position must hold its required value
            if all(val == _idx[key] for key, val in iteritems(self.fixed)):
                # Remember the index tuple corresponding to the last
                # component data returned by this iterator
                self.last_index = _idx
                # Use __getitem__, as a derived class may implement a
                # non-standard storage mechanism (e.g., Param)
                return self.component[index]
Example #47
0
    def test_iter_one_item(self):
        """A single-item page with no next page yields exactly one element."""
        self.r.request = Mock()
        response_body = {
            'meta': {
                'key': 'foos',
                'next_page_url': None
            },
            'foos': [{
                'sid': '123'
            }]
        }
        self.r.request.return_value = Mock(), response_body

        iterator = self.r.iter()
        # first call succeeds, second must signal exhaustion
        advance_iterator(iterator)
        self.assertRaises(StopIteration, advance_iterator, iterator)
Example #48
0
    def __init__(self, description=Default, default=Default,
                 request_local=Default, attribute=Default,
                 view_name=Default):
        """Initialise the parameter, noting which arguments were passed.

        Arguments left at the ``Default`` sentinel take their implicit
        value; the names of all explicitly supplied arguments are
        accumulated in ``self.specified`` in signature order.
        """
        self._seq = six.advance_iterator(_param_seq)

        # Map each argument to (passed-in value, implicit fallback),
        # preserving the signature's declaration order.
        spec = [
            ('description', description, None),
            ('default', default, Required),
            ('request_local', request_local, True),
            ('attribute', attribute, False),
            ('view_name', view_name, None),
        ]
        self.specified = []
        for name, given, fallback in spec:
            explicit = given is not Default
            setattr(self, name, given if explicit else fallback)
            if explicit:
                self.specified.append(name)
Example #49
0
 def peek(self):
     """Look at the next element without consuming it.

     Raises ValueError once the underlying iterator is exhausted.
     """
     try:
         upcoming = six.advance_iterator(self)
     except StopIteration:
         raise ValueError("no more data")
     # un-read it so the next advance sees the same element
     self.push_back(upcoming)
     return upcoming
Example #50
0
    def pRec(x, bReg, l, i):
        """Recursively fill the nested descriptor array ``x``.

        ``bReg`` is an iterator of regex matches positioned on the
        structural markers '<', '{' and '}' inside string ``l``; ``i``
        is the scan position just past the previous marker.  ``x`` is a
        4-slot list ``[tag, number, text, children]`` mutated in place.
        Returns the position just past the '}' (or past end-of-input)
        that closes this recursion level.
        """
        while True:
            try:
                # Only StopIteration is expected here (marker iterator
                # running dry == end of input); the original bare
                # `except:` also silently swallowed unrelated errors.
                nxtFind = next(bReg)
                j = nxtFind.start()
            except StopIteration:
                return i + 1
            c = l[j]
            if c == "<":  # add entry to array at this level
                if len(x[3]) == 0:
                    # text after "{" and before "<Tabxyz>"
                    x[2] = l[i:j].strip()
                i = j + 1  # save marker for start of descriptor
                x[3].append(["", "", "", []])
            elif c == "{":
                xn = x[3][-1]
                # x[0] & x[1] hold the "<Tabxyz>" & "123" prior to "{"
                tx = l[i - 1:j].strip().split()
                xn[0] = tx[0]
                xn[1] = tx[1] if len(tx) > 1 else ""
                i = pRec(xn, bReg, l, j + 1)
            else:  # i.e. c == "}": go up one level of recursion
                if len(x[3]) == 0:
                    x[2] = l[i:j].strip()
                return j + 1
Example #51
0
 def __next__(self):
     """Yield a (model attribute name, column value) pair.

     NOTE(sileht): Our custom resource attribute columns don't have the
     same name in database than in sqlalchemy model, so the additional
     "f_" prefix is stripped to recover the model attribute name.
     """
     column = six.advance_iterator(self.i)
     model_attr = column[2:] if column.startswith("f_") else column
     return model_attr, getattr(self.model, column)
Example #52
0
 def _get_request_type(self):
     """Return the next work request type from the cycle.

     Raises RuntimeError when no request types were configured.
     """
     try:
         return advance_iterator(self._type_cycle)
     except StopIteration:
         raise RuntimeError("MultiTaskWorker has no work request types!")
Example #53
0
def loads(string):
    """
    Construct a GeoJSON `dict` from WKT (`string`).

    Tokenizes the WKT text, resolves the importer registered for the
    leading geometry type (unknown types are delegated to
    ``_unsupported_geom_type``), special-cases ``EMPTY`` geometries,
    and otherwise hands the token stream to the importer.
    """
    sio = StringIO.StringIO(string)
    # NOTE: This is not the intended purpose of `tokenize`, but it works.
    tokens = (x[1] for x in tokenize.generate_tokens(sio.readline))
    tokens = _tokenize_wkt(tokens)
    geom_type = next(tokens)

    importer = _loads_registry.get(geom_type)

    if importer is None:
        _unsupported_geom_type(geom_type)

    # Consistently use the builtin `next` (this block previously mixed
    # `next` and `six.advance_iterator` on the same iterator).
    peek = next(tokens)
    if peek == 'EMPTY':
        # EMPTY geometries carry no coordinate payload
        if geom_type == 'GEOMETRYCOLLECTION':
            return dict(type='GeometryCollection', geometries=[])
        else:
            return dict(type=_type_map_caps_to_mixed[geom_type],
                        coordinates=[])

    # Put the peeked element back on the head of the token generator
    tokens = itertools.chain([peek], tokens)
    return importer(tokens, string)
Example #54
0
 def _get_request_type(self):
     """Fetch the next request type, cycling through those configured.

     Raises RuntimeError when the type cycle is empty.
     """
     try:
         request_type = advance_iterator(self._type_cycle)
     except StopIteration:
         raise RuntimeError("MultiTaskWorker has no work request types!")
     return request_type
Example #55
0
 def peek(self):
     """Return the upcoming element while leaving it on the stream.

     Raises ValueError when nothing is left to read.
     """
     try:
         head = six.advance_iterator(self)
     except StopIteration:
         raise ValueError("no more data")
     else:
         # push it back so consumption order is unchanged
         self.push_back(head)
         return head
Example #56
0
 def next(self):
     """Return the cached (peeked) item if any, else advance the iterator."""
     cached = self._cache
     if cached:
         # a previous peek left an item behind; hand it out exactly once
         self._cache = None
         return cached
     return six.advance_iterator(self._it)
Example #57
0
    def _make(self, commit=True, commit_related=True, _save_kwargs=None, _refresh_after_create=False, **attrs):
        """Build (and optionally save) a model instance from this recipe.

        Walks every model field, resolving its value from ``attrs`` /
        recipe defaults, then instantiates the model.  M2M values are
        collected separately in ``self.m2m_dict`` since they can only be
        attached after the instance exists.

        ``commit``: save the instance and create related objects.
        ``commit_related``: forwarded to ``generate_value`` for generated
        related objects.
        ``_save_kwargs``: extra kwargs passed through to the save call.
        ``_refresh_after_create``: reload the instance from the database
        before returning.
        Raises ``RecipeIteratorEmpty`` when an iterator-backed attribute
        runs out of values.
        """
        _save_kwargs = _save_kwargs or {}

        self._clean_attrs(attrs)
        for field in self.get_fields():
            if self._skip_field(field):
                continue

            if isinstance(field, ManyToManyField):
                # M2M values are kept aside; explicit attrs win over the
                # recipe-generated value.
                if field.name not in self.model_attrs:
                    self.m2m_dict[field.name] = self.m2m_value(field)
                else:
                    self.m2m_dict[field.name] = self.model_attrs.pop(field.name)
            elif field.name not in self.model_attrs:
                # No explicit value: generate one, unless the caller
                # supplied the raw FK id ("<name>_id") instead.
                if not isinstance(field, ForeignKey) or '{0}_id'.format(field.name) not in self.model_attrs:
                    self.model_attrs[field.name] = self.generate_value(field, commit_related)
            elif callable(self.model_attrs[field.name]):
                # Lazy value: call it now to materialize the attribute.
                self.model_attrs[field.name] = self.model_attrs[field.name]()
            elif field.name in self.iterator_attrs:
                # Iterator-backed value: pull the next item per instance.
                try:
                    self.model_attrs[field.name] = advance_iterator(self.iterator_attrs[field.name])
                except StopIteration:
                    raise RecipeIteratorEmpty('{0} iterator is empty.'.format(field.name))

        instance = self.instance(self.model_attrs, _commit=commit, _save_kwargs=_save_kwargs)
        if commit:
            # Related objects can only be attached to a saved instance.
            for related in self.get_related():
                self.create_by_related_name(instance, related)

        if _refresh_after_create:
            instance.refresh_from_db()

        return instance
Example #58
0
    def checkIteratorGCStorageTPCAborting(self):
        # Verify that aborting a two-phase commit garbage-collects the
        # storage's server-side iterators, after which using a stale
        # iterator id raises KeyError.
        #
        # The odd little jig we do below arises from the fact that the
        # CS iterator may not be constructed right away if the CS is wrapped.
        # We need to actually do some iteration to get the iterator created.
        # We do a store to make sure the iterator isn't exhausted right away.
        self._dostore()
        six.advance_iterator(self._storage.iterator())

        # grab the server-side id of the iterator we just created
        iid = list(self._storage._iterator_ids)[0]

        t = TransactionMetaData()
        # force GC eligibility on the next tpc cycle
        self._storage._iterators._last_gc = -1
        self._storage.tpc_begin(t)
        self._storage.tpc_abort(t)
        self._assertIteratorIdsEmpty()
        # the collected iterator id must no longer be usable
        self.assertRaises(KeyError, self._storage._call, 'iterator_next', iid)