Example 1
def parse_namespacepath(parser, event, node): #pylint: disable=unused-argument
    """Parse namespace path element and return tuple of
       host and namespace
           <!ELEMENT NAMESPACEPATH (HOST, LOCALNAMESPACEPATH)>
    """


    (next_event, next_node) = six.next(parser)

    if not _is_start(next_event, next_node, 'HOST'):
        raise ParseError('Expecting HOST')

    host = parse_host(parser, next_event, next_node)

    (next_event, next_node) = six.next(parser)

    if not _is_start(next_event, next_node, 'LOCALNAMESPACEPATH'):
        raise ParseError('Expecting LOCALNAMESPACEPATH')

    namespacepath = parse_localnamespacepath(parser, next_event, next_node)

    (next_event, next_node) = six.next(parser)

    if not _is_end(next_event, next_node, 'NAMESPACEPATH'):
        raise ParseError('Expecting end NAMESPACEPATH')

    return (host, namespacepath)
Example 2
    def test_simple_read_several_pages(self):
        self.prepare_response("GET", "/ws/DataStream/test", GET_TEST_DATA_STREAM)

        # This test is a bit awkward as the pattern matching in httpretty is strange
        # and I couldn't get it to work in a nicer fashion
        test_stream = self.dc.streams.get_stream("test")
        generator = test_stream.read(page_size=2)
        self.prepare_response("GET", "/ws/DataPoint/test", GET_DATA_POINTS_FIVE_PAGED[0])

        point1 = six.next(generator)
        self.assertEqual(point1.get_id(), "75b0e84b-0968-11e4-9041-fa163e8f4b62")
        point2 = six.next(generator)
        self.assertEqual(point2.get_id(), "75d56063-0968-11e4-9041-fa163e8f4b62")

        self.prepare_response("GET", "/ws/DataPoint/test", GET_DATA_POINTS_FIVE_PAGED[1])

        point3 = six.next(generator)
        self.assertEqual(point3.get_id(), "75f8901f-0968-11e4-ab44-fa163e7ebc6b")
        point4 = six.next(generator)
        self.assertEqual(point4.get_id(), "761eecbb-0968-11e4-9041-fa163e8f4b62")

        self.prepare_response("GET", "/ws/DataPoint/test", GET_DATA_POINTS_FIVE_PAGED[2])

        point5 = six.next(generator)
        self.assertEqual(point5.get_id(), "76459cf1-0968-11e4-98e9-fa163ecf1de4")
        self.assertRaises(StopIteration, six.next, generator)
Example 3
    def testUploadFileToFolder(self):
        filepath = os.path.join(self.libTestDir, 'sub0', 'f')

        stream_filename = 'uploaded_from_stream'
        disk_filename = 'uploaded_from_disk'

        # upload filepath as a stream and as a local file, and assert the end result is the same
        with open(filepath, 'rb') as infile:
            infile.seek(0, os.SEEK_END)
            size = infile.tell()
            infile.seek(0)

            self.client.uploadStreamToFolder(str(self.publicFolder['_id']), infile, stream_filename,
                                             size, mimeType='text/plain')

        self.client.uploadFileToFolder(str(self.publicFolder['_id']), filepath,
                                       filename=disk_filename)

        stream_item = six.next(self.client.listItem(str(self.publicFolder['_id']),
                                                    name=stream_filename))
        disk_item = six.next(self.client.listItem(str(self.publicFolder['_id']),
                                                  name=disk_filename))

        # assert names and sizes are correct
        self.assertEqual(stream_filename, stream_item['name'])
        self.assertEqual(size, stream_item['size'])
        self.assertEqual(disk_filename, disk_item['name'])
        self.assertEqual(size, disk_item['size'])

        # assert all other fields (besides the unique ones) are identical
        unique_attrs = ('_id', 'name', 'created', 'updated')
        self.assertEqual({k: v for (k, v) in six.viewitems(stream_item) if k not in unique_attrs},
                         {k: v for (k, v) in six.viewitems(disk_item) if k not in unique_attrs})
Example 4
def parse_localnamespacepath(parser, event, node):
    #pylint: disable=unused-argument
    """Parse LOCALNAMESPACEPATH for Namespace. Return assembled namespace
            <!ELEMENT LOCALNAMESPACEPATH (NAMESPACE+)>
    """

    (next_event, next_node) = six.next(parser)

    namespaces = []

    if not _is_start(next_event, next_node, 'NAMESPACE'):
        print(next_event, next_node)
        raise ParseError('Expecting NAMESPACE')

    namespaces.append(parse_namespace(parser, next_event, next_node))

    while True:

        (next_event, next_node) = six.next(parser)

        if _is_end(next_event, next_node, 'LOCALNAMESPACEPATH'):
            break

        if _is_start(next_event, next_node, 'NAMESPACE'):
            namespaces.append(parse_namespace(parser, next_event, next_node))
        else:
            raise ParseError('Expecting NAMESPACE')

    return '/'.join(namespaces)
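
For orientation, a hypothetical CIM-XML fragment matching the DTDs quoted in Examples 1 and 4; assuming parse_host returns the HOST text and parse_namespace returns each NAMESPACE element's NAME attribute, parse_namespacepath would return ('server', 'root/cimv2') for it.

# Hypothetical input for the two parsers above (not taken from the library's tests).
fragment = """
<NAMESPACEPATH>
  <HOST>server</HOST>
  <LOCALNAMESPACEPATH>
    <NAMESPACE NAME="root"/>
    <NAMESPACE NAME="cimv2"/>
  </LOCALNAMESPACEPATH>
</NAMESPACEPATH>
"""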
Example 5
    def lldp_parse(data):
        pkt = packet.Packet(data)
        i = iter(pkt)
        eth_pkt = six.next(i)
        assert type(eth_pkt) == ethernet.ethernet

        lldp_pkt = six.next(i)
        if type(lldp_pkt) != lldp.lldp:
            raise LLDPPacket.LLDPUnknownFormat()

        tlv_chassis_id = lldp_pkt.tlvs[0]
        if tlv_chassis_id.subtype != lldp.ChassisID.SUB_LOCALLY_ASSIGNED:
            raise LLDPPacket.LLDPUnknownFormat(
                msg='unknown chassis id subtype %d' % tlv_chassis_id.subtype)
        chassis_id = tlv_chassis_id.chassis_id.decode('utf-8')
        if not chassis_id.startswith(LLDPPacket.CHASSIS_ID_PREFIX):
            raise LLDPPacket.LLDPUnknownFormat(
                msg='unknown chassis id format %s' % chassis_id)
        src_dpid = str_to_dpid(chassis_id[LLDPPacket.CHASSIS_ID_PREFIX_LEN:])

        tlv_port_id = lldp_pkt.tlvs[1]
        if tlv_port_id.subtype != lldp.PortID.SUB_PORT_COMPONENT:
            raise LLDPPacket.LLDPUnknownFormat(
                msg='unknown port id subtype %d' % tlv_port_id.subtype)
        port_id = tlv_port_id.port_id
        if len(port_id) != LLDPPacket.PORT_ID_SIZE:
            raise LLDPPacket.LLDPUnknownFormat(
                msg='unknown port id %d' % port_id)
        (src_port_no, ) = struct.unpack(LLDPPacket.PORT_ID_STR, port_id)

        return src_dpid, src_port_no
Example 6
 def test_valid_chunk_size(self):
     s = StringIO('foob')
     reader = utils.read_in_chunks(s, 2)
     assert six.next(reader) == ('fo', 0)
     assert six.next(reader) == ('ob', 2)
     with pytest.raises(StopIteration):
         six.next(reader)
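
For context, a minimal sketch of what a read_in_chunks helper consistent with these assertions could look like (the real utils.read_in_chunks may differ): it yields (chunk, start_offset) pairs until the stream is exhausted.

def read_in_chunks(stream, chunk_size):
    # yield (chunk, offset of the chunk's first byte); an empty read ends the loop
    offset = 0
    while True:
        data = stream.read(chunk_size)
        if not data:
            break
        yield data, offset
        offset += len(data)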
Example 7
def test_closed_cursor_raises_when_used(dsn, configuration):
    connection = connect(dsn, **get_credentials(configuration))
    cursor = connection.cursor()

    cursor.close()

    with pytest.raises(InterfaceError):
        cursor.execute("SELECT 42")

    with pytest.raises(InterfaceError):
        cursor.executemany("SELECT 42")

    with pytest.raises(InterfaceError):
        cursor.executemanycolumns("SELECT 42", [])

    with pytest.raises(InterfaceError):
        cursor.fetchone()

    with pytest.raises(InterfaceError):
        cursor.fetchmany()

    with pytest.raises(InterfaceError):
        cursor.fetchall()

    with pytest.raises(InterfaceError):
        six.next(cursor)
Example 8
    def _feed_dict_fn():
      if self.stopped:
        raise StopIteration
      inp = np.zeros(self.input_shape, dtype=self.input_dtype)
      if self.y is not None:
        out = np.zeros(self.output_shape, dtype=self.output_dtype)
      for i in xrange(self.batch_size):
        # Add handling when queue ends.
        try:
          inp[i, :] = six.next(self.X)
        except StopIteration:
          self.stopped = True
          inp = inp[:i, :]
          if self.y is not None:
            out = out[:i]
          break

        if self.y is not None:
          y = six.next(self.y)
          if self.n_classes is not None and self.n_classes > 1:
            if len(self.output_shape) == 2:
              out.itemset((i, y), 1.0)
            else:
              for idx, value in enumerate(y):
                out.itemset(tuple([i, idx, value]), 1.0)
          else:
            out[i] = y
      if self.y is None:
        return {self._input_placeholder.name: inp}
      return {self._input_placeholder.name: inp,
              self._output_placeholder.name: out}
Example 9
def _collect_linear_sum(exp, idMap, multiplier, coef, varmap, compute_values):

    coef[None] += multiplier * exp._const  # None is the constant term in the coefficient map.

    arg_coef_iterator = exp._coef.__iter__()
    for arg in exp._args:
        # an arg can be anything - a product, a variable, whatever.

        # Special case... <sigh>
        if ((arg.__class__ is _GeneralVarData) or isinstance(arg, _VarData)) and (not arg.fixed):
            # save an expensive recursion - this is by far the most common case.
            id_ = id(arg)
            if id_ in idMap[None]:
                key = idMap[None][id_]
            else:
                key = len(idMap) - 1
                idMap[None][id_] = key
                idMap[key] = arg
            #
            varmap[key]=arg
            if key in coef:
                coef[key] += multiplier * six.next(arg_coef_iterator)
            else:
                coef[key] = multiplier * six.next(arg_coef_iterator)
        else:
            _linear_collectors[arg.__class__](arg, idMap, multiplier * six.next(arg_coef_iterator), coef, varmap, compute_values)
Example 10
 def _data_collector(self, level, prefix, visibility=None, docMode=False):
     if visibility is not None and visibility < self._visibility:
         return
     if docMode:
         # In documentation mode, we do NOT list the documentation
         # for any sub-data, and instead document the *domain*
         # information (as all the entries should share the same
         # domain, potentially duplicating that documentation is
         # somewhat redundant, and worse, if the list is empty, then
         # no documentation is generated at all!)
         yield( level, prefix.rstrip(), self )
         subDomain = self._domain._data_collector(
             level+1, '- ', visibility, docMode )
         # Pop off the (empty) block entry
         six.next(subDomain)
         for v in subDomain:
             yield v
         return
     if prefix:
         if not self._data:
             yield( level, prefix.rstrip()+' []', self )
         else:
             yield( level, prefix.rstrip(), self )
             level += 1
     for value in self._data:
         for v in value._data_collector(level, '- ', visibility, docMode):
             yield v
Example 11
File: oeis.py Project: goulu/Goulib
def has_primitive_root(n):
    if n==1 : return True # to match A033948, but why ?
    try:
        six.next(primitive_root_gen(n))
        return True
    except StopIteration:
        return False
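
A quick sanity check, assuming primitive_root_gen(n) from the same module yields the primitive roots of n: roots exist modulo 7 but not modulo 8, so the six.next probe either succeeds or falls through to StopIteration.

has_primitive_root(7)   # True  (3 is a primitive root mod 7)
has_primitive_root(8)   # False (8 = 2**3 has no primitive root)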
Example 12
def parse_title_page(lines):
    """Parse the title page.

    Spec: http://fountain.io/syntax#section-titlepage
    Returns None if the document does not have a title page section,
    otherwise a dictionary with the data.
    """
    result = {}

    it = iter(lines)
    try:
        line = next(it)
        while True:
            key_match = title_page_key_re.match(line)
            if not key_match:
                return None
            key, value = key_match.groups()
            if value:
                # Single line key/value
                result.setdefault(key, []).append(value)
                line = next(it)
            else:
                for line in it:
                    value_match = title_page_value_re.match(line)
                    if not value_match:
                        break
                    result.setdefault(key, []).append(value_match.group(1))
                else:
                    # Last line has been processed
                    break
    except StopIteration:
        pass
    return result
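
A small, hypothetical usage sketch; the exact matching is governed by the module-level title_page_key_re and title_page_value_re patterns, which follow the Fountain spec linked in the docstring.

lines = [
    "Title: Big Fish",
    "Credit: written by",
    "Author: John August",
]
parse_title_page(lines)
# expected shape: {'Title': ['Big Fish'], 'Credit': ['written by'],
#                  'Author': ['John August']} -- values are lists because a
#                  key's value may continue over several indented lines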
Example 13
    def test_load_directory_caching_with_files_updated(self):
        self.create_config_file('policy.d/a.conf', POLICY_A_CONTENTS)

        self.enforcer.load_rules(False)
        self.assertIsNotNone(self.enforcer.rules)

        old = six.next(six.itervalues(
            self.enforcer._policy_dir_mtimes))
        self.assertEqual(1, len(self.enforcer._policy_dir_mtimes))

        # Touch the file
        conf_path = os.path.join(self.config_dir, 'policy.d/a.conf')
        stinfo = os.stat(conf_path)
        os.utime(conf_path, (stinfo.st_atime + 10, stinfo.st_mtime + 10))

        self.enforcer.load_rules(False)
        self.assertEqual(1, len(self.enforcer._policy_dir_mtimes))
        self.assertEqual(old, six.next(six.itervalues(
            self.enforcer._policy_dir_mtimes)))

        loaded_rules = jsonutils.loads(str(self.enforcer.rules))
        self.assertEqual('is_admin:True', loaded_rules['admin'])
        self.check_loaded_files([
            'policy.json',
            'policy.d/a.conf',
            'policy.d/a.conf',
        ])
Example 14
def take_nth(rank, size, seq):
    """
    Iterate returning every nth value.

    Return an iterator over the sequence that returns every
    nth element of seq based on the given rank within a group of
    the given size.  For example, if size = 2, a rank of 0 returns
    even indexed elements and a rank of 1 returns odd indexed elements.

    Parameters
    ----------
    rank : int
        MPI rank of this process.
    size : int
        Number of processes in the group (every `size`-th element of seq is returned).
    seq : iter
        Iterator containing the values being returned.
    """
    assert(rank < size)
    it = iter(seq)
    while True:
        for proc in range(size):
            if rank == proc:
                yield six.next(it)
            else:
                six.next(it)
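
A small usage sketch with hypothetical values; note that the generator relies on the bare StopIteration silently ending it, i.e. the pre-PEP 479 behaviour that six-era code targeted.

evens = list(take_nth(0, 2, [10, 11, 12, 13]))   # [10, 12]
odds = list(take_nth(1, 2, [10, 11, 12, 13]))    # [11, 13]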
Example 15
def build_from_obj(obj):
    if isinstance(obj, list):
        return [build_from_obj(item) for item in obj]
    if not isinstance(obj, dict):
        return obj
    _class = get_node_class(next(iterkeys(obj)))
    return _class(next(itervalues(obj))) if _class else obj
Example 16
    def test_iterate(self):
        path = "/foo"
        item1 = {"name": "1"}
        item2 = {"name": "2"}
        api_request = mock.Mock(return_value={"items": [item1, item2]})
        iterator = page_iterator.HTTPIterator(
            mock.sentinel.client,
            api_request,
            path=path,
            item_to_value=page_iterator._item_to_value_identity,
        )

        assert iterator.num_results == 0

        items_iter = iter(iterator)

        val1 = six.next(items_iter)
        assert val1 == item1
        assert iterator.num_results == 1

        val2 = six.next(items_iter)
        assert val2 == item2
        assert iterator.num_results == 2

        with pytest.raises(StopIteration):
            six.next(items_iter)

        api_request.assert_called_once_with(method="GET", path=path, query_params={})
Example 17
    def test_iterator_calls_parent_item_to_value(self):
        parent = mock.sentinel.parent

        item_to_value = mock.Mock(
            side_effect=lambda iterator, value: value, spec=["__call__"]
        )

        page = page_iterator.Page(parent, (10, 11, 12), item_to_value)
        page._remaining = 100

        assert item_to_value.call_count == 0
        assert page.remaining == 100

        assert six.next(page) == 10
        assert item_to_value.call_count == 1
        item_to_value.assert_called_with(parent, 10)
        assert page.remaining == 99

        assert six.next(page) == 11
        assert item_to_value.call_count == 2
        item_to_value.assert_called_with(parent, 11)
        assert page.remaining == 98

        assert six.next(page) == 12
        assert item_to_value.call_count == 3
        item_to_value.assert_called_with(parent, 12)
        assert page.remaining == 97
Example 18
def create_data(snames, fnames, sess, max_rolls=500000):
    p_src = complex_player_source(snames, fnames, sess)
    g_src = add_to_session_passthru(
        Game.generate_game(
            player_source=p_src,
            num_source=random_number_around(4, 1)
        ),
        sess
    )
    r_src = add_to_session_passthru(
        Roll.generate_roll(
            game_source=g_src,
            number_source=random_number_around(15, 5),
            dice_source=arbitrary_dice_pattern(),
            limit=max_rolls
        ),
        sess
    )
    try:
        while True:
            next(r_src)
    except StopIteration:
        print_("Database loaded.")
    finally:
        sess.commit()
Example 19
    def test_run_iterations(self):
        flow = lf.Flow("root")
        tasks = test_utils.make_many(
            1, task_cls=test_utils.TaskNoRequiresNoReturns)
        flow.add(*tasks)

        rt = self._make_runtime(flow, initial_state=st.RUNNING)
        self.assertTrue(rt.runner.runnable())

        it = rt.runner.run_iter()
        state, failures = six.next(it)
        self.assertEqual(st.RESUMING, state)
        self.assertEqual(0, len(failures))

        state, failures = six.next(it)
        self.assertEqual(st.SCHEDULING, state)
        self.assertEqual(0, len(failures))

        state, failures = six.next(it)
        self.assertEqual(st.WAITING, state)
        self.assertEqual(0, len(failures))

        state, failures = six.next(it)
        self.assertEqual(st.ANALYZING, state)
        self.assertEqual(0, len(failures))

        state, failures = six.next(it)
        self.assertEqual(st.SUCCESS, state)
        self.assertEqual(0, len(failures))

        self.assertRaises(StopIteration, six.next, it)
Example 20
def _Net_forward_all(self, blobs=None, **kwargs):
    """
    Run net forward in batches.

    Take
    blobs: list of blobs to extract as in forward()
    kwargs: Keys are input blob names and values are blob ndarrays.
            Refer to forward().

    Give
    all_outs: {blob name: list of blobs} dict.
    """
    # Collect outputs from batches
    all_outs = {out: [] for out in set(self.outputs + (blobs or []))}
    for batch in self._batch(kwargs):
        outs = self.forward(blobs=blobs, **batch)
        for out, out_blob in six.iteritems(outs):
            all_outs[out].extend(out_blob.copy())
    # Package in ndarray.
    for out in all_outs:
        all_outs[out] = np.asarray(all_outs[out])
    # Discard padding.
    pad = len(six.next(six.itervalues(all_outs))) - len(six.next(six.itervalues(kwargs)))
    if pad:
        for out in all_outs:
            all_outs[out] = all_outs[out][:-pad]
    return all_outs
Example 21
def _Net_batch(self, blobs):
    """
    Batch blob lists according to net's batch size.

    Take
    blobs: Keys are blob names and values are lists of blobs (of any length).
           Naturally, all the lists should have the same length.

    Give (yield)
    batch: {blob name: list of blobs} dict for a single batch.
    """
    num = len(six.next(six.itervalues(blobs)))
    batch_size = six.next(six.itervalues(self.blobs)).num
    remainder = num % batch_size
    num_batches = num // batch_size

    # Yield full batches.
    for b in range(num_batches):
        i = b * batch_size
        yield {name: blobs[name][i:i + batch_size] for name in blobs}

    # Yield last padded batch, if any.
    if remainder > 0:
        padded_batch = {}
        for name in blobs:
            padding = np.zeros((batch_size - remainder,)
                               + blobs[name].shape[1:])
            padded_batch[name] = np.concatenate([blobs[name][-remainder:],
                                                 padding])
        yield padded_batch
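
A worked illustration of the batching arithmetic above, with hypothetical sizes and no Caffe required:

# With 10 blobs per list and a net batch size of 4, _Net_batch yields two full
# batches (rows 0-3 and 4-7) plus a final batch holding the remaining 2 rows
# padded with 2 rows of zeros; _Net_forward_all later strips that padding.
num, batch_size = 10, 4
num_batches = num // batch_size   # 2 full batches
remainder = num % batch_size      # 2 rows left over -> one padded batch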
Example 22
    def generator(row_iter, delim=","):
        # TODO: this is where we are spending times (~80%). I think things
        # could be made more efficiently:
        #   - We could for example "compile" the function, because some values
        #   do not change here.
        #   - The function to convert a line to dtyped values could also be
        #   generated on the fly from a string and be executed instead of
        #   looping.
        #   - The regex are overkill: for comments, checking that a line starts
        #   by % should be enough and faster, and for empty lines, same thing
        #   --> this does not seem to change anything.

        # We do not abstract skipping comments and empty lines for performances
        # reason.
        raw = next(row_iter)
        while r_empty.match(raw):
            raw = next(row_iter)
        while r_comment.match(raw):
            raw = next(row_iter)

        # 'compiling' the range since it does not change
        # Note, I have already tried zipping the converters and
        # row elements and got slightly worse performance.
        elems = list(range(ni))

        row = raw.split(delim)
        yield tuple([convertors[i](row[i]) for i in elems])
        for raw in row_iter:
            while r_comment.match(raw):
                raw = next(row_iter)
            while r_empty.match(raw):
                raw = next(row_iter)
            row = raw.split(delim)
            yield tuple([convertors[i](row[i]) for i in elems])
Example 23
    def test_region_mapping(self, service_credentials_conf,
                            resource_plugin_conf, mock_bulk):
        mock_engine = mock.Mock()
        plugin = fake_plugins.FakeSimplePlugin(es_engine=mock_engine)

        resource_plugin_conf.include_region_name = True
        service_credentials_conf.os_region_name = 'test-region'

        indexing_helper = helper.IndexingHelper(plugin)

        _, mapping = six.next(plugin.get_full_mapping())
        self.assertIn('region_name', mapping['properties'])

        count = len(plugin.get_objects())
        fake_versions = range(1, count + 1)
        indexing_helper.save_documents(plugin.get_objects(),
                                       fake_versions)

        self.assertEqual(1, len(mock_bulk.call_args_list))
        actions = list(mock_bulk.call_args_list[0][1]['actions'])
        self.assertEqual(['test-region'],
                         actions[0]['_source']['region_name'])

        # Test without a region
        resource_plugin_conf.include_region_name = False
        mock_bulk.reset_mock()

        _, mapping = six.next(plugin.get_full_mapping())
        self.assertNotIn('region_name', mapping['properties'])
        indexing_helper.save_documents(plugin.get_objects(),
                                       fake_versions)
        actions = list(mock_bulk.call_args_list[0][1]['actions'])
        self.assertNotIn('region_name', actions[0]['_source'])
Example 24
 def __init__(self, filename, sample=None):
     """An iterator over the VCF file format
     
     This reads VCF files and has been tested on VCF 4.0.
     The returned items are VCFEntry
     
     """
     super(VCFIterator, self).__init__(filename)
     #process our meta information
     row = six.next(self.filename).strip()
     self.vcf_file = structure.VCFFile(filename)
     self.inum=0
     while row[:2] == '##':
         if row.startswith('##INFO='):
             assert(self.vcf_file.add_info(row)==True)
         elif row.startswith('##FILTER='):
             assert(self.vcf_file.add_filter(row))
         elif row.startswith('##FORMAT='):
             assert(self.vcf_file.add_format(row))
         elif row.startswith('##CONTIG='):
             assert(self.vcf_file.add_contig(row))
         elif row.startswith('##ALT='):
             assert(self.vcf_file.add_alt(row))
         elif row.startswith('##'):
             assert(self.vcf_file.add_extra(row))
         row = six.next(self.filename).strip()
     #got to the end of meta information, we now have the header
     assert(self.vcf_file.add_header(row))
     self.sample = sample
Example 25
 def _next(self):
     row = six.next(self.filename)
     while not row:#skip blanks
         row = six.next(self.filename)
     ob = structure.BedObject()
     ob.parse(row)
     return ob
Example 26
def _partition_patches(patches, regex):
    if regex is None:
        return [patches]

    def take(_patch):
        return not bool(regex.search(_patch[1]))

    def _stacker(buckets):
        while True:
            item, new_bucket = yield
            if new_bucket:
                buckets.append([item])
            else:
                buckets[-1].append(item)

    def _filter(check, stacker):
        start_bucket = True
        while True:
            item = yield
            if check(item):
                stacker.send((item, start_bucket))
                start_bucket = False
            else:
                start_bucket = True

    buckets = []
    stacker = _stacker(buckets)
    six.next(stacker)
    filter = _filter(take, stacker)
    six.next(filter)

    for patch in patches:
        filter.send(patch)
    return buckets
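
A hypothetical illustration of the coroutine pipeline above: patches whose second element matches the regex are dropped and act as separators between buckets.

import re

patches = [
    ('0001', 'add feature'),
    ('0002', 'RELEASE 1.0'),   # matches -> dropped, starts a new bucket
    ('0003', 'fix bug'),
    ('0004', 'tidy docs'),
]
_partition_patches(patches, re.compile('RELEASE'))
# -> [[('0001', 'add feature')], [('0003', 'fix bug'), ('0004', 'tidy docs')]]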
Example 27
	def does_event(self,event):
		if self._simple:
			return self.args == event
		ie = iter(event)
		ia = iter(self.args)
		ctx = {}
		pos = 0
		while True:
			try: e = six.next(ie)
			except StopIteration: e = StopIteration
			try: a = six.next(ia)
			except StopIteration: a = StopIteration
			if e is StopIteration and a is StopIteration:
				return True
			if e is StopIteration or a is StopIteration:
				return False
			if hasattr(a,"startswith") and a.startswith('*'):
				if a == '*':
					pos += 1
					a = str(pos)
				else:
					a = a[1:]
				ctx[a] = e
			elif str(a) != str(e):
				return False
Example 28
def read_header(ofile):
    """Read the header of the iterable ofile."""
    i = next(ofile)

    # Pass first comments
    while r_comment.match(i):
        i = next(ofile)

    # Header is everything up to DATA attribute ?
    relation = None
    attributes = []
    while not r_datameta.match(i):
        m = r_headerline.match(i)
        if m:
            isattr = r_attribute.match(i)
            if isattr:
                name, type, i = tokenize_attribute(ofile, i)
                attributes.append((name, type))
            else:
                isrel = r_relation.match(i)
                if isrel:
                    relation = isrel.group(1)
                else:
                    raise ValueError("Error parsing line %s" % i)
                i = next(ofile)
        else:
            i = next(ofile)

    return relation, attributes
Example 29
def parse_instancepath(parser, event, node):
    #pylint: disable=unused-argument
    """Parse the CIM/XML INSTANCEPATH element and return an
       instance name

       <!ELEMENT INSTANCEPATH (NAMESPACEPATH, INSTANCENAME)>
    """

    (next_event, next_node) = six.next(parser)

    if not _is_start(next_event, next_node, 'NAMESPACEPATH'):
        raise ParseError('Expecting NAMESPACEPATH')

    host, namespacepath = parse_namespacepath(parser, next_event, next_node)

    (next_event, next_node) = six.next(parser)

    if not _is_start(next_event, next_node, 'INSTANCENAME'):
        print(next_event, next_node)
        raise ParseError('Expecting INSTANCENAME')

    instancename = parse_instancename(parser, next_event, next_node)

    instancename.host = host
    instancename.namespace = namespacepath

    return instancename
Example 30
    def test_page_non_empty_response(self):
        import six
        from google.cloud.storage.blob import Blob

        blob_name = 'blob-name'
        response = {'items': [{'name': blob_name}], 'prefixes': ['foo']}
        connection = _Connection()
        client = _Client(connection)
        name = 'name'
        bucket = self._make_one(client=client, name=name)

        def dummy_response():
            return response

        iterator = bucket.list_blobs()
        iterator._get_next_page_response = dummy_response

        page = six.next(iterator.pages)
        self.assertEqual(page.prefixes, ('foo',))
        self.assertEqual(page.num_items, 1)
        blob = six.next(page)
        self.assertEqual(page.remaining, 0)
        self.assertIsInstance(blob, Blob)
        self.assertEqual(blob.name, blob_name)
        self.assertEqual(iterator.prefixes, set(['foo']))
Example 31
 def py_func():
     if first_ex_list:
         example = first_ex_list.pop()
     else:
         example = six.next(gen)
     return tf.contrib.framework.nest.flatten(example)
Example 32
    def __init__(
        self,
        row_ids,
        col_ids,
        values,
        header_row_id='__col__',  # formerly header_row['id']
        header_row_type='string',  # formerly header_row['type']
        header_col_id='__row__',  # formerly header_col['id']
        header_col_type='string',  # formerly header_col['type']
        col_type=None,
        class_col_id=None,
        missing=None,
        _cached_row_id=None,
        row_mapping=None,
        col_mapping=None,
    ):
        """Initialize a Table.

        :param row_ids: list of items (usually strings) used as row ids

        :param col_ids: list of items (usually strings) used as col ids

        :param values: dictionary storing the values of the table's
        cells; keys are (row_id, col_id) tuples. Or it can be a numpy array
        that will be used directly as a dense matrix or a scipy.sparse matrix.
        In the case of a dictionary, all col_types are expected to be
        identical.

        :param header_row_id: id of header row (default '__col__')

        :param header_row_type: a string indicating the type of header row
        (default 'string', other possible values 'continuous' and 'discrete')

        :param header_col_id: id of header col (default '__row__')

        :param header_col_type: a string indicating the type of header col
        (default 'string', other possible values 'continuous' and 'discrete')

        :param col_type: a dictionary where keys are col_ids and value are the
        corresponding types ('string', 'continuous' or 'discrete')

        :param class_col_id: id of the col that indicates the class associated
        with each row, if any (default None).

        :param missing: value assigned to missing values (default None)

        :param _cached_row_id: not for use by client code.

        :param row_mapping: dictionary mapping between the row name and the
        row index. Only mandatory if the constructor is provided with
        an already existing dok_matrix or numpy array.

        :param col_mapping: dictionary mapping between the col name and the
        col index. Only mandatory if the constructor is provided with
        an already existing dok_matrix or numpy array.
        """

        if col_type is None:
            col_type = dict()

        self.row_ids = row_ids
        self.col_ids = col_ids

        if isinstance(values, np.ndarray) or isspmatrix(values):
            self.row_mapping = row_mapping
            self.col_mapping = col_mapping
            self.values = values
        else:
            # it is a dictionary -> create a dok_matrix
            # here we guess the content type.
            # Could be better if we could get it from the constructor
            dtype = np.dtype(type(six.next(six.itervalues(values))))
            # build the mapping by taking the initial order of
            # row/col_ids
            self.row_mapping = dict(map(reversed, enumerate(row_ids)))
            self.col_mapping = dict(map(reversed, enumerate(col_ids)))
            self.values = dok_matrix((len(row_ids), len(col_ids)), dtype=dtype)
            for k, v in iteritems(values):
                self.values[self.row_mapping[k[0]], self.col_mapping[k[1]]] = v
        self.header_row_id = header_row_id
        self.header_row_type = header_row_type
        self.header_col_id = header_col_id
        self.header_col_type = header_col_type
        self.col_type = col_type
        self.class_col_id = class_col_id
        self.missing = missing
        self._cached_row_id = _cached_row_id
Example 33
 def _traverse_leaf(comp, transform, context_tree, identifier_seq):
   """Helper function holding traversal logic for leaf nodes."""
   _ = six.next(identifier_seq)
   return transform(comp, context_tree)
Example 34
def convert_to_torch_tensor(word_to_float_list_dict, vocab):
    dim = len(six.next(six.itervalues(word_to_float_list_dict)))
    tensor = torch.zeros((len(vocab), dim))
    for word, values in word_to_float_list_dict.items():
        tensor[vocab.stoi[word]] = torch.Tensor(values)
    return tensor
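
A minimal usage sketch under stated assumptions: torch is installed, and the vocab only needs a stoi mapping plus a length, as torchtext-style vocabularies provide.

import torch

class TinyVocab(object):
    # hypothetical stand-in for a torchtext-style vocab: stoi mapping + __len__
    def __init__(self, words):
        self.stoi = {w: i for i, w in enumerate(words)}
    def __len__(self):
        return len(self.stoi)

vectors = {'hello': [0.1, 0.2], 'world': [0.3, 0.4]}
tensor = convert_to_torch_tensor(vectors, TinyVocab(['hello', 'world']))
# tensor.shape == (2, 2); row 0 holds the 'hello' vector, row 1 the 'world' vector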
Example 35
def build_cursor(results, key, limit=100, cursor=None, hits=None, max_hits=None):
    if cursor is None:
        cursor = Cursor(0, 0, 0)

    value = cursor.value
    offset = cursor.offset
    is_prev = cursor.is_prev

    num_results = len(results)

    if is_prev:
        has_prev = num_results > limit
        num_results = len(results)
    elif value or offset:
        # It's likely that there's a previous page if they passed us either a value or an offset
        has_prev = True
    else:
        # we don't know
        has_prev = False

    # Default cursor if not present
    if is_prev:
        next_value = value
        next_offset = offset
        has_next = True
    elif num_results:
        if not value:
            value = int(key(results[0]))

        # Are there more results than what's on the current page?
        has_next = num_results > limit

        # Determine what our next cursor is by ensuring we have a unique offset
        next_value = int(key(results[-1]))

        if next_value == value:
            next_offset = offset + limit
        else:
            next_offset = 0
            result_iter = reversed(results)
            # skip the last result
            six.next(result_iter)
            for result in result_iter:
                if int(key(result)) == next_value:
                    next_offset += 1
                else:
                    break
    else:
        next_value = value
        next_offset = offset
        has_next = False

    # Determine what our previous cursor is by ensuring we have a unique offset
    if is_prev and num_results:
        prev_value = int(key(results[0]))

        if num_results > 2:
            i = 1
            while i < num_results and prev_value == int(key(results[i])):
                i += 1
            i -= 1
        else:
            i = 0

        # if we iterated every result and the offset didn't change, we need
        # to simply add the current offset to our total results (visible)
        if prev_value == value:
            prev_offset = offset + i
        else:
            prev_offset = i
    else:
        # previous cursor is easy if we're paginating forward
        prev_value = value
        prev_offset = offset

    # Truncate the list to our original result size now that we've determined the next page
    results = results[:limit]

    next_cursor = Cursor(next_value or 0, next_offset, False, has_next)
    prev_cursor = Cursor(prev_value or 0, prev_offset, True, has_prev)

    return CursorResult(
        results=results,
        next=next_cursor,
        prev=prev_cursor,
        hits=hits,
        max_hits=max_hits,
    )
Example 36
import struct
Example 37
def _get_expiry(loader):
    expired_gcp_conf = (item for item in loader._config.value.get("users")
                        if item.get("name") == "expired_gcp")
    return next(expired_gcp_conf).get("user").get("auth-provider") \
        .get("config").get("expiry")
Example 38
item_batch = tf.placeholder(tf.int32, shape=[None], name="id_item")
rate_batch = tf.placeholder(tf.float32, shape=[None])

infer, regularizer = model(user_batch, item_batch, user_num=u_num, item_num=i_num, dim=dims, device=place_device)
_, train_op = loss(infer, regularizer, rate_batch, learning_rate=0.10, reg=0.05, device=place_device)

saver = tf.train.Saver()
init_op = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init_op)
    print("%s\t%s\t%s\t%s" % ("Epoch", "Train Error", "Val Error", "Elapsed Time"))
    errors = deque(maxlen=samples_per_batch)
    start = time.time()
    for i in range(max_epochs * samples_per_batch):
        users, items, rates = next(iter_train)
        _, pred_batch = sess.run([train_op, infer], feed_dict={user_batch: users,
                                                               item_batch: items,
                                                               rate_batch: rates})
        pred_batch = clip(pred_batch)
        errors.append(np.power(pred_batch - rates, 2))
        if i % samples_per_batch == 0:
            train_err = np.sqrt(np.mean(errors))
            test_err2 = np.array([])
            for users, items, rates in iter_test:
                pred_batch = sess.run(infer, feed_dict={user_batch: users,
                                                        item_batch: items})
                pred_batch = clip(pred_batch)
                test_err2 = np.append(test_err2, np.power(pred_batch - rates, 2))
            end = time.time()
            
Example 39
 def inspect_memory_usage(instance, duration):
     return six.next(next_value)
Example 40
def shunting_yard(expression, named_ranges, ref = None, tokenize_range = False):
    """
    Tokenize an excel formula expression into reverse polish notation

    Core algorithm taken from wikipedia with varargs extensions from
    http://www.kallisti.net.nz/blog/2008/02/extension-to-the-shunting-yard-algorithm-to-allow-variable-numbers-of-arguments-to-functions/


    The ref is the cell address which is passed down to the actual compiled python code.
    Range basic operations signature require this reference, so it has to be written during OperatorNode.emit()
    https://github.com/iOiurson/koala/blob/master/koala/ast/graph.py#L292.

    This is needed because Excel range basic operations (+, -, * ...) are applied on matching cells.

    Example:
    Cell C2 has the following formula 'A1:A3 + B1:B3'.
    The output will actually be A2 + B2, because the formula is relative to cell C2.
    """

    #remove leading =
    if expression.startswith('='):
        expression = expression[1:]

    p = ExcelParser(tokenize_range=tokenize_range)
    p.parse(expression)

    # insert tokens for '(' and ')', to make things clearer below
    tokens = []
    for t in p.tokens.items:
        if t.ttype == "function" and t.tsubtype == "start":
            t.tsubtype = ""
            tokens.append(t)
            tokens.append(f_token('(','arglist','start'))
        elif t.ttype == "function" and t.tsubtype == "stop":
            tokens.append(f_token(')','arglist','stop'))
        elif t.ttype == "subexpression" and t.tsubtype == "start":
            t.tvalue = '('
            tokens.append(t)
        elif t.ttype == "subexpression" and t.tsubtype == "stop":
            t.tvalue = ')'
            tokens.append(t)
        elif t.ttype == "operand" and t.tsubtype == "range" and t.tvalue in named_ranges:
            t.tsubtype = "named_range"
            tokens.append(t)
        else:
            tokens.append(t)

    #http://office.microsoft.com/en-us/excel-help/calculation-operators-and-precedence-HP010078886.aspx
    operators = {}
    operators[':'] = Operator(':',8,'left')
    operators[''] = Operator(' ',8,'left')
    operators[','] = Operator(',',8,'left')
    operators['u-'] = Operator('u-',7,'left') #unary negation
    operators['%'] = Operator('%',6,'left')
    operators['^'] = Operator('^',5,'left')
    operators['*'] = Operator('*',4,'left')
    operators['/'] = Operator('/',4,'left')
    operators['+'] = Operator('+',3,'left')
    operators['-'] = Operator('-',3,'left')
    operators['&'] = Operator('&',2,'left')
    operators['='] = Operator('=',1,'left')
    operators['<'] = Operator('<',1,'left')
    operators['>'] = Operator('>',1,'left')
    operators['<='] = Operator('<=',1,'left')
    operators['>='] = Operator('>=',1,'left')
    operators['<>'] = Operator('<>',1,'left')

    output = collections.deque()
    stack = []
    were_values = []
    arg_count = []

    new_tokens = []

    # reconstruct expressions with ':' and replace the corresponding tokens by the reconstructed expression
    if not tokenize_range:
        for index, token in enumerate(tokens):
            new_tokens.append(token)

            if type(token.tvalue) == str or type(token.tvalue) == unicode:

                if token.tvalue.startswith(':'): # example -> :OFFSET( or simply :A10
                    depth = 0
                    expr = ''

                    rev = reversed(tokens[:index])

                    for t in rev: # going backwards, 'stop' starts, 'start' stops
                        if t.tsubtype == 'stop':
                            depth += 1
                        elif depth > 0 and t.tsubtype == 'start':
                            depth -= 1

                        expr = t.tvalue + expr

                        new_tokens.pop()

                        if depth == 0:
                            new_tokens.pop() # these 2 lines are needed to remove INDEX()
                            new_tokens.pop()
                            expr = six.next(rev).tvalue + expr
                            break

                    expr += token.tvalue

                    depth = 0

                    if token.tvalue[1:] in ['OFFSET', 'INDEX']:
                        for t in tokens[(index + 1):]:
                            if t.tsubtype == 'start':
                                depth += 1
                            elif depth > 0 and t.tsubtype == 'stop':
                                depth -= 1

                            expr += t.tvalue

                            tokens.remove(t)

                            if depth == 0:
                                break

                    new_tokens.append(f_token(expr, 'operand', 'pointer'))

                elif ':OFFSET' in token.tvalue or ':INDEX' in token.tvalue: # example -> A1:OFFSET(
                    depth = 0
                    expr = ''

                    expr += token.tvalue

                    for t in tokens[(index + 1):]:
                        if t.tsubtype == 'start':
                            depth += 1
                        elif t.tsubtype == 'stop':
                            depth -= 1

                        expr += t.tvalue

                        tokens.remove(t)

                        if depth == 0:
                            new_tokens.pop()
                            break

                    new_tokens.append(f_token(expr, 'operand', 'pointer'))


    tokens = new_tokens if new_tokens else tokens

    for t in tokens:

        if t.ttype == "operand":
            output.append(create_node(t, ref))
            if were_values:
                were_values.pop()
                were_values.append(True)

        elif t.ttype == "function":
            stack.append(t)
            arg_count.append(0)
            if were_values:
                were_values.pop()
                were_values.append(True)
            were_values.append(False)

        elif t.ttype == "argument":

            while stack and (stack[-1].tsubtype != "start"):
                output.append(create_node(stack.pop(), ref))

            if were_values.pop(): arg_count[-1] += 1
            were_values.append(False)

            if not len(stack):
                raise Exception("Mismatched or misplaced parentheses")

        elif t.ttype.startswith('operator'):

            if t.ttype.endswith('-prefix') and t.tvalue =="-":
                o1 = operators['u-']
            else:
                o1 = operators[t.tvalue]

            while stack and stack[-1].ttype.startswith('operator'):

                if stack[-1].ttype.endswith('-prefix') and stack[-1].tvalue =="-":
                    o2 = operators['u-']
                else:
                    o2 = operators[stack[-1].tvalue]

                if ( (o1.associativity == "left" and o1.precedence <= o2.precedence)
                        or
                      (o1.associativity == "right" and o1.precedence < o2.precedence) ):
                    output.append(create_node(stack.pop(), ref))
                else:
                    break
            stack.append(t)

        elif t.tsubtype == "start":
            stack.append(t)

        elif t.tsubtype == "stop":

            while stack and stack[-1].tsubtype != "start":
                output.append(create_node(stack.pop(), ref))

            if not stack:
                raise Exception("Mismatched or misplaced parentheses")
            stack.pop()

            if stack and stack[-1].ttype == "function":
                f = create_node(stack.pop(), ref)
                a = arg_count.pop()
                w = were_values.pop()
                if w: a += 1
                f.num_args = a
                #print f, "has ",a," args"
                output.append(f)



    while stack:
        if (stack[-1].tsubtype == "start" or stack[-1].tsubtype == "stop"):
            raise Exception("Mismatched or misplaced parentheses")

        output.append(create_node(stack.pop(), ref))

    # convert to list
    return [x for x in output]
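
A minimal, self-contained sketch of the core shunting-yard step used above, stripped down to plain left-associative binary operators (no ranges, functions or unary minus); it only illustrates the precedence rule applied when an operator token forces earlier operators onto the output.

def to_rpn(tokens):
    precedence = {'+': 1, '-': 1, '*': 2, '/': 2}
    output, stack = [], []
    for tok in tokens:
        if tok in precedence:
            # left-associative: pop operators of greater or equal precedence
            while stack and stack[-1] in precedence and \
                    precedence[stack[-1]] >= precedence[tok]:
                output.append(stack.pop())
            stack.append(tok)
        elif tok == '(':
            stack.append(tok)
        elif tok == ')':
            while stack and stack[-1] != '(':
                output.append(stack.pop())
            stack.pop()   # discard the '('
        else:
            output.append(tok)   # operand
    while stack:
        output.append(stack.pop())
    return output

to_rpn(['A1', '+', 'B1', '*', '2'])   # ['A1', 'B1', '2', '*', '+']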
Example 41
 def fake_get_line():
     count = six.next(counter)
     if count == 0:
         return b'* 99 EXISTS'
     else:
         raise socket.timeout
Example 42
    def create_compute_environment(self, compute_environment_name, _type,
                                   state, compute_resources, service_role):
        # Validate
        if COMPUTE_ENVIRONMENT_NAME_REGEX.match(
                compute_environment_name) is None:
            raise InvalidParameterValueException(
                "Compute environment name does not match ^[A-Za-z0-9][A-Za-z0-9_-]{1,126}[A-Za-z0-9]$"
            )

        if self.get_compute_environment_by_name(
                compute_environment_name) is not None:
            raise InvalidParameterValueException(
                "A compute environment already exists with the name {0}".
                format(compute_environment_name))

        # Look for IAM role
        try:
            self.iam_backend.get_role_by_arn(service_role)
        except IAMNotFoundException:
            raise InvalidParameterValueException(
                "Could not find IAM role {0}".format(service_role))

        if _type not in ("MANAGED", "UNMANAGED"):
            raise InvalidParameterValueException(
                "type {0} must be one of MANAGED | UNMANAGED".format(
                    service_role))

        if state is not None and state not in ("ENABLED", "DISABLED"):
            raise InvalidParameterValueException(
                "state {0} must be one of ENABLED | DISABLED".format(state))

        if compute_resources is None and _type == "MANAGED":
            raise InvalidParameterValueException(
                "computeResources must be specified when creating a {0} environment"
                .format(state))
        elif compute_resources is not None:
            self._validate_compute_resources(compute_resources)

        # By here, all values except SPOT ones have been validated
        new_comp_env = ComputeEnvironment(
            compute_environment_name,
            _type,
            state,
            compute_resources,
            service_role,
            region_name=self.region_name,
        )
        self._compute_environments[new_comp_env.arn] = new_comp_env

        # Ok by this point, everything is legit, so if its Managed then start some instances
        if _type == "MANAGED":
            cpus = int(
                compute_resources.get("desiredvCpus",
                                      compute_resources["minvCpus"]))
            instance_types = compute_resources["instanceTypes"]
            needed_instance_types = self.find_min_instances_to_meet_vcpus(
                instance_types, cpus)
            # Create instances

            # Will loop over and over so we get decent subnet coverage
            subnet_cycle = cycle(compute_resources["subnets"])

            for instance_type in needed_instance_types:
                reservation = self.ec2_backend.add_instances(
                    image_id="ami-03cf127a",  # Todo import AMIs
                    count=1,
                    user_data=None,
                    security_group_names=[],
                    instance_type=instance_type,
                    region_name=self.region_name,
                    subnet_id=six.next(subnet_cycle),
                    key_name=compute_resources.get("ec2KeyPair", "AWS_OWNED"),
                    security_group_ids=compute_resources["securityGroupIds"],
                )

                new_comp_env.add_instance(reservation.instances[0])

        # Create ECS cluster
        # Should be of format P2OnDemand_Batch_UUID
        cluster_name = "OnDemand_Batch_" + str(uuid.uuid4())
        ecs_cluster = self.ecs_backend.create_cluster(cluster_name)
        new_comp_env.set_ecs(ecs_cluster.arn, cluster_name)

        return compute_environment_name, new_comp_env.arn
Example 43
def remove_ovn_controller(module, idl, txn):
    ovs_table = idl.tables.get(OPEN_VSWITCH)
    ovs_config = six.next(six.itervalues(ovs_table.rows))
    for arg in args:
        if args[arg] in ovs_config.external_ids:
            ovs_config.delkey('external_ids', args[arg])
Example 44
 def fake_get_response():
     count = six.next(counter)
     if count == 0:
         return b'* 99 EXISTS'
     client._imap.tagged_commands[sentinel.tag] = ('OK', [b'Idle done'])
Example 45
	def modules (lib):
		return six.next(os.walk(lib))[1]
Example 46
def _enqueue_data(data,
                  capacity,
                  shuffle=False,
                  min_after_dequeue=None,
                  num_threads=1,
                  seed=None,
                  name="enqueue_input",
                  enqueue_size=1,
                  num_epochs=None,
                  pad_value=None):
  """Creates a queue filled from a numpy array or pandas `DataFrame`.

    Returns a queue filled with the rows of the given (`OrderedDict` of) array
    or `DataFrame`. In the case of a pandas `DataFrame`, the first enqueued
    `Tensor` corresponds to the index of the `DataFrame`. For (`OrderedDict` of)
    numpy arrays, the first enqueued `Tensor` contains the row number.

  Args:
    data: a numpy `ndarray`, `OrderedDict` of numpy arrays, or a generator
       yielding `dict`s of numpy arrays or pandas `DataFrame` that will be read
       into the queue.
    capacity: the capacity of the queue.
    shuffle: whether or not to shuffle the rows of the array.
    min_after_dequeue: minimum number of elements that can remain in the queue
      after a dequeue operation. Only used when `shuffle` is true. If not set,
      defaults to `capacity` / 4.
    num_threads: number of threads used for reading and enqueueing.
    seed: used to seed shuffling and reader starting points.
    name: a scope name identifying the data.
    enqueue_size: the number of rows to enqueue per step.
    num_epochs: limit enqueuing to a specified number of epochs, if provided.
    pad_value: default value for dynamic padding of data samples, if provided.

  Returns:
    A queue filled with the rows of the given (`OrderedDict` of) array or
      `DataFrame`.

  Raises:
    TypeError: `data` is not a Pandas `DataFrame`, an `OrderedDict` of numpy
      arrays, a numpy `ndarray`, or a generator producing these.
    NotImplementedError: padding and shuffling data at the same time.
    NotImplementedError: padding usage with non generator data type.
  """ 
  with ops.name_scope(name):
    if isinstance(data, np.ndarray):
      types = [dtypes.int64, dtypes.as_dtype(data.dtype)]
      queue_shapes = [(), data.shape[1:]]
      get_feed_fn = _ArrayFeedFn
    elif isinstance(data, collections.OrderedDict):
      types = [dtypes.int64] + [
          dtypes.as_dtype(col.dtype) for col in data.values()
      ]
      queue_shapes = [()] + [col.shape[1:] for col in data.values()]
      get_feed_fn = _OrderedDictNumpyFeedFn
    elif isinstance(data, tp.FunctionType):
      x_first_el = six.next(data())
      x_first_keys = sorted(x_first_el.keys())
      x_first_values = [x_first_el[key] for key in x_first_keys]
      types = [dtypes.as_dtype(col.dtype) for col in x_first_values]
      queue_shapes = [col.shape for col in x_first_values]
      get_feed_fn = _GeneratorFeedFn
    elif HAS_PANDAS and isinstance(data, pd.DataFrame):
      types = [
          dtypes.as_dtype(dt) for dt in [data.index.dtype] + list(data.dtypes)
      ]
      queue_shapes = [() for _ in types]
      get_feed_fn = _PandasFeedFn
    else:
      raise TypeError(
          "data must be either a numpy array or pandas DataFrame if pandas is "
          "installed; got {}".format(type(data).__name__))

    pad_data = pad_value is not None
    if pad_data and get_feed_fn is not _GeneratorFeedFn:
      raise NotImplementedError(
          "padding is only available with generator usage")
    if shuffle and pad_data:
      raise NotImplementedError(
          "padding and shuffling data at the same time is not implemented")

    # TODO(jamieas): TensorBoard warnings for all warnings below once available.

    if num_threads > 1 and num_epochs is not None:
      logging.warning(
          "enqueue_data was called with num_epochs and num_threads > 1. "
          "num_epochs is applied per thread, so this will produce more "
          "epochs than you probably intend. "
          "If you want to limit epochs, use one thread.")

    if shuffle and num_threads > 1 and num_epochs is not None:
      logging.warning(
          "enqueue_data was called with shuffle=True, num_threads > 1, and "
          "num_epochs. This will create multiple threads, all reading the "
          "array/dataframe in order adding to the same shuffling queue; the "
          "results will likely not be sufficiently shuffled.")

    if not shuffle and num_threads > 1:
      logging.warning(
          "enqueue_data was called with shuffle=False and num_threads > 1. "
          "This will create multiple threads, all reading the "
          "array/dataframe in order. If you want examples read in order, use"
          " one thread; if you want multiple threads, enable shuffling.")

    if shuffle:
      min_after_dequeue = int(capacity / 4 if min_after_dequeue is None else
                              min_after_dequeue)
      queue = data_flow_ops.RandomShuffleQueue(
          capacity,
          min_after_dequeue,
          dtypes=types,
          shapes=queue_shapes,
          seed=seed)
    elif pad_data:
      min_after_dequeue = 0  # just for the summary text
      queue_shapes = list(map(
        lambda x: tuple(list(x[:-1]) + [None]) if len(x) > 0 else x,
        queue_shapes))
      queue = data_flow_ops.PaddingFIFOQueue(
        capacity, dtypes=types, shapes=queue_shapes)
    else:
      min_after_dequeue = 0  # just for the summary text
      queue = data_flow_ops.FIFOQueue(
          capacity, dtypes=types, shapes=queue_shapes)

    enqueue_ops = []
    feed_fns = []

    for i in range(num_threads):
      # Note the placeholders have no shapes, so they will accept any
      # enqueue_size.  enqueue_many below will break them up.
      placeholders = [array_ops.placeholder(t) for t in types]

      enqueue_ops.append(queue.enqueue_many(placeholders))
      seed_i = None if seed is None else (i + 1) * seed

      if not pad_data:
        feed_fns.append(
          get_feed_fn(
              placeholders,
              data,
              enqueue_size,
              random_start=shuffle,
              seed=seed_i,
              num_epochs=num_epochs))
      else:
        feed_fns.append(
          get_feed_fn(
              placeholders,
              data,
              enqueue_size,
              random_start=shuffle,
              seed=seed_i,
              num_epochs=num_epochs,
              pad_value=pad_value))

    runner = fqr._FeedingQueueRunner(  # pylint: disable=protected-access
        queue=queue, enqueue_ops=enqueue_ops, feed_fns=feed_fns)
    queue_runner.add_queue_runner(runner)

    full = (math_ops.cast(
        math_ops.maximum(0, queue.size() - min_after_dequeue),
        dtypes.float32) * (1. / (capacity - min_after_dequeue)))
    # Note that name contains a '/' at the end so we intentionally do not place
    # a '/' after %s below.
    summary_name = ("queue/%sfraction_over_%d_of_%d_full" %
                    (queue.name, min_after_dequeue,
                     capacity - min_after_dequeue))
    summary.scalar(summary_name, full)
    return queue
Example 47
    def _on_event(self, event):
        new_route = event.route_entry
        filtered_new_route = FilteredRouteEntry(new_route)

        entry = self._route_2_tracked_entry(new_route)

        if entry is None:
            self.log.debug("Route not mapped to a tracked entry, ignoring: %s",
                           new_route)
            return

        self.log.debug("tracked_entry for this route: %s (type: %s)",
                       TrackerWorker._display_entry(entry), type(entry))

        self._dump_state()

        all_routes = self.tracked_entry_2_routes.setdefault(entry, [])

        self.log.debug("We currently have %d route%s for this entry",
                       len(all_routes), utils.plural(all_routes))

        if event.type == engine.RouteEvent.ADVERTISE:

            withdrawn_best_routes = []

            best_routes = self.tracked_entry_2_best_routes.get(entry)

            if best_routes is None:
                self.log.debug("We had no route for this entry (%s)")
                self.tracked_entry_2_best_routes[entry] = set([new_route])
                best_routes = set()
                self.log.debug("Calling new_best_route")
                self._call_new_best_route(entry, filtered_new_route)
            else:
                if event.replaced_route is not None:
                    self.log.debug("Will remove replaced route from all_routes"
                                   " and best_routes: %s",
                                   event.replaced_route)
                    try:
                        all_routes.remove(event.replaced_route)
                    except ValueError:
                        # we did not have any route for this entry
                        self.log.error("replaced_route is an entry for which "
                                       "we had no route ??? (bug ?)")

                    if event.replaced_route in best_routes:
                        self.log.debug(
                            "Removing replaced_route from best_routes")
                        best_routes.remove(event.replaced_route)
                        withdrawn_best_routes.append(event.replaced_route)
                    else:
                        self.log.debug("replaced_route is not in best_routes")
                        self.log.debug("best_routes: %s", best_routes)
                else:
                    self.log.debug("No replaced route to remove")

                call_new_best_route_4_all = False
                if len(best_routes) == 0:
                    self.log.debug("All best routes have been replaced")
                    self._recompute_best_routes(all_routes, best_routes)
                    if best_routes:
                        current_best = six.next(iter(best_routes))
                        self.log.debug("We'll need to call new_best_route for "
                                       "all our new best routes")
                        call_new_best_route_4_all = True
                    else:
                        current_best = None
                        call_new_best_route_4_all = False
                else:
                    # (if there is more than one route in the best routes, we
                    # take the first one)
                    current_best = six.next(iter(best_routes))

                    self.log.debug("Current best route: %s", current_best)

                    if new_route == current_best:
                        self.log.info("New route is a route we already had, "
                                      "nothing to do.")
                        # nothing to do
                        return

                # let's find if we need to update our best routes
                if current_best:
                    route_comparison = self._compare_routes(self, new_route,
                                                            current_best)
                else:
                    route_comparison = 1

                self.log.debug("route_comparison: %d", route_comparison)

                if route_comparison > 0:
                    # new_route is a strictly better route than any current
                    # one, discard all the current best routes
                    self.log.debug("Replacing all best routes with new one")
                    withdrawn_best_routes.extend(best_routes.copy())
                    best_routes.clear()
                    best_routes.add(new_route)
                    self._call_new_best_route(entry, filtered_new_route)
                    call_new_best_route_4_all = False
                elif route_comparison == 0:
                    # new_route is as good as the current ones
                    self.log.debug("Adding new_route to best_routes...")

                    if call_new_best_route_4_all:
                        self._call_new_best_route_for_routes(entry,
                                                             best_routes)

                    # We'll do a call to self._new_best_route... *only* if the
                    # new_route is different from all current best routes. This
                    # comparison uses FilteredRouteEntry to *not* take into
                    # account .source (the BGP peer which advertised the route)
                    # and only takes into account a specific set of BGP
                    # attributes.
                    # TODO(tmorin): explain more about these BGP attributes
                    #   related to the cases where a route is re-advertised
                    #   with updated attributes
                    is_really_new = (FilteredRouteEntry(new_route) not in
                                     filtered_routes(best_routes))

                    best_routes.add(new_route)

                    if is_really_new:
                        self.log.debug("Calling self._new_best_route since we "
                                       "yet had no such route in best routes")
                        self._call_new_best_route(entry, filtered_new_route)
                    else:
                        self.log.debug("Not calling _new_best_route since we "
                                       "had received a similar route already")
                else:
                    self.log.debug("The route is no better than current "
                                   "best ones")

                    if call_new_best_route_4_all:
                        self._call_new_best_route_for_routes(entry,
                                                             best_routes)

            # We need to call self._best_route_removed for routes that were
            # implicitly withdrawn, but only if they don't have an equal route
            # (in the sense of FilteredRouteEntry) in best_routes
            filtered_best_routes = filtered_routes(best_routes)
            self.log.debug("Considering implicitly withdrawn best routes")
            for r in withdrawn_best_routes:
                filtered_r = FilteredRouteEntry(r)
                if filtered_r not in filtered_best_routes:
                    self.log.debug("   calling self._best_route_removed for "
                                   "route: %s (not last)", filtered_r)
                    self._call_best_route_removed(entry,
                                                  filtered_r,
                                                  last=False)
                else:
                    self.log.debug("   not calling self._best_route_removed "
                                   "for route: %s", filtered_r)

            # add the route to the list of routes for this entry
            self.log.debug("Adding route to all_routes for this entry")
            all_routes.append(new_route)

        else:  # RouteEvent.WITHDRAW

            withdrawn_route = new_route

            self.log.debug("Removing route from all_routes for this entry")

            # let's update known routes for this entry
            try:
                all_routes.remove(withdrawn_route)
            except ValueError:
                # we did not have any route for this entry
                self.log.error("Withdraw received for an entry for which we"
                               " had no route ??? (not supposed to happen)")

            # let's now update best routes
            best_routes = self.tracked_entry_2_best_routes.get(entry)

            if best_routes is None:
                # we did not have any route for this entry
                self.log.error("Withdraw received for an entry for which we "
                               "had no route: not supposed to happen!")
                return

            if withdrawn_route in best_routes:
                self.log.debug("The event received is about a route which"
                               " is among the best routes for this entry")
                # remove the route from best_routes
                best_routes.remove(withdrawn_route)

                withdrawn_route_is_last = False
                if len(best_routes) == 0:
                    # we don't have any best route left...
                    self._recompute_best_routes(all_routes, best_routes)

                    if len(best_routes) > 0:
                        self._call_new_best_route_for_routes(entry,
                                                             best_routes)
                    else:
                        self.log.debug("Cleanup all_routes and best_routes")
                        withdrawn_route_is_last = True
                        del self.tracked_entry_2_best_routes[entry]
                        del self.tracked_entry_2_routes[entry]

                self.log.debug("Calling best_route_removed...?")
                # We need to call self._best_route_removed, but only if the
                # withdrawn route does not have an equal route in
                # best_routes (in the sense of FilteredRouteEntry)
                filtered_withdrawn_route = FilteredRouteEntry(withdrawn_route)
                if (filtered_withdrawn_route not
                        in filtered_routes(best_routes)):
                    self.log.debug("Calling best_route_removed: %s(last:%s)",
                                   filtered_withdrawn_route,
                                   withdrawn_route_is_last)
                    self._call_best_route_removed(entry,
                                                  filtered_withdrawn_route,
                                                  withdrawn_route_is_last)
                else:
                    self.log.debug("No need to call bestRouteRemved: %s",
                                   filtered_withdrawn_route)

            else:
                self.log.debug("The event received is not related to any "
                               "of the best routes for this entry")
                # no need to update our best route list
                pass

        self.log.info("We now have %d route%s for this entry.",
                      len(all_routes), utils.plural(all_routes))

        self._dump_state()
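The rule applied twice in the handler above (for implicitly withdrawn best routes and for explicit withdraws) is: only call the removal callback when no *filtered-equal* route remains among the best routes. A toy sketch of that rule, using hypothetical (peer, prefix) tuples as routes and the prefix as a stand-in for FilteredRouteEntry:

def by_prefix(route):
    # Stand-in for FilteredRouteEntry: ignore the advertising peer (route[0]).
    return route[1]

def needs_removed_callback(withdrawn, best_routes, filtered=by_prefix):
    # Fire _best_route_removed only if no remaining best route is equal to
    # the withdrawn one once reduced to its filtered form.
    return filtered(withdrawn) not in {filtered(r) for r in best_routes}

assert needs_removed_callback(('peerA', 'p1'), {('peerB', 'p2')})      # p1 gone
assert not needs_removed_callback(('peerA', 'p1'), {('peerB', 'p1')})  # p1 still best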
Example n. 48
def paint_reconstruction(data, graph, reconstruction):
    """Set the color of the points from the color of the tracks."""
    for k, point in iteritems(reconstruction.points):
        point.color = six.next(six.itervalues(graph[k]))['feature_color']
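The `six.next(six.itervalues(graph[k]))` idiom simply takes one arbitrary observation of the track and reuses its feature color. A small illustration with made-up data (the nested-dict shape of `graph[k]` is an assumption for this sketch):

import six

graph = {'track1': {'shot_a': {'feature_color': (255, 0, 0)},
                    'shot_b': {'feature_color': (0, 255, 0)}}}
first_observation = six.next(six.itervalues(graph['track1']))
print(first_observation['feature_color'])  # color of an arbitrary observation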
Example n. 49
    def add(self,
            patchlabel='',
            flows=None,
            orientations=None,
            labels='',
            trunklength=1.0,
            pathlengths=0.25,
            prior=None,
            connect=(0, 0),
            rotation=0,
            **kwargs):
        """
        Add a simple Sankey diagram with flows at the same hierarchical level.

        Return value is the instance of :class:`Sankey`.

        Optional keyword arguments:

          ===============   ===================================================
          Keyword           Description
          ===============   ===================================================
          *patchlabel*      label to be placed at the center of the diagram
                            Note: *label* (not *patchlabel*) will be passed to
                            the patch through ``**kwargs`` and can be used to
                            create an entry in the legend.
          *flows*           array of flow values
                            By convention, inputs are positive and outputs are
                            negative.
          *orientations*    list of orientations of the paths
                            Valid values are 1 (from/to the top), 0 (from/to
                            the left or right), or -1 (from/to the bottom).  If
                            *orientations* == 0, inputs will break in from the
                            left and outputs will break away to the right.
          *labels*          list of specifications of the labels for the flows
                            Each value may be *None* (no labels), '' (just
                            label the quantities), or a labeling string.  If a
                            single value is provided, it will be applied to all
                            flows.  If an entry is a non-empty string, then the
                            quantity for the corresponding flow will be shown
                            below the string.  However, if the *unit* of the
                            main diagram is None, then quantities are never
                            shown, regardless of the value of this argument.
          *trunklength*     length between the bases of the input and output
                            groups
          *pathlengths*     list of lengths of the arrows before break-in or
                            after break-away
                            If a single value is given, then it will be applied
                            to the first (inside) paths on the top and bottom,
                            and the length of all other arrows will be
                            justified accordingly.  The *pathlengths* are not
                            applied to the horizontal inputs and outputs.
          *prior*           index of the prior diagram to which this diagram
                            should be connected
          *connect*         a (prior, this) tuple indexing the flow of the
                            prior diagram and the flow of this diagram which
                            should be connected
                            If this is the first diagram or *prior* is *None*,
                            *connect* will be ignored.
          *rotation*        angle of rotation of the diagram [deg]
                            *rotation* is ignored if this diagram is connected
                            to an existing one (using *prior* and *connect*).
                            The interpretation of the *orientations* argument
                            will be rotated accordingly (e.g., if *rotation*
                            == 90, an *orientations* entry of 1 means to/from
                            the left).
          ===============   ===================================================

        Valid kwargs are :meth:`matplotlib.patches.PathPatch` arguments:

        %(Patch)s

        As examples, ``fill=False`` and ``label='A legend entry'``.
        By default, ``facecolor='#bfd1d4'`` (light blue) and
        ``linewidth=0.5``.

        The indexing parameters (*prior* and *connect*) are zero-based.

        The flows are placed along the top of the diagram from the inside out
        in order of their index within the *flows* list or array.  They are
        placed along the sides of the diagram from the top down and along the
        bottom from the outside in.

        If the sum of the inputs and outputs is nonzero, the discrepancy
        will appear as a cubic Bezier curve along the top and bottom edges of
        the trunk.

        .. seealso::

            :meth:`finish`
        """
        # Check and preprocess the arguments.
        if flows is None:
            flows = np.array([1.0, -1.0])
        else:
            flows = np.array(flows)
        n = flows.shape[0]  # Number of flows
        if rotation is None:
            rotation = 0
        else:
            # In the code below, angles are expressed in deg/90.
            rotation /= 90.0
        if orientations is None:
            orientations = [0, 0]
        if len(orientations) != n:
            raise ValueError(
                "orientations and flows must have the same length.\n"
                "orientations has length %d, but flows has length %d." %
                (len(orientations), n))
        if labels != '' and getattr(labels, '__iter__', False):
            # iterable() isn't used because it would give True if labels is a
            # string
            if len(labels) != n:
                raise ValueError(
                    "If labels is a list, then labels and flows must have the "
                    "same length.\nlabels has length %d, but flows has length %d."
                    % (len(labels), n))
        else:
            labels = [labels] * n
        if trunklength < 0:
            raise ValueError(
                "trunklength is negative.\nThis isn't allowed, because it would "
                "cause poor layout.")
        if np.abs(np.sum(flows)) > self.tolerance:
            verbose.report(
                "The sum of the flows is nonzero (%f).\nIs the "
                "system not at steady state?" % np.sum(flows), 'helpful')
        scaled_flows = self.scale * flows
        gain = sum(max(flow, 0) for flow in scaled_flows)
        loss = sum(min(flow, 0) for flow in scaled_flows)
        if not (0.5 <= gain <= 2.0):
            verbose.report(
                "The scaled sum of the inputs is %f.\nThis may "
                "cause poor layout.\nConsider changing the scale so"
                " that the scaled sum is approximately 1.0." % gain, 'helpful')
        if not (-2.0 <= loss <= -0.5):
            verbose.report(
                "The scaled sum of the outputs is %f.\nThis may "
                "cause poor layout.\nConsider changing the scale so"
                " that the scaled sum is approximately 1.0." % gain, 'helpful')
        if prior is not None:
            if prior < 0:
                raise ValueError("The index of the prior diagram is negative.")
            if min(connect) < 0:
                raise ValueError(
                    "At least one of the connection indices is negative.")
            if prior >= len(self.diagrams):
                raise ValueError(
                    "The index of the prior diagram is %d, but there are "
                    "only %d other diagrams.\nThe index is zero-based." %
                    (prior, len(self.diagrams)))
            if connect[0] >= len(self.diagrams[prior].flows):
                raise ValueError(
                    "The connection index to the source diagram is %d, but "
                    "that diagram has only %d flows.\nThe index is zero-based."
                    % (connect[0], len(self.diagrams[prior].flows)))
            if connect[1] >= n:
                raise ValueError(
                    "The connection index to this diagram is %d, but this diagram"
                    "has only %d flows.\n The index is zero-based." %
                    (connect[1], n))
            if self.diagrams[prior].angles[connect[0]] is None:
                raise ValueError(
                    "The connection cannot be made.  Check that the magnitude "
                    "of flow %d of diagram %d is greater than or equal to the "
                    "specified tolerance." % (connect[0], prior))
            flow_error = (self.diagrams[prior].flows[connect[0]] +
                          flows[connect[1]])
            if abs(flow_error) >= self.tolerance:
                raise ValueError(
                    "The scaled sum of the connected flows is %f, which is not "
                    "within the tolerance (%f)." %
                    (flow_error, self.tolerance))

        # Determine if the flows are inputs.
        are_inputs = [None] * n
        for i, flow in enumerate(flows):
            if flow >= self.tolerance:
                are_inputs[i] = True
            elif flow <= -self.tolerance:
                are_inputs[i] = False
            else:
                verbose.report(
                    "The magnitude of flow %d (%f) is below the "
                    "tolerance (%f).\nIt will not be shown, and it "
                    "cannot be used in a connection." %
                    (i, flow, self.tolerance), 'helpful')

        # Determine the angles of the arrows (before rotation).
        angles = [None] * n
        for i, (orient, is_input) in enumerate(zip(orientations, are_inputs)):
            if orient == 1:
                if is_input:
                    angles[i] = DOWN
                elif not is_input:
                    # Be specific since is_input can be None.
                    angles[i] = UP
            elif orient == 0:
                if is_input is not None:
                    angles[i] = RIGHT
            else:
                if orient != -1:
                    raise ValueError("The value of orientations[%d] is %d, "
                                     "but it must be [ -1 | 0 | 1 ]." %
                                     (i, orient))
                if is_input:
                    angles[i] = UP
                elif not is_input:
                    angles[i] = DOWN

        # Justify the lengths of the paths.
        if iterable(pathlengths):
            if len(pathlengths) != n:
                raise ValueError(
                    "If pathlengths is a list, then pathlengths and flows must "
                    "have the same length.\npathlengths has length %d, but flows "
                    "has length %d." % (len(pathlengths), n))
        else:  # Make pathlengths into a list.
            urlength = pathlengths
            ullength = pathlengths
            lrlength = pathlengths
            lllength = pathlengths
            d = dict(RIGHT=pathlengths)
            pathlengths = [d.get(angle, 0) for angle in angles]
            # Determine the lengths of the top-side arrows
            # from the middle outwards.
            for i, (angle, is_input,
                    flow) in enumerate(zip(angles, are_inputs, scaled_flows)):
                if angle == DOWN and is_input:
                    pathlengths[i] = ullength
                    ullength += flow
                elif angle == UP and not is_input:
                    pathlengths[i] = urlength
                    urlength -= flow  # Flow is negative for outputs.
            # Determine the lengths of the bottom-side arrows
            # from the middle outwards.
            for i, (angle, is_input, flow) in enumerate(
                    reversed(list(zip(angles, are_inputs, scaled_flows)))):
                if angle == UP and is_input:
                    pathlengths[n - i - 1] = lllength
                    lllength += flow
                elif angle == DOWN and not is_input:
                    pathlengths[n - i - 1] = lrlength
                    lrlength -= flow
            # Determine the lengths of the left-side arrows
            # from the bottom upwards.
            has_left_input = False
            for i, (angle, is_input, spec) in enumerate(
                    reversed(
                        list(
                            zip(angles, are_inputs,
                                zip(scaled_flows, pathlengths))))):
                if angle == RIGHT:
                    if is_input:
                        if has_left_input:
                            pathlengths[n - i - 1] = 0
                        else:
                            has_left_input = True
            # Determine the lengths of the right-side arrows
            # from the top downwards.
            has_right_output = False
            for i, (angle, is_input, spec) in enumerate(
                    zip(angles, are_inputs, list(zip(scaled_flows,
                                                     pathlengths)))):
                if angle == RIGHT:
                    if not is_input:
                        if has_right_output:
                            pathlengths[i] = 0
                        else:
                            has_right_output = True

        # Begin the subpaths, and smooth the transition if the sum of the flows
        # is nonzero.
        urpath = [
            (
                Path.MOVETO,
                [
                    (self.gap - trunklength / 2.0),  # Upper right
                    gain / 2.0
                ]),
            (Path.LINETO, [(self.gap - trunklength / 2.0) / 2.0, gain / 2.0]),
            (Path.CURVE4, [(self.gap - trunklength / 2.0) / 8.0, gain / 2.0]),
            (Path.CURVE4, [(trunklength / 2.0 - self.gap) / 8.0, -loss / 2.0]),
            (Path.LINETO, [(trunklength / 2.0 - self.gap) / 2.0, -loss / 2.0]),
            (Path.LINETO, [(trunklength / 2.0 - self.gap), -loss / 2.0])
        ]
        llpath = [
            (
                Path.LINETO,
                [
                    (trunklength / 2.0 - self.gap),  # Lower left
                    loss / 2.0
                ]),
            (Path.LINETO, [(trunklength / 2.0 - self.gap) / 2.0, loss / 2.0]),
            (Path.CURVE4, [(trunklength / 2.0 - self.gap) / 8.0, loss / 2.0]),
            (Path.CURVE4, [(self.gap - trunklength / 2.0) / 8.0, -gain / 2.0]),
            (Path.LINETO, [(self.gap - trunklength / 2.0) / 2.0, -gain / 2.0]),
            (Path.LINETO, [(self.gap - trunklength / 2.0), -gain / 2.0])
        ]
        lrpath = [(
            Path.LINETO,
            [
                (trunklength / 2.0 - self.gap),  # Lower right
                loss / 2.0
            ])]
        ulpath = [(
            Path.LINETO,
            [
                self.gap - trunklength / 2.0,  # Upper left
                gain / 2.0
            ])]

        # Add the subpaths and assign the locations of the tips and labels.
        tips = np.zeros((n, 2))
        label_locations = np.zeros((n, 2))
        # Add the top-side inputs and outputs from the middle outwards.
        for i, (angle, is_input, spec) in enumerate(
                zip(angles, are_inputs, list(zip(scaled_flows, pathlengths)))):
            if angle == DOWN and is_input:
                tips[i, :], label_locations[i, :] = self._add_input(
                    ulpath, angle, *spec)
            elif angle == UP and not is_input:
                tips[i, :], label_locations[i, :] = self._add_output(
                    urpath, angle, *spec)
        # Add the bottom-side inputs and outputs from the middle outwards.
        for i, (angle, is_input, spec) in enumerate(
                reversed(
                    list(
                        zip(angles, are_inputs,
                            list(zip(scaled_flows, pathlengths)))))):
            if angle == UP and is_input:
                tip, label_location = self._add_input(llpath, angle, *spec)
                tips[n - i - 1, :] = tip
                label_locations[n - i - 1, :] = label_location
            elif angle == DOWN and not is_input:
                tip, label_location = self._add_output(lrpath, angle, *spec)
                tips[n - i - 1, :] = tip
                label_locations[n - i - 1, :] = label_location
        # Add the left-side inputs from the bottom upwards.
        has_left_input = False
        for i, (angle, is_input, spec) in enumerate(
                reversed(
                    list(
                        zip(angles, are_inputs,
                            list(zip(scaled_flows, pathlengths)))))):
            if angle == RIGHT and is_input:
                if not has_left_input:
                    # Make sure the lower path extends
                    # at least as far as the upper one.
                    if llpath[-1][1][0] > ulpath[-1][1][0]:
                        llpath.append(
                            (Path.LINETO, [ulpath[-1][1][0],
                                           llpath[-1][1][1]]))
                    has_left_input = True
                tip, label_location = self._add_input(llpath, angle, *spec)
                tips[n - i - 1, :] = tip
                label_locations[n - i - 1, :] = label_location
        # Add the right-side outputs from the top downwards.
        has_right_output = False
        for i, (angle, is_input, spec) in enumerate(
                zip(angles, are_inputs, list(zip(scaled_flows, pathlengths)))):
            if angle == RIGHT and not is_input:
                if not has_right_output:
                    # Make sure the upper path extends
                    # at least as far as the lower one.
                    if urpath[-1][1][0] < lrpath[-1][1][0]:
                        urpath.append(
                            (Path.LINETO, [lrpath[-1][1][0],
                                           urpath[-1][1][1]]))
                    has_right_output = True
                tips[i, :], label_locations[i, :] = self._add_output(
                    urpath, angle, *spec)
        # Trim any hanging vertices.
        if not has_left_input:
            ulpath.pop()
            llpath.pop()
        if not has_right_output:
            lrpath.pop()
            urpath.pop()

        # Concatenate the subpaths in the correct order (clockwise from top).
        path = (urpath + self._revert(lrpath) + llpath + self._revert(ulpath) +
                [(Path.CLOSEPOLY, urpath[0][1])])

        # Create a patch with the Sankey outline.
        codes, vertices = list(zip(*path))
        vertices = np.array(vertices)

        def _get_angle(a, r):
            if a is None:
                return None
            else:
                return a + r

        if prior is None:
            if rotation != 0:  # By default, none of this is needed.
                angles = [_get_angle(angle, rotation) for angle in angles]
                rotate = Affine2D().rotate_deg(rotation * 90).transform_affine
                tips = rotate(tips)
                label_locations = rotate(label_locations)
                vertices = rotate(vertices)
            text = self.ax.text(0, 0, s=patchlabel, ha='center', va='center')
        else:
            rotation = (self.diagrams[prior].angles[connect[0]] -
                        angles[connect[1]])
            angles = [_get_angle(angle, rotation) for angle in angles]
            rotate = Affine2D().rotate_deg(rotation * 90).transform_affine
            tips = rotate(tips)
            offset = self.diagrams[prior].tips[connect[0]] - tips[connect[1]]
            translate = Affine2D().translate(*offset).transform_affine
            tips = translate(tips)
            label_locations = translate(rotate(label_locations))
            vertices = translate(rotate(vertices))
            kwds = dict(s=patchlabel, ha='center', va='center')
            text = self.ax.text(*offset, **kwds)
        if False:  # Debug
            print("llpath\n", llpath)
            print("ulpath\n", self._revert(ulpath))
            print("urpath\n", urpath)
            print("lrpath\n", self._revert(lrpath))
            xs, ys = list(zip(*vertices))
            self.ax.plot(xs, ys, 'go-')
        if rcParams['_internal.classic_mode']:
            fc = kwargs.pop('fc', kwargs.pop('facecolor', '#bfd1d4'))
            lw = kwargs.pop('lw', kwargs.pop('linewidth', 0.5))
        else:
            fc = kwargs.pop('fc', kwargs.pop('facecolor', None))
            lw = kwargs.pop('lw', kwargs.pop('linewidth', None))
        if fc is None:
            fc = six.next(self.ax._get_patches_for_fill.prop_cycler)['color']
        patch = PathPatch(Path(vertices, codes), fc=fc, lw=lw, **kwargs)
        self.ax.add_patch(patch)

        # Add the path labels.
        texts = []
        for number, angle, label, location in zip(flows, angles, labels,
                                                  label_locations):
            if label is None or angle is None:
                label = ''
            elif self.unit is not None:
                quantity = self.format % abs(number) + self.unit
                if label != '':
                    label += "\n"
                label += quantity
            texts.append(
                self.ax.text(x=location[0],
                             y=location[1],
                             s=label,
                             ha='center',
                             va='center'))
        # Text objects are placed even if they are empty (as long as the magnitude
        # of the corresponding flow is larger than the tolerance) in case the
        # user wants to provide labels later.

        # Expand the size of the diagram if necessary.
        self.extent = (min(np.min(vertices[:, 0]),
                           np.min(label_locations[:, 0]), self.extent[0]),
                       max(np.max(vertices[:, 0]),
                           np.max(label_locations[:, 0]), self.extent[1]),
                       min(np.min(vertices[:, 1]),
                           np.min(label_locations[:, 1]), self.extent[2]),
                       max(np.max(vertices[:, 1]),
                           np.max(label_locations[:, 1]), self.extent[3]))
        # Include both vertices _and_ label locations in the extents; there are
        # cases where either could determine the margins (e.g., arrow shoulders).

        # Add this diagram as a subdiagram.
        self.diagrams.append(
            Bunch(patch=patch,
                  flows=flows,
                  angles=angles,
                  tips=tips,
                  text=text,
                  texts=texts))

        # Allow a daisy-chained call structure (see docstring for the class).
        return self
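A minimal usage sketch of the method above, loosely following the matplotlib Sankey documentation (flow values and labels are illustrative; the flows sum to zero, so no trunk discrepancy is drawn):

import matplotlib.pyplot as plt
from matplotlib.sankey import Sankey

sankey = Sankey(unit='')
sankey.add(flows=[0.25, 0.15, 0.60, -0.20, -0.15, -0.05, -0.50, -0.10],
           labels=['', '', '', 'First', 'Second', 'Third', 'Fourth', 'Fifth'],
           orientations=[-1, 1, 0, 1, 1, 1, 0, -1],
           patchlabel='Example')
sankey.finish()
plt.show()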
Example n. 50
	tags = os.popen('git tag').read().split('-')[0].strip()
	versions = [[int(_) for _ in tag.split('.')]  for tag in tags.split('\n')
                    if tag.count('.') == 2 and tag[0].isdigit()]
	latest = sorted(versions)[-1]
	next = [
		'.'.join([str(_) for _ in (latest[0], latest[1], latest[2]+1)]),
		'.'.join([str(_) for _ in (latest[0], latest[1]+1, 0)]),
		'.'.join([str(_) for _ in (latest[0]+1, 0, 0)]),
	]

	print('valid versions are:', ', '.join(next))
	print('checking the CHANGELOG uses one of them')

	with open('CHANGELOG') as changelog:
		six.next(changelog)  # skip the word version on the first line
		for line in changelog:
			if 'version' in line.lower():
				version = line.split()[1]
				if version in next:
					break
				print('invalid new version in CHANGELOG')
				sys.exit(1)

	print('ok, next release is %s' % version)
	print('checking that this release is not already tagged')

	if version in tags.split('\n'):
		print('this tag was already released')
		sys.exit(1)
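For illustration, the bump logic above yields exactly the patch, minor and major candidates; assuming a hypothetical latest tag of 1.4.2:

latest = [1, 4, 2]
candidates = [
    '.'.join(str(n) for n in (latest[0], latest[1], latest[2] + 1)),  # 1.4.3
    '.'.join(str(n) for n in (latest[0], latest[1] + 1, 0)),          # 1.5.0
    '.'.join(str(n) for n in (latest[0] + 1, 0, 0)),                  # 2.0.0
]
assert candidates == ['1.4.3', '1.5.0', '2.0.0']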
Example n. 51
    def test_list_jobs_defaults(self):
        import six
        from google.cloud.bigquery.job import LoadTableFromStorageJob
        from google.cloud.bigquery.job import CopyJob
        from google.cloud.bigquery.job import ExtractTableToStorageJob
        from google.cloud.bigquery.job import QueryJob
        PROJECT = 'PROJECT'
        DATASET = 'test_dataset'
        SOURCE_TABLE = 'source_table'
        DESTINATION_TABLE = 'destination_table'
        QUERY_DESTINATION_TABLE = 'query_destination_table'
        SOURCE_URI = 'gs://test_bucket/src_object*'
        DESTINATION_URI = 'gs://test_bucket/dst_object*'
        JOB_TYPES = {
            'load_job': LoadTableFromStorageJob,
            'copy_job': CopyJob,
            'extract_job': ExtractTableToStorageJob,
            'query_job': QueryJob,
        }
        PATH = 'projects/%s/jobs' % PROJECT
        TOKEN = 'TOKEN'
        QUERY = 'SELECT * from test_dataset:test_table'
        ASYNC_QUERY_DATA = {
            'id': '%s:%s' % (PROJECT, 'query_job'),
            'jobReference': {
                'projectId': PROJECT,
                'jobId': 'query_job',
            },
            'state': 'DONE',
            'configuration': {
                'query': {
                    'query': QUERY,
                    'destinationTable': {
                        'projectId': PROJECT,
                        'datasetId': DATASET,
                        'tableId': QUERY_DESTINATION_TABLE,
                    },
                    'createDisposition': 'CREATE_IF_NEEDED',
                    'writeDisposition': 'WRITE_TRUNCATE',
                }
            },
        }
        EXTRACT_DATA = {
            'id': '%s:%s' % (PROJECT, 'extract_job'),
            'jobReference': {
                'projectId': PROJECT,
                'jobId': 'extract_job',
            },
            'state': 'DONE',
            'configuration': {
                'extract': {
                    'sourceTable': {
                        'projectId': PROJECT,
                        'datasetId': DATASET,
                        'tableId': SOURCE_TABLE,
                    },
                    'destinationUris': [DESTINATION_URI],
                }
            },
        }
        COPY_DATA = {
            'id': '%s:%s' % (PROJECT, 'copy_job'),
            'jobReference': {
                'projectId': PROJECT,
                'jobId': 'copy_job',
            },
            'state': 'DONE',
            'configuration': {
                'copy': {
                    'sourceTables': [{
                        'projectId': PROJECT,
                        'datasetId': DATASET,
                        'tableId': SOURCE_TABLE,
                    }],
                    'destinationTable': {
                        'projectId': PROJECT,
                        'datasetId': DATASET,
                        'tableId': DESTINATION_TABLE,
                    },
                }
            },
        }
        LOAD_DATA = {
            'id': '%s:%s' % (PROJECT, 'load_job'),
            'jobReference': {
                'projectId': PROJECT,
                'jobId': 'load_job',
            },
            'state': 'DONE',
            'configuration': {
                'load': {
                    'destinationTable': {
                        'projectId': PROJECT,
                        'datasetId': DATASET,
                        'tableId': SOURCE_TABLE,
                    },
                    'sourceUris': [SOURCE_URI],
                }
            },
        }
        DATA = {
            'nextPageToken': TOKEN,
            'jobs': [
                ASYNC_QUERY_DATA,
                EXTRACT_DATA,
                COPY_DATA,
                LOAD_DATA,
            ]
        }
        creds = _Credentials()
        client = self._makeOne(PROJECT, creds)
        conn = client.connection = _Connection(DATA)

        iterator = client.list_jobs()
        page = six.next(iterator.pages)
        jobs = list(page)
        token = iterator.next_page_token

        self.assertEqual(len(jobs), len(DATA['jobs']))
        for found, expected in zip(jobs, DATA['jobs']):
            name = expected['jobReference']['jobId']
            self.assertIsInstance(found, JOB_TYPES[name])
            self.assertEqual(found.name, name)
        self.assertEqual(token, TOKEN)

        self.assertEqual(len(conn._requested), 1)
        req = conn._requested[0]
        self.assertEqual(req['method'], 'GET')
        self.assertEqual(req['path'], '/%s' % PATH)
        self.assertEqual(req['query_params'], {'projection': 'full'})
Example n. 52
    def _fetch_single_page(table):
        import six

        iterator = table.fetch_data()
        page = six.next(iterator.pages)
        return list(page)
Example n. 53
 def _process_clients_input(self, clients_needing_prompt):
     for client in list(six.itervalues(self._server.clients)):
         if not client.active or not client.cmd_ready:
             continue
         client_name = client.addrport()
         client_cmd = client.get_command()
         client_cmd = client_cmd.strip()
         if not client.authed:
             if client_cmd != self.password:
                 client.deactivate()
             else:
                 client.authed = True
                 client.password_mode_off()
                 buf = six.StringIO()
                 buf.write("\n")
                 buf.write(self._make_welcome() + "\n")
                 buf.write(self._make_prompt())
                 client.send_cc(buf.getvalue())
             continue
         if client_cmd.lower() in ('quit', 'logout', 'exit', 'bye'):
             client.deactivate()
         elif client_cmd.lower() == 'ping':
             buf = six.StringIO()
             buf.write(_rainbow_colorize("pong"))
             buf.write("\n")
             client.send_cc(buf.getvalue())
             clients_needing_prompt.add(client_name)
         else:
             clients_needing_prompt.add(client_name)
             if client_cmd:
                 m_headers = {
                     message.VALIDATED_HEADER: True,
                     message.TO_ME_HEADER: True,
                     message.CHECK_AUTH_HEADER: False,
                 }
                 ts = str(six.next(self._ts_counter))
                 thread_ts = None
                 t_m = re.match(r"^@(\d+)\s+(.*)$", client_cmd)
                 if t_m:
                     thread_ts = t_m.group(1)
                     client_cmd = t_m.group(2)
                 # NOTE: Made to mostly look like a slack message body so
                 # that existing handlers don't care about
                 # the differences.
                 m_body = munch.Munch({
                     'text': client_cmd,
                     'ts': ts,
                     'thread_ts': thread_ts,
                     'text_no_links': client_cmd,
                     'user_id': client_name,
                     'user_name': client_name,
                     'channel': client_name,
                     'channel_id': client_name,
                     'quick_link': '',
                     'directed': True,
                 })
                 m_kind = "telnet/message"
                 m = TelnetMessage(m_kind, m_headers, m_body)
                 if thread_ts is None:
                     fut = self.bot.submit_message(m, c.TARGETED)
                 else:
                     fut = self.bot.submit_message(m, c.FOLLOWUP)
                 client.futs.append(fut)
                 buf = six.StringIO()
                 buf.write("Submitted thread %s" % _cook_message_ts(m))
                 buf.write("\n")
                 client.send_cc(buf.getvalue())
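The `@<ts> text` convention parsed above lets a telnet client follow up on an existing thread: the digits after `@` become `thread_ts` and the rest becomes the message text. A quick self-contained check of that regex:

import re

match = re.match(r"^@(\d+)\s+(.*)$", "@17 restart the deploy")
assert match.group(1) == "17"                  # thread timestamp to follow up on
assert match.group(2) == "restart the deploy"  # remaining message body
assert re.match(r"^@(\d+)\s+(.*)$", "plain message") is None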
Example n. 54
    def getTokens(self, formula):
        def currentChar():
            return formula[offset]

        def doubleChar():
            return formula[offset:offset + 2]

        def nextChar():
            # JavaScript returns an empty string if the index is out of bounds,
            # Python throws an IndexError.  We mimic this behaviour here.
            try:
                formula[offset + 1]
            except IndexError:
                return ""
            else:
                return formula[offset + 1]

        def EOF():
            return offset >= len(formula)

        tokens = f_tokens()
        tokenStack = f_tokenStack()
        offset = 0
        token = ""
        inString = False
        inPath = False
        inRange = False
        inError = False

        while (len(formula) > 0):
            if (formula[0] in (" ", "\n")):
                formula = formula[1:]
            else:
                if (formula[0] == "="):
                    formula = formula[1:]
                break

        # state-dependent character evaluation (order is important)
        while not EOF():
            # double-quoted strings
            # embeds are doubled
            # end marks token
            if inString:
                if currentChar() == "\"":
                    if nextChar() == "\"":
                        token += "\""
                        offset += 1
                    else:
                        inString = False
                        tokens.add(token, self.TOK_TYPE_OPERAND,
                                   self.TOK_SUBTYPE_TEXT)
                        token = ""
                else:
                    token += currentChar()
                offset += 1
                continue

            # single-quoted strings (links)
            # embeds are double
            # end does not mark a token
            if inPath:
                if currentChar() == "'":
                    if nextChar() == "'":
                        token += "'"
                        offset += 1
                    else:
                        inPath = False
                else:
                    token += currentChar()
                offset += 1
                continue

            # bracketed strings (range offset or linked workbook name)
            # no embeds (changed to "()" by Excel)
            # end does not mark a token
            if inRange:
                if currentChar() == "]":
                    inRange = False
                token += currentChar()
                offset += 1
                continue

            # error values
            # end marks a token, determined from absolute list of values
            if inError:
                token += currentChar()
                offset += 1
                if ",#NULL!,#DIV/0!,#VALUE!,#REF!,#NAME?,#NUM!,#N/A,".find(
                        "," + token + ",") != -1:
                    inError = False
                    tokens.add(token, self.TOK_TYPE_OPERAND,
                               self.TOK_SUBTYPE_ERROR)
                    token = ""
                continue

            # scientific notation check
            regexSN = r'^[1-9]{1}(\.[0-9]+)?[eE]{1}$'
            if (("+-").find(currentChar()) != -1):
                if len(token) > 1:
                    if re.match(regexSN, token):
                        token += currentChar()
                        offset += 1
                        continue

            # independent character evaluation (order not important)
            #
            # establish state-dependent character evaluations
            if currentChar() == "\"":
                if len(token) > 0:
                    # not expected
                    tokens.add(token, self.TOK_TYPE_UNKNOWN)
                    token = ""
                inString = True
                offset += 1
                continue

            if currentChar() == "'":
                if len(token) > 0:
                    # not expected
                    tokens.add(token, self.TOK_TYPE_UNKNOWN)
                    token = ""
                inPath = True
                offset += 1
                continue

            if (currentChar() == "["):
                inRange = True
                token += currentChar()
                offset += 1
                continue

            if (currentChar() == "#"):
                if (len(token) > 0):
                    # not expected
                    tokens.add(token, self.TOK_TYPE_UNKNOWN)
                    token = ""
                inError = True
                token += currentChar()
                offset += 1
                continue

            # mark start and end of arrays and array rows
            if (currentChar() == "{"):
                if (len(token) > 0):
                    # not expected
                    tokens.add(token, self.TOK_TYPE_UNKNOWN)
                    token = ""
                tokenStack.push(
                    tokens.add("ARRAY", self.TOK_TYPE_FUNCTION,
                               self.TOK_SUBTYPE_START))
                tokenStack.push(
                    tokens.add("ARRAYROW", self.TOK_TYPE_FUNCTION,
                               self.TOK_SUBTYPE_START))
                offset += 1
                continue

            if (currentChar() == ";"):
                if (len(token) > 0):
                    tokens.add(token, self.TOK_TYPE_OPERAND)
                    token = ""
                tokens.addRef(tokenStack.pop())
                tokens.add(",", self.TOK_TYPE_ARGUMENT)
                tokenStack.push(
                    tokens.add("ARRAYROW", self.TOK_TYPE_FUNCTION,
                               self.TOK_SUBTYPE_START))
                offset += 1

                continue

            if (currentChar() == "}"):
                if (len(token) > 0):
                    tokens.add(token, self.TOK_TYPE_OPERAND)
                    token = ""
                tokens.addRef(tokenStack.pop())
                tokens.addRef(tokenStack.pop())
                offset += 1
                continue

            # trim white-space
            if (currentChar() in (" ", "\n")):
                if (len(token) > 0):
                    tokens.add(token, self.TOK_TYPE_OPERAND)
                    token = ""
                tokens.add("", self.TOK_TYPE_WSPACE)
                offset += 1
                while ((currentChar() in (" ", "\n")) and (not EOF())):
                    offset += 1
                continue

            # multi-character comparators
            if (",>=,<=,<>,".find("," + doubleChar() + ",") != -1):
                if (len(token) > 0):
                    tokens.add(token, self.TOK_TYPE_OPERAND)
                    token = ""
                tokens.add(doubleChar(), self.TOK_TYPE_OP_IN,
                           self.TOK_SUBTYPE_LOGICAL)
                offset += 2
                continue

            # standard infix operators
            if (self.OPERATORS.find(currentChar()) != -1):
                if (len(token) > 0):
                    tokens.add(token, self.TOK_TYPE_OPERAND)
                    token = ""
                tokens.add(currentChar(), self.TOK_TYPE_OP_IN)
                offset += 1
                continue

            # standard postfix operators
            if ("%".find(currentChar()) != -1):
                if (len(token) > 0):
                    tokens.add(old_div(float(token), 100),
                               self.TOK_TYPE_OPERAND)
                    token = ""
                else:
                    tokens.add('*', self.TOK_TYPE_OP_IN)
                    tokens.add(0.01, self.TOK_TYPE_OPERAND)
                # tokens.add(currentChar(), self.TOK_TYPE_OP_POST)
                offset += 1
                continue

            # start subexpression or function
            if (currentChar() == "("):
                if (len(token) > 0):
                    tokenStack.push(
                        tokens.add(token, self.TOK_TYPE_FUNCTION,
                                   self.TOK_SUBTYPE_START))
                    token = ""
                else:
                    tokenStack.push(
                        tokens.add("", self.TOK_TYPE_SUBEXPR,
                                   self.TOK_SUBTYPE_START))
                offset += 1
                continue

            # function, subexpression, array parameters
            if (currentChar() == ","):
                if (len(token) > 0):
                    tokens.add(token, self.TOK_TYPE_OPERAND)
                    token = ""
                if (not (tokenStack.type() == self.TOK_TYPE_FUNCTION)):
                    tokens.add(currentChar(), self.TOK_TYPE_OP_IN,
                               self.TOK_SUBTYPE_UNION)
                else:
                    tokens.add(currentChar(), self.TOK_TYPE_ARGUMENT)
                offset += 1
                if (currentChar() == ","):
                    tokens.add('None', self.TOK_TYPE_OPERAND,
                               self.TOK_SUBTYPE_NONE)
                    token = ""
                continue

            # stop subexpression
            if (currentChar() == ")"):
                if (len(token) > 0):
                    tokens.add(token, self.TOK_TYPE_OPERAND)
                    token = ""
                tokens.addRef(tokenStack.pop())
                offset += 1
                continue

            # token accumulation
            token += currentChar()
            offset += 1

        # dump remaining accumulation
        if (len(token) > 0):
            tokens.add(token, self.TOK_TYPE_OPERAND)

        # move all tokens to a new collection, excluding all unnecessary white-space tokens
        tokens2 = f_tokens()

        while (tokens.moveNext()):
            token = tokens.current()

            if (token.ttype == self.TOK_TYPE_WSPACE):
                if ((tokens.BOF()) or (tokens.EOF())):
                    pass
                elif (not (
                    ((tokens.previous().ttype == self.TOK_TYPE_FUNCTION) and
                     (tokens.previous().tsubtype == self.TOK_SUBTYPE_STOP)) or
                    ((tokens.previous().ttype == self.TOK_TYPE_SUBEXPR) and
                     (tokens.previous().tsubtype == self.TOK_SUBTYPE_STOP)) or
                    (tokens.previous().ttype == self.TOK_TYPE_OPERAND))):
                    pass
                elif (not (
                    ((six.next(tokens).ttype == self.TOK_TYPE_FUNCTION) and
                     (tokens.next().tsubtype == self.TOK_SUBTYPE_START)) or
                    ((six.next(tokens).ttype == self.TOK_TYPE_SUBEXPR) and
                     (tokens.next().tsubtype == self.TOK_SUBTYPE_START)) or
                    (six.next(tokens).ttype == self.TOK_TYPE_OPERAND))):
                    pass
                else:
                    tokens2.add(token.tvalue, self.TOK_TYPE_OP_IN,
                                self.TOK_SUBTYPE_INTERSECT)
                continue

            tokens2.addRef(token)

        # switch infix "-" operator to prefix when appropriate, switch infix "+" operator to noop when appropriate, identify operand
        # and infix-operator subtypes, pull "@" from in front of function names
        while (tokens2.moveNext()):
            token = tokens2.current()
            if ((token.ttype == self.TOK_TYPE_OP_IN)
                    and (token.tvalue == "-")):
                if (tokens2.BOF()):
                    token.ttype = self.TOK_TYPE_OP_PRE
                elif (((tokens2.previous().ttype == self.TOK_TYPE_FUNCTION) and
                       (tokens2.previous().tsubtype == self.TOK_SUBTYPE_STOP))
                      or
                      ((tokens2.previous().ttype == self.TOK_TYPE_SUBEXPR) and
                       (tokens2.previous().tsubtype == self.TOK_SUBTYPE_STOP))
                      or (tokens2.previous().ttype == self.TOK_TYPE_OP_POST)
                      or (tokens2.previous().ttype == self.TOK_TYPE_OPERAND)):
                    token.tsubtype = self.TOK_SUBTYPE_MATH
                else:
                    token.ttype = self.TOK_TYPE_OP_PRE
                continue

            if ((token.ttype == self.TOK_TYPE_OP_IN)
                    and (token.tvalue == "+")):
                if (tokens2.BOF()):
                    token.ttype = self.TOK_TYPE_NOOP
                elif (((tokens2.previous().ttype == self.TOK_TYPE_FUNCTION) and
                       (tokens2.previous().tsubtype == self.TOK_SUBTYPE_STOP))
                      or
                      ((tokens2.previous().ttype == self.TOK_TYPE_SUBEXPR) and
                       (tokens2.previous().tsubtype == self.TOK_SUBTYPE_STOP))
                      or (tokens2.previous().ttype == self.TOK_TYPE_OP_POST)
                      or (tokens2.previous().ttype == self.TOK_TYPE_OPERAND)):
                    token.tsubtype = self.TOK_SUBTYPE_MATH
                else:
                    token.ttype = self.TOK_TYPE_NOOP
                continue

            if ((token.ttype == self.TOK_TYPE_OP_IN)
                    and (len(token.tsubtype) == 0)):
                if (("<>=").find(token.tvalue[0:1]) != -1):
                    token.tsubtype = self.TOK_SUBTYPE_LOGICAL
                elif (token.tvalue == "&"):
                    token.tsubtype = self.TOK_SUBTYPE_CONCAT
                else:
                    token.tsubtype = self.TOK_SUBTYPE_MATH
                continue

            if ((token.ttype == self.TOK_TYPE_OPERAND)
                    and (len(token.tsubtype) == 0)):
                try:
                    float(token.tvalue)
                except ValueError as e:
                    if ((token.tvalue == 'TRUE') or (token.tvalue == 'FALSE')):
                        token.tsubtype = self.TOK_SUBTYPE_LOGICAL
                    else:
                        token.tsubtype = self.TOK_SUBTYPE_RANGE
                else:
                    token.tsubtype = self.TOK_SUBTYPE_NUMBER
                continue

            if (token.ttype == self.TOK_TYPE_FUNCTION):
                if (token.tvalue[0:1] == "@"):
                    token.tvalue = token.tvalue[1:]
                continue

        tokens2.reset()

        # move all tokens to a new collection, excluding all noops
        tokens = f_tokens()
        while (tokens2.moveNext()):
            if (tokens2.current().ttype != self.TOK_TYPE_NOOP):
                tokens.addRef(tokens2.current())

        tokens.reset()
        return tokens
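One subtle step above is the scientific-notation check: a '+' or '-' is only absorbed into the current token when the token so far looks like a mantissa-plus-exponent prefix such as "1.5E", so that "1.5E+10" is not split at the sign. A quick check of that regex (the same pattern as above, written as a raw string):

import re

regexSN = r'^[1-9]{1}(\.[0-9]+)?[eE]{1}$'
assert re.match(regexSN, '1.5E')     # a following '+'/'-' is part of the number
assert re.match(regexSN, '2e')
assert not re.match(regexSN, 'SUM')  # a '+' after a name stays an operator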
Example n. 55
def discriminator(x,
                  progress,
                  num_filters_fn,
                  resolution_schedule,
                  num_blocks=None,
                  kernel_size=3,
                  simple_arch=False,
                  scope='progressive_gan_discriminator',
                  reuse=None):
  """Discriminator network for the progressive GAN model.

  Args:
    x: A `Tensor` of NHWC format representing images of size `resolution`.
    progress: A scalar float `Tensor` of training progress.
    num_filters_fn: A function that maps `block_id` to # of filters for the
        block.
    resolution_schedule: An object of `ResolutionSchedule`.
    num_blocks: An integer number of blocks. None means the maximum number of
        blocks, i.e. `resolution_schedule.num_resolutions`. Defaults to None.
    kernel_size: An integer of convolution kernel size.
    simple_arch: Bool, use a simple architecture.
    scope: A string or variable scope.
    reuse: Whether to reuse `scope`. Defaults to None which means to inherit
        the reuse option of the parent scope.

  Returns:
    A `Tensor` of model output and a dictionary of model end points.
  """
  he_init = contrib_layers.variance_scaling_initializer()

  if num_blocks is None:
    num_blocks = resolution_schedule.num_resolutions

  def _conv2d(scope, x, kernel_size, filters, padding='SAME'):
    return layers.custom_conv2d(
        x=x,
        filters=filters,
        kernel_size=kernel_size,
        padding=padding,
        activation=tf.nn.leaky_relu,
        he_initializer_slope=0.0,
        scope=scope)

  def _from_rgb(x, block_id):
    return _conv2d('from_rgb', x, 1, num_filters_fn(block_id))

  if resolution_schedule.scale_mode == 'H':
    strides = (resolution_schedule.scale_base, 1)
  else:
    strides = (resolution_schedule.scale_base,
               resolution_schedule.scale_base)

  end_points = {}

  with tf.variable_scope(scope, reuse=reuse):
    x0 = x
    end_points['rgb'] = x0

    lods = []
    for block_id in range(num_blocks, 0, -1):
      with tf.variable_scope(block_name(block_id)):
        scale = resolution_schedule.scale_factor(block_id)
        lod = resolution_schedule.downscale(x0, scale)
        end_points['downscaled_rgb_{}'.format(block_id)] = lod
        if simple_arch:
          lod = tf.layers.conv2d(
              lod,
              num_filters_fn(block_id),
              kernel_size=1,
              padding='SAME',
              name='from_rgb',
              kernel_initializer=he_init)
          lod = tf.nn.relu(lod)
        else:
          lod = _from_rgb(lod, block_id)
        # alpha_i is used to replace lod_select.
        alpha = _discriminator_alpha(block_id, progress)
        end_points['alpha_{}'.format(block_id)] = alpha
      lods.append((lod, alpha))

    lods_iter = iter(lods)
    x, _ = six.next(lods_iter)
    for block_id in range(num_blocks, 1, -1):
      with tf.variable_scope(block_name(block_id)):
        if simple_arch:
          x = tf.layers.conv2d(
              x,
              num_filters_fn(block_id-1),
              strides=strides,
              kernel_size=kernel_size,
              padding='SAME',
              name='conv',
              kernel_initializer=he_init)
          x = tf.nn.relu(x)
        else:
          x = _conv2d('conv0', x, kernel_size, num_filters_fn(block_id))
          x = _conv2d('conv1', x, kernel_size, num_filters_fn(block_id - 1))
          x = resolution_schedule.downscale(x, resolution_schedule.scale_base)
        lod, alpha = six.next(lods_iter)
        x = alpha * lod + (1.0 - alpha) * x

    with tf.variable_scope(block_name(1)):
      x = layers.scalar_concat(x, layers.minibatch_mean_stddev(x))
      if simple_arch:
        x = tf.reshape(x, [tf.shape(x)[0], -1])  # flatten
        x = tf.layers.dense(x, num_filters_fn(0), name='last_conv',
                            kernel_initializer=he_init)
        x = tf.reshape(x, [tf.shape(x)[0], 1, 1, num_filters_fn(0)])
        x = tf.nn.relu(x)
      else:
        x = _conv2d('conv0', x, kernel_size, num_filters_fn(1))
        x = _conv2d('conv1', x, resolution_schedule.start_resolutions,
                    num_filters_fn(0), 'VALID')
      end_points['last_conv'] = x
      if simple_arch:
        logits = tf.layers.dense(x, 1, name='logits',
                                 kernel_initializer=he_init)
      else:
        logits = layers.custom_dense(x=x, units=1, scope='logits')
      end_points['logits'] = logits

  return logits, end_points
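The final loops consume the precomputed (lod, alpha) pairs with six.next, fading each coarser branch into the running features as alpha * lod + (1 - alpha) * x. A toy, framework-free sketch of that traversal; the numbers are made up and plain floats stand in for Tensors:

import six

# Hypothetical (lod, alpha) pairs in the order the code appends them
# (finest block first).
lods = [(4.0, 1.0), (3.0, 0.5), (2.0, 0.25)]

lods_iter = iter(lods)
x, _ = six.next(lods_iter)                 # start from the finest block's features
while True:
    try:
        lod, alpha = six.next(lods_iter)   # next coarser branch
    except StopIteration:
        break
    x = alpha * lod + (1.0 - alpha) * x    # fade the coarser branch in by alpha
print(x)                                   # blended value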
Esempio n. 56
0
    def test_list_subscriptions_with_paging(self):
        import six
        from google.cloud.pubsub.subscription import Subscription
        from google.cloud.pubsub.topic import Topic

        SUB_INFO = {'name': self.SUB_PATH, 'topic': self.TOPIC_PATH}
        creds = _make_credentials()
        client = self._make_one(project=self.PROJECT,
                                credentials=creds,
                                use_gax=False)

        # Set up the mock response.
        ACK_DEADLINE = 42
        PUSH_ENDPOINT = 'https://push.example.com/endpoint'
        SUB_INFO = {
            'name': self.SUB_PATH,
            'topic': self.TOPIC_PATH,
            'ackDeadlineSeconds': ACK_DEADLINE,
            'pushConfig': {
                'pushEndpoint': PUSH_ENDPOINT
            }
        }
        TOKEN1 = 'TOKEN1'
        TOKEN2 = 'TOKEN2'
        SIZE = 1
        returned = {
            'subscriptions': [SUB_INFO],
            'nextPageToken': TOKEN2,
        }
        client._connection = _Connection(returned)

        iterator = client.list_subscriptions(SIZE, TOKEN1)
        page = six.next(iterator.pages)
        subscriptions = list(page)
        next_page_token = iterator.next_page_token

        # Check the token returned.
        self.assertEqual(next_page_token, TOKEN2)
        # Check the subscription object returned.
        self.assertEqual(len(subscriptions), 1)
        subscription = subscriptions[0]
        self.assertIsInstance(subscription, Subscription)
        self.assertEqual(subscription.name, self.SUB_NAME)
        self.assertIsInstance(subscription.topic, Topic)
        self.assertEqual(subscription.topic.name, self.TOPIC_NAME)
        self.assertIs(subscription._client, client)
        self.assertEqual(subscription._project, self.PROJECT)
        self.assertEqual(subscription.ack_deadline, ACK_DEADLINE)
        self.assertEqual(subscription.push_endpoint, PUSH_ENDPOINT)

        called_with = client._connection._called_with
        expected_path = '/projects/%s/subscriptions' % (self.PROJECT, )
        self.assertEqual(
            called_with, {
                'method': 'GET',
                'path': expected_path,
                'query_params': {
                    'pageSize': SIZE,
                    'pageToken': TOKEN1,
                },
            })
Esempio n. 57
0
def movie_ratings(user_id):
    logging.debug("User %s rating requested", user_id)
    # recomendacion_usuario = db.recomendaciones.find({}).sort({ "fecha" : -1 })
    # .limit(1)['normal']['colaborativo'][str(user_id)]
    obtener_recomendacion = \
        json.loads(dumps(db.recomendaciones.find().sort("fecha", -1).limit(1)))[
            0]
    recomendadores_generales = {}

    def obtener_recomendaciones(recomendador):
        try:
            if obtener_recomendacion[recomendador]:
                recomendadores_generales[recomendador] = obtener_recomendacion[
                    recomendador]
        except (KeyError, TypeError):
            # recommender key missing (or not a mapping) in the stored document
            pass

    obtener_recomendaciones('evento')
    obtener_recomendaciones('empresa')
    obtener_recomendaciones('experto')
    obtener_recomendaciones('proveedor')
    obtener_recomendaciones('frecuencia')
    obtener_recomendaciones('tradicional')

    item1 = obtener_recomendacion.get('colaborativo_item_als', False)
    item2 = obtener_recomendacion.get('colaborativo_item_cosine', False)
    item3 = obtener_recomendacion.get('colaborativo_item_bm25', False)
    usuario = obtener_recomendacion.get('colaborativo_usuario', False)

    if (item1 or item2 or item3 or usuario):

        def cargar_recomendaciones(recomendadores):
            for recomendador in recomendadores:
                try:
                    recomendadores_generales[recomendador] = \
                        obtener_recomendacion[recomendador].get(
                            str(user_id), {})
                except (KeyError, AttributeError):
                    # recommender missing or not a per-user mapping
                    pass

        cargar_recomendaciones([
            'colaborativo_usuario', 'colaborativo_item_als',
            'colaborativo_item_cosine', 'colaborativo_item_bm25'
        ])

        item1r = recomendadores_generales.get('colaborativo_item_als', False)
        item2r = recomendadores_generales.get('colaborativo_item_cosine',
                                              False)
        item3r = recomendadores_generales.get('colaborativo_item_bm25', False)

        usuariosad = recomendadores_generales.get('colaborativo_usuario',
                                                  False)

        if not bool(item1r) and not bool(item2r) and not bool(item3r):
            recomendadores_generales['random'] = {}
            # fall back to a recommendation based on the user's history
            # fetch the history
            historial_usuario = historial_usuarios(user_id, formato=False)
            if bool(historial_usuario):
                # take the first item from the history
                item_historial = historial_usuario[0]['item']
                recomendacion_item_raw = recommend_item(item_historial,
                                                        formato=False)
                rec_item = recomendacion_item_raw.get('distancia_item', {})
                if bool(rec_item):
                    recomendadores_generales['random'] = rec_item
            else:

                recomendadores_generales['random'] = six.next(
                    six.itervalues(
                        obtener_recomendacion['colaborativo_item_als']))
    return json.dumps(recomendadores_generales)
    def _fill_example_queue(self):
        """Reads data from file and processes into Examples which are then placed into the example queue."""

        input_gen = self.text_generator(
            data.example_generator(self._data_path, self._single_pass))
        cnt = 0
        fail = 0
        while True:
            try:
                # read the next example from file. article and abstract are
                # both strings.
                (article_id, article_text, abstract_sents, labels,
                 section_names, sections) = six.next(input_gen)
            except StopIteration:  # if there are no more examples:
                tf.logging.info(
                    "The example generator for this example queue filling thread has exhausted data."
                )
                if self._single_pass:
                    tf.logging.info(
                        "single_pass mode is on, so we've finished reading dataset. This thread is stopping."
                    )
                    self._finished_reading = True
                    break
                else:
                    raise Exception(
                        "single_pass mode is off but the example generator is out of data; error."
                    )

            # Use the <s> and </s> tags in abstract to get a list of sentences.
#       abstract_sentences = [sent.strip() for sent in data.abstract2sents(''.join(abstract_sents))]
            abstract_sentences = [
                e.replace(data.SENTENCE_START,
                          '').replace(data.SENTENCE_END, '').strip()
                for e in abstract_sents
            ]

            # at least 2 sections, some articles do not have sections
            if "_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ __ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _" in article_text:
                continue

            if not sections or len(sections) <= 1:
                continue

            # do not process articles that are too long
            if len(article_text) > self._hps.max_article_sents:
                continue

            # Do not process documents with unusually long or short abstracts
            abst_len = len(' '.join(abstract_sentences).split())
            if abst_len > self._hps.max_abstract_len or\
                    abst_len < self._hps.min_abstract_len:
                continue

            # Process into an Example.
            example = Example(article_text, abstract_sentences, article_id,
                              sections, section_names, labels, self._vocab,
                              self._hps)
            # place the Example in the example queue.
            if example.discard:
                fail += 1
            cnt += 1
            if example is not None and not example.discard:
                self._example_queue.put(example)
            if cnt % 100 == 0:
                print('total in queue: {} of {}'.format(cnt - fail, cnt))
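The queue-filling loop above pulls examples with six.next and treats StopIteration as the end-of-data signal, either stopping cleanly in single-pass mode or raising otherwise. A stripped-down sketch of that consumption pattern, using generic names (fill_queue, a plain list as the queue) rather than the original classes:

import six

def fill_queue(generator, queue, single_pass=True):
    """Drain `generator` into `queue`, stopping cleanly when it is exhausted."""
    while True:
        try:
            item = six.next(generator)
        except StopIteration:
            if single_pass:
                break                      # finished one pass over the data
            raise RuntimeError("generator exhausted but single_pass is off")
        queue.append(item)

q = []
fill_queue(iter(range(3)), q)
print(q)  # [0, 1, 2]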
Esempio n. 59
0
def decompress_file(archive, dir_, leading_directories='strip'):
    """Decompress `archive` into a directory `dir_`

    Parameters
    ----------
    archive: str
    dir_: str
    leading_directories: {'strip', None}
      If `strip`, and archive contains a single leading directory under which
      all content is stored, all the content will be moved one directory up
      and that leading directory will be removed.
    """
    if not exists(dir_):
        lgr.debug("Creating directory %s to extract archive into" % dir_)
        os.makedirs(dir_)

    with swallow_outputs() as cmo:
        archive = assure_bytes(archive)
        dir_ = assure_bytes(dir_)
        patoolib.util.check_existing_filename(archive)
        patoolib.util.check_existing_filename(dir_, onlyfiles=False)
        # Call protected one to avoid the checks on existence on unixified path
        outdir = unixify_path(dir_)
        if not PY2:
            # should be supplied in PY3 to avoid b''
            outdir = assure_unicode(outdir)
            archive = assure_unicode(archive)
        patoolib._extract_archive(unixify_path(archive),
                                  outdir=outdir,
                                  verbosity=100)
        if cmo.out:
            lgr.debug("patool gave stdout:\n%s" % cmo.out)
        if cmo.err:
            lgr.debug("patool gave stderr:\n%s" % cmo.err)

    # Note: (ben) Experienced issue, where extracted tarball
    # lacked execution bit of directories, leading to not being
    # able to delete them while having write permission.
    # Can't imagine a situation, where we would want to fail on
    # that kind of mess. So, to be sure set it.

    if not on_windows:
        os.chmod(dir_,
                 os.stat(dir_).st_mode |
                 os.path.stat.S_IEXEC)
        for root, dirs, files in os.walk(dir_, followlinks=False):
            for d in dirs:
                subdir = opj(root, d)
                os.chmod(subdir,
                         os.stat(subdir).st_mode |
                         os.path.stat.S_IEXEC)

    if leading_directories == 'strip':
        _, dirs, files = next(os.walk(dir_))
        if not len(files) and len(dirs) == 1:
            # move all the content under dirs[0] up 1 level
            widow_dir = opj(dir_, dirs[0])
            lgr.debug("Moving content within %s upstairs" % widow_dir)
            subdir, subdirs_, files_ = next(os.walk(opj(dir_, dirs[0])))
            for f in subdirs_ + files_:
                os.rename(opj(subdir, f), opj(dir_, f))
            rmdir(widow_dir)
    elif leading_directories is None:
        pass   # really do nothing
    else:
        raise NotImplementedError("Not supported %s" % leading_directories)
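A minimal usage sketch for the function above; the archive path and target directories are placeholders, not paths from the original project:

# Extract an archive and drop a single wrapping directory, if present.
decompress_file('/tmp/dataset.tar.gz', '/tmp/dataset', leading_directories='strip')
# Extract as-is, keeping any leading directory.
decompress_file('/tmp/dataset.tar.gz', '/tmp/dataset_raw', leading_directories=None)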
Esempio n. 60
0
    def _get_existing_device(self):
        device_mappings = helpers.parse_mappings(
            cfg.CONF.FDB.shared_physical_device_mappings, unique_keys=False)
        DEVICES = six.next(six.itervalues(device_mappings))
        return DEVICES[0]
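The snippet above uses six.next(six.itervalues(...)) to grab one value from a mapping without materialising all of its values. A small standalone illustration with a made-up mapping:

import six

device_mappings = {'physnet1': ['eth0', 'eth1'], 'physnet2': ['eth2']}  # made-up data
# One of the value lists (insertion order on Python 3.7+, arbitrary on Python 2).
devices = six.next(six.itervalues(device_mappings))
print(devices[0])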