def testBasic(self):
  """Reads one `test://` file via WholeFileReader; key is the filename, value its contents."""
  with self.cached_session():
    reader = io_ops.WholeFileReader("test_reader")
    filename_queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
    filename_queue.enqueue_many([["test://foo"]]).run()
    filename_queue.close().run()
    key, value = self.evaluate(reader.read(filename_queue))
    self.assertEqual(compat.as_bytes("test://foo"), key)
    self.assertEqual(compat.as_bytes("AAAAAAAAAA"), value)
def t2estBasic(self):
  """Reads one `iptf://` file via WholeFileReader and checks key/value.

  NOTE(review): the name "t2estBasic" lacks the `test` prefix, so the test
  runner never discovers this method — confirm whether it was deliberately
  disabled or the "2" is a typo.
  """
  with self.test_session() as sess:
    reader = io_ops.WholeFileReader("test_reader")
    filename_queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
    filename_queue.enqueue_many([["iptf://repo/root/tenAs"]]).run()
    filename_queue.close().run()
    key, value = sess.run(reader.read(filename_queue))
    self.assertEqual(compat.as_bytes("iptf://repo/root/tenAs"), key)
    self.assertEqual(compat.as_bytes("AAAAAAAAAA"), value)
def testOneEpoch(self):
  """Reads each filename exactly once, then expects failure on the drained, closed queue."""
  reader = io_ops.WholeFileReader("test_reader")
  filename_queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
  self.evaluate(filename_queue.enqueue_many([self._filenames]))
  self.evaluate(filename_queue.close())
  key, value = reader.read(filename_queue)
  # One pass over the three fixture files.
  for index in range(3):
    self._ExpectRead(key, value, index)
  # Queue is closed and empty, so a fourth read must raise.
  with self.assertRaisesOpError("is closed and has insufficient elements "
                                "\\(requested 1, current size 0\\)"):
    self.evaluate([key, value])
def testInfiniteEpochs(self):
  """Re-enqueues the filenames between reads so the reader cycles repeatedly.

  NOTE(review): a second `testInfiniteEpochs` defined later in this class
  shadows this definition, so this version never runs — confirm which of
  the two duplicates is the intended test.
  """
  reader = io_ops.WholeFileReader("test_reader")
  filename_queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
  enqueue = filename_queue.enqueue_many([self._filenames])
  key, value = reader.read(filename_queue)
  self.evaluate(enqueue)
  self._ExpectRead(key, value, 0)
  self._ExpectRead(key, value, 1)
  self.evaluate(enqueue)
  # Reads straddle the refill: tail of epoch one, head of epoch two.
  for index in (2, 0, 1):
    self._ExpectRead(key, value, index)
  self.evaluate(enqueue)
  self._ExpectRead(key, value, 2)
  self._ExpectRead(key, value, 0)
def testInfiniteEpochs(self):
  """Session-based variant: refills the queue between reads so reads cycle.

  NOTE(review): this duplicates the name of an earlier `testInfiniteEpochs`
  in this class and therefore shadows it — confirm the duplication is
  intentional (this variant passes `sess` to `_ExpectRead`; the other does
  not).
  """
  with self.cached_session() as sess:
    reader = io_ops.WholeFileReader("test_reader")
    filename_queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
    enqueue = filename_queue.enqueue_many([self._filenames])
    key, value = reader.read(filename_queue)
    enqueue.run()
    self._ExpectRead(sess, key, value, 0)
    self._ExpectRead(sess, key, value, 1)
    enqueue.run()
    # Reads straddle the refill: tail of epoch one, head of epoch two.
    for index in (2, 0, 1):
      self._ExpectRead(sess, key, value, index)
    enqueue.run()
    self._ExpectRead(sess, key, value, 2)
    self._ExpectRead(sess, key, value, 0)
def testReadWrite(self):
  """Round-trips a file through the iptf filesystem: write, read back, then
  read via the content path and the name path exposed by the meta file.

  NOTE(review): path semantics below (meta file JSON schema, IpfsPath /
  IpnsPath keys, the '{"command": "publish"}' trigger) are assumed from the
  iptf plugin this test targets — confirm against its documentation.
  """
  with self.test_session() as sess:
    contents = "ASDASDASDASDASDAS"
    filename = "iptf://repo/root/foo"
    meta_filename = "iptf://meta/repo/root/foo"
    # Write op; each read below is created under a control dependency on it
    # so the file exists before it is read.
    wf = io_ops.write_file(filename=constant_op.constant(filename),
                           contents=constant_op.constant(contents))
    reader = io_ops.WholeFileReader("test_reader")
    # 1) Read back through the plain iptf:// filename.
    queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
    queue.enqueue_many([[filename]]).run()
    queue.close().run()
    with sess.graph.control_dependencies([wf]):
      key, value = sess.run(reader.read(queue))
    self.assertEqual(key, compat.as_bytes(filename))
    self.assertEqual(value, compat.as_bytes(contents))
    # 2) Read the meta file; its value is JSON describing the stored object.
    queue2 = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
    queue2.enqueue_many([[meta_filename]]).run()
    queue2.close().run()
    key, value = sess.run(reader.read(queue2))
    d = json.loads(compat.as_str(value))
    ipfs_path = d["IpfsPath"]
    # 3) The content-addressed IpfsPath must resolve to the same contents.
    queue3 = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
    queue3.enqueue_many([[ipfs_path]]).run()
    queue3.close().run()
    with sess.graph.control_dependencies([wf]):
      key, value = sess.run(reader.read(queue3))
    self.assertEqual(key, compat.as_bytes(ipfs_path))
    self.assertEqual(value, compat.as_bytes(contents))
    # Writing the "publish" command to the meta file presumably publishes
    # the object under its name path — TODO confirm with the plugin docs.
    with gfile.Open(meta_filename, "wb") as f:
      f.write(compat.as_bytes('{"command": "publish"}'))
    ipns_path = d["IpnsPath"]
    # 4) After publishing, the IpnsPath must also resolve to the contents.
    queue4 = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
    queue4.enqueue_many([[ipns_path]]).run()
    queue4.close().run()
    with sess.graph.control_dependencies([wf]):
      key, value = sess.run(reader.read(queue4))
    self.assertEqual(key, compat.as_bytes(ipns_path))
    self.assertEqual(value, compat.as_bytes(contents))