Example #1
import numpy as np
import tensorflow as tf
from tensorflow.data import Dataset


def show_dataset(ds):
    # Assumed helper (not defined in the original snippet): print all elements.
    print(list(ds.as_numpy_iterator()))


def main():

    dataset_count = 10

    def create_dataset(i):
        return Dataset.range(4 * i, 4 * (i + 1))

    # A dataset whose elements are themselves datasets of 4 consecutive integers.
    dataset = Dataset.range(dataset_count).map(create_dataset)

    for d in dataset:
        show_dataset(d)

    # flat_map concatenates the nested datasets in order.
    d = dataset.flat_map(lambda x: x)
    show_dataset(d)

    # interleave cycles over 2 nested datasets, taking 3 elements from each per turn.
    d = dataset.interleave(lambda x: x, cycle_length=2, block_length=3)
    show_dataset(d)

    # Repeat two datasets of different lengths and interleave them.
    a = Dataset.from_tensor_slices(np.arange(10)).repeat()
    b = Dataset.from_tensor_slices(100 + np.arange(17)).repeat()
    datasets = [a, b]
    n = len(datasets)
    c = Dataset.from_tensor_slices(datasets)
    d = c.interleave(lambda x: x, cycle_length=n).take(50)
    show_dataset(d)
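
For reference, a minimal sketch (TF 2.x eager mode assumed) of the orderings flat_map and interleave produce above, shrunk to dataset_count = 2 for readability:

import tensorflow as tf

ds = tf.data.Dataset.range(2).map(
    lambda i: tf.data.Dataset.range(4 * i, 4 * (i + 1)))

# flat_map concatenates: [0, 1, 2, 3, 4, 5, 6, 7]
print(list(ds.flat_map(lambda x: x).as_numpy_iterator()))

# interleave takes blocks of 3 alternately from 2 datasets:
# [0, 1, 2, 4, 5, 6, 3, 7]
print(list(ds.interleave(lambda x: x, cycle_length=2,
                         block_length=3).as_numpy_iterator()))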
Example #2
    def prepare_train_generator(self):
        # Collect training images in any of the supported formats.
        image_names = glob.glob(self.dir_name +
                                "/training_data/images/images/*.jpg")
        for ext in ("png", "bmp", "tif"):
            image_names.extend(
                glob.glob(self.dir_name +
                          "/training_data/images/images/*." + ext))

        # Use the first image to fix the (height, width) every crop must match.
        sample_img = cv2.imread(image_names[0])
        target_shape = (sample_img.shape[0], sample_img.shape[1])

        crop_generator = CropGenerator(self.dir_name, target_shape)

        #image_dataset = tf.data.Dataset.list_files(self.dir_name + '/training_data/images/images/*')
        # Interleave several generator-backed datasets; note that
        # Dataset.range(1, 8) yields only 7 elements, so at most 7 generators
        # run despite cycle_length=8.
        total_dataset = Dataset.range(1, 8).interleave(
            lambda x: Dataset.from_generator(
                CropGenerator(self.dir_name, target_shape),
                output_types=(tf.float32, tf.float32)),
            cycle_length=8)
        total_dataset = total_dataset.shuffle(buffer_size=20)
        #total_dataset = total_dataset.cache("./data_cache.")
        total_dataset = total_dataset.repeat()
        total_dataset = total_dataset.prefetch(buffer_size=20)
        # TF 1.x API: materialize the next-element tensors for session use.
        data_tf = total_dataset.make_one_shot_iterator().get_next()
        return data_tf, crop_generator()
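
The snippet relies on a project-specific CropGenerator class whose definition is not shown. A hypothetical sketch of the shape Dataset.from_generator expects here, i.e. a callable returning an iterator of (input, target) float32 pairs:

import numpy as np

class CropGenerator:
    # Hypothetical stand-in; the project's real class lives elsewhere.
    def __init__(self, dir_name, target_shape):
        self.dir_name = dir_name
        self.target_shape = target_shape

    def __call__(self):
        # from_generator calls the object, so __call__ must return an iterator
        # of pairs matching output_types=(tf.float32, tf.float32).
        for _ in range(4):  # placeholder; the real class yields image crops
            crop = np.zeros(self.target_shape + (3,), dtype=np.float32)
            yield crop, crop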
Example #3
def create_dataset(i):
    return Dataset.range(4 * i, 4 * (i + 1))
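
A quick usage note (TF 2.x eager mode and `from tensorflow.data import Dataset` assumed): each call yields the 4 consecutive integers starting at 4 * i.

print(list(create_dataset(2).as_numpy_iterator()))  # -> [8, 9, 10, 11]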
Example #4
def compose_datasets(datasets):
    # The original body, Dataset.range(n).map(lambda i: datasets[i]), cannot
    # work: map() traces its function, so `i` is a symbolic tensor and cannot
    # index a Python list. Slicing the list directly, as Example #1 does,
    # yields the intended dataset-of-datasets.
    return Dataset.from_tensor_slices(datasets)
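
Hypothetical usage, mirroring Example #1's round-robin interleave (and assuming, as Example #1 does, that from_tensor_slices accepts a list of datasets in the TF version in use):

a = Dataset.range(3)
b = Dataset.range(100, 103)
d = compose_datasets([a, b]).interleave(lambda x: x, cycle_length=2)
print(list(d.as_numpy_iterator()))  # -> [0, 100, 1, 101, 2, 102]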
Example #5
import tensorflow as tf
from tensorflow.data import Dataset, Iterator

dataset_train = Dataset.range(10)
dataset_val = Dataset.range(90, 100)

# TF 1.x "feedable iterator" pattern: each dataset exposes a string handle...
iter_train_handle = dataset_train.make_one_shot_iterator().string_handle()
iter_val_handle = dataset_val.make_one_shot_iterator().string_handle()

# ...and a single generic iterator dispatches to whichever handle is fed in.
handle = tf.placeholder(tf.string, shape=[])
iterator = Iterator.from_string_handle(handle, dataset_train.output_types,
                                       dataset_train.output_shapes)
next_batch = iterator.get_next()

with tf.train.MonitoredTrainingSession() as sess:
    handle_train, handle_val = sess.run([iter_train_handle, iter_val_handle])

    for step in range(10):
        print('train', sess.run(next_batch, feed_dict={handle: handle_train}))

        # Pull from the validation set every third step, without rebuilding
        # the graph or re-feeding the training iterator.
        if step % 3 == 0:
            print('val', sess.run(next_batch, feed_dict={handle: handle_val}))
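
The feedable-iterator API above was removed in TF 2.x. A rough eager-mode equivalent (an assumed translation, not part of the original) simply keeps two Python iterators:

import tensorflow as tf

train_it = iter(tf.data.Dataset.range(10))
val_it = iter(tf.data.Dataset.range(90, 100))

for step in range(10):
    print('train', next(train_it).numpy())
    if step % 3 == 0:
        print('val', next(val_it).numpy())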
Example #6
import tensorflow as tf
from tensorflow.data import Dataset

num_words = 10000
words_per_example = 20
examples_per_batch = 100

words_per_batch = examples_per_batch * words_per_example

# Trim num_words so the stream divides evenly into full batches.
num_batches = num_words // words_per_batch
num_words = num_batches * words_per_batch
num_examples = num_words // words_per_example

# Split the word stream into fixed-length examples.
a = Dataset.range(num_words).batch(words_per_example)
batch_stride = num_batches

# Group examples into strides of num_batches, then interleave so that row r of
# batch k+1 continues the word stream of row r of batch k (stateful-RNN style
# batching).
a = a.batch(batch_stride)
a = a.interleave(lambda x: Dataset.from_tensor_slices(x),
                 cycle_length=examples_per_batch)

a = a.batch(examples_per_batch)
print(a.output_shapes)

n = a.make_one_shot_iterator().get_next()

print("-" * 80)
with tf.Session() as sess:
    # The original snippet breaks off after `try:`; the standard TF 1.x idiom
    # is to run until the iterator is exhausted.
    try:
        while True:
            print(sess.run(n))
    except tf.errors.OutOfRangeError:
        pass
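
A smaller sketch of the same striding trick (TF 2.x eager mode assumed; 12 words, 2 words per example, 3 examples per batch) makes the reordering visible:

import tensorflow as tf

a = tf.data.Dataset.range(12).batch(2)  # 6 examples of 2 words each
a = a.batch(2)                          # strides of num_batches = 2
a = a.interleave(lambda x: tf.data.Dataset.from_tensor_slices(x),
                 cycle_length=3)
a = a.batch(3)                          # 3 examples per batch
for batch in a:
    print(batch.numpy())
# [[0 1] [4 5] [8 9]]
# [[2 3] [6 7] [10 11]]
# Row r of the second batch continues the word stream of row r of the first.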
Example #7
File: data.py  Project: Danealor/denoiser
def extract_examples(audio, length, stride):
    # Window start indices 0, stride, 2*stride, ... while a full window of
    # `length` samples still fits in the audio tensor.
    indices = Dataset.range(
        0, tf.cast(tf.shape(audio)[0] - length + 1, dtype='int64'), stride)
    # Slice out one fixed-length window per start index.
    return indices.map(lambda i: audio[i:i + length, ...])
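
Hypothetical usage (assuming Dataset is tf.data.Dataset, as in the other examples): one-second windows with 50% overlap over dummy mono audio:

import tensorflow as tf
from tensorflow.data import Dataset

audio = tf.random.normal([3 * 16000, 1])  # 3 s of fake 16 kHz audio, 1 channel
windows = extract_examples(audio, length=16000, stride=8000)
for w in windows:
    print(w.shape)  # (16000, 1); 5 windows in total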