def test_large_minibatch(tmpdir):
    tmpfile = _write_data(tmpdir, MBDATA_DENSE_2)

    mb_source = MinibatchSource(CTFDeserializer(tmpfile, StreamDefs(
        features=StreamDef(field='S0', shape=1),
        labels=StreamDef(field='S1', shape=1))),
        randomization_window_in_chunks=0)
    features_si = mb_source.stream_info('features')
    labels_si = mb_source.stream_info('labels')

    mb = mb_source.next_minibatch(1000)
    features = mb[features_si]
    labels = mb[labels_si]

    # Actually, the minibatch spans over multiple sweeps,
    # not sure if this is an artificial situation, but
    # maybe instead of a boolean flag we should indicate
    # the largest sweep index the data was taken from.
    assert features.end_of_sweep
    assert labels.end_of_sweep

    assert features.num_samples == 1000 - 1000 % 7
    assert labels.num_samples == 5 * (1000 // 7)

    assert mb[features_si].num_sequences == (1000 // 7)
    assert mb[labels_si].num_sequences == (1000 // 7)
def test_MinibatchData_and_Value_as_input(tmpdir):
    mbdata = r'''0 |S0 100'''

    tmpfile = str(tmpdir/'mbtest.txt')
    with open(tmpfile, 'w') as f:
        f.write(mbdata)

    defs = StreamDefs(f1=StreamDef(field='S0', shape=1))
    mb_source = MinibatchSource(CTFDeserializer(tmpfile, defs),
                                randomize=False)

    f1_si = mb_source.stream_info('f1')

    mb = mb_source.next_minibatch(1)

    f1 = input_variable(shape=(1,), needs_gradient=True, name='f')
    res = f1 * 2

    assert res.eval({f1: mb[f1_si]}) == [[200]]

    # Test MinibatchData
    assert res.eval(mb[f1_si]) == [[200]]

    # Test Value
    assert res.eval(mb[f1_si].data) == [[200]]

    # Test NumPy (converted back from MinibatchData)
    assert res.eval(mb[f1_si].value) == [[200]]

    # Test Value
    assert res.eval(mb[f1_si].data) == [[200]]
def test_base64_image_deserializer(tmpdir):
    import io, base64, uuid
    from PIL import Image

    images, b64_images = [], []

    np.random.seed(1)
    for i in range(10):
        data = np.random.randint(0, 2**8, (5, 7, 3))
        image = Image.fromarray(data.astype('uint8'), "RGB")
        buf = io.BytesIO()
        image.save(buf, format='PNG')
        assert image.width == 7 and image.height == 5
        b64_images.append(base64.b64encode(buf.getvalue()))
        images.append(np.array(image))

    image_data = str(tmpdir / 'mbdata1.txt')
    seq_ids = []
    uid = uuid.uuid1().int >> 64
    with open(image_data, 'wb') as f:
        for i, data in enumerate(b64_images):
            seq_id = uid ^ i
            seq_id = str(seq_id).encode('ascii')
            seq_ids.append(seq_id)
            line = seq_id + b'\t'
            label = str(i).encode('ascii')
            line += label + b'\t' + data + b'\n'
            f.write(line)

    ctf_data = str(tmpdir / 'mbdata2.txt')
    with open(ctf_data, 'wb') as f:
        for i, sid in enumerate(seq_ids):
            line = sid + b'\t' + b'|index ' + str(i).encode('ascii') + b'\n'
            f.write(line)

    transforms = [xforms.scale(width=7, height=5, channels=3)]
    b64_deserializer = Base64ImageDeserializer(image_data, StreamDefs(
        images=StreamDef(field='image', transforms=transforms),
        labels=StreamDef(field='label', shape=10)))
    ctf_deserializer = CTFDeserializer(ctf_data, StreamDefs(
        index=StreamDef(field='index', shape=1)))

    mb_source = MinibatchSource([ctf_deserializer, b64_deserializer])
    assert isinstance(mb_source, MinibatchSource)

    for j in range(100):
        mb = mb_source.next_minibatch(10)

        index_stream = mb_source.streams['index']
        index = mb[index_stream].asarray().flatten()

        image_stream = mb_source.streams['images']
        results = mb[image_stream].asarray()

        for i in range(10):
            # original images are RGB, openCV produces BGR images,
            # reverse the last dimension of the original images
            bgrImage = images[int(index[i])][:, :, ::-1]
            assert (bgrImage == results[i][0]).all()
def test_minibatch_defined_by_labels(tmpdir):

    input_dim = 1000
    num_output_classes = 5

    def assert_data(mb_source):
        features_si = mb_source.stream_info('features')
        labels_si = mb_source.stream_info('labels')

        mb = mb_source.next_minibatch(2)

        features = mb[features_si]

        # 2 samples, max seq len 4, 1000 dim
        assert features.shape == (2, 4, input_dim)
        assert features.end_of_sweep
        assert features.num_sequences == 2
        assert features.num_samples == 7
        assert features.is_sparse

        labels = mb[labels_si]
        # 2 samples, max seq len 1, 5 dim
        assert labels.shape == (2, 1, num_output_classes)
        assert labels.end_of_sweep
        assert labels.num_sequences == 2
        assert labels.num_samples == 2
        assert not labels.is_sparse

        label_data = labels.asarray()
        assert np.allclose(label_data, np.asarray([
            [[1., 0., 0., 0., 0.]],
            [[0., 1., 0., 0., 0.]]
        ]))

        mb = mb_source.next_minibatch(3)
        features = mb[features_si]
        labels = mb[labels_si]

        assert features.num_samples == 10
        assert labels.num_samples == 3

    tmpfile = _write_data(tmpdir, MBDATA_SPARSE)
    mb_source = MinibatchSource(CTFDeserializer(tmpfile, StreamDefs(
        features=StreamDef(field='x', shape=input_dim, is_sparse=True),
        labels=StreamDef(field='y', shape=num_output_classes,
                         is_sparse=False, defines_mb_size=True)
    )), randomize=False)

    assert_data(mb_source)

    tmpfile1 = _write_data(tmpdir, MBDATA_SPARSE1, '1')
    tmpfile2 = _write_data(tmpdir, MBDATA_SPARSE2, '2')
    combined_mb_source = MinibatchSource([
        CTFDeserializer(tmpfile1, StreamDefs(
            features=StreamDef(field='x', shape=input_dim, is_sparse=True))),
        CTFDeserializer(tmpfile2, StreamDefs(
            labels=StreamDef(field='y', shape=num_output_classes,
                             is_sparse=False, defines_mb_size=True)
        ))], randomize=False)

    assert_data(combined_mb_source)
def test_eval_sparse_dense(tmpdir, device_id):
    from cntk import Axis
    from cntk.io import MinibatchSource, CTFDeserializer, StreamDef, StreamDefs
    from cntk.ops import input, times

    input_vocab_dim = label_vocab_dim = 69

    ctf_data = '''\
0 |S0 3:1 |# <s> |S1 3:1 |# <s>
0 |S0 4:1 |# A |S1 32:1 |# ~AH
0 |S0 5:1 |# B |S1 36:1 |# ~B
0 |S0 4:1 |# A |S1 31:1 |# ~AE
0 |S0 7:1 |# D |S1 38:1 |# ~D
0 |S0 12:1 |# I |S1 47:1 |# ~IY
0 |S0 1:1 |# </s> |S1 1:1 |# </s>
2 |S0 60:1 |# <s> |S1 3:1 |# <s>
2 |S0 61:1 |# A |S1 32:1 |# ~AH
'''
    ctf_file = str(tmpdir / '2seqtest.txt')
    with open(ctf_file, 'w') as f:
        f.write(ctf_data)

    mbs = MinibatchSource(CTFDeserializer(ctf_file, StreamDefs(
        features=StreamDef(field='S0', shape=input_vocab_dim, is_sparse=True),
        labels=StreamDef(field='S1', shape=label_vocab_dim, is_sparse=True))),
        randomize=False, max_samples=2)

    raw_input = sequence.input(shape=input_vocab_dim,
                               sequence_axis=Axis('inputAxis'),
                               name='raw_input', is_sparse=True)

    mb_valid = mbs.next_minibatch(minibatch_size_in_samples=100,
                                  input_map={raw_input: mbs.streams.features},
                                  device=cntk_device(device_id))

    z = times(raw_input, np.eye(input_vocab_dim))
    e_reader = z.eval(mb_valid, device=cntk_device(device_id))

    # CSR with the raw_input encoding in ctf_data
    one_hot_data = [[3, 4, 5, 4, 7, 12, 1], [60, 61]]
    data = [csr(np.eye(input_vocab_dim, dtype=np.float32)[d]) for d in one_hot_data]
    e_csr = z.eval({raw_input: data}, device=cntk_device(device_id))
    assert np.all([np.allclose(a, b) for a, b in zip(e_reader, e_csr)])

    # One-hot with the raw_input encoding in ctf_data
    data = Value.one_hot(one_hot_data, num_classes=input_vocab_dim,
                         device=cntk_device(device_id))
    e_hot = z.eval({raw_input: data}, device=cntk_device(device_id))
    assert np.all([np.allclose(a, b) for a, b in zip(e_reader, e_hot)])
def test_htk_deserializers():
    mbsize = 640
    epoch_size = 1000 * mbsize
    lr = [0.001]

    feature_dim = 33
    num_classes = 132
    context = 2

    os.chdir(data_path)

    features_file = "glob_0000.scp"
    labels_file = "glob_0000.mlf"
    label_mapping_file = "state.list"

    fd = HTKFeatureDeserializer(StreamDefs(
        amazing_features=StreamDef(shape=feature_dim,
                                   context=(context, context),
                                   scp=features_file)))

    ld = HTKMLFDeserializer(label_mapping_file, StreamDefs(
        awesome_labels=StreamDef(shape=num_classes, mlf=labels_file)))

    reader = MinibatchSource([fd, ld])

    features = C.input_variable(((2 * context + 1) * feature_dim))
    labels = C.input_variable((num_classes))

    model = Sequential([For(range(3), lambda: Recurrence(LSTM(256))),
                        Dense(num_classes)])
    z = model(features)
    ce = C.cross_entropy_with_softmax(z, labels)
    errs = C.classification_error(z, labels)

    learner = C.adam_sgd(z.parameters,
                         lr=C.learning_rate_schedule(lr, C.UnitType.sample, epoch_size),
                         momentum=C.momentum_as_time_constant_schedule(1000),
                         low_memory=True,
                         gradient_clipping_threshold_per_sample=15,
                         gradient_clipping_with_truncation=True)
    trainer = C.Trainer(z, (ce, errs), learner)

    input_map = {
        features: reader.streams.amazing_features,
        labels: reader.streams.awesome_labels
    }

    pp = C.ProgressPrinter(freq=0)
    # just run and verify it doesn't crash
    for i in range(3):
        mb_data = reader.next_minibatch(mbsize, input_map=input_map)
        trainer.train_minibatch(mb_data)
        pp.update_with_trainer(trainer, with_metric=True)
    assert True
    os.chdir(abs_path)
def test_eval_sparse_dense(tmpdir, device_id):
    from cntk import Axis
    from cntk.io import MinibatchSource, CTFDeserializer, StreamDef, StreamDefs
    from cntk.device import cpu, gpu, set_default_device
    from cntk.ops import input_variable, times
    from scipy.sparse import csr_matrix

    input_vocab_dim = label_vocab_dim = 69

    ctf_data = '''\
0 |S0 3:1 |# <s> |S1 3:1 |# <s>
0 |S0 4:1 |# A |S1 32:1 |# ~AH
0 |S0 5:1 |# B |S1 36:1 |# ~B
0 |S0 4:1 |# A |S1 31:1 |# ~AE
0 |S0 7:1 |# D |S1 38:1 |# ~D
0 |S0 12:1 |# I |S1 47:1 |# ~IY
0 |S0 1:1 |# </s> |S1 1:1 |# </s>
2 |S0 60:1 |# <s> |S1 3:1 |# <s>
2 |S0 61:1 |# A |S1 32:1 |# ~AH
'''
    ctf_file = str(tmpdir/'2seqtest.txt')
    with open(ctf_file, 'w') as f:
        f.write(ctf_data)

    mbs = MinibatchSource(CTFDeserializer(ctf_file, StreamDefs(
        features=StreamDef(field='S0', shape=input_vocab_dim, is_sparse=True),
        labels=StreamDef(field='S1', shape=label_vocab_dim, is_sparse=True)
    )), randomize=False, epoch_size=2)

    batch_axis = Axis.default_batch_axis()
    input_seq_axis = Axis('inputAxis')
    label_seq_axis = Axis('labelAxis')

    input_dynamic_axes = [batch_axis, input_seq_axis]
    raw_input = input_variable(
        shape=input_vocab_dim, dynamic_axes=input_dynamic_axes,
        name='raw_input', is_sparse=True)

    mb_valid = mbs.next_minibatch(minibatch_size_in_samples=100,
                                  input_map={raw_input: mbs.streams.features})

    z = times(raw_input, np.eye(input_vocab_dim))
    e_reader = z.eval(mb_valid)

    # CSR with the raw_input encoding in ctf_data
    one_hot_data = [
        [3, 4, 5, 4, 7, 12, 1],
        [60, 61]
    ]
    data = [csr_matrix(np.eye(input_vocab_dim, dtype=np.float32)[d])
            for d in one_hot_data]
    e_csr = z.eval({raw_input: data}, device=cntk_device(device_id))
    assert np.all([np.allclose(a, b) for a, b in zip(e_reader, e_csr)])

    # One-hot with the raw_input encoding in ctf_data
    data = one_hot(one_hot_data, num_classes=input_vocab_dim)
    e_hot = z.eval({raw_input: data}, device=cntk_device(device_id))
    assert np.all([np.allclose(a, b) for a, b in zip(e_reader, e_hot)])
def test_minibatch(tmpdir):

    mbdata = r'''0 |S0 0 |S1 0
0 |S0 1 |S1 1
0 |S0 2
0 |S0 3 |S1 3
1 |S0 4
1 |S0 5 |S1 1
1 |S0 6 |S1 2
'''
    tmpfile = str(tmpdir/'mbtest.txt')
    with open(tmpfile, 'w') as f:
        f.write(mbdata)

    from cntk.io import CTFDeserializer, MinibatchSource, StreamDef, StreamDefs
    mb_source = MinibatchSource(CTFDeserializer(tmpfile, StreamDefs(
        features=StreamDef(field='S0', shape=1),
        labels=StreamDef(field='S1', shape=1))))

    features_si = mb_source.stream_info('features')
    labels_si = mb_source.stream_info('labels')

    mb = mb_source.next_minibatch(1000)

    assert mb[features_si].num_sequences == 2
    assert mb[labels_si].num_sequences == 2

    features = mb[features_si]
    assert len(features.value) == 2
    expected_features = [
        [[0], [1], [2], [3]],
        [[4], [5], [6]]
    ]

    for res, exp in zip(features.value, expected_features):
        assert np.allclose(res, exp)

    assert np.allclose(features.mask,
                       [[2, 1, 1, 1],
                        [2, 1, 1, 0]])

    labels = mb[labels_si]
    assert len(labels.value) == 2
    expected_labels = [
        [[0], [1], [3]],
        [[1], [2]]
    ]
    for res, exp in zip(labels.value, expected_labels):
        assert np.allclose(res, exp)

    assert np.allclose(labels.mask,
                       [[2, 1, 1],
                        [2, 1, 0]])
def test_user_deserializer_sequence_mode():
    import scipy.sparse as sp
    streams = [StreamInformation('x', 0, 'dense', np.float32, (2, 3)),
               StreamInformation('y', 1, 'sparse', np.float32, (3,))]

    def run_minibatch_source(minibatch_source, num_chunks, num_sequences_per_value):
        sequence_x_values = np.zeros(num_chunks, dtype=np.int32)
        sequence_y_values = np.zeros(num_chunks, dtype=np.int32)
        mb_count = 0
        while True:
            if mb_count % 10 == 1:
                # perform checkpointing
                checkpoint_state = minibatch_source.get_checkpoint_state()
                for i in range(3):
                    minibatch_source.next_minibatch(20)
                minibatch_source.restore_from_checkpoint(checkpoint_state)
                mb_count += 1
                continue

            mb = minibatch_source.next_minibatch(20)
            mb_count += 1
            if not mb:
                break

            for sequence in mb[minibatch_source.streams.x].asarray():
                sequence_x_values[int(sequence[0][0][0])] += 1

            for sequence in mb[minibatch_source.streams.y].as_sequences(C.sequence.input_variable((3,), True)):
                sequence_y_values[int(sequence.toarray()[0][0])] += 1
            mb = None

        expected_values = np.full(num_chunks, fill_value=num_sequences_per_value, dtype=np.int32)
        assert (sequence_x_values == expected_values).all()
        assert (sequence_y_values == expected_values).all()

    # Big chunks
    d = GenDeserializer(stream_infos=streams, num_chunks=15,
                        num_sequences=100, max_sequence_len=10)
    mbs = MinibatchSource([d], randomize=False, max_sweeps=2)
    state = mbs.get_checkpoint_state()
    mbs.restore_from_checkpoint(state)
    run_minibatch_source(mbs, num_chunks=15, num_sequences_per_value=200)
    # Randomized
    mbs = MinibatchSource([d], randomize=True, max_sweeps=2, randomization_window_in_chunks=5)
    state = mbs.get_checkpoint_state()
    mbs.restore_from_checkpoint(state)
    run_minibatch_source(mbs, num_chunks=15, num_sequences_per_value=200)

    # Small chunks of 1
    d = GenDeserializer(stream_infos=streams, num_chunks=15,
                        num_sequences=1, max_sequence_len=10)
    mbs = MinibatchSource([d], randomize=False, max_sweeps=3)
    run_minibatch_source(mbs, num_chunks=15, num_sequences_per_value=3)
    # Randomized
    mbs = MinibatchSource([d], randomize=True, max_sweeps=3, randomization_window_in_chunks=5)
    run_minibatch_source(mbs, num_chunks=15, num_sequences_per_value=3)
def train():
    global sentences, vocabulary, reverse_vocabulary
    # This function creates the trainer and trains it for the specified number of epochs.

    # Print loss 50 times while training
    print_frequency = 50
    pp = ProgressPrinter(print_frequency)

    # get the trainer
    word_one_hot, context_one_hots, negative_one_hots, targets, trainer, word_negative_context_product, embedding_layer = create_trainer()

    # Create a CTF reader which reads the sparse inputs
    print("reader started")
    reader = CTFDeserializer(G.CTF_input_file)
    reader.map_input(G.word_input_field, dim=G.embedding_vocab_size, format="sparse")
    # context inputs
    for i in range(context_size):
        reader.map_input(G.context_input_field.format(i), dim=G.embedding_vocab_size, format="sparse")
    # negative inputs
    for i in range(G.negative):
        reader.map_input(G.negative_input_field.format(i), dim=G.embedding_vocab_size, format="sparse")
    # targets
    reader.map_input(G.target_input_field, dim=(G.negative + 1), format="dense")
    print("reader done")

    # Get minibatch source from reader
    is_training = True
    minibatch_source = MinibatchSource(reader, randomize=is_training,
                                       epoch_size=INFINITELY_REPEAT if is_training else FULL_DATA_SWEEP)
    minibatch_source.streams[targets] = minibatch_source.streams[G.target_input_field]
    del minibatch_source.streams[G.target_input_field]
    print("minibatch source done")

    total_minibatches = total_training_instances // G.minibatch_size
    print("training started")
    print("Total minibatches to train =", total_minibatches)
    for i in range(total_minibatches):
        # Collect minibatch
        # start_batch_collection = time.time()
        mb = minibatch_source.next_minibatch(G.minibatch_size, input_map=minibatch_source.streams)
        # end_batch_collection = time.time()
        # print("Batch collection time = %.6fsecs" % (end_batch_collection - start_batch_collection))
        # print("Time taken to collect one training_instance = %.6fsecs" % ((end_batch_collection - start_batch_collection)/G.minibatch_size))

        # Train minibatch
        # start_train = time.time()
        trainer.train_minibatch(mb)
        # end_train = time.time()
        # print("minibatch train time = %.6fsecs" % (end_train - start_train))
        # print("Time per training instance = %.6fsecs" % ((end_train - start_train)/G.minibatch_size))

        # Update progress printer
        pp.update_with_trainer(trainer)
        # start_batch_collection = time.time()

    print("Total training instances =", total_training_instances)
    return word_negative_context_product
def test_user_deserializer_sample_mode():
    import scipy.sparse as sp
    streams = [StreamInformation('x', 0, 'dense', np.float32, (2, 3)),
               StreamInformation('y', 1, 'sparse', np.float32, (1, 3))]

    def run_minibatch_source(minibatch_source, num_chunks, num_samples_per_value):
        sample_x_values = np.zeros(num_chunks, dtype=np.int32)
        sample_y_values = np.zeros(num_chunks, dtype=np.int32)
        mb_count = 0
        while True:
            if mb_count % 10 == 1:
                # perform checkpointing
                checkpoint_state = minibatch_source.get_checkpoint_state()
                for i in range(3):
                    minibatch_source.next_minibatch(20)
                minibatch_source.restore_from_checkpoint(checkpoint_state)
                mb_count += 1
                continue

            mb = minibatch_source.next_minibatch(20)
            mb_count += 1
            if not mb:
                break

            for sequence in mb[minibatch_source.streams.x].asarray():
                for sample in sequence:
                    value = int(sample[0][0])
                    sample_x_values[value] += 1

            for sequence in mb[minibatch_source.streams.y].asarray():
                for sample in sequence:
                    value = int(sample[0][0])
                    sample_y_values[value] += 1
            mb = None

        expected_values = np.full(num_chunks, fill_value=num_samples_per_value, dtype=np.int32)
        assert (sample_x_values == expected_values).all()
        assert (sample_y_values == expected_values).all()

    # Big chunks
    d = GenDeserializer(stream_infos=streams, num_chunks=20, num_sequences=100)
    mbs = MinibatchSource([d], randomize=False, max_sweeps=2)
    run_minibatch_source(mbs, num_chunks=20, num_samples_per_value=200)
    # Randomized
    mbs = MinibatchSource([d], randomize=True, max_sweeps=2, randomization_window_in_chunks=5)
    run_minibatch_source(mbs, num_chunks=20, num_samples_per_value=200)

    # Small chunks of 1
    d = GenDeserializer(stream_infos=streams, num_chunks=20, num_sequences=1)
    mbs = MinibatchSource([d], randomize=False, max_sweeps=3)
    run_minibatch_source(mbs, num_chunks=20, num_samples_per_value=3)
    # Randomized
    mbs = MinibatchSource([d], randomize=True, max_sweeps=3, randomization_window_in_chunks=5)
    run_minibatch_source(mbs, num_chunks=20, num_samples_per_value=3)
def test_max_samples(tmpdir):
    mb_source = MinibatchSource(
        create_ctf_deserializer(tmpdir), max_samples=1)

    input_map = {'features': mb_source['features']}
    mb = mb_source.next_minibatch(10, input_map)

    assert 'features' in mb
    assert mb['features'].num_samples == 1
    assert not mb['features'].end_of_sweep

    mb = mb_source.next_minibatch(10, input_map)

    assert not mb
def test_text_format(tmpdir):
    from cntk.io import CTFDeserializer, MinibatchSource, StreamDef, StreamDefs

    mbdata = r'''0 |x 560:1 |y 1 0 0 0 0
0 |x 0:1
0 |x 0:1
1 |x 560:1 |y 0 1 0 0 0
1 |x 0:1
1 |x 0:1
1 |x 424:1
'''
    tmpfile = str(tmpdir/'mbdata.txt')
    with open(tmpfile, 'w') as f:
        f.write(mbdata)

    input_dim = 1000
    num_output_classes = 5

    mb_source = MinibatchSource(CTFDeserializer(tmpfile, StreamDefs(
        features=StreamDef(field='x', shape=input_dim, is_sparse=True),
        labels=StreamDef(field='y', shape=num_output_classes, is_sparse=False)
    )))
    assert isinstance(mb_source, MinibatchSource)

    features_si = mb_source.stream_info('features')
    labels_si = mb_source.stream_info('labels')

    mb = mb_source.next_minibatch(7)

    features = mb[features_si]
    # 2 samples, max seq len 4, 1000 dim
    assert features.shape == (2, 4, input_dim)
    assert features.is_sparse
    # TODO features is sparse and cannot be accessed right now:
    # *** RuntimeError: DataBuffer/WritableDataBuffer methods can only be called for NDArrayView objects with dense storage format
    # 2 samples, max seq len 4, 1000 dim
    # assert features.data().shape().dimensions() == (2, 4, input_dim)
    # assert features.data().is_sparse()

    labels = mb[labels_si]
    # 2 samples, max seq len 1, 5 dim
    assert labels.shape == (2, 1, num_output_classes)
    assert not labels.is_sparse

    label_data = np.asarray(labels)
    assert np.allclose(label_data, np.asarray([
        [[1., 0., 0., 0., 0.]],
        [[0., 1., 0., 0., 0.]]
    ]))
def test_prefetch_with_unpacking(tmpdir):
    data = r'''0 |S0 1 1 1 1 |S1 1000
1 |S0 2 2 2 2 |S1 100
2 |S0 3 3 3 3 |S1 100
3 |S0 1 1 1 1 |S1 10
4 |S0 2 2 2 2 |S1 1
5 |S0 3 3 3 3 |S1 2000
6 |S0 1 1 1 1 |S1 200
7 |S0 2 2 2 2 |S1 200
8 |S0 3 3 3 3 |S1 20
9 |S0 1 1 1 1 |S1 2
'''
    import time
    tmpfile = _write_data(tmpdir, data)

    input_dim = 4
    num_output_classes = 1

    mb_source = MinibatchSource(CTFDeserializer(tmpfile, StreamDefs(
        features=StreamDef(field='S0', shape=input_dim, is_sparse=False),
        labels=StreamDef(field='S1', shape=num_output_classes, is_sparse=False)
    )), randomize=False, max_samples=FULL_DATA_SWEEP)

    input_map = {
        'S0': mb_source.streams.features,
        'S1': mb_source.streams.labels
    }

    empty = False
    mb_size = 3
    # On the last minibatch there will be resize called,
    # due to 10 % 3 = 1 sample in the minibatch
    while not empty:
        mb = mb_source.next_minibatch(mb_size, input_map=input_map)
        time.sleep(1)  # make sure the prefetch kicks in
        if mb:
            # Force unpacking to check that we do
            # not break prefetch
            actual_size = mb['S0'].shape[0]
            assert (mb['S0'].asarray() == np.array([[[1, 1, 1, 1]],
                                                    [[2, 2, 2, 2]],
                                                    [[3, 3, 3, 3]]],
                                                   dtype=np.float32)[0:actual_size]).all()
        else:
            empty = True
def test_text_format(tmpdir):
    tmpfile = _write_data(tmpdir, MBDATA_SPARSE)

    input_dim = 1000
    num_output_classes = 5

    mb_source = MinibatchSource(CTFDeserializer(tmpfile, StreamDefs(
        features=StreamDef(field='x', shape=input_dim, is_sparse=True),
        labels=StreamDef(field='y', shape=num_output_classes, is_sparse=False)
    )), randomize=False)
    assert isinstance(mb_source, MinibatchSource)

    features_si = mb_source.stream_info('features')
    labels_si = mb_source.stream_info('labels')

    mb = mb_source.next_minibatch(7)

    features = mb[features_si]
    # 2 samples, max seq len 4, 1000 dim
    assert features.shape == (2, 4, input_dim)
    assert features.end_of_sweep
    assert features.num_sequences == 2
    assert features.num_samples == 7
    assert features.is_sparse

    labels = mb[labels_si]
    # 2 samples, max seq len 1, 5 dim
    assert labels.shape == (2, 1, num_output_classes)
    assert labels.end_of_sweep
    assert labels.num_sequences == 2
    assert labels.num_samples == 2
    assert not labels.is_sparse

    label_data = labels.asarray()
    assert np.allclose(label_data, np.asarray([
        [[1., 0., 0., 0., 0.]],
        [[0., 1., 0., 0., 0.]]
    ]))

    mb = mb_source.next_minibatch(1)
    features = mb[features_si]
    labels = mb[labels_si]

    assert not features.end_of_sweep
    assert not labels.end_of_sweep
    assert features.num_samples < 7
    assert labels.num_samples == 1
def test_base64_is_equal_image(tmpdir):
    import io, base64
    from PIL import Image
    np.random.seed(1)

    file_mapping_path = str(tmpdir / 'file_mapping.txt')
    base64_mapping_path = str(tmpdir / 'base64_mapping.txt')

    with open(file_mapping_path, 'w') as file_mapping:
        with open(base64_mapping_path, 'w') as base64_mapping:
            for i in range(10):
                data = np.random.randint(0, 2**8, (5, 7, 3))
                image = Image.fromarray(data.astype('uint8'), "RGB")
                buf = io.BytesIO()
                image.save(buf, format='PNG')
                assert image.width == 7 and image.height == 5

                label = str(i)

                # save to base 64 mapping file
                encoded = base64.b64encode(buf.getvalue()).decode('ascii')
                base64_mapping.write('%s\t%s\n' % (label, encoded))

                # save to mapping + png file
                file_name = label + '.png'
                with open(str(tmpdir / file_name), 'wb') as f:
                    f.write(buf.getvalue())
                file_mapping.write('.../%s\t%s\n' % (file_name, label))

    transforms = [xforms.scale(width=7, height=5, channels=3)]
    b64_deserializer = Base64ImageDeserializer(
        base64_mapping_path,
        StreamDefs(images1=StreamDef(field='image', transforms=transforms),
                   labels1=StreamDef(field='label', shape=10)))

    file_image_deserializer = ImageDeserializer(
        file_mapping_path,
        StreamDefs(images2=StreamDef(field='image', transforms=transforms),
                   labels2=StreamDef(field='label', shape=10)))

    mb_source = MinibatchSource([b64_deserializer, file_image_deserializer])
    for j in range(20):
        mb = mb_source.next_minibatch(1)

        images1_stream = mb_source.streams['images1']
        images1 = mb[images1_stream].asarray()
        images2_stream = mb_source.streams['images2']
        images2 = mb[images2_stream].asarray()
        assert (images1 == images2).all()
def test_crop_dimensionality(tmpdir):
    import io
    from PIL import Image
    np.random.seed(1)

    file_mapping_path = str(tmpdir / 'file_mapping.txt')
    with open(file_mapping_path, 'w') as file_mapping:
        for i in range(5):
            data = np.random.randint(0, 2**8, (20, 40, 3))
            image = Image.fromarray(data.astype('uint8'), "RGB")
            buf = io.BytesIO()
            image.save(buf, format='PNG')
            assert image.width == 40 and image.height == 20

            label = str(i)
            # save to mapping + png file
            file_name = label + '.png'
            with open(str(tmpdir/file_name), 'wb') as f:
                f.write(buf.getvalue())
            file_mapping.write('.../%s\t%s\n' % (file_name, label))

    transforms1 = [
        xforms.scale(width=40, height=20, channels=3),
        xforms.crop(crop_type='randomside',
                    crop_size=(20, 10), side_ratio=(0.2, 0.5),
                    jitter_type='uniratio')]

    transforms2 = [
        xforms.crop(crop_type='randomside',
                    crop_size=(20, 10), side_ratio=(0.2, 0.5),
                    jitter_type='uniratio')]

    d1 = ImageDeserializer(file_mapping_path, StreamDefs(
        images1=StreamDef(field='image', transforms=transforms1),
        labels1=StreamDef(field='label', shape=10)))

    d2 = ImageDeserializer(file_mapping_path, StreamDefs(
        images2=StreamDef(field='image', transforms=transforms2),
        labels2=StreamDef(field='label', shape=10)))

    mbs = MinibatchSource([d1, d2])
    for j in range(5):
        mb = mbs.next_minibatch(1)
        images1 = mb[mbs.streams.images1].asarray()
        images2 = mb[mbs.streams.images2].asarray()
        assert images1.shape == (1, 1, 3, 10, 20)
        assert (images1 == images2).all()
def create_reader(path, vocab_dim, entity_dim, randomize, rand_size=DEFAULT_RANDOMIZATION_WINDOW, size=INFINITELY_REPEAT):
    """
    Create the data reader for the model

    Args:
        path: The data path
        vocab_dim: The dimension of the vocabulary
        entity_dim: The dimension of entities
        randomize: Whether to shuffle the data before feeding it to the trainer
    """
    return MinibatchSource(CTFDeserializer(path, StreamDefs(
        context=StreamDef(field='C', shape=vocab_dim, is_sparse=True),
        query=StreamDef(field='Q', shape=vocab_dim, is_sparse=True),
        entities=StreamDef(field='E', shape=1, is_sparse=False),
        label=StreamDef(field='L', shape=1, is_sparse=False),
        entity_ids=StreamDef(field='EID', shape=entity_dim, is_sparse=True))),
        randomize=randomize)
def create_reader(map_file, mean_file, train, distributed_communicator=None):
    if not os.path.exists(map_file) or not os.path.exists(mean_file):
        cifar_py3 = "" if sys.version_info.major < 3 else "_py3"
        raise RuntimeError("File '%s' or '%s' does not exist. "
                           "Please run CifarDownload%s.py and CifarConverter%s.py from CIFAR-10 to fetch them" %
                           (map_file, mean_file, cifar_py3, cifar_py3))

    # transformation pipeline for the features has jitter/crop only when training
    transforms = []
    if train:
        transforms += [
            ImageDeserializer.crop(crop_type='Random', ratio=0.8, jitter_type='uniRatio')  # train uses jitter
        ]
    transforms += [
        ImageDeserializer.scale(width=image_width, height=image_height, channels=num_channels, interpolations='linear'),
        ImageDeserializer.mean(mean_file)
    ]
    # deserializer
    return MinibatchSource(
        ImageDeserializer(map_file, StreamDefs(
            features=StreamDef(field='image', transforms=transforms),  # first column in map file is referred to as 'image'
            labels=StreamDef(field='label', shape=num_classes))),      # and second as 'label'
        distributed_communicator=distributed_communicator)
def create_mb_source(image_height, image_width, num_channels, map_file):
    transforms = [ImageDeserializer.scale(width=image_width, height=image_height,
                                          channels=num_channels, interpolations='linear')]
    image_source = ImageDeserializer(map_file)
    image_source.ignore_labels()
    image_source.map_features('features', transforms)
    return MinibatchSource(image_source, randomize=False)
def create_reader(map_file, mean_file, train, image_height=64, image_width=64, num_channels=3, num_classes=32):
    # transformation pipeline for the features has jitter/crop only when training
    # https://docs.microsoft.com/en-us/python/api/cntk.io.transforms?view=cntk-py-2.2
    trs = []
    if train:
        trs += [
            transforms.crop(crop_type='randomside', side_ratio=0, jitter_type='none')  # Horizontal flip enabled
        ]
    trs += [
        transforms.scale(width=image_width, height=image_height, channels=num_channels, interpolations='linear'),
        transforms.mean(mean_file)
    ]
    # deserializer
    image_source = ImageDeserializer(map_file, StreamDefs(
        features=StreamDef(field='image', transforms=trs),  # first column in map file is referred to as 'image'
        labels=StreamDef(field='label', shape=num_classes)  # and second as 'label'
    ))
    return MinibatchSource(image_source)
def create_mb_source(data_set, img_height, img_width, n_classes, n_rois, data_path, randomize):
    # set paths
    map_file = join(data_path, data_set + '.txt')
    roi_file = join(data_path, data_set + '.rois.txt')
    label_file = join(data_path, data_set + '.roilabels.txt')
    if not os.path.exists(map_file) or not os.path.exists(roi_file) or not os.path.exists(label_file):
        raise RuntimeError("File '%s', '%s' or '%s' does not exist. " % (map_file, roi_file, label_file))

    # read images
    nrImages = len(readTable(map_file))
    transforms = [scale(width=img_width, height=img_height, channels=3,
                        scale_mode="pad", pad_value=114, interpolations='linear')]
    image_source = ImageDeserializer(map_file, StreamDefs(
        features=StreamDef(field='image', transforms=transforms)))

    # read rois and labels
    rois_dim = 4 * n_rois
    label_dim = n_classes * n_rois
    roi_source = CTFDeserializer(roi_file, StreamDefs(
        rois=StreamDef(field='rois', shape=rois_dim, is_sparse=False)))
    label_source = CTFDeserializer(label_file, StreamDefs(
        roiLabels=StreamDef(field='roiLabels', shape=label_dim, is_sparse=False)))

    # define a composite reader
    mb = MinibatchSource([image_source, roi_source, label_source],
                         epoch_size=sys.maxsize, randomize=randomize)
    return (mb, nrImages)
def create_video_mb_source(map_files, num_channels, image_height, image_width, num_classes):
    transforms = [
        xforms.crop(crop_type='center', crop_size=224),
        xforms.scale(width=image_width, height=image_height,
                     channels=num_channels, interpolations='linear')
    ]

    map_files = sorted(map_files, key=lambda x: int(x.split('Map_')[1].split('.')[0]))
    print(map_files)

    # Create multiple image sources
    sources = []
    for i, map_file in enumerate(map_files):
        streams = {
            "feature" + str(i): StreamDef(field='image', transforms=transforms),
            "label" + str(i): StreamDef(field='label', shape=num_classes)
        }
        sources.append(ImageDeserializer(map_file, StreamDefs(**streams)))

    return MinibatchSource(sources, max_sweeps=1, randomize=False)
def test_multiple_streams_in_htk():
    feature_dim = 33
    context = 2

    os.chdir(data_path)

    features_file = "glob_0000.scp"

    fd = HTKFeatureDeserializer(StreamDefs(
        amazing_features=StreamDef(shape=feature_dim, context=(context, context), scp=features_file),
        amazing_features2=StreamDef(shape=feature_dim, context=(context, context), scp=features_file)))

    mbs = MinibatchSource([fd])
    mb = mbs.next_minibatch(1)
    assert (mb[mbs.streams.amazing_features].asarray() ==
            mb[mbs.streams.amazing_features2].asarray()).all()
    os.chdir(abs_path)
def create_reader(map_file, train, dimensions, classes, total_number_of_samples):
    print(f"Reading map file: {map_file} with number of samples {total_number_of_samples}")

    # transformation pipeline for the features has jitter/crop only when training
    transforms = []
    # finalize_network uses data augmentation (translation only)
    if train:
        transforms += [
            xforms.crop(crop_type='randomside', area_ratio=(0.08, 1.0),
                        aspect_ratio=(0.75, 1.3333), jitter_type='uniratio'),
            xforms.color(brightness_radius=0.4, contrast_radius=0.4, saturation_radius=0.4)
        ]
    transforms += [
        xforms.scale(width=dimensions['width'], height=dimensions['height'],
                     channels=dimensions['depth'], interpolations='linear')
    ]

    source = MinibatchSource(
        ImageDeserializer(map_file, StreamDefs(
            features=StreamDef(field='image', transforms=transforms),
            labels=StreamDef(field='label', shape=len(classes)))),
        randomize=train,
        max_samples=total_number_of_samples,
        multithreaded_deserializer=True)
    return source
def create_mb_source(img_height, img_width, img_channels, n_classes, n_rois, data_path, data_set):
    rois_dim = 4 * n_rois
    label_dim = n_classes * n_rois

    path = os.path.normpath(os.path.join(abs_path, data_path))
    if data_set == 'test':
        map_file = os.path.join(path, test_map_filename)
    else:
        map_file = os.path.join(path, train_map_filename)
    roi_file = os.path.join(path, data_set + rois_filename_postfix)
    label_file = os.path.join(path, data_set + roilabels_filename_postfix)

    if not os.path.exists(map_file) or not os.path.exists(roi_file) or not os.path.exists(label_file):
        raise RuntimeError("File '%s', '%s' or '%s' does not exist. "
                           "Please run install_fastrcnn.py from Examples/Image/Detection/FastRCNN to fetch them" %
                           (map_file, roi_file, label_file))

    # read images
    image_source = ImageDeserializer(map_file)
    image_source.ignore_labels()
    image_source.map_features(features_stream_name,
                              [ImageDeserializer.scale(width=img_width, height=img_height, channels=img_channels,
                                                       scale_mode="pad", pad_value=114, interpolations='linear')])

    # read rois and labels
    roi_source = CTFDeserializer(roi_file)
    roi_source.map_input(roi_stream_name, dim=rois_dim, format="dense")
    label_source = CTFDeserializer(label_file)
    label_source.map_input(label_stream_name, dim=label_dim, format="dense")

    # define a composite reader
    return MinibatchSource([image_source, roi_source, label_source],
                           epoch_size=sys.maxsize,
                           randomize=data_set == "train")
def create_image_mb_source(map_file, is_training, total_number_of_samples):
    if not os.path.exists(map_file):
        raise RuntimeError("File '%s' does not exist." % map_file)

    # transformation pipeline for the features has jitter/crop only when training
    transforms = []
    if is_training:
        transforms += [
            xforms.crop(crop_type='randomside', side_ratio=0.88671875, jitter_type='uniratio')  # train uses jitter
        ]
    else:
        transforms += [
            xforms.crop(crop_type='center', side_ratio=0.88671875)  # test has no jitter
        ]

    transforms += [
        xforms.scale(width=image_width, height=image_height, channels=num_channels, interpolations='linear'),
    ]

    # deserializer
    return MinibatchSource(
        ImageDeserializer(map_file, StreamDefs(
            features=StreamDef(field='image', transforms=transforms),  # first column in map file is referred to as 'image'
            labels=StreamDef(field='label', shape=num_classes))),      # and second as 'label'
        randomize=is_training,
        epoch_size=total_number_of_samples,
        multithreaded_deserializer=True)
def create_reader(map_file, mean_file, train):
    # transformation pipeline for the features has jitter/crop only when training
    trs = []
    # if train:
    #     transforms += [
    #         ImageDeserializer.crop(crop_type='Random', ratio=0.8, jitter_type='uniRatio')  # train uses jitter
    #     ]
    trs += [
        transforms.scale(width=image_width, height=image_height, channels=num_channels, interpolations='linear'),
        transforms.mean(mean_file)
    ]
    # deserializer
    return MinibatchSource(ImageDeserializer(map_file, StreamDefs(
        features=StreamDef(field='image', transforms=trs),  # first column in map file is referred to as 'image'
        labels=StreamDef(field='label', shape=num_classes)  # and second as 'label'
    )))
def create_reader(map_file, mean_file, train):
    if not os.path.exists(map_file) or not os.path.exists(mean_file):
        raise RuntimeError("File '%s' or '%s' does not exist. "
                           "Please run install_cifar10.py from DataSets/CIFAR-10 to fetch them" %
                           (map_file, mean_file))

    # transformation pipeline for the features has jitter/crop only when training
    transforms = []
    if train:
        transforms += [
            xforms.crop(crop_type='randomside', side_ratio=0.8, jitter_type='uniratio')  # train uses jitter
        ]
    transforms += [
        xforms.scale(width=image_width, height=image_height, channels=num_channels, interpolations='linear'),
        xforms.mean(mean_file)
    ]
    # deserializer
    return MinibatchSource(ImageDeserializer(map_file, StreamDefs(
        features=StreamDef(field='image', transforms=transforms),  # first column in map file is referred to as 'image'
        labels=StreamDef(field='label', shape=num_classes))))      # and second as 'label'
def test_create_two_image_deserializers(tmpdir):
    mbdata = r'''filename	0
filename2	0
'''

    map_file = str(tmpdir / 'mbdata.txt')
    with open(map_file, 'w') as f:
        f.write(mbdata)

    image_width = 100
    image_height = 200
    num_channels = 3

    transforms = [
        xforms.crop(crop_type='randomside', side_ratio=0.5, jitter_type='uniratio'),
        xforms.scale(width=image_width, height=image_height, channels=num_channels, interpolations='linear')
    ]

    image1 = ImageDeserializer(map_file, StreamDefs(
        f1=StreamDef(field='image', transforms=transforms)))
    image2 = ImageDeserializer(map_file, StreamDefs(
        f2=StreamDef(field='image', transforms=transforms)))

    mb_source = MinibatchSource([image1, image2])
    assert isinstance(mb_source, MinibatchSource)
def create_image_mb_source(map_file, mean_file, is_training, total_number_of_samples):
    if not os.path.exists(map_file) or not os.path.exists(mean_file):
        raise RuntimeError("File '%s' or '%s' does not exist." % (map_file, mean_file))

    # transformation pipeline for the features has jitter/crop only when training
    transforms = []
    if is_training:
        transforms += [
            xforms.crop(crop_type='randomside', side_ratio=0.8, jitter_type='uniratio')  # train uses jitter
        ]
    else:
        transforms += [
            xforms.crop(crop_type='center', crop_size=IMAGE_WIDTH)
        ]

    transforms += [
        xforms.scale(width=IMAGE_WIDTH, height=IMAGE_HEIGHT, channels=NUM_CHANNELS, interpolations='linear'),
        xforms.mean(mean_file)
    ]

    # deserializer
    return MinibatchSource(
        ImageDeserializer(map_file, StreamDefs(
            features=StreamDef(field='image', transforms=transforms),  # first column in map file is referred to as 'image'
            labels=StreamDef(field='label', shape=NUM_CLASSES))),      # and second as 'label'
        randomize=is_training,
        max_samples=total_number_of_samples,
        multithreaded_deserializer=True)
def create_mb_source(image_height, image_width, num_channels, map_file, mean_file, is_training):
    if not os.path.exists(map_file):
        raise RuntimeError("File '%s' does not exist." % (map_file))

    # transformation pipeline for the features has jitter/crop only when training
    transforms = []
    if is_training:
        transforms += [
            xforms.crop(crop_type='randomside', side_ratio=0.875, jitter_type='uniratio')  # train uses jitter
        ]
    else:
        transforms += [
            xforms.crop(crop_type='center', side_ratio=0.875)  # test has no jitter
        ]

    transforms += [
        xforms.scale(width=image_width, height=image_height, channels=num_channels, interpolations='linear'),
    ]

    if mean_file != '':
        transforms += [
            xforms.mean(mean_file),
        ]

    # deserializer
    return MinibatchSource(
        ImageDeserializer(map_file, StreamDefs(
            features=StreamDef(field='image', transforms=transforms)  # first column in map file is referred to as 'image'
        )),
        randomize=is_training,
        multithreaded_deserializer=True,
        max_sweeps=1)
def create_reader(path, randomize, input_vocab_dim, label_vocab_dim, size=INFINITELY_REPEAT):
    if not os.path.exists(path):
        raise RuntimeError("File '%s' does not exist." % (path))

    return MinibatchSource(CTFDeserializer(path, StreamDefs(
        features=StreamDef(field='S0', shape=input_vocab_dim, is_sparse=True),
        labels=StreamDef(field='S1', shape=label_vocab_dim, is_sparse=True)
    )), randomize=randomize, max_samples=size)
def create_reader(path, is_training, input_dim, num_label_classes):
    """ Reads a CNTK-formatted text file with 'labels' and 'features' streams. """
    return MinibatchSource(CTFDeserializer(path, StreamDefs(
        labels=StreamDef(field='labels', shape=num_label_classes),
        features=StreamDef(field='features', shape=input_dim)
    )), randomize=is_training, max_sweeps=INFINITELY_REPEAT if is_training else 1)
def create_reader_raw(path, is_training, input_dim, num_label_classes):
    """ Reads in the unstandardized (raw) values. """
    return MinibatchSource(CTFDeserializer(path, StreamDefs(
        labels=StreamDef(field='rawlabels', shape=num_label_classes),
        features=StreamDef(field='rawfeatures', shape=input_dim)
    )), randomize=is_training, max_sweeps=INFINITELY_REPEAT if is_training else 1)
def create_reader(path, is_training, input_dim, label_dim):
    return MinibatchSource(CTFDeserializer(path, StreamDefs(
        features=StreamDef(field='features', shape=input_dim),
        labels=StreamDef(field='labels', shape=label_dim))),
        randomize=is_training,
        epoch_size=INFINITELY_REPEAT if is_training else FULL_DATA_SWEEP)
def test_full_sweep_minibatch(tmpdir):
    tmpfile = _write_data(tmpdir, MBDATA_DENSE_1)

    mb_source = MinibatchSource(CTFDeserializer(tmpfile, StreamDefs(
        features=StreamDef(field='S0', shape=1),
        labels=StreamDef(field='S1', shape=1))),
        randomization_window_in_chunks=0, max_sweeps=1)

    features_si = mb_source.stream_info('features')
    labels_si = mb_source.stream_info('labels')

    mb = mb_source.next_minibatch(1000)

    assert mb[features_si].num_sequences == 2
    assert mb[labels_si].num_sequences == 2

    features = mb[features_si]
    assert features.end_of_sweep
    assert len(features.as_sequences()) == 2
    expected_features = [
        [[0], [1], [2], [3]],
        [[4], [5], [6]]
    ]

    for res, exp in zip(features.as_sequences(), expected_features):
        assert np.allclose(res, exp)

    assert np.allclose(features.data.mask,
                       [[2, 1, 1, 1],
                        [2, 1, 1, 0]])

    labels = mb[labels_si]
    assert labels.end_of_sweep
    assert len(labels.as_sequences()) == 2
    expected_labels = [
        [[0], [1], [3]],
        [[1], [2]]
    ]
    for res, exp in zip(labels.as_sequences(), expected_labels):
        assert np.allclose(res, exp)

    assert np.allclose(labels.data.mask,
                       [[2, 1, 1],
                        [2, 1, 0]])
def test_mlf_binary_files():
    os.chdir(data_path)

    feature_dim = 33
    num_classes = 132
    context = 2

    features_file = "glob_0000.scp"

    fd = HTKFeatureDeserializer(StreamDefs(
        amazing_features=StreamDef(shape=feature_dim, context=(context, context), scp=features_file)))

    ld = HTKMLFBinaryDeserializer(StreamDefs(
        awesome_labels=StreamDef(shape=num_classes, mlf=e2e_data_path + "mlf2.bin")))

    # Make sure we can read at least one minibatch.
    mbsource = MinibatchSource([fd, ld])
    mbsource.next_minibatch(1)

    os.chdir(abs_path)
def test_max_samples_over_several_sweeps(tmpdir):
    mb_source = MinibatchSource(
        create_ctf_deserializer(tmpdir), max_samples=11)

    input_map = {'features': mb_source['features']}

    for i in range(2):
        mb = mb_source.next_minibatch(5, input_map)

        assert 'features' in mb
        assert mb['features'].num_samples == 5
        assert mb['features'].end_of_sweep

    mb = mb_source.next_minibatch(5, input_map)

    assert 'features' in mb
    assert mb['features'].num_samples == 1
    assert not mb['features'].end_of_sweep

    mb = mb_source.next_minibatch(1, input_map)

    assert not mb
def test_max_sweeps(tmpdir):
    # set max sweeps to 3 (12 samples altogether).
    mb_source = MinibatchSource(
        create_ctf_deserializer(tmpdir), max_sweeps=3)

    input_map = {'features': mb_source['features']}

    for i in range(2):
        mb = mb_source.next_minibatch(5, input_map)

        assert 'features' in mb
        assert mb['features'].num_samples == 5
        assert mb['features'].end_of_sweep

    mb = mb_source.next_minibatch(5, input_map)

    assert 'features' in mb
    assert mb['features'].num_samples == 2
    assert mb['features'].end_of_sweep

    mb = mb_source.next_minibatch(1, input_map)

    assert not mb
def test_multiple_mlf_files():
    os.chdir(data_path)

    feature_dim = 33
    num_classes = 132
    context = 2

    test_mlf_path = e2e_data_path + "glob_00001.mlf"

    features_file = "glob_0000.scp"
    label_files = ["glob_0000.mlf", test_mlf_path]
    label_mapping_file = "state.list"

    fd = HTKFeatureDeserializer(StreamDefs(
        amazing_features=StreamDef(shape=feature_dim, context=(context, context), scp=features_file)))

    ld = HTKMLFDeserializer(label_mapping_file, StreamDefs(
        awesome_labels=StreamDef(shape=num_classes, mlf=label_files)))

    # Make sure we can read at least one minibatch.
    mbsource = MinibatchSource([fd, ld])
    mbsource.next_minibatch(1)

    os.chdir(abs_path)
def compare_cbf_and_ctf(num_mbs, mb_size, randomize):
    ctf = MinibatchSource(CTFDeserializer(tmpfile, streams), randomize=randomize)
    cbf = MinibatchSource(CBFDeserializer(tmpfile + '.bin', streams), randomize=randomize)

    ctf_stream_names = sorted([x.m_name for x in ctf.stream_infos()])
    cbf_stream_names = sorted([x.m_name for x in cbf.stream_infos()])

    assert(ctf_stream_names == cbf_stream_names)

    for _ in range(num_mbs):
        ctf_mb = ctf.next_minibatch(mb_size, device=device)
        cbf_mb = cbf.next_minibatch(mb_size, device=device)

        for name in cbf_stream_names:
            ctf_data = ctf_mb[ctf[name]]
            cbf_data = cbf_mb[cbf[name]]

            assert ctf_data.num_samples == cbf_data.num_samples
            assert ctf_data.num_sequences == cbf_data.num_sequences
            assert ctf_data.shape == cbf_data.shape
            assert ctf_data.end_of_sweep == cbf_data.end_of_sweep
            assert ctf_data.is_sparse == cbf_data.is_sparse
            assert ctf_data.data.masked_count() == cbf_data.data.masked_count()

            # XXX:
            # assert(ctf_data.asarray() == cbf_data.asarray()).all()
            # not using asarray because for sparse values it fails with
            # some strange exception "sum of the rank of the mask and Variable
            # rank does not equal the Value's rank".

            assert C.cntk_py.are_equal(ctf_data.data.data, cbf_data.data.data)

            if (ctf_data.data.masked_count() > 0):
                assert (ctf_data.data.mask == cbf_data.data.mask).all()
            # XXX: if mask_count is zero, mb_data.data.mask fails with
            # "AttributeError: 'Value' object has no attribute 'mask'"!

            # XXX: without invoking erase, next_minibatch will fail with:
            # "Resize: Cannot resize the matrix because it is a view."
            ctf_data.data.erase()
            cbf_data.data.erase()
def generate_visualization(use_brain_script_model, testing=False):
    num_objects_to_eval = 5

    if (use_brain_script_model):
        model_file_name = "07_Deconvolution_BS.model"
        encoder_output_file_name = "encoder_output_BS.txt"
        decoder_output_file_name = "decoder_output_BS.txt"
        enc_node_name = "z.pool1"
        input_node_name = "f2"
        output_node_name = "z"
    else:
        model_file_name = "07_Deconvolution_PY.model"
        encoder_output_file_name = "encoder_output_PY.txt"
        decoder_output_file_name = "decoder_output_PY.txt"
        enc_node_name = "pooling_node"
        input_node_name = "input_node"
        output_node_name = "output_node"

    # define location of output, model and data and check existence
    output_path = os.path.join(abs_path, "Output")
    model_file = os.path.join(model_path, model_file_name)
    data_file = os.path.join(data_path, "Test-28x28_cntk_text.txt")
    if not (os.path.exists(model_file) and os.path.exists(data_file)):
        print("Cannot find required data or model. "
              "Please get the MNIST data set and run 'cntk configFile=07_Deconvolution_BS.cntk' "
              "or 'python 07_Deconvolution_PY.py' to create the model.")
        exit(0)

    # create minibatch source
    minibatch_source = MinibatchSource(CTFDeserializer(data_file, StreamDefs(
        features=StreamDef(field='features', shape=(28*28)),
        labels=StreamDef(field='labels', shape=10)
    )), randomize=False, max_sweeps=1)

    # use this to print all node names in the model
    # print_all_node_names(model_file, use_brain_script_model)

    # load model and pick desired nodes as output
    loaded_model = load_model(model_file)
    output_nodes = combine(
        [loaded_model.find_by_name(input_node_name).owner,
         loaded_model.find_by_name(enc_node_name).owner,
         loaded_model.find_by_name(output_node_name).owner])

    # evaluate model and save output
    features_si = minibatch_source['features']
    with open(os.path.join(output_path, decoder_output_file_name), 'wb') as decoder_text_file:
        with open(os.path.join(output_path, encoder_output_file_name), 'wb') as encoder_text_file:
            for i in range(0, num_objects_to_eval):
                mb = minibatch_source.next_minibatch(1)
                raw_dict = output_nodes.eval(mb[features_si])
                output_dict = {}
                for key in raw_dict.keys():
                    output_dict[key.name] = raw_dict[key]

                encoder_input = output_dict[input_node_name]
                encoder_output = output_dict[enc_node_name]
                decoder_output = output_dict[output_node_name]
                in_values = (encoder_input[0, 0].flatten())[np.newaxis]
                enc_values = (encoder_output[0, 0].flatten())[np.newaxis]
                out_values = (decoder_output[0, 0].flatten())[np.newaxis]

                if not testing:
                    # write results as text and png
                    np.savetxt(decoder_text_file, out_values, fmt="%.6f")
                    np.savetxt(encoder_text_file, enc_values, fmt="%.6f")
                    save_as_png(in_values, os.path.join(output_path, "imageAutoEncoder_%s__input.png" % i))
                    save_as_png(out_values, os.path.join(output_path, "imageAutoEncoder_%s_output.png" % i))

                    # visualizing the encoding is only possible and meaningful with a single conv filter
                    enc_dim = 7
                    if (enc_values.size == enc_dim * enc_dim):
                        save_as_png(enc_values, os.path.join(output_path, "imageAutoEncoder_%s_encoding.png" % i), dim=enc_dim)

    print("Done. Wrote output to %s" % output_path)
def test_distributed_mb_source(tmpdir):
    input_dim = 69

    ctf_data = '''\
0 |S0 3:1 |# <s> |S1 3:1 |# <s>
0 |S0 4:1 |# A |S1 32:1 |# ~AH
0 |S0 5:1 |# B |S1 36:1 |# ~B
0 |S0 4:1 |# A |S1 31:1 |# ~AE
0 |S0 7:1 |# D |S1 38:1 |# ~D
0 |S0 12:1 |# I |S1 47:1 |# ~IY
0 |S0 1:1 |# </s> |S1 1:1 |# </s>
2 |S0 60:1 |# <s> |S1 3:1 |# <s>
2 |S0 61:1 |# A |S1 32:1 |# ~AH
2 |S0 61:1 |# A |S1 32:1 |# ~AH
3 |S0 60:1 |# <s> |S1 3:1 |# <s>
3 |S0 61:1 |# A |S1 32:1 |# ~AH
3 |S0 61:1 |# A |S1 32:1 |# ~AH
3 |S0 61:1 |# A |S1 32:1 |# ~AH
4 |S0 60:1 |# <s> |S1 3:1 |# <s>
5 |S0 60:1 |# <s> |S1 3:1 |# <s>
5 |S0 61:1 |# A |S1 32:1 |# ~AH
6 |S0 60:1 |# <s> |S1 3:1 |# <s>
6 |S0 61:1 |# A |S1 32:1 |# ~AH
7 |S0 60:1 |# <s> |S1 3:1 |# <s>
8 |S0 60:1 |# <s> |S1 3:1 |# <s>
8 |S0 61:1 |# A |S1 32:1 |# ~AH
9 |S0 60:1 |# <s> |S1 3:1 |# <s>
9 |S0 61:1 |# A |S1 32:1 |# ~AH
10 |S0 61:1 |# A |S1 32:1 |# ~AH
'''
    from cntk.io import MinibatchSource, CTFDeserializer, StreamDef, StreamDefs, FULL_DATA_SWEEP
    ctf_file = str(tmpdir/'2seqtest.txt')
    with open(ctf_file, 'w') as f:
        f.write(ctf_data)

    # No randomization
    mb0 = MinibatchSource(CTFDeserializer(ctf_file, StreamDefs(
        features=StreamDef(field='S0', shape=input_dim, is_sparse=True),
        labels=StreamDef(field='S1', shape=input_dim, is_sparse=True)
    )), randomize=False, epoch_size=36)  # A bit more than a sweep
    mb1 = MinibatchSource(CTFDeserializer(ctf_file, StreamDefs(
        features=StreamDef(field='S0', shape=input_dim, is_sparse=True),
        labels=StreamDef(field='S1', shape=input_dim, is_sparse=True)
    )), randomize=False, epoch_size=36)  # A bit more than a sweep

    input = input_variable(shape=(input_dim,))
    label = input_variable(shape=(input_dim,))
    input_map = {
        input: mb0.streams.features,
        label: mb0.streams.labels
    }

    # Because we are emulating two workers here, the minibatch_size_in_samples
    # will be split in 2, so below we expect 5 samples per worker.
    data = mb0.next_minibatch(minibatch_size_in_samples=10, input_map=input_map,
                              num_data_partitions=2, partition_index=0)
    assert(data[input].num_samples == 7)  # Sequence 0

    data = mb0.next_minibatch(minibatch_size_in_samples=10, input_map=input_map,
                              num_data_partitions=2, partition_index=0)
    assert(data[input].num_samples == 4)  # Sequence 3

    data = mb0.next_minibatch(minibatch_size_in_samples=10, input_map=input_map,
                              num_data_partitions=2, partition_index=0)
    assert(data[input].num_samples == 5)  # Sequences 5, 7, 9

    data = mb0.next_minibatch(minibatch_size_in_samples=10, input_map=input_map,
                              num_data_partitions=2, partition_index=0)
    assert(data[input].num_samples == 7)  # Sequence 0

    data = mb0.next_minibatch(minibatch_size_in_samples=10, input_map=input_map,
                              num_data_partitions=2, partition_index=0)
    assert(data[input].num_samples == 4)  # Sequence 3

    data = mb0.next_minibatch(minibatch_size_in_samples=10, input_map=input_map,
                              num_data_partitions=2, partition_index=0)
    assert(len(data) == 0)  # No data

    data = mb1.next_minibatch(minibatch_size_in_samples=10, input_map=input_map,
                              num_data_partitions=2, partition_index=1)
    assert(data[input].num_samples == 4)  # Sequences 2, 4

    data = mb1.next_minibatch(minibatch_size_in_samples=10, input_map=input_map,
                              num_data_partitions=2, partition_index=1)
    assert(data[input].num_samples == 5)  # Sequences 6, 8, 10

    data = mb1.next_minibatch(minibatch_size_in_samples=10, input_map=input_map,
                              num_data_partitions=2, partition_index=1)
    assert(data[input].num_samples == 3)  # Sequence 2

    data = mb1.next_minibatch(minibatch_size_in_samples=10, input_map=input_map,
                              num_data_partitions=2, partition_index=1)
    assert(len(data) == 0)  # No data

    # Randomization
    mb3 = MinibatchSource(CTFDeserializer(ctf_file, StreamDefs(
        features=StreamDef(field='S0', shape=input_dim, is_sparse=True),
        labels=StreamDef(field='S1', shape=input_dim, is_sparse=True)
    )), randomize=True, epoch_size=FULL_DATA_SWEEP)
    mb4 = MinibatchSource(CTFDeserializer(ctf_file, StreamDefs(
        features=StreamDef(field='S0', shape=input_dim, is_sparse=True),
        labels=StreamDef(field='S1', shape=input_dim, is_sparse=True)
    )), randomize=True, epoch_size=FULL_DATA_SWEEP)

    data = mb3.next_minibatch(minibatch_size_in_samples=10, input_map=input_map,
                              num_data_partitions=2, partition_index=0)
    assert(data[input].num_samples == 5)
    data = mb3.next_minibatch(minibatch_size_in_samples=10, input_map=input_map,
                              num_data_partitions=2, partition_index=0)
    assert(data[input].num_samples == 4)
    data = mb3.next_minibatch(minibatch_size_in_samples=10, input_map=input_map,
                              num_data_partitions=2, partition_index=0)
    assert(data[input].num_samples == 4)
    data = mb3.next_minibatch(minibatch_size_in_samples=10, input_map=input_map,
                              num_data_partitions=2, partition_index=0)
    assert(data[input].num_samples == 5)
    data = mb3.next_minibatch(minibatch_size_in_samples=10, input_map=input_map,
                              num_data_partitions=2, partition_index=0)
    assert(data[input].num_samples == 7)

    data = mb4.next_minibatch(minibatch_size_in_samples=10, input_map=input_map,
                              num_data_partitions=2, partition_index=1)
    assert(len(data) == 0)  # Due to chunking we do not expect any data for rank 1
def test_sweep_based_schedule(tmpdir, device_id):
    from cntk.io import MinibatchSource, CTFDeserializer, StreamDef, StreamDefs
    from cntk import cross_entropy_with_softmax, classification_error, plus, reduce_sum, sequence
    from cntk import Trainer

    input_dim = 69

    ctf_data = '''\
0 |S0 3:1 |S1 3:1 |# <s>
0 |S0 4:1 |# A |S1 32:1 |# ~AH
0 |S0 5:1 |# B |S1 36:1 |# ~B
0 |S0 4:1 |# A |S1 31:1 |# ~AE
0 |S0 7:1 |# D |S1 38:1 |# ~D
0 |S0 12:1 |# I |S1 47:1 |# ~IY
0 |S0 1:1 |# </s> |S1 1:1 |# </s>
2 |S0 60:1 |# <s> |S1 3:1 |# <s>
2 |S0 61:1 |# A |S1 32:1 |# ~AH
'''
    ctf_file = str(tmpdir/'2seqtest.txt')
    with open(ctf_file, 'w') as f:
        f.write(ctf_data)

    mbs = MinibatchSource(CTFDeserializer(ctf_file, StreamDefs(
        features = StreamDef(field='S0', shape=input_dim, is_sparse=True),
        labels   = StreamDef(field='S1', shape=input_dim, is_sparse=True)
    )), randomize=False)

    in1 = sequence.input_variable(shape=(input_dim,))
    labels = sequence.input_variable(shape=(input_dim,))
    p = parameter(shape=(input_dim,), init=10)
    z = plus(in1, reduce_sum(p), name='z')
    ce = cross_entropy_with_softmax(z, labels)
    errs = classification_error(z, labels)

    lr_per_sample = learning_rate_schedule([0.3, 0.2, 0.1, 0.0], UnitType.sample)
    learner = sgd(z.parameters, lr_per_sample)
    trainer = Trainer(z, (ce, errs), [learner])

    input_map = {
        in1 : mbs.streams.features,
        labels : mbs.streams.labels
    }

    # fetch minibatch (first sequence)
    data = mbs.next_minibatch(1, input_map=input_map)
    trainer.train_minibatch(data)
    assert learner.learning_rate() == 0.3

    # fetch minibatch (second sequence, sweep ends at this point)
    data = mbs.next_minibatch(1, input_map=input_map)
    trainer.train_minibatch(data)
    assert learner.learning_rate() == 0.2

    # fetch minibatch (both sequences -- entire sweep in one go)
    data = mbs.next_minibatch(9, input_map=input_map)
    trainer.train_minibatch(data)
    assert learner.learning_rate() == 0.1

    # fetch minibatch (multiple sweeps)
    data = mbs.next_minibatch(30, input_map=input_map)
    trainer.train_minibatch(data, outputs=[z.output])
    assert learner.learning_rate() == 0.0
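# For comparison, a minimal sketch (not part of the test above) of the same rate list
# keyed to an explicit epoch_size in samples instead of sweep boundaries; the sample
# counts used for indexing are made up for illustration.
def _explicit_epoch_size_schedule_sketch():
    lr = learning_rate_schedule([0.3, 0.2, 0.1, 0.0], UnitType.sample, epoch_size=10)
    # schedule[n] looks up the rate in effect after n samples have been processed
    assert lr[0] == 0.3
    assert lr[10] == 0.2
    assert lr[25] == 0.1
    assert lr[40] == 0.0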
def test_base64_image_deserializer(tmpdir):
    import io, base64, uuid
    from PIL import Image

    images, b64_images = [], []

    np.random.seed(1)
    for i in range(10):
        data = np.random.randint(0, 2**8, (5,7,3))
        image = Image.fromarray(data.astype('uint8'), "RGB")
        buf = io.BytesIO()
        image.save(buf, format='PNG')
        assert image.width == 7 and image.height == 5
        b64_images.append(base64.b64encode(buf.getvalue()))
        images.append(np.array(image))

    image_data = str(tmpdir / 'mbdata1.txt')
    seq_ids = []
    uid = uuid.uuid1().int >> 64
    with open(image_data, 'wb') as f:
        for i, data in enumerate(b64_images):
            seq_id = uid ^ i
            seq_id = str(seq_id).encode('ascii')
            seq_ids.append(seq_id)
            line = seq_id + b'\t'
            label = str(i).encode('ascii')
            line += label + b'\t' + data + b'\n'
            f.write(line)

    ctf_data = str(tmpdir / 'mbdata2.txt')
    with open(ctf_data, 'wb') as f:
        for i, sid in enumerate(seq_ids):
            line = sid + b'\t' + b'|index ' + str(i).encode('ascii') + b'\n'
            f.write(line)

    transforms = [xforms.scale(width=7, height=5, channels=3)]
    b64_deserializer = Base64ImageDeserializer(image_data, StreamDefs(
        images=StreamDef(field='image', transforms=transforms),
        labels=StreamDef(field='label', shape=10)))
    ctf_deserializer = CTFDeserializer(ctf_data, StreamDefs(index=StreamDef(field='index', shape=1)))

    mb_source = MinibatchSource([ctf_deserializer, b64_deserializer])
    assert isinstance(mb_source, MinibatchSource)

    for j in range(100):
        mb = mb_source.next_minibatch(10)

        index_stream = mb_source.streams['index']
        index = mb[index_stream].asarray().flatten()
        image_stream = mb_source.streams['images']
        results = mb[image_stream].asarray()
        for i in range(10):
            # original images are RGB, OpenCV produces BGR images;
            # reverse the last dimension of the original images
            bgrImage = images[int(index[i])][:,:,::-1]
            # transpose to get the CHW representation
            bgrImage = np.transpose(bgrImage, (2, 0, 1))
            assert (bgrImage == results[i][0]).all()
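# Illustrative helper (not referenced by the test): decodes one line of the base64
# image file written above back into a numpy image, documenting the
# "<sequence id>\t<label>\t<base64-encoded PNG>" layout that Base64ImageDeserializer reads.
def _decode_b64_image_line_sketch(line):
    import io, base64
    from PIL import Image
    seq_id, label, b64_png = line.rstrip(b'\n').split(b'\t')
    image = Image.open(io.BytesIO(base64.b64decode(b64_png)))
    return seq_id, int(label), np.array(image)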
def get_minibatch(bmuf, working_dir, mb_source):
    from cntk.io import MinibatchSource, CTFDeserializer, StreamDef, StreamDefs

    if mb_source == "numpy":
        for i in range(num_batches):
            features = []
            labels = []
            for j in range(batch_size):
                seq_len_j = [seq_len, seq_len + 5, seq_len - 5][j % 3]
                x = np.random.rand(seq_len_j, feat_dim).astype(np.float32)
                y = np.random.rand(seq_len_j, label_dim).astype(np.float32)
                features.append(x)
                labels.append(y)
            yield {bmuf.feat: features, bmuf.label: labels}

    if mb_source in ("ctf_utterance", "ctf_frame", "ctf_bptt"):
        if mb_source == "ctf_frame":
            # frame mode data without sequence ids
            ctf_data = '''\
|S0 0.49 0.18 0.84 0.7 0.59 |S1 0.12 0.24 0.14
|S0 0.69 0.63 0.47 0.93 0.69 |S1 0.34 0.85 0.17
|S0 0.04 0.5 0.39 0.86 0.28 |S1 0.62 0.36 0.53
|S0 0.71 0.9 0.15 0.83 0.18 |S1 0.2 0.74 0.04
|S0 0.38 0.67 0.46 0.53 0.75 |S1 0.6 0.14 0.35
|S0 0.94 0.54 0.09 0.55 0.08 |S1 0.07 0.53 0.47
|S0 0.11 0.24 0.17 0.72 0.72 |S1 0.9 0.98 0.18
|S0 0.3 1. 0.34 0.06 0.78 |S1 0.15 0.69 0.63
|S0 0.69 0.86 0.59 0.49 0.99 |S1 0.13 0.6 0.21
'''
        else:
            # sequence mode data with sequence ids
            ctf_data = '''\
0 |S0 0.49 0.18 0.84 0.7 0.59 |S1 0.12 0.24 0.14
0 |S0 0.69 0.63 0.47 0.93 0.69 |S1 0.34 0.85 0.17
0 |S0 0.04 0.5 0.39 0.86 0.28 |S1 0.62 0.36 0.53
0 |S0 0.71 0.9 0.15 0.83 0.18 |S1 0.2 0.74 0.04
0 |S0 0.38 0.67 0.46 0.53 0.75 |S1 0.6 0.14 0.35
0 |S0 0.94 0.54 0.09 0.55 0.08 |S1 0.07 0.53 0.47
0 |S0 0.11 0.24 0.17 0.72 0.72 |S1 0.9 0.98 0.18
2 |S0 0.3 1. 0.34 0.06 0.78 |S1 0.15 0.69 0.63
2 |S0 0.69 0.86 0.59 0.49 0.99 |S1 0.13 0.6 0.21
'''
        ctf_file = os.path.join(working_dir, '2seqtest.txt')
        with open(ctf_file, 'w') as f:
            f.write(ctf_data)

        # ctf_utterance mode is the default: full utterances, no frame mode, no truncation
        frame_mode = False
        truncation_length = 0
        if mb_source == "ctf_frame":
            frame_mode = True
        elif mb_source == "ctf_bptt":
            truncation_length = 2

        mbs = MinibatchSource(CTFDeserializer(ctf_file, StreamDefs(
            features = StreamDef(field='S0', shape=feat_dim, is_sparse=False),
            labels   = StreamDef(field='S1', shape=label_dim, is_sparse=False)
        )), randomize=False, max_samples=batch_size * num_batches,
            frame_mode=frame_mode, truncation_length=truncation_length)

        for i in range(num_batches):
            minibatch = mbs.next_minibatch(batch_size, {bmuf.feat: mbs.streams.features, bmuf.label: mbs.streams.labels})
            if not minibatch:
                break
            yield minibatch
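# Minimal sketch of the feed produced by the "numpy" branch above (assumed shapes):
# each sequence input variable maps to a list of per-sequence arrays, and sequence
# lengths may differ within one batch. The dimensions below are hypothetical.
def _numpy_feed_sketch():
    from cntk import sequence
    x = sequence.input_variable(shape=(5,))
    batch = [np.random.rand(3, 5).astype(np.float32),   # sequence of length 3
             np.random.rand(8, 5).astype(np.float32)]   # sequence of length 8
    return {x: batch}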
def simple_mnist():
    input_dim = 784
    num_output_classes = 10
    num_hidden_layers = 2
    hidden_layers_dim = 200

    # Input variables denoting the features and label data
    feature = C.input_variable(input_dim)
    label = C.input_variable(num_output_classes)

    # Instantiate the feedforward classification model
    scaled_input = element_times(constant(0.00390625), feature)

    # z = Sequential([
    #     Dense(hidden_layers_dim, activation=relu),
    #     Dense(hidden_layers_dim, activation=relu),
    #     Dense(num_output_classes)])(scaled_input)

    with default_options(activation=relu, init=C.glorot_uniform()):
        z = Sequential([For(range(num_hidden_layers),
                            lambda i: Dense(hidden_layers_dim)),
                        Dense(num_output_classes, activation=None)])(scaled_input)

    ce = cross_entropy_with_softmax(z, label)
    pe = classification_error(z, label)

    # setup the training data
    path = os.path.join(abs_path, "Train-28x28_cntk_text.txt")

    reader_train = MinibatchSource(CTFDeserializer(path, StreamDefs(
        features=StreamDef(field='features', shape=input_dim),
        labels=StreamDef(field='labels', shape=num_output_classes))))

    input_map = {
        feature: reader_train.streams.features,
        label: reader_train.streams.labels
    }

    # Training config
    minibatch_size = 64
    num_samples_per_sweep = 60000
    num_sweeps_to_train_with = 10

    # Instantiate progress writers.
    progress_writers = [ProgressPrinter(
        tag='Training',
        num_epochs=num_sweeps_to_train_with)]

    # Instantiate the trainer object to drive the model training
    lr = learning_rate_schedule(1, UnitType.sample)
    trainer = Trainer(z, (ce, pe), [adadelta(z.parameters, lr)], progress_writers)

    training_session(
        trainer=trainer,
        mb_source=reader_train,
        mb_size=minibatch_size,
        model_inputs_to_streams=input_map,
        max_samples=num_samples_per_sweep * num_sweeps_to_train_with,
        progress_frequency=num_samples_per_sweep
    ).train()

    # Load test data
    path = os.path.join(abs_path, "Test-28x28_cntk_text.txt")

    reader_test = MinibatchSource(CTFDeserializer(path, StreamDefs(
        features=StreamDef(field='features', shape=input_dim),
        labels=StreamDef(field='labels', shape=num_output_classes))))

    input_map = {
        feature: reader_test.streams.features,
        label: reader_test.streams.labels
    }

    # Test data for trained model
    test_minibatch_size = 1024
    num_samples = 10000
    num_minibatches_to_test = num_samples / test_minibatch_size
    test_result = 0.0
    for i in range(0, int(num_minibatches_to_test)):
        mb = reader_test.next_minibatch(test_minibatch_size, input_map=input_map)
        eval_error = trainer.test_minibatch(mb)
        test_result = test_result + eval_error

    # Average of evaluation errors of all test minibatches
    return test_result / num_minibatches_to_test
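# Sketch of an alternative driver loop (not what simple_mnist uses): the same training
# could be run with an explicit loop instead of training_session. It assumes a trainer,
# reader and input_map built as in simple_mnist; the sample budget is hypothetical.
def _manual_training_loop_sketch(trainer, reader, input_map, minibatch_size=64, max_samples=600000):
    samples_seen = 0
    while samples_seen < max_samples:
        mb = reader.next_minibatch(minibatch_size, input_map=input_map)
        if not mb:
            break  # reader exhausted (bounded max_samples / epoch_size)
        trainer.train_minibatch(mb)
        samples_seen += trainer.previous_minibatch_sample_count
    return samples_seen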