# Imports assumed by the Keras test snippets below (RandomSequence is a
# test helper, sketched further down):
import numpy as np
import pandas as pd
import pytest

from keras import backend as K
from keras.callbacks import LambdaCallback
from keras.layers import Input, Dense, Dropout
from keras.models import Model


def test_warnings():
    a = Input(shape=(3,), name='input_a')
    b = Input(shape=(3,), name='input_b')
    a_2 = Dense(4, name='dense_1')(a)
    dp = Dropout(0.5, name='dropout')
    b_2 = dp(b)
    model = Model([a, b], [a_2, b_2])

    optimizer = 'rmsprop'
    loss = 'mse'
    loss_weights = [1., 0.5]
    model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights,
                  sample_weight_mode=None)

    def gen_data(batch_sz):
        while True:
            yield ([np.random.random((batch_sz, 3)),
                    np.random.random((batch_sz, 3))],
                   [np.random.random((batch_sz, 4)),
                    np.random.random((batch_sz, 3))])

    with pytest.warns(Warning) as w:
        out = model.fit_generator(gen_data(4), steps_per_epoch=10,
                                  use_multiprocessing=True, workers=2)
    warning_raised = any(['Sequence' in str(w_.message) for w_ in w])
    assert warning_raised, 'No warning raised when using generator with processes.'

    with pytest.warns(None) as w:
        out = model.fit_generator(RandomSequence(3), steps_per_epoch=4,
                                  use_multiprocessing=True, workers=2)
    assert all(['Sequence' not in str(w_.message) for w_ in w]), (
        'A warning was raised for Sequence.')
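
# The RandomSequence helper referenced throughout these tests is defined
# elsewhere in the test module. A minimal sketch consistent with how the
# tests use it (default length 12, two inputs of width 3, two outputs of
# widths 4 and 3); the exact original may differ:
from keras.utils import Sequence


class RandomSequence(Sequence):
    def __init__(self, batch_size, sequence_length=12):
        self.batch_size = batch_size
        self.sequence_length = sequence_length

    def __len__(self):
        return self.sequence_length

    def __getitem__(self, idx):
        # Random batches shaped like the two-input/two-output model above.
        return ([np.random.random((self.batch_size, 3)),
                 np.random.random((self.batch_size, 3))],
                [np.random.random((self.batch_size, 4)),
                 np.random.random((self.batch_size, 3))])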
def run_parallel_test(data_generator):
    a = Input(shape=(3,), name='input_a')
    b = Input(shape=(3,), name='input_b')
    a_2 = Dense(4, name='dense_1')(a)
    dp = Dropout(0.5, name='dropout')
    b_2 = dp(b)

    optimizer = 'rmsprop'
    loss = 'mse'
    loss_weights = [1., 0.5]

    model = Model([a, b], [a_2, b_2])
    model = make_parallel(model, 2)
    model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights,
                  sample_weight_mode=None)

    trained_epochs = []
    tracker_cb = LambdaCallback(
        on_epoch_begin=lambda epoch, logs: trained_epochs.append(epoch))
    model.fit_generator(data_generator(4), steps_per_epoch=3, epochs=5,
                        initial_epoch=2, callbacks=[tracker_cb])
    assert trained_epochs == [2, 3, 4]
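
# `make_parallel` above is an external helper. A minimal stand-in under the
# assumption that it does plain data parallelism (the name and behavior are
# assumptions; keras.utils.multi_gpu_model is the stock equivalent):
from keras.utils import multi_gpu_model


def make_parallel(model, gpu_count):
    # Replicates the model on `gpu_count` GPUs; each incoming batch is split
    # into sub-batches processed in parallel and the results merged on CPU.
    return multi_gpu_model(model, gpus=gpu_count)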
def train(model: Model, batch_size, ep, n_of_run=1):
    train_samples = len(get_files_list(train_path_X))
    valid_samples = len(get_files_list(valid_path_X))
    model.fit_generator(
        generator=imageLoader(train_path_X, train_path_Y,
                              batch_size=batch_size),
        validation_data=imageLoader(valid_path_X, valid_path_Y,
                                    batch_size=batch_size),
        steps_per_epoch=int(train_samples / batch_size),
        validation_steps=int(valid_samples / batch_size),
        epochs=ep,
        callbacks=[hist])
    model.save_weights(str(blocks) + 'model' + str(n_of_run) + '.h5')
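
# `imageLoader`, `get_files_list`, the path variables, `hist`, and `blocks`
# are defined elsewhere. A sketch of the generator contract train() relies
# on; the names, shapes, and loading details here are assumptions, not the
# original implementation:
import os
import numpy as np
from keras.preprocessing import image


def get_files_list(path):
    # Stable, sorted listing so X and Y files pair up by name.
    return sorted(os.listdir(path))


def load_image(path):
    # Hypothetical per-file loader: image file -> float array in [0, 1].
    return image.img_to_array(image.load_img(path)) / 255.0


def imageLoader(path_X, path_Y, batch_size):
    # Yields (inputs, targets) batches forever, as fit_generator expects.
    files = get_files_list(path_X)
    while True:
        for i in range(0, len(files) - batch_size + 1, batch_size):
            batch = files[i:i + batch_size]
            x = np.stack([load_image(os.path.join(path_X, f)) for f in batch])
            y = np.stack([load_image(os.path.join(path_Y, f)) for f in batch])
            yield x, y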
    # randomly shift images vertically (fraction of total height)
    height_shift_range=0.1,
    shear_range=0.1,  # set range for random shear
    zoom_range=0.2,  # set range for random zoom
    channel_shift_range=0.,  # set range for random channel shifts
    # set mode for filling points outside the input boundaries
    fill_mode='nearest',
    cval=0.,  # value used for fill_mode = "constant"
    horizontal_flip=True,  # randomly flip images
    vertical_flip=False,  # randomly flip images
    # set rescaling factor (applied before any other transformation)
    rescale=None,
    # set function that will be applied on each input
    preprocessing_function=None,
    # image data format, either "channels_first" or "channels_last"
    data_format=None)

datagen.fit(x_train)

callbacks = [
    ModelCheckpoint(
        filepath="./teacher_models/teacher_model_epoch_{epoch:02d}-val_acc_{val_acc}.hdf5")
]

model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
                    epochs=epochs,
                    validation_data=(x_test, y_test),
                    workers=4,
                    callbacks=callbacks)
def test_model_methods():
    a = Input(shape=(3,), name='input_a')
    b = Input(shape=(3,), name='input_b')
    a_2 = Dense(4, name='dense_1')(a)
    dp = Dropout(0.5, name='dropout')
    b_2 = dp(b)
    model = Model([a, b], [a_2, b_2])

    optimizer = 'rmsprop'
    loss = 'mse'
    loss_weights = [1., 0.5]

    input_a_np = np.random.random((10, 3))
    input_b_np = np.random.random((10, 3))
    output_a_np = np.random.random((10, 4))
    output_b_np = np.random.random((10, 3))

    # training/testing doesn't work before compiling.
    with pytest.raises(RuntimeError):
        model.train_on_batch([input_a_np, input_b_np],
                             [output_a_np, output_b_np])

    model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights,
                  sample_weight_mode=None)

    # test train_on_batch
    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np])
    out = model.train_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
                               [output_a_np, output_b_np])
    out = model.train_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
                               {'dense_1': output_a_np,
                                'dropout': output_b_np})

    # test fit
    out = model.fit([input_a_np, input_b_np],
                    [output_a_np, output_b_np], epochs=1, batch_size=4)
    out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
                    [output_a_np, output_b_np], epochs=1, batch_size=4)
    out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
                    {'dense_1': output_a_np, 'dropout': output_b_np},
                    epochs=1, batch_size=4)

    # test validation_split
    out = model.fit([input_a_np, input_b_np],
                    [output_a_np, output_b_np],
                    epochs=1, batch_size=4, validation_split=0.5)
    out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
                    [output_a_np, output_b_np],
                    epochs=1, batch_size=4, validation_split=0.5)

    # test validation data
    out = model.fit([input_a_np, input_b_np],
                    [output_a_np, output_b_np],
                    epochs=1, batch_size=4,
                    validation_data=([input_a_np, input_b_np],
                                     [output_a_np, output_b_np]))
    out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
                    [output_a_np, output_b_np],
                    epochs=1, batch_size=4, validation_split=0.5,
                    validation_data=({'input_a': input_a_np,
                                      'input_b': input_b_np},
                                     [output_a_np, output_b_np]))
    out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
                    {'dense_1': output_a_np, 'dropout': output_b_np},
                    epochs=1, batch_size=4, validation_split=0.5,
                    validation_data=({'input_a': input_a_np,
                                      'input_b': input_b_np},
                                     {'dense_1': output_a_np,
                                      'dropout': output_b_np}))

    # test_on_batch
    out = model.test_on_batch([input_a_np, input_b_np],
                              [output_a_np, output_b_np])
    out = model.test_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
                              [output_a_np, output_b_np])
    out = model.test_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
                              {'dense_1': output_a_np,
                               'dropout': output_b_np})

    # predict_on_batch
    out = model.predict_on_batch([input_a_np, input_b_np])
    out = model.predict_on_batch({'input_a': input_a_np,
                                  'input_b': input_b_np})

    # predict, evaluate
    input_a_np = np.random.random((10, 3))
    input_b_np = np.random.random((10, 3))
    output_a_np = np.random.random((10, 4))
    output_b_np = np.random.random((10, 3))
    out = model.evaluate([input_a_np, input_b_np],
                         [output_a_np, output_b_np], batch_size=4)
    out = model.predict([input_a_np, input_b_np], batch_size=4)

    # with sample_weight
    input_a_np = np.random.random((10, 3))
    input_b_np = np.random.random((10, 3))
    output_a_np = np.random.random((10, 4))
    output_b_np = np.random.random((10, 3))
    sample_weight = [None, np.random.random((10,))]
    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np],
                               sample_weight=sample_weight)
    out = model.test_on_batch([input_a_np, input_b_np],
                              [output_a_np, output_b_np],
                              sample_weight=sample_weight)

    # test accuracy metric
    model.compile(optimizer, loss, metrics=['acc'], sample_weight_mode=None)
    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np])
    assert len(out) == 5
    out = model.test_on_batch([input_a_np, input_b_np],
                              [output_a_np, output_b_np])
    assert len(out) == 5

    # this should also work
    model.compile(optimizer, loss, metrics={'dense_1': 'acc'},
                  sample_weight_mode=None)
    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np])
    assert len(out) == 4
    out = model.test_on_batch([input_a_np, input_b_np],
                              [output_a_np, output_b_np])
    assert len(out) == 4

    # and this as well
    model.compile(optimizer, loss, metrics={'dense_1': ['acc']},
                  sample_weight_mode=None)
    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np])
    assert len(out) == 4
    out = model.test_on_batch([input_a_np, input_b_np],
                              [output_a_np, output_b_np])
    assert len(out) == 4

    # test starting from non-zero initial epoch
    trained_epochs = []
    trained_batches = []

    # define tracer callback
    def on_epoch_begin(epoch, logs):
        trained_epochs.append(epoch)

    def on_batch_begin(batch, logs):
        trained_batches.append(batch)

    tracker_cb = LambdaCallback(on_epoch_begin=on_epoch_begin,
                                on_batch_begin=on_batch_begin)

    out = model.fit([input_a_np, input_b_np],
                    [output_a_np, output_b_np], epochs=5, batch_size=4,
                    initial_epoch=2, callbacks=[tracker_cb])
    assert trained_epochs == [2, 3, 4]

    # test starting from non-zero initial epoch for generator too
    trained_epochs = []

    def gen_data(batch_sz):
        while True:
            yield ([np.random.random((batch_sz, 3)),
                    np.random.random((batch_sz, 3))],
                   [np.random.random((batch_sz, 4)),
                    np.random.random((batch_sz, 3))])

    out = model.fit_generator(gen_data(4), steps_per_epoch=3, epochs=5,
                              initial_epoch=2, callbacks=[tracker_cb])
    assert trained_epochs == [2, 3, 4]

    # test with a custom metric function
    def mse(y_true, y_pred):
        return K.mean(K.pow(y_true - y_pred, 2))

    model.compile(optimizer, loss, metrics=[mse], sample_weight_mode=None)
    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np])
    out_len = 1 + 2 * (1 + 1)  # total loss + 2 outputs * (loss + metric)
    assert len(out) == out_len
    out = model.test_on_batch([input_a_np, input_b_np],
                              [output_a_np, output_b_np])
    assert len(out) == out_len

    input_a_np = np.random.random((10, 3))
    input_b_np = np.random.random((10, 3))
    output_a_np = np.random.random((10, 4))
    output_b_np = np.random.random((10, 3))
    out = model.fit([input_a_np, input_b_np],
                    [output_a_np, output_b_np], batch_size=4, epochs=1)
    out = model.evaluate([input_a_np, input_b_np],
                         [output_a_np, output_b_np], batch_size=4)
    out = model.predict([input_a_np, input_b_np], batch_size=4)

    # empty batch
    with pytest.raises(ValueError):
        def gen_data():
            while True:
                yield (np.asarray([]), np.asarray([]))
        out = model.evaluate_generator(gen_data(), steps=1)

    # x is not a list of numpy arrays.
    with pytest.raises(ValueError):
        out = model.predict([None])

    # x does not match _feed_input_names.
    with pytest.raises(ValueError):
        out = model.predict([input_a_np, None, input_b_np])
    with pytest.raises(ValueError):
        out = model.predict([None, input_a_np, input_b_np])

    # all input/output/weight arrays should have the same number of samples.
    with pytest.raises(ValueError):
        out = model.train_on_batch([input_a_np, input_b_np[:2]],
                                   [output_a_np, output_b_np],
                                   sample_weight=sample_weight)
    with pytest.raises(ValueError):
        out = model.train_on_batch([input_a_np, input_b_np],
                                   [output_a_np, output_b_np[:2]],
                                   sample_weight=sample_weight)
    with pytest.raises(ValueError):
        out = model.train_on_batch([input_a_np, input_b_np],
                                   [output_a_np, output_b_np],
                                   sample_weight=[sample_weight[1],
                                                  sample_weight[1][:2]])

    # `sample_weight` is neither a dict nor a list.
    with pytest.raises(TypeError):
        out = model.train_on_batch([input_a_np, input_b_np],
                                   [output_a_np, output_b_np],
                                   sample_weight=tuple(sample_weight))

    # `validation_data` is neither a tuple nor a triple.
    with pytest.raises(ValueError):
        out = model.fit([input_a_np, input_b_np],
                        [output_a_np, output_b_np],
                        epochs=1, batch_size=4,
                        validation_data=([input_a_np, input_b_np],))

    # `loss` does not match outputs.
    with pytest.raises(ValueError):
        model.compile(optimizer, loss=['mse', 'mae', 'mape'])

    # `loss_weights` does not match output_names.
    with pytest.raises(ValueError):
        model.compile(optimizer, loss='mse', loss_weights={'lstm': 0.5})

    # `loss_weights` does not match outputs.
    with pytest.raises(ValueError):
        model.compile(optimizer, loss='mse', loss_weights=[0.5])

    # `loss_weights` is invalid type.
    with pytest.raises(TypeError):
        model.compile(optimizer, loss='mse', loss_weights=(0.5, 0.5))

    # `sample_weight_mode` does not match output_names.
    with pytest.raises(ValueError):
        model.compile(optimizer, loss='mse',
                      sample_weight_mode={'lstm': 'temporal'})

    # `sample_weight_mode` does not match output_names.
    with pytest.raises(ValueError):
        model.compile(optimizer, loss='mse', sample_weight_mode=['temporal'])

    # `sample_weight_mode` matches output_names partially.
    with pytest.raises(ValueError):
        model.compile(optimizer, loss='mse',
                      sample_weight_mode={'dense_1': 'temporal'})

    # `loss` does not exist.
    with pytest.raises(ValueError):
        model.compile(optimizer, loss=[])

    model.compile(optimizer, loss=['mse', 'mae'])
    model.compile(optimizer, loss='mse',
                  loss_weights={'dense_1': 0.2, 'dropout': 0.8})
    model.compile(optimizer, loss='mse', loss_weights=[0.2, 0.8])

    # the rank of weight arrays should be 1.
    with pytest.raises(ValueError):
        out = model.train_on_batch(
            [input_a_np, input_b_np],
            [output_a_np, output_b_np],
            sample_weight=[None, np.random.random((10, 20, 30))])

    model.compile(optimizer, loss='mse',
                  sample_weight_mode={'dense_1': None, 'dropout': 'temporal'})
    model.compile(optimizer, loss='mse', sample_weight_mode=[None, 'temporal'])

    # the rank of output arrays should be at least 3D.
    with pytest.raises(ValueError):
        out = model.train_on_batch([input_a_np, input_b_np],
                                   [output_a_np, output_b_np],
                                   sample_weight=sample_weight)

    model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights,
                  sample_weight_mode=None)
    trained_epochs = []
    trained_batches = []
    out = model.fit_generator(generator=RandomSequence(3),
                              steps_per_epoch=3, epochs=5, initial_epoch=0,
                              validation_data=RandomSequence(4),
                              validation_steps=3, callbacks=[tracker_cb])
    assert trained_epochs == [0, 1, 2, 3, 4]
    assert trained_batches == list(range(3)) * 5

    # steps_per_epoch will be equal to len of sequence if it's unspecified
    trained_epochs = []
    trained_batches = []
    out = model.fit_generator(generator=RandomSequence(3),
                              epochs=5, initial_epoch=0,
                              validation_data=RandomSequence(4),
                              callbacks=[tracker_cb])
    assert trained_epochs == [0, 1, 2, 3, 4]
    assert trained_batches == list(range(12)) * 5

    # fit_generator will throw an exception if steps is unspecified
    # for a regular generator
    with pytest.raises(ValueError):
        def gen_data():
            while True:
                yield (np.asarray([]), np.asarray([]))
        out = model.fit_generator(generator=gen_data(), epochs=5,
                                  initial_epoch=0, validation_data=gen_data(),
                                  callbacks=[tracker_cb])

    # predict_generator output shape behavior should be consistent
    def expected_shape(batch_size, n_batches):
        return (batch_size * n_batches, 4), (batch_size * n_batches, 3)

    # Multiple outputs and one step.
    batch_size = 5
    sequence_length = 1
    shape_0, shape_1 = expected_shape(batch_size, sequence_length)
    out = model.predict_generator(
        RandomSequence(batch_size, sequence_length=sequence_length))
    assert np.shape(out[0]) == shape_0 and np.shape(out[1]) == shape_1

    # Multiple outputs and multiple steps.
    batch_size = 5
    sequence_length = 2
    shape_0, shape_1 = expected_shape(batch_size, sequence_length)
    out = model.predict_generator(
        RandomSequence(batch_size, sequence_length=sequence_length))
    assert np.shape(out[0]) == shape_0 and np.shape(out[1]) == shape_1

    # Create a model with a single output.
    single_output_model = Model([a, b], a_2)
    single_output_model.compile(optimizer, loss, metrics=[],
                                sample_weight_mode=None)

    # Single output and one step.
    batch_size = 5
    sequence_length = 1
    shape_0, _ = expected_shape(batch_size, sequence_length)
    out = single_output_model.predict_generator(
        RandomSequence(batch_size, sequence_length=sequence_length))
    assert np.shape(out) == shape_0

    # Single output and multiple steps.
    batch_size = 5
    sequence_length = 2
    shape_0, _ = expected_shape(batch_size, sequence_length)
    out = single_output_model.predict_generator(
        RandomSequence(batch_size, sequence_length=sequence_length))
    assert np.shape(out) == shape_0
""" datagen_for_validation = ImageDataGenerator( zca_whitening=True, # apply ZCA whitening zca_epsilon=1e-06 # epsilon for ZCA whitening ) # Compute quantities required for feature-wise normalization # (std, mean, and principal components if ZCA whitening is applied). datagen_for_validation.fit(x_test) """ datagen.fit(x_train) # Fit the model on the batches generated by datagen.flow(). model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size), epochs=epochs, callbacks=[lr_cb], validation_data=(x_test, y_test, batch_size), validation_steps=100, verbose=1) # Save model and weights if not os.path.isdir(save_dir): os.makedirs(save_dir) model_path = os.path.join(save_dir, model_name) model.save(model_path) print('Saved trained model at %s ' % model_path) # Score trained model. scores = model.evaluate(x_test, y_test, verbose=1) print('Test loss:', scores[0]) print('Test accuracy:', scores[1])
class TL(Model):
    """
    Triplet-Loss trained Neural Network.

    https://arxiv.org/abs/1503.03832
    """

    def __init__(self, base=None, siamese=None):
        super(TL, self).__init__()

        # Store the base model.
        assert base is not None
        self.base = base

        # For loading.
        if base is not None and siamese is not None:
            self.base = base
            self.siamese = siamese
            self.latent_dim = self.base.outputs[0].shape[1]
            return

        # Get the latent dimension.
        assert len(self.base.outputs) == 1
        assert len(self.base.outputs[0].shape) == 2
        self.latent_dim = self.base.outputs[0].shape[1]

        # Get the input shape.
        input_shape = self.base.inputs[0].shape.as_list()[1:]

        # Create the anchor.
        input_anchor = layers.Input(shape=input_shape)
        output_anchor = self.base(input_anchor)

        # Create the positive.
        input_positive = layers.Input(shape=input_shape)
        output_positive = self.base(input_positive)

        # Create the negative.
        input_negative = layers.Input(shape=input_shape)
        output_negative = self.base(input_negative)

        # Create a dummy output.
        output = layers.concatenate(
            [output_anchor, output_positive, output_negative])

        # Create the model.
        self.siamese = Model([input_anchor, input_positive, input_negative],
                             output, name="triplet_model")

    def compile(self, optimizer, loss=None, metrics=None, loss_weights=None,
                sample_weight_mode=None, weighted_metrics=None,
                target_tensors=None, triplet_loss="mse", **kwargs):
        """
        Compiles the TL.

        In addition to the default functionality of *compile*, it adds the
        triplet-loss. In order to do so you have to provide it via the
        parameter *triplet_loss*. The triplet loss is similar to

        >>> triplet_loss = max(0.0, pos_dist - neg_dist + alpha)

        See the literature for details.

        Additional args:
            triplet_loss (string): The base-loss for the triplet-loss.
                Values are either *euclidean* for the euclidean norm or
                *cosine* for cosine similarity.
        """
        assert loss is None, \
            "Not expected to provide an explicit loss for TL. Use 'triplet_loss'"

        self.triplet_loss = triplet_loss

        def triplet_loss_function(y_true, y_pred, alpha=0.4):
            anchor = y_pred[:, 0:self.latent_dim]
            positive = y_pred[:, self.latent_dim:self.latent_dim * 2]
            negative = y_pred[:, self.latent_dim * 2:self.latent_dim * 3]
            if triplet_loss == "euclidean":
                pos_dist = euclidean_loss(positive, anchor)
                neg_dist = euclidean_loss(negative, anchor)
            elif triplet_loss == "cosine":
                pos_dist = cosine_loss(positive, anchor)
                neg_dist = cosine_loss(negative, anchor)
            else:
                raise ValueError("Unexpected triplet_loss: " + triplet_loss)
            basic_loss = pos_dist - neg_dist + alpha
            loss = K.maximum(basic_loss, 0.0)
            return loss

        loss = triplet_loss_function
        self.siamese.compile(optimizer, loss, metrics, loss_weights,
                             sample_weight_mode, weighted_metrics, **kwargs)

    def fit(self, x=None, y=None, batch_size=None, minibatch_size=None,
            epochs=1, verbose=1, callbacks=None, validation_split=0.,
            validation_data=None, shuffle=True, class_weight=None,
            sample_weight=None, initial_epoch=0, steps_per_epoch=None,
            validation_steps=None, **kwargs):
        """
        This is basically the same as in vanilla Keras.

        Additional args:
            minibatch_size (int): The model internally does some sampling.
                The *minibatch_size* specifies how many candidates to use
                in order to create a triplet for training.
        """
        assert minibatch_size is not None, "ERROR! Must provide 'minibatch_size'."
        assert steps_per_epoch is not None, "ERROR! Must provide 'steps_per_epoch'."
        assert validation_steps is not None, "ERROR! Must provide 'validation_steps'."

        y_dummy = np.zeros((batch_size, self.latent_dim * 3))

        # Template generator.
        def triplet_loss_generator(x_generator, y_generator):
            # Get the classes.
            classes = sorted(list(set(y_generator)))

            # Sort by classes for easy indexing.
            class_indices = {}
            for c in classes:
                class_indices[c] = []
            for index, c in enumerate(y_generator):
                class_indices[c].append(index)

            # Compute the complements.
            class_complements = {}
            for c in classes:
                class_complements[c] = [c2 for c2 in classes if c2 != c]

            # Generator loop.
            while True:
                x_input_anchors = []
                x_input_positives = []
                x_input_negatives = []

                # Generate a whole batch.
                for _ in range(batch_size):
                    anchor_class = random.choice(classes)
                    anchor_index = random.choice(class_indices[anchor_class])
                    anchor_input = x_generator[anchor_index]
                    anchor_latent = self.base.predict(
                        np.expand_dims(anchor_input, axis=0))[0]

                    # Generate some positive candidates.
                    positive_candidates = []
                    while len(positive_candidates) < minibatch_size:
                        positive_class = anchor_class
                        positive_index = random.choice(
                            class_indices[positive_class])
                        positive_input = x_generator[positive_index]
                        assert positive_class == y_generator[positive_index]
                        positive_candidates.append(positive_input)

                    # Find the farthest positive candidate.
                    positive_candidates = np.array(positive_candidates)
                    positive_latents = self.base.predict(positive_candidates)
                    positive_extremum = compute_latent_extremum(
                        anchor_latent, positive_latents, "argmax",
                        self.triplet_loss)
                    positive_input = positive_candidates[positive_extremum]

                    # Generate some negative candidates.
                    negative_candidates = []
                    while len(negative_candidates) < minibatch_size:
                        negative_class = random.choice(
                            class_complements[anchor_class])
                        negative_index = random.choice(
                            class_indices[negative_class])
                        negative_input = x_generator[negative_index]
                        assert negative_class == y_generator[negative_index]
                        negative_candidates.append(negative_input)

                    # Find the closest negative candidate.
                    negative_candidates = np.array(negative_candidates)
                    negative_latents = self.base.predict(negative_candidates)
                    negative_extremum = compute_latent_extremum(
                        anchor_latent, negative_latents, "argmin",
                        self.triplet_loss)
                    negative_input = negative_candidates[negative_extremum]

                    # Done.
                    x_input_anchors.append(anchor_input)
                    x_input_positives.append(positive_input)
                    x_input_negatives.append(negative_input)

                x_input_anchors = np.array(x_input_anchors)
                x_input_positives = np.array(x_input_positives)
                x_input_negatives = np.array(x_input_negatives)
                x_input = [x_input_anchors, x_input_positives,
                           x_input_negatives]
                yield x_input, y_dummy

        # Create the generators.
        training_generator = triplet_loss_generator(x, y)
        if validation_data is not None:
            validation_generator = triplet_loss_generator(
                validation_data[0], validation_data[1])
        else:
            validation_generator = None

        # Create the history.
        history_keys = ["loss", "val_loss"]
        history = {}
        for history_key in history_keys:
            history[history_key] = []

        # Training the model.
        for epoch in range(epochs):
            print("Epoch " + str(epoch + 1) + "/" + str(epochs) + "...")

            # Generating data for training.
            training_input, training_output = next(training_generator)
            if validation_generator is not None:
                validation_input, validation_output = next(
                    validation_generator)

            model_history = self.siamese.fit(
                training_input, training_output,
                validation_data=(validation_input, validation_output),
                epochs=1, steps_per_epoch=steps_per_epoch, verbose=0,
                validation_steps=validation_steps)

            # Update the history.
            for history_key in history_keys:
                history_value = model_history.history[history_key]
                history[history_key].append(history_value)
                print(history_key, history_value)

        return history

    def fit_generator(self, generator, steps_per_epoch=None, epochs=1,
                      verbose=1, callbacks=None, validation_data=None,
                      validation_steps=None, class_weight=None,
                      max_queue_size=10, workers=1, use_multiprocessing=False,
                      shuffle=True, initial_epoch=0):
        """
        Coming soon...
        """
        raise NotImplementedError("TODO: implement fit_generator!")

    def evaluate(self, x=None, y=None, batch_size=None, verbose=1,
                 sample_weight=None, steps=None):
        """
        Evaluates the model. Same as vanilla Keras.
        """
        return self.siamese.evaluate(x, y, batch_size, verbose,
                                     sample_weight, steps=steps)

    def predict(self, x, batch_size=None, verbose=0, steps=None):
        """
        Does a prediction. Same as vanilla Keras.
        """
        return self.siamese.predict(x, batch_size, verbose, steps)

    def summary(self):
        """
        Provides a summary.
        """
        print("Basemodel:")
        self.base.summary()
        print("Siamese model:")
        self.siamese.summary()

    def save(self, path):
        """
        Saves the TL. This includes the whole Siamese Net plus the
        base-model.

        This code

        >>> tl.save("mytl.h5")

        will create the files *mytl.h5* and *mytl-base.h5*.
        """
        self.siamese.save(path)
        self.base.save(append_to_filepath(path, "-base"))
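
# A minimal usage sketch of TL; the base encoder and shapes below are
# illustrative assumptions, and euclidean_loss/cosine_loss/
# compute_latent_extremum are helpers assumed to exist in this module:
from keras import layers
from keras.models import Model

inp = layers.Input(shape=(32,))   # hypothetical input size
latent = layers.Dense(8)(inp)     # hypothetical 8-dim latent space
base = Model(inp, latent)

tl = TL(base=base)
tl.compile("adam", triplet_loss="euclidean")
tl.summary()

# Training then follows the fit() contract documented above, e.g.:
# history = tl.fit(x, y, batch_size=16, minibatch_size=4, epochs=2,
#                  steps_per_epoch=1, validation_steps=1,
#                  validation_data=(x_val, y_val))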
y = TimeDistributed(Conv2D(512, 3, padding='same', activation='relu'))(y)
y = TimeDistributed(Conv2D(512, 3, padding='same', activation='relu'))(y)
y = TimeDistributed(MaxPooling2D(pool_size=(2, 2)))(y)

# Tensor shape: (B, T, 4, 4, 512) -> (B, T, 512)
y = TimeDistributed(GlobalAveragePooling2D())(y)

with tf.device(available_devices[3]):
    y = LSTM(128, return_sequences=False, return_state=False)(y)
    prediction = Dense(num_classes, activation='softmax')(y)

# Instantiate a 'keras.engine.training.Model' instance
model = Model(inputs=cnn_input, outputs=prediction)

# Compile model
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adam(lr=learning_rate),
              metrics=['accuracy'])

# Train model
datadir = ('Z:/1. 프로젝트/2018_삼성SDS_스타크래프트/Supervised/'
           'trainingData_v3/data(선수별)/박성균/128/')
generator = generate_arrays_from_directory(datadir, batch_size)
steps_per_epoch = math.ceil(len(os.listdir(datadir)) / batch_size)

model.fit_generator(generator=generator,
                    steps_per_epoch=steps_per_epoch,
                    epochs=epochs,
                    max_queue_size=10,
                    verbose=1)
def test_model_methods():
    a = Input(shape=(3,), name='input_a')
    b = Input(shape=(3,), name='input_b')
    a_2 = Dense(4, name='dense_1')(a)
    dp = Dropout(0.5, name='dropout')
    b_2 = dp(b)
    model = Model([a, b], [a_2, b_2])

    optimizer = 'rmsprop'
    loss = 'mse'
    loss_weights = [1., 0.5]

    input_a_np = np.random.random((10, 3))
    input_b_np = np.random.random((10, 3))
    input_a_df = pd.DataFrame(input_a_np)
    input_b_df = pd.DataFrame(input_b_np)

    output_a_np = np.random.random((10, 4))
    output_b_np = np.random.random((10, 3))
    output_a_df = pd.DataFrame(output_a_np)
    output_b_df = pd.DataFrame(output_b_np)

    # training/testing doesn't work before compiling.
    with pytest.raises(RuntimeError):
        model.train_on_batch([input_a_np, input_b_np],
                             [output_a_np, output_b_np])

    model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights,
                  sample_weight_mode=None)

    # test train_on_batch
    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np])
    out = model.train_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
                               [output_a_np, output_b_np])
    out = model.train_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
                               {'dense_1': output_a_np,
                                'dropout': output_b_np})
    out = model.train_on_batch([input_a_df, input_b_df],
                               [output_a_df, output_b_df])

    # test fit
    out = model.fit([input_a_np, input_b_np],
                    [output_a_np, output_b_np], epochs=1, batch_size=4)
    out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
                    [output_a_np, output_b_np], epochs=1, batch_size=4)
    out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
                    {'dense_1': output_a_np, 'dropout': output_b_np},
                    epochs=1, batch_size=4)
    out = model.fit([input_a_df, input_b_df],
                    [output_a_df, output_b_df], epochs=1, batch_size=4)

    # test validation_split
    out = model.fit([input_a_np, input_b_np],
                    [output_a_np, output_b_np],
                    epochs=1, batch_size=4, validation_split=0.5)
    out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
                    [output_a_np, output_b_np],
                    epochs=1, batch_size=4, validation_split=0.5)

    # test validation data
    out = model.fit([input_a_np, input_b_np],
                    [output_a_np, output_b_np],
                    epochs=1, batch_size=4,
                    validation_data=([input_a_np, input_b_np],
                                     [output_a_np, output_b_np]))
    out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
                    [output_a_np, output_b_np],
                    epochs=1, batch_size=4, validation_split=0.5,
                    validation_data=({'input_a': input_a_np,
                                      'input_b': input_b_np},
                                     [output_a_np, output_b_np]))
    out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
                    {'dense_1': output_a_np, 'dropout': output_b_np},
                    epochs=1, batch_size=4, validation_split=0.5,
                    validation_data=({'input_a': input_a_np,
                                      'input_b': input_b_np},
                                     {'dense_1': output_a_np,
                                      'dropout': output_b_np}))

    # test_on_batch
    out = model.test_on_batch([input_a_np, input_b_np],
                              [output_a_np, output_b_np])
    out = model.test_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
                              [output_a_np, output_b_np])
    out = model.test_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
                              {'dense_1': output_a_np,
                               'dropout': output_b_np})
    out = model.test_on_batch([input_a_df, input_b_df],
                              [output_a_df, output_b_df])

    # predict_on_batch
    out = model.predict_on_batch([input_a_np, input_b_np])
    out = model.predict_on_batch({'input_a': input_a_np,
                                  'input_b': input_b_np})
    out = model.predict_on_batch([input_a_df, input_b_df])

    # predict, evaluate
    input_a_np = np.random.random((10, 3))
    input_b_np = np.random.random((10, 3))
    output_a_np = np.random.random((10, 4))
    output_b_np = np.random.random((10, 3))
    out = model.evaluate([input_a_np, input_b_np],
                         [output_a_np, output_b_np], batch_size=4)
    out = model.evaluate([input_a_df, input_b_df],
                         [output_a_df, output_b_df], batch_size=4)
    out = model.predict([input_a_np, input_b_np], batch_size=4)
    out = model.predict([input_a_df, input_b_df], batch_size=4)

    # with sample_weight
    input_a_np = np.random.random((10, 3))
    input_b_np = np.random.random((10, 3))
    output_a_np = np.random.random((10, 4))
    output_b_np = np.random.random((10, 3))
    sample_weight = [None, np.random.random((10,))]
    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np],
                               sample_weight=sample_weight)
    out = model.test_on_batch([input_a_np, input_b_np],
                              [output_a_np, output_b_np],
                              sample_weight=sample_weight)

    # test accuracy metric
    model.compile(optimizer, loss, metrics=['acc'], sample_weight_mode=None)
    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np])
    assert len(out) == 5
    out = model.test_on_batch([input_a_np, input_b_np],
                              [output_a_np, output_b_np])
    assert len(out) == 5

    # this should also work
    model.compile(optimizer, loss, metrics={'dense_1': 'acc'},
                  sample_weight_mode=None)
    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np])
    assert len(out) == 4
    out = model.test_on_batch([input_a_np, input_b_np],
                              [output_a_np, output_b_np])
    assert len(out) == 4

    # and this as well
    model.compile(optimizer, loss, metrics={'dense_1': ['acc']},
                  sample_weight_mode=None)
    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np])
    assert len(out) == 4
    out = model.test_on_batch([input_a_np, input_b_np],
                              [output_a_np, output_b_np])
    assert len(out) == 4

    # test starting from non-zero initial epoch
    trained_epochs = []

    # define tracer callback
    def on_epoch_begin(epoch, logs):
        trained_epochs.append(epoch)

    tracker_cb = LambdaCallback(on_epoch_begin=on_epoch_begin)

    out = model.fit([input_a_np, input_b_np],
                    [output_a_np, output_b_np], epochs=5, batch_size=4,
                    initial_epoch=2, callbacks=[tracker_cb])
    assert trained_epochs == [2, 3, 4]

    # test starting from non-zero initial epoch for generator too
    trained_epochs = []

    def gen_data(batch_sz):
        while True:
            yield ([np.random.random((batch_sz, 3)),
                    np.random.random((batch_sz, 3))],
                   [np.random.random((batch_sz, 4)),
                    np.random.random((batch_sz, 3))])

    out = model.fit_generator(gen_data(4), steps_per_epoch=3, epochs=5,
                              initial_epoch=2, callbacks=[tracker_cb])
    assert trained_epochs == [2, 3, 4]

    # test with a custom metric function
    def mse(y_true, y_pred):
        return K.mean(K.pow(y_true - y_pred, 2))

    model.compile(optimizer, loss, metrics=[mse], sample_weight_mode=None)
    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np])
    out_len = 1 + 2 * (1 + 1)  # total loss + 2 outputs * (loss + metric)
    assert len(out) == out_len
    out = model.test_on_batch([input_a_np, input_b_np],
                              [output_a_np, output_b_np])
    assert len(out) == out_len

    input_a_np = np.random.random((10, 3))
    input_b_np = np.random.random((10, 3))
    output_a_np = np.random.random((10, 4))
    output_b_np = np.random.random((10, 3))
    out = model.fit([input_a_np, input_b_np],
                    [output_a_np, output_b_np], batch_size=4, epochs=1)
    out = model.evaluate([input_a_np, input_b_np],
                         [output_a_np, output_b_np], batch_size=4)
    out = model.predict([input_a_np, input_b_np], batch_size=4)

    # empty batch
    with pytest.raises(ValueError):
        def gen_data():
            while True:
                yield (np.asarray([]), np.asarray([]))
        out = model.evaluate_generator(gen_data(), steps=1)

    # x is not a list of numpy arrays.
    with pytest.raises(ValueError):
        out = model.predict([None])

    # x does not match _feed_input_names.
    with pytest.raises(ValueError):
        out = model.predict([input_a_np, None, input_b_np])
    with pytest.raises(ValueError):
        out = model.predict([None, input_a_np, input_b_np])

    # all input/output/weight arrays should have the same number of samples.
    with pytest.raises(ValueError):
        out = model.train_on_batch([input_a_np, input_b_np[:2]],
                                   [output_a_np, output_b_np],
                                   sample_weight=sample_weight)
    with pytest.raises(ValueError):
        out = model.train_on_batch([input_a_np, input_b_np],
                                   [output_a_np, output_b_np[:2]],
                                   sample_weight=sample_weight)
    with pytest.raises(ValueError):
        out = model.train_on_batch([input_a_np, input_b_np],
                                   [output_a_np, output_b_np],
                                   sample_weight=[sample_weight[1],
                                                  sample_weight[1][:2]])

    # `sample_weight` is neither a dict nor a list.
    with pytest.raises(TypeError):
        out = model.train_on_batch([input_a_np, input_b_np],
                                   [output_a_np, output_b_np],
                                   sample_weight=tuple(sample_weight))

    # `validation_data` is neither a tuple nor a triple.
    with pytest.raises(ValueError):
        out = model.fit([input_a_np, input_b_np],
                        [output_a_np, output_b_np],
                        epochs=1, batch_size=4,
                        validation_data=([input_a_np, input_b_np],))

    # `loss` does not match outputs.
    with pytest.raises(ValueError):
        model.compile(optimizer, loss=['mse', 'mae', 'mape'])

    # `loss_weights` does not match output_names.
    with pytest.raises(ValueError):
        model.compile(optimizer, loss='mse', loss_weights={'lstm': 0.5})

    # `loss_weights` does not match outputs.
    with pytest.raises(ValueError):
        model.compile(optimizer, loss='mse', loss_weights=[0.5])

    # `loss_weights` is invalid type.
    with pytest.raises(TypeError):
        model.compile(optimizer, loss='mse', loss_weights=(0.5, 0.5))

    # `sample_weight_mode` does not match output_names.
    with pytest.raises(ValueError):
        model.compile(optimizer, loss='mse',
                      sample_weight_mode={'lstm': 'temporal'})

    # `sample_weight_mode` does not match output_names.
    with pytest.raises(ValueError):
        model.compile(optimizer, loss='mse', sample_weight_mode=['temporal'])

    # `sample_weight_mode` matches output_names partially.
    with pytest.raises(ValueError):
        model.compile(optimizer, loss='mse',
                      sample_weight_mode={'dense_1': 'temporal'})

    # `loss` does not exist.
    with pytest.raises(ValueError):
        model.compile(optimizer, loss=[])

    model.compile(optimizer, loss=['mse', 'mae'])
    model.compile(optimizer, loss='mse',
                  loss_weights={'dense_1': 0.2, 'dropout': 0.8})
    model.compile(optimizer, loss='mse', loss_weights=[0.2, 0.8])

    # the rank of weight arrays should be 1.
    with pytest.raises(ValueError):
        out = model.train_on_batch(
            [input_a_np, input_b_np],
            [output_a_np, output_b_np],
            sample_weight=[None, np.random.random((10, 20, 30))])

    model.compile(optimizer, loss='mse',
                  sample_weight_mode={'dense_1': None, 'dropout': 'temporal'})
    model.compile(optimizer, loss='mse', sample_weight_mode=[None, 'temporal'])

    # the rank of output arrays should be at least 3D.
    with pytest.raises(ValueError):
        out = model.train_on_batch([input_a_np, input_b_np],
                                   [output_a_np, output_b_np],
                                   sample_weight=sample_weight)

    model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights,
                  sample_weight_mode=None)
    trained_epochs = []
    out = model.fit_generator(generator=RandomSequence(3),
                              steps_per_epoch=12, epochs=5, initial_epoch=0,
                              validation_data=RandomSequence(4),
                              validation_steps=12, callbacks=[tracker_cb])
    assert trained_epochs == [0, 1, 2, 3, 4]
def test_model_methods():
    a = Input(shape=(3,), name='input_a')
    b = Input(shape=(3,), name='input_b')
    a_2 = Dense(4, name='dense_1')(a)
    dp = Dropout(0.5, name='dropout')
    b_2 = dp(b)
    model = Model([a, b], [a_2, b_2])

    optimizer = 'rmsprop'
    loss = 'mse'
    loss_weights = [1., 0.5]

    input_a_np = np.random.random((10, 3))
    input_b_np = np.random.random((10, 3))
    output_a_np = np.random.random((10, 4))
    output_b_np = np.random.random((10, 3))

    # training/testing doesn't work before compiling.
    with pytest.raises(RuntimeError):
        model.train_on_batch([input_a_np, input_b_np],
                             [output_a_np, output_b_np])

    model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights,
                  sample_weight_mode=None)

    # test train_on_batch
    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np])
    out = model.train_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
                               [output_a_np, output_b_np])
    out = model.train_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
                               {'dense_1': output_a_np,
                                'dropout': output_b_np})

    # test fit
    out = model.fit([input_a_np, input_b_np],
                    [output_a_np, output_b_np], epochs=1, batch_size=4)
    out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
                    [output_a_np, output_b_np], epochs=1, batch_size=4)
    out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
                    {'dense_1': output_a_np, 'dropout': output_b_np},
                    epochs=1, batch_size=4)

    # test validation_split
    out = model.fit([input_a_np, input_b_np],
                    [output_a_np, output_b_np],
                    epochs=1, batch_size=4, validation_split=0.5)
    out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
                    [output_a_np, output_b_np],
                    epochs=1, batch_size=4, validation_split=0.5)

    # test validation data
    out = model.fit([input_a_np, input_b_np],
                    [output_a_np, output_b_np],
                    epochs=1, batch_size=4,
                    validation_data=([input_a_np, input_b_np],
                                     [output_a_np, output_b_np]))
    out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
                    [output_a_np, output_b_np],
                    epochs=1, batch_size=4, validation_split=0.5,
                    validation_data=({'input_a': input_a_np,
                                      'input_b': input_b_np},
                                     [output_a_np, output_b_np]))
    out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
                    {'dense_1': output_a_np, 'dropout': output_b_np},
                    epochs=1, batch_size=4, validation_split=0.5,
                    validation_data=({'input_a': input_a_np,
                                      'input_b': input_b_np},
                                     {'dense_1': output_a_np,
                                      'dropout': output_b_np}))

    # test_on_batch
    out = model.test_on_batch([input_a_np, input_b_np],
                              [output_a_np, output_b_np])
    out = model.test_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
                              [output_a_np, output_b_np])
    out = model.test_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
                              {'dense_1': output_a_np,
                               'dropout': output_b_np})

    # predict_on_batch
    out = model.predict_on_batch([input_a_np, input_b_np])
    out = model.predict_on_batch({'input_a': input_a_np,
                                  'input_b': input_b_np})

    # predict, evaluate
    input_a_np = np.random.random((10, 3))
    input_b_np = np.random.random((10, 3))
    output_a_np = np.random.random((10, 4))
    output_b_np = np.random.random((10, 3))
    out = model.evaluate([input_a_np, input_b_np],
                         [output_a_np, output_b_np], batch_size=4)
    out = model.predict([input_a_np, input_b_np], batch_size=4)

    # with sample_weight
    input_a_np = np.random.random((10, 3))
    input_b_np = np.random.random((10, 3))
    output_a_np = np.random.random((10, 4))
    output_b_np = np.random.random((10, 3))
    sample_weight = [None, np.random.random((10,))]
    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np],
                               sample_weight=sample_weight)
    out = model.test_on_batch([input_a_np, input_b_np],
                              [output_a_np, output_b_np],
                              sample_weight=sample_weight)

    # test accuracy metric
    model.compile(optimizer, loss, metrics=['acc'], sample_weight_mode=None)
    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np])
    assert len(out) == 5
    out = model.test_on_batch([input_a_np, input_b_np],
                              [output_a_np, output_b_np])
    assert len(out) == 5

    # this should also work
    model.compile(optimizer, loss, metrics={'dense_1': 'acc'},
                  sample_weight_mode=None)
    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np])
    assert len(out) == 4
    out = model.test_on_batch([input_a_np, input_b_np],
                              [output_a_np, output_b_np])
    assert len(out) == 4

    # and this as well
    model.compile(optimizer, loss, metrics={'dense_1': ['acc']},
                  sample_weight_mode=None)
    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np])
    assert len(out) == 4
    out = model.test_on_batch([input_a_np, input_b_np],
                              [output_a_np, output_b_np])
    assert len(out) == 4

    # test starting from non-zero initial epoch
    trained_epochs = []
    trained_batches = []

    # define tracer callback
    def on_epoch_begin(epoch, logs):
        trained_epochs.append(epoch)

    def on_batch_begin(batch, logs):
        trained_batches.append(batch)

    tracker_cb = LambdaCallback(on_epoch_begin=on_epoch_begin,
                                on_batch_begin=on_batch_begin)

    out = model.fit([input_a_np, input_b_np],
                    [output_a_np, output_b_np], epochs=5, batch_size=4,
                    initial_epoch=2, callbacks=[tracker_cb])
    assert trained_epochs == [2, 3, 4]

    # test starting from non-zero initial epoch for generator too
    trained_epochs = []

    def gen_data(batch_sz):
        while True:
            yield ([np.random.random((batch_sz, 3)),
                    np.random.random((batch_sz, 3))],
                   [np.random.random((batch_sz, 4)),
                    np.random.random((batch_sz, 3))])

    out = model.fit_generator(gen_data(4), steps_per_epoch=3, epochs=5,
                              initial_epoch=2, callbacks=[tracker_cb])
    assert trained_epochs == [2, 3, 4]

    # test with a custom metric function
    def mse(y_true, y_pred):
        return K.mean(K.pow(y_true - y_pred, 2))

    model.compile(optimizer, loss, metrics=[mse], sample_weight_mode=None)
    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np])
    out_len = 1 + 2 * (1 + 1)  # total loss + 2 outputs * (loss + metric)
    assert len(out) == out_len
    out = model.test_on_batch([input_a_np, input_b_np],
                              [output_a_np, output_b_np])
    assert len(out) == out_len

    input_a_np = np.random.random((10, 3))
    input_b_np = np.random.random((10, 3))
    output_a_np = np.random.random((10, 4))
    output_b_np = np.random.random((10, 3))
    out = model.fit([input_a_np, input_b_np],
                    [output_a_np, output_b_np], batch_size=4, epochs=1)
    out = model.evaluate([input_a_np, input_b_np],
                         [output_a_np, output_b_np], batch_size=4)
    out = model.predict([input_a_np, input_b_np], batch_size=4)

    # enable verbose for evaluate_generator
    out = model.evaluate_generator(gen_data(4), steps=3, verbose=1)

    # empty batch
    with pytest.raises(ValueError):
        def gen_data():
            while True:
                yield (np.asarray([]), np.asarray([]))
        out = model.evaluate_generator(gen_data(), steps=1)

    # x is not a list of numpy arrays.
    with pytest.raises(ValueError):
        out = model.predict([None])

    # x does not match _feed_input_names.
    with pytest.raises(ValueError):
        out = model.predict([input_a_np, None, input_b_np])
    with pytest.raises(ValueError):
        out = model.predict([None, input_a_np, input_b_np])

    # all input/output/weight arrays should have the same number of samples.
    with pytest.raises(ValueError):
        out = model.train_on_batch([input_a_np, input_b_np[:2]],
                                   [output_a_np, output_b_np],
                                   sample_weight=sample_weight)
    with pytest.raises(ValueError):
        out = model.train_on_batch([input_a_np, input_b_np],
                                   [output_a_np, output_b_np[:2]],
                                   sample_weight=sample_weight)
    with pytest.raises(ValueError):
        out = model.train_on_batch([input_a_np, input_b_np],
                                   [output_a_np, output_b_np],
                                   sample_weight=[sample_weight[1],
                                                  sample_weight[1][:2]])

    # `sample_weight` is neither a dict nor a list.
    with pytest.raises(TypeError):
        out = model.train_on_batch([input_a_np, input_b_np],
                                   [output_a_np, output_b_np],
                                   sample_weight=tuple(sample_weight))

    # `validation_data` is neither a tuple nor a triple.
    with pytest.raises(ValueError):
        out = model.fit([input_a_np, input_b_np],
                        [output_a_np, output_b_np],
                        epochs=1, batch_size=4,
                        validation_data=([input_a_np, input_b_np],))

    # `loss` does not match outputs.
    with pytest.raises(ValueError):
        model.compile(optimizer, loss=['mse', 'mae', 'mape'])

    # `loss_weights` does not match output_names.
    with pytest.raises(ValueError):
        model.compile(optimizer, loss='mse', loss_weights={'lstm': 0.5})

    # `loss_weights` does not match outputs.
    with pytest.raises(ValueError):
        model.compile(optimizer, loss='mse', loss_weights=[0.5])

    # `loss_weights` is invalid type.
    with pytest.raises(TypeError):
        model.compile(optimizer, loss='mse', loss_weights=(0.5, 0.5))

    # `sample_weight_mode` does not match output_names.
    with pytest.raises(ValueError):
        model.compile(optimizer, loss='mse',
                      sample_weight_mode={'lstm': 'temporal'})

    # `sample_weight_mode` does not match output_names.
    with pytest.raises(ValueError):
        model.compile(optimizer, loss='mse', sample_weight_mode=['temporal'])

    # `sample_weight_mode` matches output_names partially.
    with pytest.raises(ValueError):
        model.compile(optimizer, loss='mse',
                      sample_weight_mode={'dense_1': 'temporal'})

    # `loss` does not exist.
    with pytest.raises(ValueError):
        model.compile(optimizer, loss=[])

    model.compile(optimizer, loss=['mse', 'mae'])
    model.compile(optimizer, loss='mse',
                  loss_weights={'dense_1': 0.2, 'dropout': 0.8})
    model.compile(optimizer, loss='mse', loss_weights=[0.2, 0.8])

    # the rank of weight arrays should be 1.
    with pytest.raises(ValueError):
        out = model.train_on_batch(
            [input_a_np, input_b_np],
            [output_a_np, output_b_np],
            sample_weight=[None, np.random.random((10, 20, 30))])

    model.compile(optimizer, loss='mse',
                  sample_weight_mode={'dense_1': None, 'dropout': 'temporal'})
    model.compile(optimizer, loss='mse', sample_weight_mode=[None, 'temporal'])

    # the rank of output arrays should be at least 3D.
    with pytest.raises(ValueError):
        out = model.train_on_batch([input_a_np, input_b_np],
                                   [output_a_np, output_b_np],
                                   sample_weight=sample_weight)

    model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights,
                  sample_weight_mode=None)
    trained_epochs = []
    trained_batches = []
    out = model.fit_generator(generator=RandomSequence(3),
                              steps_per_epoch=3, epochs=5, initial_epoch=0,
                              validation_data=RandomSequence(4),
                              validation_steps=3, callbacks=[tracker_cb])
    assert trained_epochs == [0, 1, 2, 3, 4]
    assert trained_batches == list(range(3)) * 5

    # steps_per_epoch will be equal to len of sequence if it's unspecified
    trained_epochs = []
    trained_batches = []
    out = model.fit_generator(generator=RandomSequence(3),
                              epochs=5, initial_epoch=0,
                              validation_data=RandomSequence(4),
                              callbacks=[tracker_cb])
    assert trained_epochs == [0, 1, 2, 3, 4]
    assert trained_batches == list(range(12)) * 5

    # fit_generator will throw an exception if steps is unspecified
    # for a regular generator
    with pytest.raises(ValueError):
        def gen_data():
            while True:
                yield (np.asarray([]), np.asarray([]))
        out = model.fit_generator(generator=gen_data(), epochs=5,
                                  initial_epoch=0, validation_data=gen_data(),
                                  callbacks=[tracker_cb])

    # Check if generator is only accessed an expected number of times
    gen_counters = [0, 0]

    def gen_data(i):
        while True:
            gen_counters[i] += 1
            yield ([np.random.random((1, 3)), np.random.random((1, 3))],
                   [np.random.random((1, 4)), np.random.random((1, 3))])

    out = model.fit_generator(generator=gen_data(0), epochs=3,
                              steps_per_epoch=2, validation_data=gen_data(1),
                              validation_steps=1, max_queue_size=2, workers=2)

    # Need range check here as filling of the queue depends on sleep
    # in the enqueuers
    assert 6 <= gen_counters[0] <= 8
    # 12 = (epoch * workers * validation steps * max_queue_size)
    assert 3 <= gen_counters[1] <= 12

    gen_counters = [0]
    out = model.fit_generator(generator=RandomSequence(3), epochs=3,
                              validation_data=gen_data(0),
                              validation_steps=1, max_queue_size=2, workers=2)
    # 12 = (epoch * workers * validation steps * max_queue_size)
    # Need range check here as filling of the queue depends on sleep
    # in the enqueuers
    assert 3 <= gen_counters[0] <= 12

    # predict_generator output shape behavior should be consistent
    def expected_shape(batch_size, n_batches):
        return (batch_size * n_batches, 4), (batch_size * n_batches, 3)

    # Multiple outputs and one step.
    batch_size = 5
    sequence_length = 1
    shape_0, shape_1 = expected_shape(batch_size, sequence_length)
    out = model.predict_generator(
        RandomSequence(batch_size, sequence_length=sequence_length))
    assert np.shape(out[0]) == shape_0 and np.shape(out[1]) == shape_1

    # Multiple outputs and multiple steps.
    batch_size = 5
    sequence_length = 2
    shape_0, shape_1 = expected_shape(batch_size, sequence_length)
    out = model.predict_generator(
        RandomSequence(batch_size, sequence_length=sequence_length))
    assert np.shape(out[0]) == shape_0 and np.shape(out[1]) == shape_1

    # Create a model with a single output.
    single_output_model = Model([a, b], a_2)
    single_output_model.compile(optimizer, loss, metrics=[],
                                sample_weight_mode=None)

    # Single output and one step.
    batch_size = 5
    sequence_length = 1
    shape_0, _ = expected_shape(batch_size, sequence_length)
    out = single_output_model.predict_generator(
        RandomSequence(batch_size, sequence_length=sequence_length))
    assert np.shape(out) == shape_0

    # Single output and multiple steps.
    batch_size = 5
    sequence_length = 2
    shape_0, _ = expected_shape(batch_size, sequence_length)
    out = single_output_model.predict_generator(
        RandomSequence(batch_size, sequence_length=sequence_length))
    assert np.shape(out) == shape_0
def test_model_with_external_loss():
    # None loss, only regularization loss.
    a = Input(shape=(3,), name='input_a')
    a_2 = Dense(4, name='dense_1',
                kernel_regularizer='l1', bias_regularizer='l2')(a)
    dp = Dropout(0.5, name='dropout')
    a_3 = dp(a_2)
    model = Model(a, [a_2, a_3])

    optimizer = 'rmsprop'
    loss = None
    model.compile(optimizer, loss, metrics=['mae'])

    input_a_np = np.random.random((10, 3))

    # test train_on_batch
    out = model.train_on_batch(input_a_np, None)
    out = model.test_on_batch(input_a_np, None)
    # fit
    out = model.fit(input_a_np, None)
    # evaluate
    out = model.evaluate(input_a_np, None)

    # No dropout, external loss.
    a = Input(shape=(3,), name='input_a')
    a_2 = Dense(4, name='dense_1')(a)
    a_3 = Dense(4, name='dense_2')(a)
    model = Model(a, [a_2, a_3])
    model.add_loss(K.mean(a_3 + a_2))

    optimizer = 'rmsprop'
    loss = None
    model.compile(optimizer, loss, metrics=['mae'])

    # test train_on_batch
    out = model.train_on_batch(input_a_np, None)
    out = model.test_on_batch(input_a_np, None)
    # fit
    out = model.fit(input_a_np, None)
    # evaluate
    out = model.evaluate(input_a_np, None)

    # Test fit with no external data at all.
    if K.backend() == 'tensorflow':
        import tensorflow as tf

        a = Input(tensor=tf.Variable(input_a_np, dtype=tf.float32))
        a_2 = Dense(4, name='dense_1')(a)
        a_2 = Dropout(0.5, name='dropout')(a_2)
        model = Model(a, a_2)
        model.add_loss(K.mean(a_2))

        model.compile(optimizer='rmsprop', loss=None,
                      metrics=['mean_squared_error'])

        # test train_on_batch
        out = model.train_on_batch(None, None)
        out = model.test_on_batch(None, None)
        out = model.predict_on_batch(None)

        # test fit
        with pytest.raises(ValueError):
            out = model.fit(None, None, epochs=1, batch_size=10)
        out = model.fit(None, None, epochs=1, steps_per_epoch=1)

        # define a generator to produce x=None and y=None
        def data_tensors_generator():
            while True:
                yield (None, None)

        generator = data_tensors_generator()

        # test fit_generator for framework-native data tensors
        out = model.fit_generator(generator, epochs=1, steps_per_epoch=3)

        # test evaluate_generator for framework-native data tensors
        out = model.evaluate_generator(generator, steps=3)

        # test fit with validation data
        with pytest.raises(ValueError):
            out = model.fit(None, None, epochs=1,
                            steps_per_epoch=None,
                            validation_steps=2)
        out = model.fit(None, None, epochs=1,
                        steps_per_epoch=2,
                        validation_steps=2)

        # test evaluate
        with pytest.raises(ValueError):
            out = model.evaluate(None, None, batch_size=10)
        out = model.evaluate(None, None, steps=3)

        # test predict
        with pytest.raises(ValueError):
            out = model.predict(None, batch_size=10)
        out = model.predict(None, steps=3)
        assert out.shape == (10 * 3, 4)

        # Test multi-output model without external data.
        a = Input(tensor=tf.Variable(input_a_np, dtype=tf.float32))
        a_1 = Dense(4, name='dense_1')(a)
        a_2 = Dropout(0.5, name='dropout')(a_1)
        model = Model(a, [a_1, a_2])
        model.add_loss(K.mean(a_2))

        model.compile(optimizer='rmsprop', loss=None,
                      metrics=['mean_squared_error'])

        # test train_on_batch
        out = model.train_on_batch(None, None)
        out = model.test_on_batch(None, None)
        out = model.predict_on_batch(None)

        # test fit
        with pytest.raises(ValueError):
            out = model.fit(None, None, epochs=1, batch_size=10)
        out = model.fit(None, None, epochs=1, steps_per_epoch=1)

        # test fit with validation data
        with pytest.raises(ValueError):
            out = model.fit(None, None, epochs=1,
                            steps_per_epoch=None,
                            validation_steps=2)
        out = model.fit(None, None, epochs=1,
                        steps_per_epoch=2,
                        validation_steps=2)

        # test evaluate
        with pytest.raises(ValueError):
            out = model.evaluate(None, None, batch_size=10)
        out = model.evaluate(None, None, steps=3)

        # test predict
        with pytest.raises(ValueError):
            out = model.predict(None, batch_size=10)
        out = model.predict(None, steps=3)
        assert len(out) == 2
        assert out[0].shape == (10 * 3, 4)
        assert out[1].shape == (10 * 3, 4)
class ResiCNN:  # My JackNet with residual blocks
    cnn_filter_num = 64
    cnn_kernel_size = 3

    def __init__(self, channels=3):
        self.model = None
        self.optimizer = None
        self.channels = channels

    def build(self):  # build model
        image_in = Input((None, None, self.channels))

        conv = Conv2D(filters=self.cnn_filter_num,
                      kernel_size=self.cnn_kernel_size,
                      strides=(1, 1), padding='same',
                      data_format='channels_last')(image_in)
        conv = Activation('relu')(conv)

        x = conv
        for _ in range(8):
            x = self._build_residual_block(x)

        conv_out = Conv2D(filters=self.channels,
                          kernel_size=self.cnn_kernel_size,
                          strides=(1, 1), padding='same',
                          data_format='channels_last')(x)
        output = Add()([image_in, conv_out])
        self.model = Model(image_in, output, name='model')

    def _build_residual_block(self, x):  # build residual block
        x_in = x
        x = Conv2D(filters=self.cnn_filter_num,
                   kernel_size=self.cnn_kernel_size,
                   strides=(1, 1), padding='same',
                   data_format='channels_last')(x)
        x = BatchNormalization(axis=-1)(x)
        x = Activation('relu')(x)
        x = Conv2D(filters=self.cnn_filter_num,
                   kernel_size=self.cnn_kernel_size,
                   strides=(1, 1), padding='same',
                   data_format='channels_last')(x)
        x = BatchNormalization(axis=-1)(x)
        x = Add()([x_in, x])
        x = Activation('relu')(x)
        return x

    def predict(self, x):  # denoise the input x
        if x.ndim == 3:
            x = x.reshape(1, x.shape[0], x.shape[1], self.channels)
        return self.model.predict_on_batch(x)

    def load(self, config_path, model_path):  # load model
        print('restore model...')
        if os.path.exists(config_path) and os.path.exists(model_path):
            with open(config_path, 'r') as fp:
                self.model = Model.from_config(json.load(fp))
            self.model.load_weights(model_path)
            return True
        return False

    def save(self, config_path, model_path):  # save model
        with open(config_path, 'w') as fp:
            json.dump(self.model.get_config(), fp)
        self.model.save_weights(model_path)

    def compile(self):  # choose the Adam optimizer and set the learning rate
        self.optimizer = Adam(lr=1e-2)
        self.model.compile(optimizer=self.optimizer, loss=self.loss)

    def train_generator(self, data, epochs=1, steps_per_epoch=None,
                        callbacks=None):
        self.model.fit_generator(iter(data), epochs=epochs,
                                 steps_per_epoch=steps_per_epoch,
                                 callbacks=callbacks)

    def train(self, data, epochs=1, callbacks=None):
        self.model.fit(x=data[0], y=data[1], epochs=epochs, batch_size=8,
                       callbacks=callbacks)

    @staticmethod
    def loss(y_true, y_pred):  # loss function: half the sum of squared errors
        return 0.5 * K.sum(K.square(y_pred - y_true), axis=-1)
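
# A minimal usage sketch of ResiCNN; the array shapes and file names below
# are illustrative assumptions, not part of the original:
import numpy as np

cnn = ResiCNN(channels=3)
cnn.build()
cnn.compile()

noisy = np.random.random((16, 64, 64, 3))  # stand-in noisy inputs
clean = np.random.random((16, 64, 64, 3))  # stand-in clean targets
cnn.train((noisy, clean), epochs=1)

denoised = cnn.predict(noisy[0])  # a single image is reshaped to a batch of 1
cnn.save('model.json', 'model.h5')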
                     W_regularizer=l2(.0005),
                     activation='softmax')(x)
model = Model(base_model.input, predictions)
# model = load_model(filepath='./model4.29-0.69.hdf5')

opt = SGD(lr=.01, momentum=.9)
model.compile(loss='categorical_crossentropy', optimizer=opt,
              metrics=['accuracy'])

model_json = model.to_json()
with open("{}/{}.def".format(job_path, model_name), "w") as json_file:
    json_file.write(model_json)

print(train_generator.class_indices)
with open("{}/{}.cls".format(job_path, model_name), 'w') as f:
    f.write(json.dumps(train_generator.class_indices))

# fine-tune the model
model.fit_generator(
    train_generator,
    samples_per_epoch=nb_train_samples,
    epochs=epochs,
    validation_data=validation_generator,
    nb_val_samples=nb_validation_samples,
    callbacks=[lr_scheduler, csv_logger, checkpointer, tensorboard])

model.save_weights("{}/final_{}_{}.h5".format(job_path, job_name, model_name))
def test_model_methods():
    a = Input(shape=(3,), name='input_a')
    b = Input(shape=(3,), name='input_b')

    a_2 = Dense(4, name='dense_1')(a)
    dp = Dropout(0.5, name='dropout')
    b_2 = dp(b)

    model = Model([a, b], [a_2, b_2])

    optimizer = 'rmsprop'
    loss = 'mse'
    loss_weights = [1., 0.5]
    model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights,
                  sample_weight_mode=None)

    input_a_np = np.random.random((10, 3))
    input_b_np = np.random.random((10, 3))
    output_a_np = np.random.random((10, 4))
    output_b_np = np.random.random((10, 3))

    # test train_on_batch
    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np])
    out = model.train_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
                               [output_a_np, output_b_np])
    out = model.train_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
                               {'dense_1': output_a_np, 'dropout': output_b_np})

    # test fit
    out = model.fit([input_a_np, input_b_np],
                    [output_a_np, output_b_np], nb_epoch=1, batch_size=4)
    out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
                    [output_a_np, output_b_np], nb_epoch=1, batch_size=4)
    out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
                    {'dense_1': output_a_np, 'dropout': output_b_np},
                    nb_epoch=1, batch_size=4)

    # test validation_split
    out = model.fit([input_a_np, input_b_np],
                    [output_a_np, output_b_np],
                    nb_epoch=1, batch_size=4, validation_split=0.5)
    out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
                    [output_a_np, output_b_np],
                    nb_epoch=1, batch_size=4, validation_split=0.5)
    out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
                    {'dense_1': output_a_np, 'dropout': output_b_np},
                    nb_epoch=1, batch_size=4, validation_split=0.5)

    # test validation data
    out = model.fit([input_a_np, input_b_np],
                    [output_a_np, output_b_np],
                    nb_epoch=1, batch_size=4,
                    validation_data=([input_a_np, input_b_np],
                                     [output_a_np, output_b_np]))
    out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
                    [output_a_np, output_b_np],
                    nb_epoch=1, batch_size=4, validation_split=0.5,
                    validation_data=(
                        {'input_a': input_a_np, 'input_b': input_b_np},
                        [output_a_np, output_b_np]))
    out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
                    {'dense_1': output_a_np, 'dropout': output_b_np},
                    nb_epoch=1, batch_size=4, validation_split=0.5,
                    validation_data=(
                        {'input_a': input_a_np, 'input_b': input_b_np},
                        {'dense_1': output_a_np, 'dropout': output_b_np}))

    # test_on_batch
    out = model.test_on_batch([input_a_np, input_b_np],
                              [output_a_np, output_b_np])
    out = model.test_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
                              [output_a_np, output_b_np])
    out = model.test_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
                              {'dense_1': output_a_np, 'dropout': output_b_np})

    # predict_on_batch
    out = model.predict_on_batch([input_a_np, input_b_np])
    out = model.predict_on_batch({'input_a': input_a_np,
                                  'input_b': input_b_np})

    # predict, evaluate
    input_a_np = np.random.random((10, 3))
    input_b_np = np.random.random((10, 3))
    output_a_np = np.random.random((10, 4))
    output_b_np = np.random.random((10, 3))

    out = model.evaluate([input_a_np, input_b_np],
                         [output_a_np, output_b_np], batch_size=4)
    out = model.predict([input_a_np, input_b_np], batch_size=4)

    # with sample_weight
    input_a_np = np.random.random((10, 3))
    input_b_np = np.random.random((10, 3))
    output_a_np = np.random.random((10, 4))
    output_b_np = np.random.random((10, 3))

    sample_weight = [None, np.random.random((10,))]
    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np],
                               sample_weight=sample_weight)
    out = model.test_on_batch([input_a_np, input_b_np],
                              [output_a_np, output_b_np],
                              sample_weight=sample_weight)

    # test accuracy metric
    model.compile(optimizer, loss, metrics=['acc'], sample_weight_mode=None)

    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np])
    assert len(out) == 5
    out = model.test_on_batch([input_a_np, input_b_np],
                              [output_a_np, output_b_np])
    assert len(out) == 5

    # this should also work
    model.compile(optimizer, loss, metrics={'dense_1': 'acc'},
                  sample_weight_mode=None)

    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np])
    assert len(out) == 4
    out = model.test_on_batch([input_a_np, input_b_np],
                              [output_a_np, output_b_np])
    assert len(out) == 4

    # and this as well
    model.compile(optimizer, loss, metrics={'dense_1': ['acc']},
                  sample_weight_mode=None)

    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np])
    assert len(out) == 4
    out = model.test_on_batch([input_a_np, input_b_np],
                              [output_a_np, output_b_np])
    assert len(out) == 4

    # test starting from non-zero initial epoch
    trained_epochs = []

    def on_epoch_begin(epoch, logs):
        trained_epochs.append(epoch)

    tracker_cb = LambdaCallback(on_epoch_begin=on_epoch_begin)
    out = model.fit([input_a_np, input_b_np],
                    [output_a_np, output_b_np], nb_epoch=5, batch_size=4,
                    initial_epoch=2, callbacks=[tracker_cb])
    assert trained_epochs == [2, 3, 4]

    # test starting from non-zero initial epoch for generator too
    trained_epochs = []

    def gen_data(batch_sz):
        while True:
            yield ([np.random.random((batch_sz, 3)),
                    np.random.random((batch_sz, 3))],
                   [np.random.random((batch_sz, 4)),
                    np.random.random((batch_sz, 3))])

    out = model.fit_generator(gen_data(4), samples_per_epoch=10, nb_epoch=5,
                              initial_epoch=2, callbacks=[tracker_cb])
    assert trained_epochs == [2, 3, 4]

    # test with a custom metric function
    mse = lambda y_true, y_pred: K.mean(K.pow(y_true - y_pred, 2))

    def mse_powers(y_true, y_pred):
        m = mse(y_true, y_pred)
        return {'mse_squared': K.pow(m, 2), 'mse_cubed': K.pow(m, 3)}

    model.compile(optimizer, loss, metrics=[mse, mse_powers],
                  sample_weight_mode=None)

    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np])
    out_len = 1 + 2 * 4  # total loss; per output: loss + 3 metrics
    assert len(out) == out_len
    out = model.test_on_batch([input_a_np, input_b_np],
                              [output_a_np, output_b_np])
    assert len(out) == out_len

    input_a_np = np.random.random((10, 3))
    input_b_np = np.random.random((10, 3))
    output_a_np = np.random.random((10, 4))
    output_b_np = np.random.random((10, 3))

    out = model.fit([input_a_np, input_b_np],
                    [output_a_np, output_b_np], batch_size=4, nb_epoch=1)
    out = model.evaluate([input_a_np, input_b_np],
                         [output_a_np, output_b_np], batch_size=4)
    out = model.predict([input_a_np, input_b_np], batch_size=4)
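The length asserts above follow from how Keras orders the returned scalars: the total loss first, then one loss per output, then the metrics. A quick way to inspect the correspondence on a compiled model (metrics_names is standard Keras API; the exact names below are indicative):

model.compile(optimizer, loss, metrics=['acc'])
print(model.metrics_names)
# e.g. ['loss', 'dense_1_loss', 'dropout_loss', 'dense_1_acc', 'dropout_acc']
# train_on_batch/test_on_batch return one scalar per name, hence len(out) == 5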
def test_model_with_external_loss():
    # None loss, only regularization loss.
    a = Input(shape=(3,), name='input_a')
    a_2 = Dense(4, name='dense_1',
                kernel_regularizer='l1',
                bias_regularizer='l2')(a)
    dp = Dropout(0.5, name='dropout')
    a_3 = dp(a_2)

    model = Model(a, [a_2, a_3])

    optimizer = 'rmsprop'
    loss = None
    model.compile(optimizer, loss, metrics=['mae'])

    input_a_np = np.random.random((10, 3))

    # test train_on_batch
    out = model.train_on_batch(input_a_np, None)
    out = model.test_on_batch(input_a_np, None)
    # fit
    out = model.fit(input_a_np, None)
    # evaluate
    out = model.evaluate(input_a_np, None)

    # No dropout, external loss.
    a = Input(shape=(3,), name='input_a')
    a_2 = Dense(4, name='dense_1')(a)
    a_3 = Dense(4, name='dense_2')(a)

    model = Model(a, [a_2, a_3])
    model.add_loss(K.mean(a_3 + a_2))

    optimizer = 'rmsprop'
    loss = None
    model.compile(optimizer, loss, metrics=['mae'])

    # test train_on_batch
    out = model.train_on_batch(input_a_np, None)
    out = model.test_on_batch(input_a_np, None)
    # fit
    out = model.fit(input_a_np, None)
    # evaluate
    out = model.evaluate(input_a_np, None)

    # Test fit with no external data at all.
    if K.backend() == 'tensorflow':
        import tensorflow as tf

        a = Input(tensor=tf.Variable(input_a_np, dtype=tf.float32))
        a_2 = Dense(4, name='dense_1')(a)
        a_2 = Dropout(0.5, name='dropout')(a_2)
        model = Model(a, a_2)
        model.add_loss(K.mean(a_2))

        model.compile(optimizer='rmsprop', loss=None,
                      metrics=['mean_squared_error'])

        # test train_on_batch
        out = model.train_on_batch(None, None)
        out = model.test_on_batch(None, None)
        out = model.predict_on_batch(None)

        # test fit
        with pytest.raises(ValueError):
            out = model.fit(None, None, epochs=1, batch_size=10)
        out = model.fit(None, None, epochs=1, steps_per_epoch=1)

        # define a generator to produce x=None and y=None
        def data_tensors_generator():
            while True:
                yield (None, None)

        generator = data_tensors_generator()

        # test fit_generator for framework-native data tensors
        out = model.fit_generator(generator, epochs=1, steps_per_epoch=3)

        # test evaluate_generator for framework-native data tensors
        out = model.evaluate_generator(generator, steps=3)

        # test fit with validation data
        with pytest.raises(ValueError):
            out = model.fit(None, None, epochs=1,
                            steps_per_epoch=None,
                            validation_steps=2)
        out = model.fit(None, None, epochs=1,
                        steps_per_epoch=2,
                        validation_steps=2)

        # test evaluate
        with pytest.raises(ValueError):
            out = model.evaluate(None, None, batch_size=10)
        out = model.evaluate(None, None, steps=3)

        # test predict
        with pytest.raises(ValueError):
            out = model.predict(None, batch_size=10)
        out = model.predict(None, steps=3)
        assert out.shape == (10 * 3, 4)

        # Test multi-output model without external data.
        a = Input(tensor=tf.Variable(input_a_np, dtype=tf.float32))
        a_1 = Dense(4, name='dense_1')(a)
        a_2 = Dropout(0.5, name='dropout')(a_1)
        model = Model(a, [a_1, a_2])
        model.add_loss(K.mean(a_2))

        model.compile(optimizer='rmsprop', loss=None,
                      metrics=['mean_squared_error'])

        # test train_on_batch
        out = model.train_on_batch(None, None)
        out = model.test_on_batch(None, None)
        out = model.predict_on_batch(None)

        # test fit
        with pytest.raises(ValueError):
            out = model.fit(None, None, epochs=1, batch_size=10)
        out = model.fit(None, None, epochs=1, steps_per_epoch=1)

        # test fit with validation data
        with pytest.raises(ValueError):
            out = model.fit(None, None, epochs=1,
                            steps_per_epoch=None,
                            validation_steps=2)
        out = model.fit(None, None, epochs=1,
                        steps_per_epoch=2,
                        validation_steps=2)

        # test evaluate
        with pytest.raises(ValueError):
            out = model.evaluate(None, None, batch_size=10)
        out = model.evaluate(None, None, steps=3)

        # test predict
        with pytest.raises(ValueError):
            out = model.predict(None, batch_size=10)
        out = model.predict(None, steps=3)
        assert len(out) == 2
        assert out[0].shape == (10 * 3, 4)
        assert out[1].shape == (10 * 3, 4)
class AE(Model):
    """
    Autoencoder.

    This is a simple autoencoder consisting of an encoder and a decoder.

    You can use the class like this:
    >>> encoder = ...
    >>> decoder = ...
    >>> ae = AE(encoder=encoder, decoder=decoder)
    >>> ae.compile(...)
    >>> ae.fit(...)
    """

    def __init__(self, encoder=None, decoder=None, autoencoder=None):
        super(AE, self).__init__()

        # For calling this as a super-constructor.
        parameters = [encoder, decoder]
        if all(v is None for v in parameters):
            return

        # From loading.
        if encoder is not None and decoder is not None \
                and autoencoder is not None:
            self.encoder = encoder
            self.decoder = decoder
            self.autoencoder = autoencoder
            return

        # Check preconditions.
        assert len(encoder.outputs) == 1
        assert len(decoder.inputs) == 1
        assert encoder.outputs[0].shape[1:] == decoder.inputs[0].shape[1:], \
            str(encoder.outputs[0].shape) + " " + str(decoder.inputs[0].shape)
        self.latent_dim = encoder.outputs[0].shape[1]

        self.encoder = encoder
        self.decoder = decoder

        # Creating the AE.
        inputs = self.encoder.inputs[0]
        outputs = self.decoder(self.encoder(inputs))
        self.autoencoder = Model(inputs, outputs, name='ae')

    def compile(self, optimizer, loss=None, metrics=None, loss_weights=None,
                sample_weight_mode=None, weighted_metrics=None,
                target_tensors=None, **kwargs):
        """
        Compiles the model.

        This is the same as compilation in Keras.
        """
        assert "reconstruction_loss" not in kwargs, \
            "Not expected to use reconstruction_loss in AE."

        self.autoencoder.compile(optimizer, loss, metrics, loss_weights,
                                 sample_weight_mode, weighted_metrics,
                                 target_tensors, **kwargs)

    def fit(self, x=None, y=None, batch_size=None, epochs=1, verbose=1,
            callbacks=None, validation_split=0., validation_data=None,
            shuffle=True, class_weight=None, sample_weight=None,
            initial_epoch=0, steps_per_epoch=None, validation_steps=None,
            **kwargs):
        """
        Trains the autoencoder.
        """
        return self.autoencoder.fit(x, y, batch_size, epochs, verbose,
                                    callbacks, validation_split,
                                    validation_data, shuffle, class_weight,
                                    sample_weight, initial_epoch,
                                    steps_per_epoch, validation_steps,
                                    **kwargs)

    def fit_generator(self, generator, steps_per_epoch=None, epochs=1,
                      verbose=1, callbacks=None, validation_data=None,
                      validation_steps=None, class_weight=None,
                      max_queue_size=10, workers=1, use_multiprocessing=False,
                      shuffle=True, initial_epoch=0):
        """
        Trains the autoencoder with a generator.
        """
        return self.autoencoder.fit_generator(
            generator, steps_per_epoch, epochs,
            verbose=verbose,
            callbacks=callbacks,
            validation_data=validation_data,
            validation_steps=validation_steps,
            class_weight=class_weight,
            max_queue_size=max_queue_size,
            workers=workers,
            use_multiprocessing=use_multiprocessing,
            shuffle=shuffle,
            initial_epoch=initial_epoch)

    def evaluate(self, x=None, y=None, batch_size=None, verbose=1,
                 sample_weight=None, steps=None):
        """
        Evaluates the autoencoder.
        """
        return self.autoencoder.evaluate(x, y, batch_size, verbose,
                                         sample_weight, steps=steps)

    def predict(self, x, batch_size=None, verbose=0, steps=None):
        """
        Does a prediction. This is the same as
        :func:`~ngdlm.models.AE.predict_reconstruct_from_samples`.
        """
        return self.predict_reconstruct_from_samples(x, batch_size, verbose,
                                                     steps)

    def predict_reconstruct_from_samples(self, x, batch_size=None, verbose=0,
                                         steps=None):
        """
        Reconstructs samples.

        Samples are first mapped to latent space using the encoder.
        The resulting latent vectors are then mapped to reconstruction space
        via the decoder.
        """
        return self.autoencoder.predict(x, batch_size, verbose, steps)

    def predict_embed_samples_into_latent(self, x, batch_size=None, verbose=0,
                                          steps=None):
        """
        Embeds samples into latent space using the encoder.
        """
        return self.encoder.predict(x, batch_size, verbose, steps)

    def predict_reconstruct_from_latent(self, x, batch_size=None, verbose=0,
                                        steps=None):
        """
        Maps latent vectors to reconstruction space using the decoder.
        """
        return self.decoder.predict(x, batch_size, verbose, steps)

    def summary(self):
        """
        Provides a summary.
        """
        print("Encoder:")
        self.encoder.summary()
        print("Decoder:")
        self.decoder.summary()
        print("Autoencoder:")
        self.autoencoder.summary()

    def save(self, path):
        """
        Saves the autoencoder.

        This includes the whole autoencoder plus the encoder and the decoder.
        The encoder and decoder use the path plus a respective annotation.

        This code

        >>> ae.save("myae.h5")

        will create the files *myae.h5*, *myae-encoder.h5*, and
        *myae-decoder.h5*.
        """
        self.autoencoder.save(path)
        self.encoder.save(append_to_filepath(path, "-encoder"))
        self.decoder.save(append_to_filepath(path, "-decoder"))
def test_model_methods():
    a = Input(shape=(3,), name='input_a')
    b = Input(shape=(3,), name='input_b')

    a_2 = Dense(4, name='dense_1')(a)
    dp = Dropout(0.5, name='dropout')
    b_2 = dp(b)

    model = Model([a, b], [a_2, b_2])

    optimizer = 'rmsprop'
    loss = 'mse'
    loss_weights = [1., 0.5]

    input_a_np = np.random.random((10, 3))
    input_b_np = np.random.random((10, 3))
    output_a_np = np.random.random((10, 4))
    output_b_np = np.random.random((10, 3))

    # training/testing doesn't work before compiling.
    with pytest.raises(RuntimeError):
        model.train_on_batch([input_a_np, input_b_np],
                             [output_a_np, output_b_np])

    model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights,
                  sample_weight_mode=None)

    # test train_on_batch
    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np])
    out = model.train_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
                               [output_a_np, output_b_np])
    out = model.train_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
                               {'dense_1': output_a_np, 'dropout': output_b_np})

    # test fit
    out = model.fit([input_a_np, input_b_np],
                    [output_a_np, output_b_np], epochs=1, batch_size=4)
    out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
                    [output_a_np, output_b_np], epochs=1, batch_size=4)
    out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
                    {'dense_1': output_a_np, 'dropout': output_b_np},
                    epochs=1, batch_size=4)

    # test validation_split
    out = model.fit([input_a_np, input_b_np],
                    [output_a_np, output_b_np],
                    epochs=1, batch_size=4, validation_split=0.5)
    out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
                    [output_a_np, output_b_np],
                    epochs=1, batch_size=4, validation_split=0.5)
    out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
                    {'dense_1': output_a_np, 'dropout': output_b_np},
                    epochs=1, batch_size=4, validation_split=0.5)

    # test validation data
    out = model.fit([input_a_np, input_b_np],
                    [output_a_np, output_b_np],
                    epochs=1, batch_size=4,
                    validation_data=([input_a_np, input_b_np],
                                     [output_a_np, output_b_np]))
    out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
                    [output_a_np, output_b_np],
                    epochs=1, batch_size=4, validation_split=0.5,
                    validation_data=(
                        {'input_a': input_a_np, 'input_b': input_b_np},
                        [output_a_np, output_b_np]))
    out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
                    {'dense_1': output_a_np, 'dropout': output_b_np},
                    epochs=1, batch_size=4, validation_split=0.5,
                    validation_data=(
                        {'input_a': input_a_np, 'input_b': input_b_np},
                        {'dense_1': output_a_np, 'dropout': output_b_np}))

    # test_on_batch
    out = model.test_on_batch([input_a_np, input_b_np],
                              [output_a_np, output_b_np])
    out = model.test_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
                              [output_a_np, output_b_np])
    out = model.test_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
                              {'dense_1': output_a_np, 'dropout': output_b_np})

    # predict_on_batch
    out = model.predict_on_batch([input_a_np, input_b_np])
    out = model.predict_on_batch({'input_a': input_a_np,
                                  'input_b': input_b_np})

    # predict, evaluate
    input_a_np = np.random.random((10, 3))
    input_b_np = np.random.random((10, 3))
    output_a_np = np.random.random((10, 4))
    output_b_np = np.random.random((10, 3))

    out = model.evaluate([input_a_np, input_b_np],
                         [output_a_np, output_b_np], batch_size=4)
    out = model.predict([input_a_np, input_b_np], batch_size=4)

    # with sample_weight
    input_a_np = np.random.random((10, 3))
    input_b_np = np.random.random((10, 3))
    output_a_np = np.random.random((10, 4))
    output_b_np = np.random.random((10, 3))

    sample_weight = [None, np.random.random((10,))]
    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np],
                               sample_weight=sample_weight)
    out = model.test_on_batch([input_a_np, input_b_np],
                              [output_a_np, output_b_np],
                              sample_weight=sample_weight)

    # test accuracy metric
    model.compile(optimizer, loss, metrics=['acc'], sample_weight_mode=None)

    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np])
    assert len(out) == 5
    out = model.test_on_batch([input_a_np, input_b_np],
                              [output_a_np, output_b_np])
    assert len(out) == 5

    # this should also work
    model.compile(optimizer, loss, metrics={'dense_1': 'acc'},
                  sample_weight_mode=None)

    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np])
    assert len(out) == 4
    out = model.test_on_batch([input_a_np, input_b_np],
                              [output_a_np, output_b_np])
    assert len(out) == 4

    # and this as well
    model.compile(optimizer, loss, metrics={'dense_1': ['acc']},
                  sample_weight_mode=None)

    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np])
    assert len(out) == 4
    out = model.test_on_batch([input_a_np, input_b_np],
                              [output_a_np, output_b_np])
    assert len(out) == 4

    # test starting from non-zero initial epoch
    trained_epochs = []

    # define tracer callback
    def on_epoch_begin(epoch, logs):
        trained_epochs.append(epoch)

    tracker_cb = LambdaCallback(on_epoch_begin=on_epoch_begin)

    out = model.fit([input_a_np, input_b_np],
                    [output_a_np, output_b_np], epochs=5, batch_size=4,
                    initial_epoch=2, callbacks=[tracker_cb])
    assert trained_epochs == [2, 3, 4]

    # test starting from non-zero initial epoch for generator too
    trained_epochs = []

    def gen_data(batch_sz):
        while True:
            yield ([np.random.random((batch_sz, 3)),
                    np.random.random((batch_sz, 3))],
                   [np.random.random((batch_sz, 4)),
                    np.random.random((batch_sz, 3))])

    out = model.fit_generator(gen_data(4), steps_per_epoch=3, epochs=5,
                              initial_epoch=2, callbacks=[tracker_cb])
    assert trained_epochs == [2, 3, 4]

    # test with a custom metric function
    def mse(y_true, y_pred):
        return K.mean(K.pow(y_true - y_pred, 2))

    model.compile(optimizer, loss, metrics=[mse], sample_weight_mode=None)

    out = model.train_on_batch([input_a_np, input_b_np],
                               [output_a_np, output_b_np])
    out_len = 1 + 2 * (1 + 1)  # total loss + 2 outputs * (loss + metric)
    assert len(out) == out_len
    out = model.test_on_batch([input_a_np, input_b_np],
                              [output_a_np, output_b_np])
    assert len(out) == out_len

    input_a_np = np.random.random((10, 3))
    input_b_np = np.random.random((10, 3))
    output_a_np = np.random.random((10, 4))
    output_b_np = np.random.random((10, 3))

    out = model.fit([input_a_np, input_b_np],
                    [output_a_np, output_b_np], batch_size=4, epochs=1)
    out = model.evaluate([input_a_np, input_b_np],
                         [output_a_np, output_b_np], batch_size=4)
    out = model.predict([input_a_np, input_b_np], batch_size=4)

    # x is not a list of numpy arrays.
    with pytest.raises(ValueError):
        out = model.predict([None])

    # x does not match _feed_input_names.
    with pytest.raises(ValueError):
        out = model.predict([input_a_np, None, input_b_np])
    with pytest.raises(ValueError):
        out = model.predict([None, input_a_np, input_b_np])

    # all input/output/weight arrays should have the same number of samples.
    with pytest.raises(ValueError):
        out = model.train_on_batch([input_a_np, input_b_np[:2]],
                                   [output_a_np, output_b_np],
                                   sample_weight=sample_weight)
    with pytest.raises(ValueError):
        out = model.train_on_batch([input_a_np, input_b_np],
                                   [output_a_np, output_b_np[:2]],
                                   sample_weight=sample_weight)
    with pytest.raises(ValueError):
        out = model.train_on_batch([input_a_np, input_b_np],
                                   [output_a_np, output_b_np],
                                   sample_weight=[sample_weight[1],
                                                  sample_weight[1][:2]])

    # `sample_weight` is neither a dict nor a list.
    with pytest.raises(TypeError):
        out = model.train_on_batch([input_a_np, input_b_np],
                                   [output_a_np, output_b_np],
                                   sample_weight=tuple(sample_weight))

    # `validation_data` is neither a tuple nor a triple.
    with pytest.raises(ValueError):
        out = model.fit([input_a_np, input_b_np],
                        [output_a_np, output_b_np],
                        epochs=1, batch_size=4,
                        validation_data=([input_a_np, input_b_np],))

    # `loss` does not match outputs.
    with pytest.raises(ValueError):
        model.compile(optimizer, loss=['mse', 'mae', 'mape'])

    # `loss_weights` does not match output_names.
    with pytest.raises(ValueError):
        model.compile(optimizer, loss='mse', loss_weights={'lstm': 0.5})

    # `loss_weights` does not match outputs.
    with pytest.raises(ValueError):
        model.compile(optimizer, loss='mse', loss_weights=[0.5])

    # `loss_weights` is invalid type.
    with pytest.raises(TypeError):
        model.compile(optimizer, loss='mse', loss_weights=(0.5, 0.5))

    # `sample_weight_mode` does not match output_names.
    with pytest.raises(ValueError):
        model.compile(optimizer, loss='mse',
                      sample_weight_mode={'lstm': 'temporal'})

    # `sample_weight_mode` does not match outputs.
    with pytest.raises(ValueError):
        model.compile(optimizer, loss='mse', sample_weight_mode=['temporal'])

    # `sample_weight_mode` matches output_names partially.
    with pytest.raises(ValueError):
        model.compile(optimizer, loss='mse',
                      sample_weight_mode={'dense_1': 'temporal'})

    # `loss` does not exist.
    with pytest.raises(RuntimeError):
        model.compile(optimizer, loss=[])

    model.compile(optimizer, loss=['mse', 'mae'])
    model.compile(optimizer, loss='mse',
                  loss_weights={'dense_1': 0.2, 'dropout': 0.8})
    model.compile(optimizer, loss='mse', loss_weights=[0.2, 0.8])

    # the rank of weight arrays should be 1.
    with pytest.raises(ValueError):
        out = model.train_on_batch(
            [input_a_np, input_b_np],
            [output_a_np, output_b_np],
            sample_weight=[None, np.random.random((10, 20, 30))])

    model.compile(optimizer, loss='mse',
                  sample_weight_mode={'dense_1': None, 'dropout': 'temporal'})
    model.compile(optimizer, loss='mse', sample_weight_mode=[None, 'temporal'])

    # with temporal sample weights, the output arrays should be at least 3D.
    with pytest.raises(ValueError):
        out = model.train_on_batch([input_a_np, input_b_np],
                                   [output_a_np, output_b_np],
                                   sample_weight=sample_weight)
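For contrast with the failing case above, a sketch of what temporal sample weighting expects; the model and shapes here are illustrative, not from the source: a 3D (batch, timesteps, features) output, 3D targets, and a 2D (batch, timesteps) weight array.

import numpy as np
from keras.layers import Input, TimeDistributed, Dense
from keras.models import Model

seq_in = Input(shape=(5, 3))                 # (timesteps, features)
seq_out = TimeDistributed(Dense(2))(seq_in)  # 3D output: (batch, 5, 2)
seq_model = Model(seq_in, seq_out)
seq_model.compile('rmsprop', loss='mse', sample_weight_mode='temporal')

x = np.random.random((10, 5, 3))
y = np.random.random((10, 5, 2))  # targets are at least 3D
w = np.random.random((10, 5))     # one weight per timestep
seq_model.train_on_batch(x, y, sample_weight=w)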