Example #1
def mk_model():
    x = Dense(64, activation='relu')(inputs)
    x = Dense(64, activation='relu',
              kernel_regularizer=regularizers.l2(0.01))(x)
    predictions = Dense(10, activation='softmax')(x)
    model = Model(inputs=inputs, outputs=predictions)
    with open("debugmodel.json", 'w') as f:
        f.write(model.to_json())
    return model
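
# Note: to_json() serializes only the architecture; weights must be saved
# separately (e.g. with model.save_weights()), as the later examples do.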
Example #2
                                          verbose=1,
                                          callbacks=[history,
                                                     npt_monitor])
                                                     # validation_output_callback])

y_pred = model.predict_generator(validation_generator,
                                 steps=(hprm['TEST_SIZE'] // hprm['BATCH_SIZE'])+1,
                                 callbacks=[],
                                 verbose=1)
# ---------------------------------------------------------------------------------------------------------------------

# --------------------------------------
# EXPORT MODEL ARCHITECTURE AND WEIGHTS |
# --------------------------------------
# export model structure to json file:
model_struct_json = model.to_json()
filename = filepattern('model_allfreeze_', '.json')
with open(filename, 'w') as f:
    f.write(model_struct_json)

# export weights to an hdf5 file:
w_filename = filepattern('weights_allfreeze_', '.h5')
model.save_weights(w_filename)
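
# Round-trip sketch (not part of the original script; assumes a plain-Keras
# import path): rebuild the model from the JSON and weights exported above.
# from keras.models import model_from_json
# with open(filename) as f:
#     restored = model_from_json(f.read())
# restored.load_weights(w_filename)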
# ---------------------------------------------------------------------------------------------------------------------

# -------------------------------------------------------------
# VISUALIZE BASE ARCHITECTURE TO DECIDE WHICH LAYERS TO FREEZE |
# -------------------------------------------------------------
# Set a debugger breakpoint on the next line to inspect the architecture at runtime.
print(list(show_architecture(base)))
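
# `show_architecture` is a project-specific helper; a plausible sketch (an
# assumption, not the original implementation) that yields one summary tuple
# per layer, which is what the freeze decision needs:
# def show_architecture(base):
#     for layer in base.layers:
#         yield (layer.name, layer.trainable)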
# (tail of a ReduceLROnPlateau(...) call whose opening was truncated in this fragment)
                                            verbose=1,
                                            factor=0.2,
                                            min_lr=0.0001)

tensor_board = TensorBoard(log_dir='./graph')

callbacks = [early_stop, checkpoint, learning_rate_reduction, tensor_board]

model.compile(loss='categorical_crossentropy',
              optimizer=Adam(lr=0.001),
              metrics=['accuracy'])

nb_train_samples = 28789
nb_validation_samples = 3589

epochs = 25

history = model.fit_generator(train_generator,
                              steps_per_epoch=nb_train_samples // batch_size,
                              epochs=epochs,
                              callbacks=callbacks,
                              validation_data=validation_generator,
                              validation_steps=nb_validation_samples //
                              batch_size)

model_json = model.to_json()

with open("emotion_classification_mobilenet_7_emotions.json",
          "w") as json_file:
    json_file.write(model_json)
try:  # reconstructed: the body of this try/except was truncated in the fragment
    model.load_weights(path_checkpoint)  # hypothetical call, mirroring Example #5
except Exception as error:
    print("Error trying to load checkpoint.")
    print(error)

x_data = {'encoder_input': encoder_input_data,
          'decoder_input': decoder_input_data}
y_data = {'decoder_output': decoder_output_data}
model_train.fit(x=x_data, y=y_data, batch_size=512,
                validation_split=0.005, callbacks=callbacks)
modelname1 = 'MachineTranslationTrain'
modelname2 = 'MachineTranslationEncoder'
modelname3 = 'MachineTranslationDecoder'
model_train.save('{}.keras'.format(modelname1))
model_encoder.save('{}.keras'.format(modelname2))
model_decoder.save('{}.keras'.format(modelname3))
with open('model_encoder.json', 'w', encoding='utf8') as f:
    f.write(model_encoder.to_json())
model_encoder.save_weights('model_encoder_weights.h5')
with open('model_decoder.json', 'w', encoding='utf8') as f:
    f.write(model_decoder.to_json())
model_decoder.save_weights('model_decoder_weights.h5')
with open('model_train.json', 'w', encoding='utf8') as f:
    f.write(model_train.to_json())
model_train.save_weights('model_train_weights.h5')
# Translate texts

def translate(input_text, true_output_text=None):
    input_tokens = tokenizer_src.text_to_tokens(text=input_text, reverse=True, padding=True)
    initial_state = model_encoder.predict(input_tokens)
    max_tokens = tokenizer_dest.max_tokens
    shape = (1, max_tokens)
    decoder_input_data = np.zeros(shape=shape, dtype=int)  # np.int was removed in NumPy 1.24
Example #5
path_checkpoint = '22_checkpoint.keras'
callback_checkpoint = ModelCheckpoint(filepath=path_checkpoint,
                                      verbose=1,
                                      save_weights_only=True)

callback_tensorboard = TensorBoard(log_dir='./22_logs/',
                                   histogram_freq=0,
                                   write_graph=False)
callbacks = [callback_checkpoint, callback_tensorboard]

try:
    decoder_model.load_weights(path_checkpoint)
except Exception as error:
    print("Error trying to load checkpoint.")
    print(error)

# decoder_model.fit_generator(generator=generator,
#                             steps_per_epoch=steps_per_epoch,
#                             epochs=10,
#                             callbacks=callbacks)

with open('model_def_caption.json', 'w') as ff:
    json_string = decoder_model.to_json()
    ff.write(json_string)

# decoder_model.save_weights('caption_model_25.h5')

generate_caption("image.jpg")
# generate_caption_coco(idx=10, train=True)
Example #6
def main():
    # Counting Dataset
    counting_dataset_path = 'counting_data_UCF'
    counting_dataset = list()
    train_labels = {}
    val_labels = {}
    for im_path in glob.glob(os.path.join(counting_dataset_path, '*.jpg')):
        counting_dataset.append(im_path)
        img = image.load_img(im_path)
        gt_file = im_path.replace('.jpg', '_ann.mat')
        h, w = img.size  # PIL's Image.size is (width, height), so (w, h) below is (rows, cols)
        dmap, crowd_number = load_gt_from_mat(gt_file, (w, h))
        train_labels[im_path] = dmap
        val_labels[im_path] = crowd_number
    counting_dataset_pyramid, train_labels_pyramid = multiscale_pyramid(
        counting_dataset, train_labels)

    # Ranking Dataset
    ranking_dataset_path = 'ranking_data'
    ranking_dataset = list()
    for im_path in glob.glob(os.path.join(ranking_dataset_path, '*.jpg')):
        ranking_dataset.append(im_path)

    # randomize the order of images before splitting
    np.random.shuffle(counting_dataset)

    split_size = int(round(len(counting_dataset) / 5))
    splits_list = list()
    for t in range(5):
        splits_list.append(counting_dataset[t * split_size:t * split_size +
                                            split_size])

    split_val_labels = {}

    mae_sum = 0.0
    mse_sum = 0.0

    # create folder to save results
    date = str(datetime.datetime.now())
    d = date.split()
    d1 = d[0]
    d2 = d[1].split(':')
    results_folder = 'Results-' + d1 + '-' + d2[0] + '.' + d2[1]
    if not os.path.exists(results_folder):
        os.makedirs(results_folder)

    # 5-fold cross validation
    epochs = int(round(iterations / iterations_per_epoch))
    n_fold = 5

    for f in range(0, n_fold):
        print('\nFold ' + str(f))

        # Model
        model = VGG16(include_top=False, weights='imagenet')
        transfer_layer = model.get_layer('block5_conv3')
        conv_model = Model(inputs=[model.input],
                           outputs=[transfer_layer.output],
                           name='vgg_partial')

        counting_input = Input(shape=(224, 224, 3),
                               dtype='float32',
                               name='counting_input')
        ranking_input = Input(shape=(224, 224, 3),
                              dtype='float32',
                              name='ranking_input')
        x = conv_model([counting_input, ranking_input])
        counting_output = Conv2D(1, (3, 3),
                                 strides=(1, 1),
                                 padding='same',
                                 data_format=None,
                                 dilation_rate=(1, 1),
                                 activation='relu',
                                 use_bias=True,
                                 kernel_initializer='glorot_uniform',
                                 bias_initializer='zeros',
                                 kernel_regularizer=None,
                                 bias_regularizer=None,
                                 activity_regularizer=None,
                                 kernel_constraint=None,
                                 bias_constraint=None,
                                 name='counting_output')(x)

        # The ranking output is computed using SUM pool. Here I use
        # GlobalAveragePooling2D followed by a multiplication by 14^2 to do
        # this.
        ranking_output = Lambda(
            lambda i: 14.0 * 14.0 * i,
            name='ranking_output')(GlobalAveragePooling2D(
                name='global_average_pooling2d')(counting_output))
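
        # Quick numpy check of that equivalence (a sketch, not in the original
        # code): the mean over a 14x14 map times 14*14 equals the sum.
        # fmap = np.random.rand(1, 14, 14, 1)
        # assert np.allclose(14.0 * 14.0 * fmap.mean(axis=(1, 2)),
        #                    fmap.sum(axis=(1, 2)))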
        train_model = Model(inputs=[counting_input, ranking_input],
                            outputs=[counting_output, ranking_output])
        train_model.summary()

        # l2 weight decay
        for layer in train_model.layers:
            if hasattr(layer, 'kernel_regularizer'):
                layer.kernel_regularizer = regularizers.l2(5e-4)
            elif layer.name == 'vgg_partial':
                for l in layer.layers:
                    if hasattr(l, 'kernel_regularizer'):
                        l.kernel_regularizer = regularizers.l2(5e-4)
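
        # Caveat (not in the original code): assigning kernel_regularizer to
        # already-built layers can be a no-op in stock Keras, because the
        # regularization loss is collected when the weights are created;
        # rebuilding the model from its config is the reliable route.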

        optimizer = SGD(lr=0.0, decay=0.0, momentum=0.9, nesterov=False)
        loss = {
            'counting_output': euclideanDistanceCountingLoss,
            'ranking_output': pairwiseRankingHingeLoss
        }
        loss_weights = [1.0, 0.0]
        train_model.compile(optimizer=optimizer,
                            loss=loss,
                            loss_weights=loss_weights)

        splits_list_tmp = splits_list.copy()

        # counting validation split
        split_val = splits_list_tmp[f]

        del splits_list_tmp[f]
        flat = itertools.chain.from_iterable(splits_list_tmp)

        # counting train split
        split_train = list(flat)

        # counting validation split labels
        split_val_labels = {k: val_labels[k] for k in split_val}

        counting_dataset_pyramid_split = []
        train_labels_pyramid_split = []
        for key in split_train:
            for scale in range(5):
                counting_dataset_pyramid_split.append(
                    counting_dataset_pyramid[key][scale])
                train_labels_pyramid_split.append(
                    train_labels_pyramid[key][scale])

        index_shuf = np.arange(len(counting_dataset_pyramid_split))
        np.random.shuffle(index_shuf)
        counting_dataset_pyramid_split_shuf = []
        train_labels_pyramid_split_shuf = []
        for i in index_shuf:
            counting_dataset_pyramid_split_shuf.append(
                counting_dataset_pyramid_split[i])
            train_labels_pyramid_split_shuf.append(
                train_labels_pyramid_split[i])

        train_generator = DataGenerator(counting_dataset_pyramid_split_shuf,
                                        train_labels_pyramid_split_shuf,
                                        ranking_dataset, **params)
        lrate = LearningRateScheduler(step_decay)
        callbacks_list = [lrate]
        train_model.fit_generator(generator=train_generator,
                                  epochs=epochs,
                                  callbacks=callbacks_list)

        #test images
        tmp_model = train_model.get_layer('vgg_partial')
        test_input = Input(shape=(None, None, 3),
                           dtype='float32',
                           name='test_input')
        new_input = tmp_model(test_input)
        co = train_model.get_layer('counting_output')(new_input)
        test_output = Lambda(lambda i: K.sum(i, axis=(1, 2)),
                             name='test_output')(co)
        test_model = Model(inputs=[test_input], outputs=[test_output])

        predictions = np.empty((len(split_val), 1))
        y_validation = np.empty((len(split_val), 1))
        for i in range(len(split_val)):
            img = image.load_img(split_val[i], target_size=(224, 224))
            img_to_array = image.img_to_array(img)
            img_to_array = preprocess_input(img_to_array)
            img_to_array = np.expand_dims(img_to_array, axis=0)

            pred_test = test_model.predict(img_to_array)
            predictions[i] = pred_test
            y_validation[i] = split_val_labels[split_val[i]]

        mean_abs_err = mae(predictions, y_validation)
        mean_sqr_err = mse(predictions, y_validation)

        # serialize model to JSON
        model_json = test_model.to_json()
        model_json_name = "test_model_" + str(f) + ".json"
        with open(model_json_name, "w") as json_file:
            json_file.write(model_json)
        # serialize weights to HDF5
        model_h5_name = "test_model_" + str(f) + ".h5"
        test_model.save_weights(model_h5_name)
        print("Saved model to disk")

        print('\n######################')
        print('Results on TEST SPLIT:')
        print(' MAE: {}'.format(mean_abs_err))
        print(' MSE: {}'.format(mean_sqr_err))
        print("Took %f seconds" % (time.time() - s))
        path1 = results_folder + '/test_split_results_fold-' + str(f) + '.txt'
        with open(path1, 'w') as out_file:  # avoid shadowing the fold index `f`
            out_file.write('mae: %f,\nmse: %f, \nTook %f seconds' %
                           (mean_abs_err, mean_sqr_err, time.time() - s))

        mae_sum = mae_sum + mean_abs_err
        mse_sum = mse_sum + mean_sqr_err

    print('\n################################')
    print('Average Results on TEST SPLIT:')
    print(' AVE MAE: {}'.format(mae_sum / n_fold))
    print(' AVE MSE: {}'.format(mse_sum / n_fold))
    print("Took %f seconds" % (time.time() - s))
    path2 = results_folder + '/test_split_results_avg.txt'
    with open(path2, 'w') as f:
        f.write('avg_mae: %f, \navg_mse: %f, \nTook %f seconds' %
                (mae_sum / n_fold, mse_sum / n_fold, time.time() - s))

def calc_steps(data_len, batchsize):
    return (data_len + batchsize - 1) // batchsize
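# Ceiling division, so the last partial batch still gets a step:
# e.g. calc_steps(10, 8) == 2.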


# Calculate the steps per epoch
train_steps = calc_steps(len(train_path), 8)
val_steps = calc_steps(len(val_path), 8)

checkpointer = ModelCheckpoint('cp-{epoch:02d}-{val_loss:.4f}-od-resnet50.h5',
                               verbose=1)
# Train the model
history = model.fit_generator(
    traingen,
    steps_per_epoch=train_steps,
    epochs=20,  # Change this to a larger number to train for longer
    validation_data=valgen,
    validation_steps=val_steps,
    verbose=1,
    max_queue_size=5  # Change this number based on memory restrictions
)

model.save('outlier_detector_resnet50.h5')

model.save_weights('model_weights.h5')

# Save the model architecture
with open('model_architecture.json', 'w') as f:
    f.write(model.to_json())
Example #8
class Seq2SeqAtt(object):
    model_name = 'seq2seq-qa-glove'

    def __init__(self):
        self.model = None
        self.encoder_model = None
        self.decoder_model = None
        self.target_word2idx = None
        self.target_idx2word = None
        self.max_decoder_seq_length = None
        self.max_encoder_seq_length = None
        self.num_decoder_tokens = None
        self.glove_model = GloveModel()

    @staticmethod
    def get_architecture_file_path(model_dir_path):
        return os.path.join(model_dir_path, Seq2SeqAtt.model_name + '-architecture.json')

    @staticmethod
    def get_weight_file_path(model_dir_path):
        return os.path.join(model_dir_path, Seq2SeqAtt.model_name + '-weights.h5')

    def load_glove_model(self, data_dir_path):
        self.glove_model.load_model(data_dir_path)

    def load_model(self, model_dir_path):
        # allow_pickle=True is required for these pickled-dict arrays on NumPy >= 1.16.3
        self.target_word2idx = np.load(
            model_dir_path + '/' + Seq2SeqAtt.model_name + '-target-word2idx.npy',
            allow_pickle=True).item()
        self.target_idx2word = np.load(
            model_dir_path + '/' + Seq2SeqAtt.model_name + '-target-idx2word.npy',
            allow_pickle=True).item()
        context = np.load(model_dir_path + '/' + Seq2SeqAtt.model_name + '-config.npy',
                          allow_pickle=True).item()
        self.max_encoder_seq_length = context['input_max_seq_length']
        self.max_decoder_seq_length = context['target_max_seq_length']
        self.num_decoder_tokens = context['num_target_tokens']

        self.create_model()
        self.model.load_weights(Seq2SeqAtt.get_weight_file_path(model_dir_path))

    def create_model(self):
        resolver = tf.contrib.cluster_resolver.TPUClusterResolver(tpu='grpc://' + os.environ['COLAB_TPU_ADDR'])
        tf.contrib.distribute.initialize_tpu_system(resolver)
        strategy = tf.contrib.distribute.TPUStrategy(resolver)
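        # (TF 1.x contrib APIs; TF 2.x moved these to
        # tf.distribute.cluster_resolver.TPUClusterResolver,
        # tf.tpu.experimental.initialize_tpu_system and tf.distribute.TPUStrategy.)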

        with strategy.scope():
            hidden_size = 256
            enc_timesteps = self.max_encoder_seq_length
            # timesteps = self.max_encoder_seq_length  # perhaps timesteps should match the max sequence length
            dec_timesteps = self.max_decoder_seq_length
            print(f"embedding size: {self.glove_model.embedding_size}")
            # encoder_inputs = Input(shape=(None, self.glove_model.embedding_size), name='encoder_inputs')
            # decoder_inputs = Input(shape=(None, self.num_decoder_tokens), name='decoder_inputs')
            encoder_inputs = Input(shape=(enc_timesteps, self.glove_model.embedding_size), name='encoder_inputs')
            decoder_inputs = Input(shape=(dec_timesteps, self.num_decoder_tokens), name='decoder_inputs')
            
            # Encoder GRU
            encoder_gru = Bidirectional(GRU(hidden_size, return_sequences=True, return_state=True, name='encoder_gru'), name='bidirectional_encoder')
            encoder_out, encoder_fwd_state, encoder_back_state = encoder_gru(encoder_inputs)

            # Set up the decoder GRU, using `encoder_states` as initial state.
            decoder_gru = GRU(hidden_size*2, return_sequences=True, return_state=True, name='decoder_gru')
            decoder_out, decoder_state = decoder_gru(
                decoder_inputs, initial_state=Concatenate(axis=-1)([encoder_fwd_state, encoder_back_state])
            )

            # Attention layer
            attn_layer = AttentionLayer(name='attention_layer')
            attn_out, attn_states = attn_layer([encoder_out, decoder_out])

            # Concat attention input and decoder GRU output
            decoder_concat_input = Concatenate(axis=-1, name='concat_layer')([decoder_out, attn_out])

            # Dense layer
            dense = Dense(self.num_decoder_tokens, activation='softmax', name='softmax_layer')
            dense_time = TimeDistributed(dense, name='time_distributed_layer')
            decoder_pred = dense_time(decoder_concat_input)

            # Full model
            self.model = Model(inputs=[encoder_inputs, decoder_inputs], outputs=decoder_pred)
            self.model.compile(optimizer=tf.train.RMSPropOptimizer(learning_rate=0.01), loss='categorical_crossentropy')

            self.model.summary()

            """ Inference model """
            batch_size = 1

            """ Encoder (Inference) model """
            encoder_inf_inputs = Input(batch_shape=(batch_size, enc_timesteps, self.glove_model.embedding_size), name='encoder_inf_inputs')
            encoder_inf_out, encoder_inf_fwd_state, encoder_inf_back_state = encoder_gru(encoder_inf_inputs)
            self.encoder_model = Model(inputs=encoder_inf_inputs, outputs=[encoder_inf_out, encoder_inf_fwd_state, encoder_inf_back_state])

            """ Decoder (Inference) model """
            decoder_inf_inputs = Input(batch_shape=(batch_size, 1, self.num_decoder_tokens), name='decoder_word_inputs')
            encoder_inf_states = Input(batch_shape=(batch_size, dec_timesteps, 2*hidden_size), name='encoder_inf_states')  # note: arguably should be enc_timesteps
            decoder_init_state = Input(batch_shape=(batch_size, 2*hidden_size), name='decoder_init')

            decoder_inf_out, decoder_inf_state = decoder_gru(
                decoder_inf_inputs, initial_state=decoder_init_state)
            attn_inf_out, attn_inf_states = attn_layer([encoder_inf_states, decoder_inf_out])
            decoder_inf_concat = Concatenate(axis=-1, name='concat')([decoder_inf_out, attn_inf_out])
            decoder_inf_pred = TimeDistributed(dense)(decoder_inf_concat)
            self.decoder_model = Model(inputs=[encoder_inf_states, decoder_init_state, decoder_inf_inputs],
                                outputs=[decoder_inf_pred, attn_inf_states, decoder_inf_state])

    def fit(self, data_set, model_dir_path, epochs=None, batch_size=None, test_size=None, random_state=None,
            save_best_only=False, max_target_vocab_size=None):
        if batch_size is None:
            batch_size = 64
        if epochs is None:
            epochs = 100
        if test_size is None:
            test_size = 0.2
        if random_state is None:
            random_state = 42
        if max_target_vocab_size is None:
            max_target_vocab_size = 5000

        data_set_seq2seq = SQuADSeq2SeqEmbTupleSamples(data_set, self.glove_model.word2em,
                                                       self.glove_model.embedding_size,
                                                       max_target_vocab_size=max_target_vocab_size)
        data_set_seq2seq.save(model_dir_path, 'qa-glove-att')

        x_train, x_test, y_train, y_test = data_set_seq2seq.split(test_size=test_size, random_state=random_state)

        print(len(x_train))
        print(len(x_test))

        self.max_encoder_seq_length = data_set_seq2seq.input_max_seq_length
        self.max_decoder_seq_length = data_set_seq2seq.target_max_seq_length
        self.num_decoder_tokens = data_set_seq2seq.num_target_tokens
        print(f'max_encoder_seq_length: {self.max_encoder_seq_length}')
        print(f'max_decoder_seq_length: {self.max_decoder_seq_length}')
        print(f'num_decoder_tokens: {self.num_decoder_tokens}')

        weight_file_path = self.get_weight_file_path(model_dir_path)
        architecture_file_path = self.get_architecture_file_path(model_dir_path)

        self.create_model()

        with open(architecture_file_path, 'w') as f:
            f.write(self.model.to_json())

        train_gen = generate_batch(data_set_seq2seq, x_train, y_train, batch_size)
        test_gen = generate_batch(data_set_seq2seq, x_test, y_test, batch_size)

        train_num_batches = len(x_train) // batch_size
        test_num_batches = len(x_test) // batch_size

        checkpoint = ModelCheckpoint(filepath=weight_file_path, save_best_only=save_best_only)

#########COLAB##########
        #TPU_WORKER = 'grpc://' + os.environ['COLAB_TPU_ADDR']
        #tensorflow.logging.set_verbosity(tensorflow.logging.INFO)

        #self.model = tensorflow.contrib.tpu.keras_to_tpu_model(
        #    self.model,
        #    strategy=tensorflow.contrib.tpu.TPUDistributionStrategy(
        #        tensorflow.contrib.cluster_resolver.TPUClusterResolver(TPU_WORKER)))
#######################

        history = self.model.fit_generator(generator=train_gen, steps_per_epoch=train_num_batches,
                                           epochs=epochs,
                                           verbose=1, validation_data=test_gen, validation_steps=test_num_batches,
                                           callbacks=[checkpoint])

        self.model.save_weights(weight_file_path)

        np.save(os.path.join(model_dir_path, Seq2SeqAtt.model_name + '-history.npy'), history.history)

        return history

    def reply(self, paragraph, question):
        input_seq = []
        input_emb = []
        input_text = paragraph.lower() + ' question ' + question.lower()
        for word in nltk.word_tokenize(input_text):
            if not in_white_list(word):
                continue
            emb = self.glove_model.encode_word(word)
            input_emb.append(emb)
        input_seq.append(input_emb)
        input_seq = pad_sequences(input_seq, self.max_encoder_seq_length)
        # The inference decoder built in create_model() takes
        # [encoder_states, decoder_init_state, decoder_inputs] and returns a
        # single GRU state, so track that instead of the LSTM-style [h, c].
        encoder_out, enc_fwd_state, enc_back_state = self.encoder_model.predict(input_seq)
        decoder_state = np.concatenate([enc_fwd_state, enc_back_state], axis=-1)
        target_seq = np.zeros((1, 1, self.num_decoder_tokens))
        target_seq[0, 0, self.target_word2idx['START']] = 1
        target_text = ''
        target_text_len = 0
        terminated = False
        while not terminated:
            output_tokens, _attn_states, decoder_state = self.decoder_model.predict(
                [encoder_out, decoder_state, target_seq])

            sample_token_idx = np.argmax(output_tokens[0, -1, :])
            sample_word = self.target_idx2word[sample_token_idx]
            target_text_len += 1

            if sample_word != 'START' and sample_word != 'END':
                target_text += ' ' + sample_word

            if sample_word == 'END' or target_text_len >= self.max_decoder_seq_length:
                terminated = True

            target_seq = np.zeros((1, 1, self.num_decoder_tokens))
            target_seq[0, 0, sample_token_idx] = 1

        return target_text.strip()

    def test_run(self, ds, index=None):
        if index is None:
            index = 0
        paragraph, question, actual_answer = ds.get_data(index)
        predicted_answer = self.reply(paragraph, question)
        print({'predict': predicted_answer, 'actual': actual_answer})
class Neural:

    def __init__(self, size_window_left, size_window_right, number_samples, threshold, number_epochs,
                      learning_patterns_per_id, optimizer_function, loss_function, dense_layers,
                      output_evolution_error_figures):

        self.size_windows_left = size_window_left
        self.size_window_right = size_window_right
        self.number_samples = number_samples
        self.threshold = threshold
        self.number_epochs = number_epochs
        self.learning_patterns_per_id = learning_patterns_per_id
        self.optimizer_function = optimizer_function
        self.loss_function = loss_function
        self.output_evolution_error_figures = output_evolution_error_figures
        self.neural_network = None
        self.dense_layers = dense_layers

    def create_neural_network(self):

        input_size = Input(shape=(self.size_windows_left + self.size_window_right + 1,))
        # Please do not change this layer
        self.neural_network = Dense(20)(input_size)
        self.neural_network = Dropout(0.2)(self.neural_network)

        for i in range(self.dense_layers - 1):

            self.neural_network = Dense(20)(self.neural_network)
            self.neural_network = Dropout(0.5)(self.neural_network)

        # Please do not change this layer
        self.neural_network = Dense(1, activation='sigmoid')(self.neural_network)
        self.neural_network = Model(input_size, self.neural_network)
        self.neural_network.summary()
        self.neural_network.compile(optimizer=self.optimizer_function, loss=self.loss_function,
                                    metrics=['mean_squared_error'])

    def fit(self, x, y, x_validation, y_validation):

        first_test_training = self.neural_network.evaluate(x, y)
        first_test_validation = self.neural_network.evaluate(x_validation, y_validation)
        history = self.neural_network.fit(x, y, epochs=self.number_epochs,
                                          validation_data=(x_validation, y_validation), )
        self.plotter_error_evaluate(history.history['mean_squared_error'], history.history['val_mean_squared_error'],
                                    first_test_training, first_test_validation)

    def plotter_error_evaluate(self, mean_square_error_training, mean_square_error_evaluate, first_error_training,
                               first_error_evaluate):

        mean_square_error_training.insert(0, first_error_training[1])
        mean_square_error_evaluate.insert(0, first_error_evaluate[1])
        matplotlib.pyplot.plot(mean_square_error_training, 'b', marker='^', label="Treinamento")
        matplotlib.pyplot.plot(mean_square_error_evaluate, 'g', marker='o', label="Validação")
        matplotlib.pyplot.legend(loc="upper right")
        matplotlib.pyplot.xlabel('Quantidade de épocas')
        matplotlib.pyplot.ylabel('Erro Médio')
        matplotlib.pyplot.savefig(
            self.output_evolution_error_figures + "fig_Mean_square_error_" + str(datetime.datetime.now()) + ".pdf")

    def predict_values(self, x):

        return self.neural_network.predict(x)

    def save_models(self, model_architecture_file, model_weights_file):

        model_json = self.neural_network.to_json()

        with open(model_architecture_file, "w") as json_file:

            json_file.write(model_json)

        self.neural_network.save_weights(model_weights_file)
        print("Saved model {} {}".format(model_architecture_file, model_weights_file))

    def load_models(self, model_architecture_file, model_weights_file):

        json_file = open(model_architecture_file, 'r')
        loaded_model_json = json_file.read()
        json_file.close()
        self.neural_network = model_from_json(loaded_model_json)
        self.neural_network.load_weights(model_weights_file)
        print("Loaded model {} {}".format(model_architecture_file, model_weights_file))

    @staticmethod
    def get_samples_vectorized(sample):

        sample_vectorized = []

        for i in range(len(sample)):

            sample_vectorized.append(float(sample[i][2]))

        return sample_vectorized, sample[5][2]

    def predict(self, x):

        x_axis = []
        y_axis = []
        results_predicted = []

        for i in range(len(x)):

            x_temp, y_temp = self.get_samples_vectorized(x[i])
            x_axis.append(x_temp)
            y_axis.append(y_temp)

        predicted = self.neural_network.predict(x_axis)

        for i in range(len(predicted)):

            if predicted[i] > self.threshold or y_axis[i] > 0.8:

                results_predicted.append(x[i][5])

        return results_predicted
Example #10
class SiameseModel:
    def __init__(self, use_cudnn_lstm=True, plot_model_architecture=False):
        n_hidden = 50
        input_dim = 300

        # unit_forget_bias: Boolean. If True, add 1 to the bias of the forget gate at initialization. Setting it to true will also force  bias_initializer="zeros". This is recommended in Jozefowicz et al.
        # he_normal: Gaussian initialization scaled by fan_in (He et al., 2014)
        if use_cudnn_lstm:
            # Use CuDNNLSTM instead of LSTM, because it is faster
            lstm = layers.CuDNNLSTM(n_hidden,
                                    unit_forget_bias=True,
                                    kernel_initializer='he_normal',
                                    kernel_regularizer='l2',
                                    name='lstm_layer')
        else:
            lstm = layers.LSTM(n_hidden,
                               unit_forget_bias=True,
                               kernel_initializer='he_normal',
                               kernel_regularizer='l2',
                               name='lstm_layer')

        # Building the left branch of the model: inputs are variable-length sequences of vectors of size 128.
        left_input = Input(shape=(None, input_dim), name='input_1')
        #        left_masked_input = layers.Masking(mask_value=0)(left_input)
        left_output = lstm(left_input)

        # Building the right branch of the model: when you call an existing layer instance, you reuse its weights.
        right_input = Input(shape=(None, input_dim), name='input_2')
        #        right_masked_input = layers.Masking(mask_value=0)(right_input)
        right_output = lstm(right_input)

        # Builds the classifier on top
        l1_norm = lambda x: 1 - K.abs(x[0] - x[1])
        merged = layers.Lambda(function=l1_norm,
                               output_shape=lambda x: x[0],
                               name='L1_distance')([left_output, right_output])
        predictions = layers.Dense(1,
                                   activation='tanh',
                                   name='Similarity_layer')(merged)  #sigmoid

        # Instantiating and training the model: when you train such a model, the weights of the LSTM layer are updated based on both inputs.
        self.model = Model([left_input, right_input], predictions)

        self.__compile()
        print(self.model.summary())

        if plot_model_architecture:
            from tensorflow.python.keras.utils import plot_model
            plot_model(self.model, to_file='siamese_architecture.png')

    def __compile(self):
        optimizer = Adadelta(
        )  # gradient clipping is not there in Adadelta implementation in keras
        #        optimizer = 'adam'
        self.model.compile(loss='mse',
                           optimizer=optimizer,
                           metrics=[pearson_correlation])

    def fit(self,
            left_data,
            right_data,
            targets,
            validation_data,
            epochs=5,
            batch_size=128):
        # The paper employ early stopping based on a validation, but they didn't mention parameters.
        early_stopping_monitor = EarlyStopping(
            monitor='val_pearson_correlation', mode='max', patience=20)
        #        callbacks = [early_stopping_monitor]
        callbacks = []
        history = self.model.fit(
            [left_data, right_data],
            targets,
            validation_data=validation_data,
            epochs=epochs,
            batch_size=batch_size  #)
            ,
            callbacks=callbacks)

        self.visualize_metric(history.history, 'loss')
        self.visualize_metric(history.history, 'pearson_correlation')
        self.load_activation_model()

    def visualize_metric(self, history_dic, metric_name):
        plt.plot(history_dic[metric_name])
        legend = ['train']
        if 'val_' + metric_name in history_dic:
            plt.plot(history_dic['val_' + metric_name])
            legend.append('test')
        plt.title('model ' + metric_name)
        plt.ylabel(metric_name)
        plt.xlabel('epoch')
        plt.legend(legend, loc='upper left')
        plt.show()

    def predict(self, left_data, right_data):
        return self.model.predict([left_data, right_data])

    def evaluate(self, left_data, right_data, targets, batch_size=128):
        return self.model.evaluate([left_data, right_data],
                                   targets,
                                   batch_size=batch_size)

    def load_activation_model(self):
        self.activation_model = Model(
            inputs=self.model.input[0],
            outputs=self.model.get_layer('lstm_layer').output)

    def visualize_activation(self, data):
        activations = self.activation_model.predict(data)
        plt.figure(figsize=(10, 100), dpi=80)
        plt.imshow(activations, cmap='Blues')
        plt.grid()
        plt.xticks(ticks=range(0, 50))
        plt.yticks(ticks=range(0, data.shape[0]))
        plt.show()

    def visualize_specific_activation(self, data, dimension_idx):
        activations = self.activation_model.predict(data)
        if dimension_idx >= activations.shape[1]:
            raise ValueError('dimension_idx must be less than %d' %
                             activations.shape[1])
        fig = plt.figure(figsize=(10, 1), dpi=80)
        ax = fig.add_subplot(111)
        plt.title('dimension_idx = %d' % dimension_idx)
        weights = activations[:, dimension_idx]
        plt.yticks(ticks=[0, 1])
        plt.plot(weights, np.zeros_like(weights), 'o')
        for i, txt in enumerate(weights):
            ax.annotate((i + 1), (weights[i], 0))
        plt.show()

    def save(self, model_folder='./model/'):
        # serialize model to JSON
        model_json = self.model.to_json()
        with open(model_folder + 'model.json', 'w') as json_file:
            json_file.write(model_json)
        # serialize weights to HDF5
        self.model.save_weights(model_folder + 'model.h5')
        print('Saved model to disk')

    def save_pretrained_weights(
            self, model_weights_path='./model/pretrained_weights.h5'):
        self.model.save_weights(model_weights_path)
        print('Saved pretrained weights to disk')

    def load(self, model_folder='./model/'):
        # load json and create model
        json_file = open(model_folder + 'model.json', 'r')
        loaded_model_json = json_file.read()
        json_file.close()
        loaded_model = model_from_json(loaded_model_json)
        # load weights into new model
        loaded_model.load_weights(model_folder + 'model.h5')
        print('Loaded model from disk')

        self.model = loaded_model
        # loaded model should be compiled
        self.__compile()
        self.load_activation_model()

    def load_pretrained_weights(
            self, model_weights_path='./model/pretrained_weights.h5'):
        # load weights into new model
        self.model.load_weights(model_weights_path)
        print('Loaded pretrained weights from disk')
        self.__compile()
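
# Hedged usage sketch (names and shapes assumed, not from the source): each
# branch takes batches of variable-length sequences of 300-d vectors.
# siamese = SiameseModel(use_cudnn_lstm=False)
# siamese.fit(left_train, right_train, y_train,
#             validation_data=([left_val, right_val], y_val),
#             epochs=5, batch_size=128)
# scores = siamese.predict(left_test, right_test)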
Example #11
print("accuracy: {:.2f}%".format(classifier.evaluate(X_test, y_test, batch_size=batch_size)[1] * 100))


# In[11]:


fig, axis = plt.subplots(2, 2, sharey=True, sharex=True, figsize=(15, 10))

axis[0, 0].plot(history.history['loss'])
axis[0, 0].set_title('loss')

axis[0, 1].plot(history.history['val_loss'])
axis[0, 1].set_title('validation loss')
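
# (older Keras logs accuracy under 'acc'/'val_acc'; tf.keras 2.x renames these
# to 'accuracy'/'val_accuracy')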

axis[1, 0].plot(history.history['acc'])
axis[1, 0].set_title('accuracy')

axis[1, 1].plot(history.history['val_acc'])
axis[1, 1].set_title('validation accuracy')
plt.show()


# In[12]:


with open("classifier.json", "w") as f:
    f.write(classifier.to_json())
classifier.save_weights("classifier.h5")

Example #12
#Deconv1 (the opening of this Conv2D call was truncated in the source fragment)
           padding='same')(encoded)
x = UpSampling2D(size=(2, 2))(x)
#Deconv2
x = Conv2D(filters=8, kernel_size=(3, 3), activation='relu', padding='same')(x)
x = UpSampling2D(size=(2, 2))(x)
#Deconv3
x = Conv2D(filters=16, kernel_size=(3, 3), activation='relu',
           padding='same')(x)
x = UpSampling2D(size=(2, 2))(x)
decoded = Conv2D(filters=3,
                 kernel_size=(3, 3),
                 activation='sigmoid',
                 padding='same')(x)

# Declare and compile the model
autoencoder = Model(inputs=input_img, outputs=decoded)
autoencoder.compile(optimizer='adam', loss='binary_crossentropy')

# Train the model
autoencoder.fit(x_train,
                x_train,
                epochs=50,
                batch_size=48,
                shuffle=True,
                validation_data=(x_test, x_test))

# Save the model
autoencoder.save_weights("weights")
with open("autoencoder.json", "w") as f:
    f.write(autoencoder.to_json())
Example #13
decoded = UpSampling2D((2, 2))(decoded)

# Final layer that is the same shape as the input. This is the result that should return the same image as the input
decoded = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(decoded)
# Check the output is the same shape as the input
print('shape of decoded', K.int_shape(decoded))

# Initialize input and output
autoencoder = Model(input_layer, decoded)

# Model that will return the embedding rather than the predicted image, but trained using the autoencoded model
encoder = Model(input_layer, encoded_layer)
print('shape of encoded', K.int_shape(encoded))

# Save the architectures as strings
json_autoencoder = autoencoder.to_json()
json_encoder = encoder.to_json()

# Implement early stopping
early_stopping = EarlyStopping(monitor='val_loss',
                               min_delta=0,
                               patience=10,
                               verbose=1,
                               mode='auto')

# model_checkpoints = tensorflow.keras.callbacks.ModelCheckpoint("checkpoint-{val_loss:.3f}.h5", monitor='val_loss', verbose=0, save_best_only=False,save_weights_only=False, mode='auto', save_freq ='epoch')

#load audio array
with open('audio_array.pkl', 'rb') as picklefile:
    audio_array = pickle.load(picklefile)
Example #14
class StyleBank(object):
    def __init__(self):
        ######################### Tunable Parameters ################################
        # General
        self.img_shape = (None, 128, 128, 3)  # (None, 512, 512, 3) ## Image Shape
        self.n_styles = 4  # 50 ## Number of styles in the bank
        self.n_content = 1000  ## Number of content images
        self.N_steps = 300000  ## Total number of training steps
        self.T = 2  ## Number of consecutive steps for training styles before training the AutoEncoder
        self.print_iter = 100  ## Log output
        self.Batch_Size = 4  ## Batch size
        self.Use_Batch_Norm = True  ## Use batch normalization instead of instance normalization

        # LR
        self.LR_Initial = 0.01  ## Initial ADAM learning rate
        self.LR_Current = self.LR_Initial  ## For logging
        self.LR_Decay = 0.8  ## LR decay
        self.LR_Update_Every = self.N_steps / 10  ## LR decay period
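        # With N_steps = 300000 this decays the LR every 30000 steps (nine
        # times during training), ending near 0.01 * 0.8**9 ~= 1.3e-3.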

        # Loss
        self.Optimizer = optimizers.Adam(
            lr=self.LR_Initial)  ## Optimizer for both branches
        self.LossAlpha = 0.025  # Content weight
        self.LossBeta = 1.2  # Style weight
        self.LossGamma = 1.0  # Total Variation weight
        ######################### \Tunable Parameters ################################

        self.StyleNetLoss = {k: None for k in range(self.n_styles)}
        self.StyleNetContentLoss = {k: None for k in range(self.n_styles)}
        self.StyleNetStyleLoss = {k: None for k in range(self.n_styles)}

        # Data
        self.Content_DB = None
        self.Style_DB = None
        self.Content_DB_path = './DB/content/'
        self.Style_DB_path = './DB/style/'
        self.Content_DB_list = glob(self.Content_DB_path + '*')
        self.Style_DB_list = glob(self.Style_DB_path + '*')

        # VGG
        self.VGG16 = None

        # auto-encoder
        self.encoder = None
        self.decoder = None

        # style bank
        self.style_bank = {k: None for k in range(self.n_styles)}

        self.StyleNet = {k: None for k in range(self.n_styles)}
        self.AutoEncoderNet = None

        # inputs - content and one for style
        self.KinputContent = None
        self.KinputStyle = None
        self.tfStyleIndices = None

        self.TensorBoardStyleNet = {k: None for k in range(self.n_styles)}
        self.TensorBoardAutoEncoder = None

    def initialize_placeholders(self):
        # initialize the content and style image tensors
        self.KinputContent = Input(shape=self.img_shape[1:],
                                   name="InputContent")
        self.KinputDecoded = None

    def build_models(self):
        ###########
        # Encoder #
        ###########
        print("Building Encoder")
        input_layer = Input(shape=self.img_shape[1:])
        t_encoder = Conv2D(32, (9, 9),
                           strides=(1, 1),
                           padding='same',
                           use_bias=False)(input_layer)
        if self.Use_Batch_Norm:
            t_encoder = BatchNormalization()(t_encoder)
        else:
            t_encoder = InstanceNormalization()(t_encoder)
        t_encoder = Activation('relu')(t_encoder)
        t_encoder = Conv2D(64, (3, 3),
                           strides=(2, 2),
                           padding='same',
                           use_bias=False)(t_encoder)
        if self.Use_Batch_Norm:
            t_encoder = BatchNormalization()(t_encoder)
        else:
            t_encoder = InstanceNormalization()(t_encoder)
        t_encoder = Activation('relu')(t_encoder)
        t_encoder = Conv2D(128, (3, 3),
                           strides=(2, 2),
                           padding='same',
                           use_bias=False)(t_encoder)
        if self.Use_Batch_Norm:
            t_encoder = BatchNormalization()(t_encoder)
        else:
            t_encoder = InstanceNormalization()(t_encoder)
        t_encoder = Activation('relu')(t_encoder)
        self.encoder = Model(input_layer, t_encoder, name='Encoder')
        print(self.encoder.summary())

        ###########
        # Decoder #
        ###########
        print("Building Decoder")
        input_layer = Input(shape=self.encoder.layers[-1].output_shape[1:])
        t_decoder = Conv2DTranspose(64, (3, 3),
                                    strides=(2, 2),
                                    padding='same',
                                    use_bias=False)(input_layer)
        if self.Use_Batch_Norm:
            t_decoder = BatchNormalization()(t_decoder)
        else:
            t_decoder = InstanceNormalization()(t_decoder)
        t_decoder = Activation('relu')(t_decoder)
        t_decoder = Conv2DTranspose(32, (3, 3),
                                    strides=(2, 2),
                                    padding='same',
                                    use_bias=False)(t_decoder)
        if self.Use_Batch_Norm:
            t_decoder = BatchNormalization()(t_decoder)
        else:
            t_decoder = InstanceNormalization()(t_decoder)
        t_decoder = Activation('relu')(t_decoder)
        t_decoder = Conv2DTranspose(3, (9, 9),
                                    strides=(1, 1),
                                    padding='same',
                                    use_bias=False)(t_decoder)
        self.decoder = Model(input_layer, t_decoder, name='Decoder')
        print(self.decoder.summary())

        #############
        # StyleBank #
        #############
        for i in self.style_bank:
            print("Building Style {}".format(i))
            bank_name = "StyleBank{}".format(i)
            stylenet_name = "StyleNet{}".format(i)
            input_layer = Input(shape=self.encoder.layers[-1].output_shape[1:])
            t_style = Conv2D(256, (3, 3),
                             strides=(1, 1),
                             padding='same',
                             use_bias=False)(input_layer)
            if self.Use_Batch_Norm:
                t_style = BatchNormalization()(t_style)
            else:
                t_style = InstanceNormalization()(t_style)
            t_style = Activation('relu')(t_style)
            t_style = Conv2D(256, (3, 3),
                             strides=(1, 1),
                             padding='same',
                             use_bias=False)(t_style)
            if self.Use_Batch_Norm:
                t_style = BatchNormalization()(t_style)
            else:
                t_style = InstanceNormalization()(t_style)
            t_style = Activation('relu')(t_style)
            t_style = Conv2D(128, (3, 3),
                             strides=(1, 1),
                             padding='same',
                             use_bias=False)(t_style)
            if self.Use_Batch_Norm:
                t_style = BatchNormalization()(t_style)
            else:
                t_style = InstanceNormalization()(t_style)
            t_style = Activation('relu')(t_style)
            self.style_bank[i] = Model(input_layer, t_style, name=bank_name)

            #########################
            # StyleBank Full Models #
            #########################
            input_layer = self.encoder.layers[0].output
            # alternatively: layers.Input(batch_shape=self.encoder.layers[0].input_shape)
            prev_layer = input_layer
            for layer in self.encoder.layers[1:]:
                prev_layer = layer(prev_layer)
            for layer in self.style_bank[i].layers[1:]:
                prev_layer = layer(prev_layer)
            for layer in self.decoder.layers[1:]:
                prev_layer = layer(prev_layer)
            self.StyleNet[i] = Model([input_layer], [prev_layer],
                                     name=stylenet_name)
            print(self.StyleNet[i].summary())

        ##########################
        # AutoEncoder Full Model #
        ##########################
        print("Building AutoEncoder")
        input_layer = self.encoder.layers[0].output
        # alternatively: layers.Input(batch_shape=self.encoder.layers[0].input_shape)
        prev_layer = input_layer
        for layer in self.encoder.layers[1:]:
            prev_layer = layer(prev_layer)
        for layer in self.decoder.layers[1:]:
            prev_layer = layer(prev_layer)
        self.AutoEncoderNet = Model([input_layer], [prev_layer],
                                    name='AutoEncoder')
        print(self.AutoEncoderNet.summary())

        ### VGG
        print("Importing VGG")
        self.VGG16 = VGG16(include_top=False,
                           weights='imagenet',
                           input_shape=self.img_shape[1:])

        print("Plotting Models")
        plot_model(self.AutoEncoderNet,
                   to_file='Model_AutoEncoderNet.png',
                   show_shapes=True)
        plot_model(self.VGG16, to_file='Model_VGG16.png', show_shapes=True)
        for i in self.style_bank:
            stylenet_model_file = "Model_StyleNet{}.png".format(i)
            plot_model(self.StyleNet[i],
                       to_file=stylenet_model_file,
                       show_shapes=True)

    def compile_models(self):
        print("Compiling models")

        def total_variation_loss(x):
            img_nrows = self.img_shape[1]
            img_ncols = self.img_shape[2]
            assert K.ndim(x) == 4
            if K.image_data_format() == 'channels_first':
                a = K.square(x[:, :, :img_nrows - 1, :img_ncols - 1] -
                             x[:, :, 1:, :img_ncols - 1])
                b = K.square(x[:, :, :img_nrows - 1, :img_ncols - 1] -
                             x[:, :, :img_nrows - 1, 1:])
            else:
                a = K.square(x[:, :img_nrows - 1, :img_ncols - 1, :] -
                             x[:, 1:, :img_ncols - 1, :])
                b = K.square(x[:, :img_nrows - 1, :img_ncols - 1, :] -
                             x[:, :img_nrows - 1, 1:, :])

            return K.sum(K.pow(a + b, 1.25))

        def gram_matrix(x):
            assert K.ndim(x) == 4
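            # Flatten each channel of the activation map into a row of F and
            # form the Gram matrix G = F @ F.T (channels x channels); matching
            # the Gram matrices of stylized output and style image is the
            # standard Gatys-style style loss used in stylenet_loss below.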
            grams = list()
            for i in range(self.Batch_Size):
                img = x[i, :, :, :]
                if K.image_data_format() == 'channels_first':
                    features = K.batch_flatten(img)
                else:
                    features = K.batch_flatten(
                        K.permute_dimensions(img, (2, 0, 1)))
                grams.append(K.dot(features, K.transpose(features)))
            gram = tf.keras.backend.stack(grams)
            return gram

        def stylenet_loss_wrapper(input_tensor):
            def stylenet_loss(S, O):
                style_loss = K.variable(0.0)
                content_loss = K.variable(0.0)
                vgg16_layers = [l for l in self.VGG16.layers]
                vgg16_layers = vgg16_layers[1:]
                FlI = input_tensor  #self.encoder.layers[0].output
                FlS = S
                FlO = O
                for i in range(len(vgg16_layers)):
                    FlI = vgg16_layers[i](FlI)
                    FlS = vgg16_layers[i](FlS)
                    FlO = vgg16_layers[i](FlO)
                    # The original `or` chain compared against truthy layer
                    # objects and was therefore always True; a membership test
                    # expresses the evident intent.
                    if vgg16_layers[i] in (self.VGG16.get_layer('block1_conv2'),
                                           self.VGG16.get_layer('block2_conv2'),
                                           self.VGG16.get_layer('block3_conv2'),
                                           self.VGG16.get_layer('block4_conv2')):
                        gram_mse = K.mean(
                            K.square(gram_matrix(FlO) - gram_matrix(FlS)))
                        layer_channels = vgg16_layers[i].output_shape[3]
                        layer_size = vgg16_layers[i].output_shape[
                            1] * vgg16_layers[i].output_shape[2]
                        gram_mse_norm = gram_mse / (2.0**2 *
                                                    (layer_channels**2) *
                                                    (layer_size**2))
                        style_loss = style_loss + gram_mse_norm

                    if vgg16_layers[i] == self.VGG16.get_layer('block4_conv2'):
                        content_loss = K.mean(K.square(FlO - FlI))
                        break
                tv_loss = total_variation_loss(O)
                return self.LossAlpha * content_loss + self.LossBeta * style_loss + self.LossGamma * tv_loss

            return stylenet_loss

        # Compile Models
        for i in self.style_bank:
            print("Compiling StyleBank {}".format(i))
            self.StyleNet[i].compile(optimizer=self.Optimizer,
                                     loss=stylenet_loss_wrapper(
                                         self.StyleNet[i].layers[0].output))
        self.AutoEncoderNet.compile(optimizer=self.Optimizer, loss=mse)
        print("Initial learning rates: StyleNet={}, AutoEncoder={}".format(
            K.eval(self.StyleNet[0].optimizer.lr),
            K.eval(self.AutoEncoderNet.optimizer.lr)))

    def get_batch_ids(self, batch_size, data_size):
        return np.random.choice(np.arange(0, data_size),
                                size=batch_size,
                                replace=False)

    def train_models(self):
        style_id = 0
        new_lr = self.LR_Initial
        for step in range(self.N_steps):
            style_ids = [style_id for i in range(self.Batch_Size)]
            batch_ids = self.get_batch_ids(self.Batch_Size, self.n_content)
            # Load the DB
            print("Loading DB, step {}...".format(step), end='')
            self.Content_DB = np.array([
                resize(imread(self.Content_DB_list[batch_id]),
                       self.img_shape[1:]) for batch_id in batch_ids
            ])

            style_im = resize(imread(self.Style_DB_list[style_id]),
                              self.img_shape[1:])
            self.Style_DB = np.array([style_im for _ in style_ids])  # avoid shadowing style_id

            print("Finished Loading DB")
            if step % (self.T + 1) != self.T:  # Train Style
                loss_style = self.StyleNet[style_id].train_on_batch(
                    self.Content_DB, self.Style_DB)
                self.TensorBoardStyleNet[style_id].on_epoch_end(
                    step, self.named_logs(self.StyleNet[style_id], loss_style))
            else:  # Train AE
                loss_autoencoder = self.AutoEncoderNet.train_on_batch(
                    self.Content_DB, self.Content_DB)
                self.TensorBoardAutoEncoder.on_epoch_end(
                    step, self.named_logs(self.AutoEncoderNet,
                                          loss_autoencoder))
                style_id += 1
                style_id = style_id % self.n_styles
            if step % self.print_iter == 0 and step != 0:
                print(
                    "step {0}, loss_style={1}, loss_autoencoder={2}, timestamp={3}"
                    .format(step, loss_style, loss_autoencoder,
                            datetime.now()))
            if step % self.LR_Update_Every == 0 and step != 0:
                new_lr = new_lr * self.LR_Decay
                self.LR_Current = new_lr
                for i in self.style_bank:
                    K.set_value(self.StyleNet[i].optimizer.lr, new_lr)
                K.set_value(self.AutoEncoderNet.optimizer.lr, new_lr)
                print("Updating LR to: StyleNet={}, AutoEncoder={}".format(
                    K.eval(self.StyleNet[0].optimizer.lr),
                    K.eval(self.AutoEncoderNet.optimizer.lr)))
        for i in self.style_bank:
            self.TensorBoardStyleNet[i].on_train_end(None)
        self.TensorBoardAutoEncoder.on_train_end(None)

    def prepare_tensorboard(self):
        for i in self.style_bank:
            self.TensorBoardStyleNet[i] = keras.callbacks.TensorBoard(
                log_dir="tb_logs/stylenet_{}".format(i),
                histogram_freq=0,
                batch_size=self.Batch_Size,
                write_graph=True,
                write_grads=True)
            self.TensorBoardStyleNet[i].set_model(self.StyleNet[i])
        self.TensorBoardAutoEncoder = keras.callbacks.TensorBoard(
            log_dir="tb_logs/autoencoder",
            histogram_freq=0,
            batch_size=self.Batch_Size,
            write_graph=True,
            write_grads=True)
        self.TensorBoardAutoEncoder.set_model(self.AutoEncoderNet)

    def named_logs(self, model, logs):
        result = {}
        for l in zip(model.metrics_names, [logs]):
            result[l[0]] = l[1]
        return result

    def save_models(self):
        # serialize model to JSON
        if self.Use_Batch_Norm:
            out_mod_dir = 'Save_BatchNorm'
        else:
            out_mod_dir = 'Save_InstNorm'
        os.makedirs(out_mod_dir, exist_ok=True)
        ae_json = self.AutoEncoderNet.to_json()
        with open(os.path.join(out_mod_dir, "autoencoder.json"),
                  "w") as json_file:
            json_file.write(ae_json)
        # serialize weights to HDF5
        self.AutoEncoderNet.save_weights(
            os.path.join(out_mod_dir, "autoencoder.h5"))
        for i in self.style_bank:
            ae_json = self.StyleNet[i].to_json()
            with open(os.path.join(out_mod_dir, "stylenet_{}.json".format(i)),
                      "w") as json_file:
                json_file.write(ae_json)
            # serialize weights to HDF5
            self.StyleNet[i].save_weights(
                os.path.join(out_mod_dir, "stylenet_{}.h5".format(i)))
        print("Saved model to disk")

    def load_models(self):
        # load json and create model
        if self.Use_Batch_Norm:
            out_mod_dir = 'Save_BatchNorm'
        else:
            out_mod_dir = 'Save_InstNorm'
        ae_json = open(os.path.join(out_mod_dir, 'autoencoder.json'), 'r')
        ae_model_json = ae_json.read()
        ae_json.close()
        ae_model = models.model_from_json(
            ae_model_json,
            custom_objects={'InstanceNormalization': InstanceNormalization})
        # load weights into new model
        ae_model.load_weights(os.path.join(out_mod_dir, "autoencoder.h5"))
        self.AutoEncoderNet = ae_model
        for i in self.style_bank:
            ae_json = open(
                os.path.join(out_mod_dir, "stylenet_{}.json".format(i)), 'r')
            ae_model_json = ae_json.read()
            ae_json.close()
            ae_model = models.model_from_json(ae_model_json,
                                              custom_objects={
                                                  'InstanceNormalization':
                                                  InstanceNormalization
                                              })
            # load weights into new model
            ae_model.load_weights(
                os.path.join(out_mod_dir, "stylenet_{}.h5".format(i)))
            self.StyleNet[i] = ae_model
        print("Loaded models from disk")