def set_model(self, model):
    TensorBoard.set_model(self, model)
    TensorBoardEmbeddingMixin.set_model(self, model)
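A sketch of how this mixin-style override is typically used: the method above (and the matching on_epoch_end override shown near the end of this page) would live on a class that combines the mixin with the stock callback. The class name below is hypothetical and the mixin is assumed to be defined elsewhere.

class TensorBoardWithEmbeddings(TensorBoardEmbeddingMixin, TensorBoard):
    def set_model(self, model):
        TensorBoard.set_model(self, model)
        TensorBoardEmbeddingMixin.set_model(self, model)

    def on_epoch_end(self, epoch, logs=None):
        TensorBoardEmbeddingMixin.on_epoch_end(self, epoch)
        TensorBoard.on_epoch_end(self, epoch, logs)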
Example No. 2
def nlp_train():
    """
        Function used to train a model
    """
    config = analyse_nlp_train()
    print('\nThe configuration for the training session is :\n', config)

    print("\n1) Getting the text sample...")
    with open(config["text"]) as f:
        text = f.read().lower()
    print('corpus length:', len(text))

    chars = sorted(list(set(text)))
    vocab_size = len(chars)
    print('total chars:', vocab_size)
    char_indices = dict((c, i) for i, c in enumerate(chars))
    indices_char = dict((i, c) for i, c in enumerate(chars))

    # cut the text in semi-redundant sequences of time_steps characters
    time_steps = config["time_steps"]
    step = config["step"]
    sentences = []
    next_chars = []
    for i in range(0, len(text) - time_steps, step):
        sentences.append(text[i:i + time_steps])
        next_chars.append(text[i + time_steps])
    training_size = len(sentences)
    print('nb sequences:', training_size)

    print('\n2) Vectorization...\n')
    # create empty placeholders for x and for y (the expected next character)
    # use the builtin bool dtype (np.bool was removed in recent NumPy releases)
    x = np.zeros((training_size, time_steps, vocab_size), dtype=bool)
    y = np.zeros((training_size, vocab_size), dtype=bool)

    # associate each character to a one-hot vector of size vocab_size
    for i, sentence in enumerate(sentences):
        for t, char in enumerate(sentence):
            x[i, t, char_indices[char]] = 1
        y[i, char_indices[next_chars[i]]] = 1

    # print a few random samples to better understand the process of vectorization
    # and the split between x and y
    for _ in range(3):
        index = random.randint(0, len(sentences) - 1)
        for t in range(time_steps):
            print(indices_char[np.argmax(x[index, t, ])], end='')
        print("\nThe expected character is -> ",
              indices_char[np.argmax(y[index, ])], "\n")

    # size of the hidden_state
    hidden_state_size = config["hidden_state_size"]

    print('\n3) Build model...\n')

    if config["load_model"] is None:
        # build a fresh new model
        model = Sequential()
        if config["cell"] == "SimpleRNN":
            model.add(
                SimpleRNN(hidden_state_size,
                          input_shape=(time_steps, vocab_size)))
        elif config["cell"] == "LSTM":
            model.add(
                LSTM(hidden_state_size, input_shape=(time_steps, vocab_size)))
        elif config["cell"] == "GRU":
            model.add(
                GRU(hidden_state_size, input_shape=(time_steps, vocab_size)))
        model.add(Dense(vocab_size))
        model.add(Activation('softmax'))
    else:
        # re-use a pre-trained model to train it more
        print("Loaded model from the", config["load_model"], "file.")
        model = load_model(config["load_model"])

    # summarize all the trainable parameters of the model and its configuration
    model.summary()

    # define the optimizer and loss used to improve the model
    optimizer = RMSprop(lr=config["lr"])
    model.compile(loss='categorical_crossentropy', optimizer=optimizer)

    # generate sample text at the end of each epoch
    def on_epoch_end(epoch, logs):
        """
            Function invoked at the end of each epoch.
        """
        print()
        generated = '\n\n----- Generating text after Epoch: ' + str(epoch)

        start_index = random.randint(0, len(text) - time_steps - 1)

        generated += '\n\n'
        sentence = text[start_index:start_index + time_steps]
        generated += sentence
        print('----- Generating with seed: "' + sentence + '"')
        sys.stdout.write(generated)

        for _ in range(config["nb_char"]):
            x_pred = np.zeros((1, time_steps, vocab_size))
            for t, char in enumerate(sentence):
                x_pred[0, t, char_indices[char]] = 1.

            preds = model.predict(x_pred, verbose=0)[0]
            next_index = sample(preds, config["temperature"])
            next_char = indices_char[next_index]

            generated += next_char
            sentence = sentence[1:] + next_char

            sys.stdout.write(next_char)
            sys.stdout.flush()
        if config["gen_text"] is not None:
            # open in append mode so we can follow the progression across epochs
            with open(config["gen_text"], "a") as file:
                file.write(generated)
            print(
                "\n\nINFO : The previous generated text has been successfully saved in the",
                config["gen_text"], "file.")
        print()

    callbacks_list = [
        ModelCheckpoint(config["save_model"]),
        TensorBoard(log_dir='./logs'),
        ReduceLROnPlateau(monitor='loss', factor=0.1, patience=7),
        LambdaCallback(on_epoch_end=on_epoch_end)
    ]

    print("\n4) Training the model...\n")
    model.fit(x,
              y,
              batch_size=config["batch_size"],
              epochs=config["epochs"],
              callbacks=callbacks_list)
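The generation loop above calls a sample() helper that is not shown in the snippet. A minimal sketch of the usual temperature-based sampling it likely performs (an assumption based on the standard Keras text-generation recipe; the small epsilon guards against log(0)):

def sample(preds, temperature=1.0):
    # rescale the predicted distribution by the temperature, renormalize,
    # then draw a single character index from it
    preds = np.asarray(preds).astype('float64')
    preds = np.log(preds + 1e-8) / temperature
    exp_preds = np.exp(preds)
    preds = exp_preds / np.sum(exp_preds)
    probas = np.random.multinomial(1, preds, 1)
    return np.argmax(probas)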
Example No. 3
if __name__ == "__main__":

    # directory where the trained model is saved
    model_dir = Path("..", "model_dir")
    model_dir.mkdir(exist_ok=True)

    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = np.expand_dims(x_train, axis=-1) / 255.
    x_test = np.expand_dims(x_test, axis=-1) / 255.

    train_noise = np.abs(np.random.normal(0.0, 0.2, x_train.shape))

    test_noise = np.abs(np.random.normal(0.0, 0.2, x_test.shape))

    x_train_noised = np.clip(x_train + train_noise, 0.0, 1.0)
    x_test_noised = np.clip(x_test + test_noise, 0.0, 1.0)

    log_dir = Path("..", "logs")
    tb = TensorBoard(log_dir=str(log_dir))

    model = ConvDAE()
    model.compile(optimizer="Adam",
                  loss=keras.losses.MSE,
                  metrics=[keras.metrics.mse])
    model.fit(x=x_train_noised,
              y=x_train,
              epochs=5,
              validation_data=(x_test_noised, x_test),
              callbacks=[tb])
    model.save(str(model_dir / "ConvDAE.hdf5"))
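ConvDAE is defined elsewhere in that project. A minimal sketch of a convolutional denoising autoencoder with a compatible interface (the architecture below is an assumption, not the original model; a builder function is used instead of the subclassed model):

from keras import layers, models

def build_conv_dae():
    # encoder: two conv + pool blocks; decoder: two conv + upsample blocks
    inputs = layers.Input(shape=(28, 28, 1))
    x = layers.Conv2D(32, 3, activation="relu", padding="same")(inputs)
    x = layers.MaxPooling2D(2, padding="same")(x)
    x = layers.Conv2D(32, 3, activation="relu", padding="same")(x)
    x = layers.MaxPooling2D(2, padding="same")(x)
    x = layers.Conv2D(32, 3, activation="relu", padding="same")(x)
    x = layers.UpSampling2D(2)(x)
    x = layers.Conv2D(32, 3, activation="relu", padding="same")(x)
    x = layers.UpSampling2D(2)(x)
    outputs = layers.Conv2D(1, 3, activation="sigmoid", padding="same")(x)
    return models.Model(inputs, outputs)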
Example No. 4
def main(argv):
    FLAGS(argv)
    annotation_path = FLAGS.annotation_path
    log_dir = FLAGS.log_dir
    classes_path = os.path.join(FLAGS.model_data, 'classes.txt')
    anchors_path = os.path.join(FLAGS.model_data, 'anchors.txt')
    class_names = get_classes(classes_path)
    num_classes = len(class_names)
    anchors = get_anchors(anchors_path)

    input_shape = (416,416) # multiple of 32, hw

    is_tiny_version = len(anchors)==6 # default setting
    if is_tiny_version:
        model = create_tiny_model(
                input_shape,
                anchors,
                num_classes,
                freeze_body=2,
                weights_path=FLAGS.pretrained_model_path)
    else:
        model = create_model(
                input_shape,
                anchors,
                num_classes,
                freeze_body=2,
                weights_path=FLAGS.pretrained_model_path)

    logging = TensorBoard(log_dir=log_dir)
    checkpoint = ModelCheckpoint(
            log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
            monitor='val_loss',
            save_weights_only=True,
            save_best_only=True,
            period=3)

    reduce_lr = ReduceLROnPlateau(
            monitor='val_loss',
            factor=0.1,
            patience=3,
            verbose=1)

    early_stopping = EarlyStopping(
            monitor='val_loss',
            min_delta=0,
            patience=10,
            verbose=1)

    val_split = 0.1
    with open(annotation_path) as f:
        lines = f.readlines()
    np.random.seed(10101)
    np.random.shuffle(lines)
    np.random.seed(None)
    num_val = int(len(lines)*val_split)
    num_train = len(lines) - num_val

    # Train with frozen layers first, to get a stable loss.
    # Adjust num epochs to your dataset.
    # This step is enough to obtain a not bad model.
    if True:
        model.compile(
                optimizer=Adam(lr=1e-3),
                loss={'yolo_loss': lambda y_true, y_pred: y_pred})

        batch_size = 32
        print('Train on {} samples, '
              'val on {} samples, '
              'with batch size {}.'.format(num_train, num_val, batch_size))

        model.fit_generator(
                data_generator_wrapper(
                        lines[:num_train],
                        batch_size,
                        input_shape,
                        anchors,
                        num_classes),
                steps_per_epoch=max(1, num_train//batch_size),
                validation_data=data_generator_wrapper(
                        lines[num_train:],
                        batch_size,
                        input_shape,
                        anchors,
                        num_classes),
                validation_steps=max(1, num_val//batch_size),
                epochs=50,
                initial_epoch=0,
                callbacks=[logging, checkpoint])

        model.save_weights(log_dir + 'trained_weights_stage_1.h5')

    # Unfreeze and continue training, to fine-tune.
    # Train longer if the result is not good.
    if False:
        for i in range(len(model.layers)):
            model.layers[i].trainable = True
        print('Unfreeze all of the layers.')

        model.compile(
                optimizer=Adam(lr=1e-4),
                loss={'yolo_loss': lambda y_true, y_pred: y_pred})

        batch_size = 32
        print('Train on {} samples, '
              'val on {} samples, '
              'with batch size {}.'.format(num_train, num_val, batch_size))

        model.fit_generator(
                data_generator_wrapper(
                        lines[:num_train],
                        batch_size,
                        input_shape,
                        anchors,
                        num_classes),
                steps_per_epoch=max(1, num_train//batch_size),
                validation_data=data_generator_wrapper(
                        lines[num_train:],
                        batch_size,
                        input_shape,
                        anchors,
                        num_classes),
                validation_steps=max(1, num_val//batch_size),
                epochs=100,
                initial_epoch=50,
                callbacks=[logging, checkpoint, reduce_lr, early_stopping])

        model.save_weights(log_dir + 'trained_weights_final.h5')
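The helpers get_classes and get_anchors are not shown above. In keras-yolo3-style training scripts they usually read class names (one per line) and a single comma-separated line of anchor values; a sketch under that assumption (np is numpy):

def get_classes(classes_path):
    # one class name per line
    with open(classes_path) as f:
        class_names = f.readlines()
    return [c.strip() for c in class_names]

def get_anchors(anchors_path):
    # one line of comma-separated floats, reshaped into (N, 2) anchor boxes
    with open(anchors_path) as f:
        anchors = f.readline()
    anchors = [float(x) for x in anchors.split(',')]
    return np.array(anchors).reshape(-1, 2)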
Example No. 5
def train(model_name="kaji_mach_0",
          synth_data=False,
          target='MI',
          balancer=True,
          predict=False,
          return_model=False,
          n_percentage=1.0,
          time_steps=14,
          epochs=10):
    """

  Use Keras model.fit using parameter inputs

  Args:
  ----
  model_name : Parameter used for naming the checkpoint_dir
  synth_data : Default to False. Allows you to use synthetic or real data.

  Return:
  -------
  NoneType. Fits the model only.

  """

    f = open('./pickled_objects/X_TRAIN_{0}.txt'.format(target), 'rb')
    X_TRAIN = pickle.load(f)
    f.close()

    f = open('./pickled_objects/Y_TRAIN_{0}.txt'.format(target), 'rb')
    Y_TRAIN = pickle.load(f)
    f.close()

    f = open('./pickled_objects/X_VAL_{0}.txt'.format(target), 'rb')
    X_VAL = pickle.load(f)
    f.close()

    f = open('./pickled_objects/Y_VAL_{0}.txt'.format(target), 'rb')
    Y_VAL = pickle.load(f)
    f.close()

    f = open('./pickled_objects/x_boolmat_val_{0}.txt'.format(target), 'rb')
    X_BOOLMAT_VAL = pickle.load(f)
    f.close()

    f = open('./pickled_objects/y_boolmat_val_{0}.txt'.format(target), 'rb')
    Y_BOOLMAT_VAL = pickle.load(f)
    f.close()

    f = open('./pickled_objects/no_feature_cols_{0}.txt'.format(target), 'rb')
    no_feature_cols = pickle.load(f)
    f.close()

    X_TRAIN = X_TRAIN[0:int(n_percentage * X_TRAIN.shape[0])]
    Y_TRAIN = Y_TRAIN[0:int(n_percentage * Y_TRAIN.shape[0])]

    #build model
    model = build_model(no_feature_cols=no_feature_cols,
                        output_summary=True,
                        time_steps=time_steps)

    #init callbacks
    tb_callback = TensorBoard(log_dir='./logs/{0}_{1}.log'.format(
        model_name, time),
                              histogram_freq=0,
                              write_grads=False,
                              write_images=True,
                              write_graph=True)

    #Make checkpoint dir and init checkpointer
    checkpoint_dir = "./saved_models/{0}".format(model_name)

    if not os.path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir)

    checkpointer = ModelCheckpoint(filepath=checkpoint_dir +
                                   "/model.{epoch:02d}-{val_loss:.2f}.hdf5",
                                   monitor='val_loss',
                                   verbose=0,
                                   save_best_only=True,
                                   save_weights_only=False,
                                   mode='auto',
                                   period=1)

    #fit
    model.fit(
        x=X_TRAIN,
        y=Y_TRAIN,
        batch_size=16,
        epochs=epochs,
        callbacks=[tb_callback],  #, checkpointer],
        validation_data=(X_VAL, Y_VAL),
        shuffle=True)

    model.save('./saved_models/{0}.h5'.format(model_name))

    if predict:
        print('TARGET: {0}'.format(target))
        Y_PRED = model.predict(X_VAL)
        Y_PRED = Y_PRED[~Y_BOOLMAT_VAL]
        np.unique(Y_PRED)
        Y_VAL = Y_VAL[~Y_BOOLMAT_VAL]
        Y_PRED_TRAIN = model.predict(X_TRAIN)
        print('Confusion Matrix Validation')
        print(confusion_matrix(Y_VAL, np.around(Y_PRED)))
        print('Validation Accuracy')
        print(accuracy_score(Y_VAL, np.around(Y_PRED)))
        print('ROC AUC SCORE VAL')
        print(roc_auc_score(Y_VAL, Y_PRED))
        print('CLASSIFICATION REPORT VAL')
        print(classification_report(Y_VAL, np.around(Y_PRED)))

    if return_model:
        return model
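The seven open/load/close blocks at the top of train() repeat the same pattern; a small helper (hypothetical name, same ./pickled_objects layout) that could replace them:

import pickle

def load_pickle(name, target):
    # load one pickled object for the given prediction target
    with open('./pickled_objects/{0}_{1}.txt'.format(name, target), 'rb') as f:
        return pickle.load(f)

# e.g. X_TRAIN = load_pickle('X_TRAIN', target)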
Example No. 6
def build_model(train_generator, validation_generator, params=None):
    global layer2
    global layer3

    model = Sequential()
    model.add(
        Convolution2D(params['num_filters1'],
                      params['kernel_size1'],
                      strides=4,
                      padding='valid',
                      input_shape=(224, 224,
                                   3)))  #, kernel_initializer='glorot_normal'
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(3, 3), strides=2))

    model.add(Convolution2D(params['num_filters2'], 3, padding='same'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(3, 3), strides=2))

    if params['num_layers'] > 2:
        model.add(Convolution2D(params['num_filters3'], 3, padding='same'))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(3, 3), strides=2))
        model.add(Dropout(params['dropout']))

        model_dir2 = join(model_dir, str(params['num_layers']),
                          'model' + str(layer3), str(params['dropout']))
        layer3 += 1
    else:

        model_dir2 = join(model_dir, str(params['num_layers']),
                          'model' + str(layer2), params['optimizer'])
        layer2 += 1

    model.add(Convolution2D(8, 1))
    model.add(BatchNormalization())
    model.add(Activation('relu'))

    model.add(GlobalAveragePooling2D())
    model.add(Activation('softmax'))

    os.makedirs(model_dir2, exist_ok=True)
    plot_model(model,
               to_file=join(model_dir2, 'model.png'),
               show_shapes=True,
               show_layer_names=True)
    model.summary()

    model.compile(loss='categorical_crossentropy',
                  optimizer=params['optimizer'],
                  metrics=['accuracy'])

    tensorboard_cb = TensorBoard(log_dir=model_dir2 + '/logs',
                                 batch_size=batch_size,
                                 histogram_freq=0,
                                 update_freq='epoch')
    reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                  factor=0.2,
                                  patience=5,
                                  min_lr=0.0001)
    checkpointcallback = ModelCheckpoint(
        filepath=model_dir2 + '/weights.{epoch:02d}-{val_loss:.2f}.hdf5',
        period=1)

    history = model.fit_generator(
        train_generator,
        steps_per_epoch=(int((train_generator.samples) // batch_size) + 1),
        epochs=params['num_layers'] * number_of_epoch,
        validation_data=validation_generator,
        validation_steps=(int(
            (validation_generator.samples) // batch_size) + 1),
        workers=args.workers,
        callbacks=[checkpointcallback, reduce_lr, tensorboard_cb])

    model.save(join(model_dir2, 'model.h5'))

    return model, history
Example No. 7
model.add(
    Embedding(top_words,
              embedding_vecor_length,
              input_length=max_review_length))

# Convolutional model (3x conv, flatten, 2x dense)
model.add(Convolution1D(64, 3, padding='same'))
model.add(Convolution1D(32, 3, padding='same'))
model.add(Convolution1D(16, 3, padding='same'))
model.add(Flatten())
model.add(Dropout(0.2))
model.add(Dense(180, activation='sigmoid'))
model.add(Dropout(0.2))
model.add(Dense(1, activation='sigmoid'))

# Log to tensorboard
tensorBoardCallback = TensorBoard(log_dir='./logs', write_graph=True)
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

model.fit(X_train,
          y_train,
          epochs=1,
          callbacks=[tensorBoardCallback],
          batch_size=64)

# Evaluation on the test set
scores = model.evaluate(X_test, y_test, verbose=0)
print("Accuracy: %.2f%%" % (scores[1] * 100))
Example No. 8
if __name__ == "__main__":

    X_train, X_test, y_train, y_test = load_data()

    mu_predictor = ret_model()
    mu_predictor.compile(optimizer=adam(lr=0.001), loss="mean_squared_error")
    logdir = "./" + str(datetime.now().strftime('%s')) + "mu/"
    mu_predictor.fit(
        X_train,
        y_train,
        epochs=20,
        batch_size=128,
        shuffle=True,
        validation_split=0.1,
        callbacks=[
            TensorBoard(log_dir=logdir),
            EarlyStopping(patience=2),
            ModelCheckpoint(filepath=logdir +
                            'model_epoch.{epoch:02d}-los{val_loss:.4f}.h5',
                            monitor='val_loss',
                            verbose=1,
                            save_best_only=True,
                            mode='auto')
        ])
    print("mu_predictor is save to", logdir)

    # compute the squared-error series
    print("calculating squared error ...")
    temp = mu_predictor.predict(X_train)
    err_var = np.square(temp - y_train)
Example No. 9
                                   decay=0.0,
                                   amsgrad=False),
                    metrics=["accuracy"],
                    loss='sparse_categorical_crossentropy')
checkpoint = ModelCheckpoint("all_fc2.h5",
                             monitor='val_loss',
                             verbose=1,
                             save_best_only=True,
                             save_weights_only=False,
                             mode='auto',
                             period=1)
early = EarlyStopping(monitor='val_loss',
                      min_delta=0,
                      patience=100,
                      verbose=1,
                      mode='auto')
print("training started !")
tbCallBack = TensorBoard(log_dir='./Graph',
                         histogram_freq=0,
                         write_graph=True,
                         write_images=True)

history = model_final.fit(
    train_features,
    train_labels,
    #steps_per_epochs=100,
    epochs=1200,
    batch_size=batch_size,
    validation_data=(val_features, val_labels),
    callbacks=[checkpoint, early, tbCallBack])
x = Dropout(.5)(x)
predictions = Dense(10, activation='softmax')(x)


model = Model(inputs=base_model.input, outputs=predictions)

for layer in base_model.layers:
    layer.trainable = False

model.compile(optimizer='RMSprop', loss='categorical_crossentropy',
              metrics=['accuracy'])

print("First pass")
checkpointer = ModelCheckpoint(filepath='/home/fatema/Downloads/Re%3a_Regarding_Python%2fDeep_learning_Project/first.3.{epoch:02d}-{val_loss:.2f}.hdf5', verbose=1, save_best_only=True)
csv_logger = CSVLogger('first.3.log')
tensorboard=TensorBoard(log_dir="/home/fatema/Downloads/Re%3a_Regarding_Python%2fDeep_learning_Project/logs/{}".format(time())) #tensorboard declaration to visualize plot
model.fit_generator(generator,
                    validation_data=val_generator,
                    validation_steps=500,
                    steps_per_epoch=750,
                    epochs=5,
                    verbose=1,
                    callbacks=[csv_logger, checkpointer, tensorboard])

for layer in model.layers[:172]:
    layer.trainable = False
for layer in model.layers[172:]:
    layer.trainable = True

print("Second pass")
model.compile(optimizer=SGD(lr=0.0001, momentum=0.9), loss='categorical_crossentropy', metrics=['accuracy'])
Example No. 11
def main():
    # config setting
    conf_file = '../../conf/objectdetection/config_od.ini'
    t_conf = setting_conf(conf_file)

    annotation_path = t_conf.annotation_file
    log_dir = t_conf.save_log_dir
    classes_file = t_conf.classes_file
    anchors_file = t_conf.anchors_file
    num_classes = t_conf.num_classes
    input_shape = t_conf.input_shape[:2] # multiple of 32, hw
    pre_train_model = t_conf.pre_train_model
    freeze_body = t_conf.freeze_body
    save_log_dir = t_conf.save_log_dir
    save_model_dir = t_conf.save_model_dir
    epochs = t_conf.epochs
    batch_size = t_conf.batch_size

    anchors = get_anchors(anchors_file)

    if t_conf.tiny_flg == 1:
        model = create_tiny_model(input_shape, anchors, num_classes,
            freeze_body=freeze_body, weights_path=pre_train_model)
    else:
        model = create_model(input_shape, anchors, num_classes,
            freeze_body=freeze_body, weights_path=pre_train_model)

    logging = TensorBoard(log_dir=save_log_dir)
    checkpoint = ModelCheckpoint(save_model_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
        monitor='val_loss', save_weights_only=True, save_best_only=True, period=3)
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1)
    early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1)

    val_split = 0.1
    with open(annotation_path) as f:
        lines = f.readlines()
    np.random.seed(10101)
    np.random.shuffle(lines)
    np.random.seed(None)
    num_val = int(len(lines)*val_split)
    num_train = len(lines) - num_val

    # Train with frozen layers first, to get a stable loss.
    # Adjust num epochs to your dataset. This step is enough to obtain a not bad model.
    if True:
        model.compile(optimizer=Adam(lr=1e-3), loss={
            # use custom yolo_loss Lambda layer.
            'yolo_loss': lambda y_true, y_pred: y_pred})

        batch_size = batch_size
        print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
        model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),
                steps_per_epoch=max(1, num_train//batch_size),
                validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes),
                validation_steps=max(1, num_val//batch_size),
                epochs=int(epochs/2),
                initial_epoch=0,
                callbacks=[logging, checkpoint])
        model.save_weights(save_model_dir + 'trained_weights_stage_1.h5')

    # Unfreeze and continue training, to fine-tune.
    # Train longer if the result is not good.
    if True:
        for i in range(len(model.layers)):
            model.layers[i].trainable = True
        model.compile(optimizer=Adam(lr=1e-4), loss={'yolo_loss': lambda y_true, y_pred: y_pred}) # recompile to apply the change
        print('Unfreeze all of the layers.')

        batch_size = batch_size # note that more GPU memory is required after unfreezing the body
        print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
        model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),
            steps_per_epoch=max(1, num_train//batch_size),
            validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes),
            validation_steps=max(1, num_val//batch_size),
            epochs=int(epochs),
            initial_epoch=int(epochs/2),
            callbacks=[logging, checkpoint, reduce_lr, early_stopping])
        model.save_weights(save_model_dir + 'trained_weights_final.h5')
Example No. 12
    # fig, axes = plt.subplots(1, 2)
    # axes[0].imshow(train_images[10])
    # axes[0].set_title('image')
    # axes[1].imshow(train_masks[10][:, :, 0])
    # axes[1].set_title('mask')
    # plt.show()
    #
    # exit()
    callbacks_list = [
        ModelCheckpoint('models/linknet_gray' + str(BATCH) + '_batch.h5',
                        verbose=1,
                        save_best_only=True,
                        mode='min',
                        save_weights_only=True),
        TensorBoard(log_dir='./logs',
                    batch_size=BATCH,
                    write_images=True),
        ReduceLROnPlateau(verbose=1, factor=0.25, patience=3, min_lr=1e-6)
    ]

    model = Linknet(
        backbone_name='mobilenetv2',
        input_shape=(HEIGHT, WIDTH, 3),
        activation='sigmoid',
        decoder_block_type='transpose',
        encoder_weights='imagenet',
        decoder_use_batchnorm=True
    )

    model.summary()
    model.compile(optimizer=Adadelta(1e-3), loss=loss, metrics=[dice_score, jaccard_score])
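The loss, dice_score and jaccard_score objects are defined elsewhere in that project. A minimal Keras-backend sketch of the two segmentation metrics, assuming the standard definitions of the Dice coefficient and the Jaccard (IoU) index:

from keras import backend as K

def dice_score(y_true, y_pred, smooth=1.0):
    # Dice = 2 * |A ∩ B| / (|A| + |B|)
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2.0 * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)

def jaccard_score(y_true, y_pred, smooth=1.0):
    # Jaccard = |A ∩ B| / |A ∪ B|
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    union = K.sum(y_true_f) + K.sum(y_pred_f) - intersection
    return (intersection + smooth) / (union + smooth)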
Example No. 13
def _main():

    # Set train variables
    training_set_annotation_path = 'dist/training.txt'
    validation_set_annotation_path = 'dist/validation.txt'
    test_set_annotation_path = 'dist/test.txt'
    classes_path = 'dist/classes.txt'
    pretrained_weights_path = 'source/darknet53_weights.h5'
    logs_root_path = 'logs'
    class_names = get_classes(classes_path)
    num_classes = len(class_names)
    final_output_path = 'dist/'
    val_split = 0.1

    # Split training and validation set
    with open(training_set_annotation_path) as f:
        training_lines = f.readlines()
    with open(validation_set_annotation_path) as f:
        validation_lines = f.readlines()
    with open(test_set_annotation_path) as f:
        test_lines = f.readlines()

    # Set input size
    input_shape = (608, 608)  # multiple of 32, hw

    # Train with frozen layers first, to get a stable loss.

    batch_sizes = [8, 16, 32]
    init_learning_rates = [1e-2, 1e-3, 1e-4]
    anchors_paths = ['source/yolo_anchors.txt']

    reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                  factor=0.1,
                                  patience=3,
                                  verbose=1)
    early_stopping = EarlyStopping(monitor='val_loss',
                                   min_delta=0,
                                   patience=15,
                                   verbose=1)

    # Make log_dir folder
    folder_hash = datetime.now().strftime("%Y%m%d-%H%M%S")
    os.makedirs(logs_root_path, exist_ok=True)
    os.makedirs(logs_root_path + '/' + folder_hash, exist_ok=True)
    log_dir = logs_root_path + '/' + folder_hash + '/'

    # set logging
    checkpoint = ModelCheckpoint(
        log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
        monitor='val_loss',
        save_weights_only=True,
        save_best_only=True,
        period=3)

    for batch_size in batch_sizes:

        os.makedirs(logs_root_path + '/' + folder_hash + '/' + 'batchsize-' +
                    str(batch_size),
                    exist_ok=True)

        for init_learning_rate in init_learning_rates:

            os.makedirs(logs_root_path + '/' + folder_hash + '/' +
                        'batchsize-' + str(batch_size) + '/' + 'lr-' +
                        str(init_learning_rate),
                        exist_ok=True)

            for anchors_path in anchors_paths:
                anchors = get_anchors(anchors_path)

                anchors_filename = os.path.splitext(
                    os.path.basename(anchors_path))[0]

                # Make log dir
                os.makedirs(logs_root_path + '/' + folder_hash + '/' +
                            'batchsize-' + str(batch_size) + '/' + 'lr-' +
                            str(init_learning_rate) + '/' + anchors_filename,
                            exist_ok=True)
                log_dir = logs_root_path + '/' + folder_hash + '/' + 'batchsize-' + str(
                    batch_size) + '/' + 'lr-' + str(
                        init_learning_rate) + '/' + anchors_filename + '/'

                # set logging
                logging = TensorBoard(log_dir=log_dir, update_freq='batch')

                # Load and create model
                model = create_model(input_shape,
                                     anchors,
                                     num_classes,
                                     freeze_body=2,
                                     weights_path=pretrained_weights_path)

                # use custom yolo_loss Lambda layer.
                model.compile(optimizer=Adam(lr=init_learning_rate),
                              loss={
                                  'yolo_loss': lambda y_true, y_pred: y_pred
                              })
                print(
                    '-----------------------------------------------------------------------------------------'
                )
                print(
                    'Train on {} samples, val on {} samples, with batch size {} and learning rate {} with anchor file: {}'
                    .format(len(training_lines), len(validation_lines),
                            batch_size, init_learning_rate, anchors_path))
                print(
                    '-----------------------------------------------------------------------------------------'
                )
                model.fit_generator(
                    data_generator_wrapper(training_lines, batch_size,
                                           input_shape, anchors, num_classes),
                    steps_per_epoch=max(1,
                                        len(training_lines) // batch_size),
                    validation_data=data_generator_wrapper(
                        validation_lines, batch_size, input_shape, anchors,
                        num_classes),
                    validation_steps=max(1,
                                         len(validation_lines) // batch_size),
                    epochs=15,
                    initial_epoch=0,
                    callbacks=[logging, checkpoint, reduce_lr])
                model.save_weights(log_dir + 'test__weights_batch-size-' +
                                   str(batch_size) + '_lr-' +
                                   str(init_learning_rate) + '.h5')
                model.save(log_dir + 'test__models_batch-size-' +
                           str(batch_size) + '_lr-' + str(init_learning_rate) +
                           '.h5')
batch_size = 64
path = '../data'
train_mode = 'generator'

model = _inceptionv4(input_shape=(224, 224, 3),
                     dropout_keep=0.8,
                     weigth=1,
                     include_top=True,
                     nb_class_age=10,
                     nb_class_gender=2)

reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=10, verbose=0, min_delta=0.0001, min_lr=0)
early_stop = EarlyStopping(monitor='val_loss', min_delta=0, patience=0, verbose=0)
tf_board = TensorBoard(log_dir='logs',
                       histogram_freq=0,
                       batch_size=32,
                       write_graph=True,
                       write_grads=True,
                       update_freq='epoch')

callbacks = [reduce_lr, early_stop, tf_board]
opt = Adam(lr=0.01)
model.compile(optimizer=opt,
              loss=['categorical_crossentropy', 'categorical_crossentropy'],
              metrics=['accuracy', 'accuracy'])

list_dict_images = get_path_images(path)

random.shuffle(list_dict_images)

all_path_image = [dict_path['path_image'] for dict_path in list_dict_images]
# print(all_path_image)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
input_shape = X_train.shape
K.clear_session()

modelLSTM_2a = Sequential()
# We specify the maximum input length to our Embedding layer
# so we can later flatten the embedded inputs

modelLSTM_2a.add(Embedding(num_words, 8, input_length=max_review_length))

modelLSTM_2a.add(LSTM(32))
modelLSTM_2a.add(Dense(1))
modelLSTM_2a.add(Activation('sigmoid'))
modelLSTM_2a.summary()

modelLSTM_2a.compile(optimizer="adam",
                     loss='binary_crossentropy',
                     metrics=['accuracy'])

tensorboard = TensorBoard(log_dir="imdb_rnn_logs",
                          histogram_freq=0,
                          write_graph=True,
                          write_images=True)

LSTM_history = modelLSTM_2a.fit(X_train,
                                y_train,
                                epochs=10,
                                batch_size=32,
                                validation_split=0.3,
                                callbacks=[tensorboard])
Example No. 16
# load the saved data
data_train = np.load('saved-files/training_data.npy')
label_train = np.load('saved-files/training_labels.npy')
data_eval = np.load('saved-files/validation_data.npy')
label_eval = np.load('saved-files/validation_labels.npy')

# generate and compile the model
model = generate_model(len(data_train[0]))
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

# initialize tensorboard
tensorboard = TensorBoard(log_dir='logs/',
                          histogram_freq=0,
                          write_graph=True,
                          write_images=True)

# only using a couple of epochs, otherwise the model would overfit to the data
history = model.fit(data_train,
                    label_train,
                    validation_data=(data_eval, label_eval),
                    epochs=2,
                    callbacks=[tensorboard])
loss_history = history.history["loss"]

numpy_loss_history = np.array(loss_history)
np.savetxt("saved-files/loss_history.txt", numpy_loss_history, delimiter=",")

# model = load_model('saved-files/model.h5')
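The commented-out load_model call above implies the trained model is saved at that path elsewhere in the script; the matching save call (an assumption, mirroring the path used in the comment) would be:

model.save('saved-files/model.h5')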
Example No. 17
# data augmentation
train_datagen = ImageDataGenerator(
    # rotation_range=15,
    width_shift_range=0.1,
    height_shift_range=0.1,
    shear_range=0.1,
    zoom_range=0.1,
    horizontal_flip=True,
    fill_mode='nearest')

tb = TensorBoard(
    log_dir='./logs',  # log directory
    histogram_freq=1,  # how often (in epochs) to compute activation histograms; 0 disables them
    batch_size=32,  # how much data to use when computing the histograms
    write_graph=True,  # whether to store the network graph
    write_grads=False,  # whether to visualize gradient histograms
    write_images=False,  # whether to visualize the model parameters as images
    embeddings_freq=0,
    embeddings_layer_names=None,
    embeddings_metadata=None)
callbacks = [tb]


# generate mini-batches of data for training
def data_generator(image_path, filesname, labels, batch_size):
    batches = (len(labels) + batch_size - 1) // batch_size

    while True:
        for i in range(batches):
            y = labels[i * batch_size:(i + 1) * batch_size]
            y0, y1 = label2vec(y)
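The data_generator above is cut off after computing y0 and y1. A rough sketch of how such a two-output Keras generator typically continues (label2vec is the helper referenced in the snippet; the image-loading step and the (inputs, [y0, y1]) output format are assumptions):

import os
import numpy as np
from keras.preprocessing.image import load_img, img_to_array

def data_generator_sketch(image_path, filesname, labels, batch_size):
    batches = (len(labels) + batch_size - 1) // batch_size
    while True:
        for i in range(batches):
            names = filesname[i * batch_size:(i + 1) * batch_size]
            y = labels[i * batch_size:(i + 1) * batch_size]
            y0, y1 = label2vec(y)  # helper from the original snippet
            x = np.array([
                img_to_array(load_img(os.path.join(image_path, name))) / 255.
                for name in names
            ])
            yield x, [y0, y1]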
Example No. 18
model.add(BatchNormalization(axis=-1))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))

model.add(Flatten())
model.add(Dense(2048, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))

#compile model
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

#Tensorboard for visualise
tensorboard = TensorBoard(log_dir='Mnist_log/' + tensorboard_name,
                          histogram_freq=30)

#Feed the data
model.fit(x_train,
          y_train,
          epochs=2,
          batch_size=128,
          validation_data=(x_test, y_test),
          callbacks=[tensorboard])

#Save Model
model.save(model_Name + '.model')

#Delete existing model
del model
Example No. 19
def train():
    start = time.time()

    # param for dataset directory
    data_dir = "gallery-dll/danbooru/face"

    # specify hyperparams for training
    batch_size = 128
    z_shape = 100
    epochs = 10000
    dis_learning_rate = 0.005
    gen_learning_rate = 0.005
    dis_momentum = 0.5
    gen_momentum = 0.5
    dis_nesterov = True
    gen_nesterov = True

    # optimizers for generator and discriminator networks
    dis_optimizer = SGD(lr=dis_learning_rate,
                        momentum=dis_momentum,
                        nesterov=dis_nesterov)
    gen_optimizer = SGD(lr=gen_learning_rate,
                        momentum=gen_momentum,
                        nesterov=gen_nesterov)

    # load all images (append a wildcard so glob matches the files inside the directory)
    all_images = []
    for filename in glob.glob(data_dir + "/*"):
        all_images.append(imread(filename, flatten=False, mode='RGB'))

    X = np.array(all_images)
    X = (X - 127.5) / 127.5
    X = X.astype(np.float32)

    # build and compile generator model
    gen_model = build_generator()
    gen_model.compile(loss='mse', optimizer=gen_optimizer)

    # build and compile discriminator
    dis_model = build_discriminator()
    dis_model.compile(loss='binary_crossentropy', optimizer=dis_optimizer)

    # build and compile adversarial model
    adversarial_model = build_adversarial_model(gen_model, dis_model)
    adversarial_model.compile(loss='binary_crossentropy',
                              optimizer=gen_optimizer)

    # add tensorboard to visualize losses
    tensorboard = TensorBoard(log_dir="logs/{}".format(time.time()),
                              write_images=True,
                              write_grads=True,
                              write_graph=True)
    tensorboard.set_model(gen_model)
    tensorboard.set_model(dis_model)

    for epoch in range(epochs):
        print("-" * 50)
        print("[INFO] epoch: {}".format(epoch))

        dis_losses = []
        gen_losses = []

        num_batches = int(X.shape[0] / batch_size)
        print("[INFO] number of batches: {}".format(num_batches))

        for i in range(num_batches):
            print("-" * 25)
            print("[INFO] batch: {}".format(i))

            #sample a batch of noise vectors from a normal distribution
            z_noise = np.random.normal(0, 1, size=(batch_size, z_shape))

            # generate a batch of fake images using the generator network
            generated_images = gen_model.predict_on_batch(z_noise)
            """
            train the discriminator network
            """

            # start to train dis_model
            dis_model.trainable = True

            # sample a batch of real images from the set of all images
            image_batch = X[i * batch_size:(i + 1) * batch_size]

            # create smoothed real labels (0.9) and fake labels (0.0)
            y_real = np.ones((batch_size, )) * 0.9
            y_fake = np.zeros((batch_size, ))

            # train the discriminator network on real images and real labels
            dis_loss_real = dis_model.train_on_batch(image_batch, y_real)

            # train the discriminator network on fake images and fake labels
            dis_loss_fake = dis_model.train_on_batch(generated_images, y_fake)

            # calculate the average loss
            d_loss = (dis_loss_real + dis_loss_fake) / 2
            print("[INFO] d_loss: {}".format(d_loss))

            # stop training dis_model
            dis_model.trainable = False
            """
            train the generator model (adversarial model)
            """
            z_noise = np.random.normal(0, 1, size=(batch_size, z_shape))

            g_loss = adversarial_model.train_on_batch(z_noise, y_real)
            print("[INFO] g_loss: {}".format(g_loss))

            dis_losses.append(d_loss)
            gen_losses.append(g_loss)
        """
        sample some images to check the performance
        """
        if epoch % 100 == 0:
            z_noise = np.random.normal(0, 1, size=(batch_size, z_shape))
            gen_images1 = gen_model.predict_on_batch(z_noise)

            for idx, img in enumerate(gen_images1[:2]):
                # include the image index so the two samples do not overwrite each other
                save_rgb_image(img, "results/one_{}_{}.png".format(epoch, idx))

        print("[INFO] epoch: {}, dis_loss: {}".format(epoch,
                                                      np.mean(dis_losses)))
        print("[INFO] epoch: {}, gen_loss: {}".format(epoch,
                                                      np.mean(gen_losses)))
        """
        save losses to tensorboard after each epoch
        """
        write_log(tensorboard, 'discriminator_loss', np.mean(dis_losses),
                  epoch)
        write_log(tensorboard, 'generator_loss', np.mean(gen_losses), epoch)
    """
    save our models
    """
    gen_model.save("generator_model.h5")
    dis_model.save("discriminator_model.h5")

    print("[INFO] time elapsed: {}s", (time.time() - start))
Example No. 20
    optimizer = keras.optimizers.Adam()
elif opt == 'sgd':
    optimizer = keras.optimizers.SGD(lr=lr, momentum=0.9, nesterov=True)

model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=optimizer,
              metrics=['accuracy'])

filepath = os.path.join(
    CHECKPOINT_DIR, model_type + '_' + opt + '_gloss' +
    "_weights-{epoch:02d}-{val_acc:.3f}.h5")
earlystop_callback = EarlyStopping(monitor='val_loss',
                                   patience=5,
                                   min_delta=0.01)
tensorboard_callback = TensorBoard(
    os.path.join(TENSORBOARD_DIR,
                 model_type + '_' + opt + '_gloss' + '_tb_logs'))
checkpoint = ModelCheckpoint(filepath,
                             monitor='val_acc',
                             verbose=1,
                             save_best_only=True,
                             mode='max')
reduce_lr = ReduceLROnPlateau(monitor='val_acc',
                              factor=0.1,
                              patience=5,
                              cooldown=0,
                              min_lr=1e-5)
callbacks_list = [checkpoint, reduce_lr, tensorboard_callback]

# training loop
model.fit(x_train,
Example No. 21
model.add(Conv2D(512, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Flatten())

# The final Dense layer must have as many units as there are labels in the data
model.add(Dense(32))
model.add(Activation('relu'))
model.add(Dropout(0.2))

model.add(Dense(2))
model.add(Activation('softmax'))

model.compile(loss='sparse_categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

# Visualizing model open cmd cd to folder where the script is saved
# and type "tensorboard --logdir=logs\"
tensorboard = TensorBoard(log_dir="logs/{}".format(NAME))

model.fit(X,
          y,
          batch_size=10,
          epochs=8,
          validation_split=0.2,
          callbacks=[tensorboard])
model.save(NAME)
Example No. 22
# ----- PreProcessing -----
# Intensity normalisation
print("Apply intensity normalisation to input image dataset")

train_in_dataset = intensityNormalisation(train_in_dataset, 'float32')
valid_in_dataset = intensityNormalisation(valid_in_dataset, 'float32')

print("Training input image dataset dtype", train_in_dataset.dtype)
print("Validation input image dataset dtype", valid_in_dataset.dtype)

train_in_dataset = reshapeDataset(train_in_dataset)
train_gd_dataset = reshapeDataset(train_gd_dataset)
valid_in_dataset = reshapeDataset(valid_in_dataset)
valid_gd_dataset = reshapeDataset(valid_gd_dataset)

# ----- Model training -----
# Callbacks
tensorboardCB  = TensorBoard(log_dir=config["logs_folder"], histogram_freq=0, write_graph=True, write_grads=True, write_images=True)
csvLoggerCB    = CSVLogger(str(config["logs_folder"]+'training.log'))
checkpointCB   = ModelCheckpoint(filepath=str(config["logs_folder"]+'model-{epoch:03d}.h5'))
learningRateCB = learningRateSchedule()

print("Training")
model.fit_generator(generator=generatorRandomPatchsDolz(train_in_dataset, train_gd_dataset, config["batch_size"],
                                          config["patch_size_x"],config["patch_size_y"],config["patch_size_z"]),
                    steps_per_epoch=config["steps_per_epoch"], epochs=config["epochs"],
                    verbose=1, callbacks=[tensorboardCB, csvLoggerCB, checkpointCB, learningRateCB],
                    validation_data=generatorRandomPatchsDolz(valid_in_dataset, valid_gd_dataset, config["batch_size"],
                                          config["patch_size_x"],config["patch_size_y"],config["patch_size_z"]),
                    validation_steps=config["steps_per_epoch"])
Example No. 23
        labels=expected_output,
        batch_size=batch_size,
        shuffle=False)
    test_generator = DataGenerator(data=test_input,
                                   labels=test_output,
                                   batch_size=batch_size,
                                   shuffle=False)
    print('####')
    print(scene_input.shape)

    #Compile model
    model.compile(loss='mse', optimizer=opt)

    #Setting TensorBoard
    tbCallback = TensorBoard(log_dir='graph/',
                             histogram_freq=0,
                             write_graph=False,
                             write_images=False)

    # Setting the checkpoint callback
    mcpCallback = ModelCheckpoint('sslstm_weights_checkpoint.h5',
                                  monitor='val_loss',
                                  save_weights_only=True,
                                  save_best_only=True,
                                  period=1)

    # Setting the EarlyStopping callback
    esCallback = EarlyStopping(monitor='val_loss',
                               min_delta=0,
                               patience=20,
                               start_epoch=1000)
# print(model.summary())

labelencoder = LabelEncoder()
integer_encoded = labelencoder.fit_transform(data['v1'])
y = to_categorical(integer_encoded)
X_train, X_test, Y_train, Y_test = train_test_split(X,
                                                    y,
                                                    test_size=0.33,
                                                    random_state=42)
print(X_train.shape, Y_train.shape)
print(X_test.shape, Y_test.shape)
model = createmodel()

tensorboard = TensorBoard(log_dir='./SA_logsPart3',
                          histogram_freq=0,
                          write_graph=True,
                          write_images=False)
batch_size = 32
hist = model.fit(X_train,
                 Y_train,
                 epochs=5,
                 batch_size=batch_size,
                 verbose=2,
                 callbacks=[tensorboard])

score, acc = model.evaluate(X_test, Y_test, verbose=2, batch_size=batch_size)

#
# # serialize model to YAML
# model_yaml = model.to_yaml()
# with open("model_SA.yaml", "w") as yaml_file:
Example No. 25
def _main():
    annotation_path = 'train.txt'
    log_dir = 'logs/000/'
    classes_path = 'model_data/coco_classes.txt'
    anchors_path = 'model_data/yolo_anchors.txt'
    class_names = get_classes(classes_path)
    num_classes = len(class_names)
    anchors = get_anchors(anchors_path)

    input_shape = (416, 416)  # multiple of 32, hw

    model, bottleneck_model, last_layer_model = create_model(
        input_shape,
        anchors,
        num_classes,
        freeze_body=2,
        weights_path='model_data/yolo_weights.h5'
    )  # make sure you know what you freeze

    logging = TensorBoard(log_dir=log_dir)
    checkpoint = ModelCheckpoint(
        log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
        monitor='val_loss',
        save_weights_only=True,
        save_best_only=True,
        period=3)
    reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                  factor=0.1,
                                  patience=3,
                                  verbose=1)
    early_stopping = EarlyStopping(monitor='val_loss',
                                   min_delta=0,
                                   patience=10,
                                   verbose=1)

    val_split = 0.1
    with open(annotation_path) as f:
        lines = f.readlines()
    np.random.seed(10101)
    np.random.shuffle(lines)
    np.random.seed(None)
    num_val = int(len(lines) * val_split)
    num_train = len(lines) - num_val

    # Train with frozen layers first, to get a stable loss.
    # Adjust num epochs to your dataset. This step is enough to obtain a not bad model.
    if True:
        # perform bottleneck training
        if not os.path.isfile("bottlenecks.npz"):
            print("calculating bottlenecks")
            batch_size = 8
            bottlenecks = bottleneck_model.predict_generator(
                data_generator_wrapper(lines,
                                       batch_size,
                                       input_shape,
                                       anchors,
                                       num_classes,
                                       random=False,
                                       verbose=True),
                steps=(len(lines) // batch_size) + 1,
                max_queue_size=1)
            np.savez("bottlenecks.npz",
                     bot0=bottlenecks[0],
                     bot1=bottlenecks[1],
                     bot2=bottlenecks[2])

        # load bottleneck features from file
        dict_bot = np.load("bottlenecks.npz")
        bottlenecks_train = [
            dict_bot["bot0"][:num_train], dict_bot["bot1"][:num_train],
            dict_bot["bot2"][:num_train]
        ]
        bottlenecks_val = [
            dict_bot["bot0"][num_train:], dict_bot["bot1"][num_train:],
            dict_bot["bot2"][num_train:]
        ]

        # train last layers with fixed bottleneck features
        batch_size = 8
        print("Training last layers with bottleneck features")
        print('with {} samples, val on {} samples and batch size {}.'.format(
            num_train, num_val, batch_size))
        last_layer_model.compile(optimizer='adam',
                                 loss={
                                     'yolo_loss': lambda y_true, y_pred: y_pred
                                 })
        last_layer_model.fit_generator(
            bottleneck_generator(lines[:num_train], batch_size, input_shape,
                                 anchors, num_classes, bottlenecks_train),
            steps_per_epoch=max(1, num_train // batch_size),
            validation_data=bottleneck_generator(lines[num_train:], batch_size,
                                                 input_shape, anchors,
                                                 num_classes, bottlenecks_val),
            validation_steps=max(1, num_val // batch_size),
            epochs=30,
            initial_epoch=0,
            max_queue_size=1)
        model.save_weights(log_dir + 'trained_weights_stage_0.h5')

        # train last layers with random augmented data
        model.compile(
            optimizer=Adam(lr=1e-3),
            loss={
                # use custom yolo_loss Lambda layer.
                'yolo_loss': lambda y_true, y_pred: y_pred
            })
        batch_size = 16
        print('Train on {} samples, val on {} samples, with batch size {}.'.
              format(num_train, num_val, batch_size))
        model.fit_generator(data_generator_wrapper(lines[:num_train],
                                                   batch_size, input_shape,
                                                   anchors, num_classes),
                            steps_per_epoch=max(1, num_train // batch_size),
                            validation_data=data_generator_wrapper(
                                lines[num_train:], batch_size, input_shape,
                                anchors, num_classes),
                            validation_steps=max(1, num_val // batch_size),
                            epochs=50,
                            initial_epoch=0,
                            callbacks=[logging, checkpoint])
        model.save_weights(log_dir + 'trained_weights_stage_1.h5')

    # Unfreeze and continue training, to fine-tune.
    # Train longer if the result is not good.
    if True:
        for i in range(len(model.layers)):
            model.layers[i].trainable = True
        model.compile(optimizer=Adam(lr=1e-4),
                      loss={
                          'yolo_loss': lambda y_true, y_pred: y_pred
                      })  # recompile to apply the change
        print('Unfreeze all of the layers.')

        batch_size = 4  # note that more GPU memory is required after unfreezing the body
        print('Train on {} samples, val on {} samples, with batch size {}.'.
              format(num_train, num_val, batch_size))
        model.fit_generator(
            data_generator_wrapper(lines[:num_train], batch_size, input_shape,
                                   anchors, num_classes),
            steps_per_epoch=max(1, num_train // batch_size),
            validation_data=data_generator_wrapper(lines[num_train:],
                                                   batch_size, input_shape,
                                                   anchors, num_classes),
            validation_steps=max(1, num_val // batch_size),
            epochs=100,
            initial_epoch=50,
            callbacks=[logging, checkpoint, reduce_lr, early_stopping])
        model.save_weights(log_dir + 'trained_weights_final.h5')
Example No. 26
            print('Loading weights from file ...')
            model.load_weights(args.weights)
    except IOError:
        print("No model found")

    checkpointer = ModelCheckpoint(os.path.join(
        args.output, 'weights.{epoch:04d}-{val_loss:.3f}.hdf5'),
                                   monitor='val_loss',
                                   verbose=0,
                                   save_best_only=True,
                                   save_weights_only=False,
                                   mode='auto',
                                   period=1)
    # early_stop = EarlyStopping(monitor='val_loss', patience=50, verbose=0, mode='auto')
    logger = CSVLogger(filename=os.path.join(args.output, 'history.csv'))

    board = TensorBoard(log_dir=args.output,
                        histogram_freq=0,
                        write_graph=True,
                        write_images=True)

    history = model.fit_generator(
        generate_thunderhill_batches(genAll(args.dataset), args.batch),
        epochs=args.epoch,
        steps_per_epoch=50,
        validation_data=generate_thunderhill_batches(genSim001(args.dataset),
                                                     args.batch),
        validation_steps=5,
        callbacks=[checkpointer, logger, board]  #, early_stop]
    )
Example No. 27
def get_tb_cb(modelName):
    return TensorBoard(log_dir='/tmp/tflearn_logs/keras-' + modelName + '-' +
                       date.today().isoformat() + '-' + str(time.time()),
                       histogram_freq=10,
                       write_graph=True,
                       write_images=False)
def train(data_type,
          seq_length,
          model,
          saved_model=None,
          concat=False,
          class_limit=None,
          image_shape=None,
          load_to_memory=True):
    # Set variables.
    nb_epoch = 1000000
    batch_size = 32

    # Helper: Save the model.
    checkpointer = ModelCheckpoint(  # saves the best model checkpoint for each run
        filepath='./data/checkpoints/' + model + '-' + data_type + \
            '.{epoch:03d}-{val_loss:.3f}.hdf5',
        verbose=1,
        save_best_only=True)

    # Helper: TensorBoard
    tb = TensorBoard(log_dir='./data/logs')

    # Helper: Stop when we stop learning.
    early_stopper = EarlyStopping(
        patience=100000)  # stop after this many epochs with no improvement

    # Helper: Save results.
    timestamp = time.time()
    csv_logger = CSVLogger('./data/logs/' + model + '-' + 'training-' + \
        str(timestamp) + '.log')

    # Get the data and process it.
    if image_shape is None:
        data = DataSet(seq_length=seq_length, class_limit=class_limit)
    else:
        data = DataSet(seq_length=seq_length,
                       class_limit=class_limit,
                       image_shape=image_shape)

    # Get samples per epoch.
    # Multiply by 0.7 to attempt to guess how much of data.data is the train set.

    steps_per_epoch = (len(data.data) * 0.7) // batch_size
    print("Iterations per epoach", steps_per_epoch)

    if load_to_memory:
        # Get data.

        X, y = data.get_all_sequences_in_memory('train', data_type, concat)
        X_test, y_test = data.get_all_sequences_in_memory(
            'test', data_type, concat)
    else:
        # Get generators.
        generator = data.frame_generator(batch_size, 'train', data_type,
                                         concat)
        val_generator = data.frame_generator(batch_size, 'test', data_type,
                                             concat)

    # Get the model.
    rm = ResearchModels(len(data.classes), model, seq_length,
                        saved_model)  #object for the architecture we need
    print(rm)
    # Fit!
    if load_to_memory:
        # Use standard fit.
        rm.model.fit(X,
                     y,
                     batch_size=batch_size,
                     validation_data=(X_test, y_test),
                     verbose=1,
                     callbacks=[tb, early_stopper, csv_logger],
                     epochs=nb_epoch)
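    else:
        # Sketch (not in the original snippet, which cuts off here): the generator
        # branch implied by the otherwise unused frame generators above.
        # validation_steps is an assumed value, not taken from the source.
        rm.model.fit_generator(
            generator=generator,
            steps_per_epoch=steps_per_epoch,
            epochs=nb_epoch,
            verbose=1,
            callbacks=[tb, early_stopper, csv_logger],
            validation_data=val_generator,
            validation_steps=10)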
Example No. 29
f1 = TimeDistributed(Flatten())(p2)
d1 = TimeDistributed(Dense(48))(f1)
d2 = TimeDistributed(Dense(10))(d1)

f2 = Flatten()(d1)
d3 = Dense(units=100, activation='sigmoid')(f2)
d4 = Dense(units=10, activation='sigmoid')(d3)
d5 = Dense(units=1, activation='sigmoid')(d4)

model = Model(inpTensor, d5)

model.compile(loss='mean_squared_logarithmic_error',
              optimizer='adam',
              metrics=['accuracy'])

tensorboard = TensorBoard(log_dir="logs/{}".format(time()))

filepath = "weights.best.h5"

callbacks_list = [tensorboard]
# Train model on dataset
model.fit_generator(generator=training_generator,
                    validation_data=validation_generator,
                    use_multiprocessing=True,
                    workers=8,
                    epochs=50,
                    callbacks=callbacks_list)

model.save(filepath)

X, y = validation_generator.__getitem__(0)
Example No. 30
    model.save('/scratch/li.baol/checkpoint_test/' + job_name + '_' + str(current_epoch) + '.h5')
    print ('(SIGTERM) terminating the process')

    save_time = int(time.time() - save_start)
    message = job_name + ' save ' + str(save_time)
    send_signal.send(args.node, 10002, message)

    sys.exit()

signal.signal(signal.SIGTERM, terminateProcess)

#################################################################################

logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name

tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')

class PrintEpoch(keras.callbacks.Callback):
    def on_epoch_begin(self, epoch, logs=None):
        global current_epoch 
        #remaining_epochs = epochs - epoch
        current_epoch = epoch
        print('current epoch ' + str(current_epoch))
        global epoch_begin_time
        epoch_begin_time = time.time()

my_callback = PrintEpoch()

callbacks = [tensorboard_callback, my_callback]

load_time = int(time.time() - load_start)
def on_epoch_end(self, epoch, logs=None):
    TensorBoardEmbeddingMixin.on_epoch_end(self, epoch)
    TensorBoard.on_epoch_end(self, epoch, logs)
Example No. 32
from etp import etp
from keras import backend as K

# In[2]:

HIDDEN_SIZE = 1024
MAX_NODES = 100
MAX_VC_DIM = 10

# In[3]:

NAME = "double_lstm_encdec_{}".format(str(datetime.datetime.now()))

# In[4]:

tensorboard = TensorBoard(log_dir='logs/{}'.format(NAME))

# In[5]:

data = np.load('../../datasets/2019-05-29 14:16:06.689081_100.npy')

# In[6]:

X, y, phy_coordinates = data[0], data[1], data[2]

# In[7]:

X.shape

# In[8]: