Example #1
def train_and_predict(data_path, data_filename, batch_size, n_epoch):
    """
    Create a model, load the data, and train it.
    """
    """
    Step 1: Load the data
    """
    hdf5_filename = os.path.join(data_path, data_filename)
    print("-" * 30)
    print("Loading the data from HDF5 file ...")
    print("-" * 30)

    imgs_train, msks_train, imgs_validation, msks_validation = \
        load_data(hdf5_filename)

    print("-" * 30)
    print("Creating and compiling model ...")
    print("-" * 30)
    """
    Step 2: Define the model
    """
    model = load_model(imgs_train.shape, msks_train.shape)

    model_filename, model_callbacks = get_callbacks()
    """
    Step 3: Train the model on the data
    """
    print("-" * 30)
    print("Fitting model with training data ...")
    print("-" * 30)

    history = model.fit(imgs_train,
                        msks_train,
                        batch_size=batch_size,
                        epochs=n_epoch,
                        validation_data=(imgs_validation, msks_validation),
                        verbose=1,
                        shuffle="batch",
                        callbacks=model_callbacks)

    # Append training log
    # with open("training.log","a+") as fp:
    #     fp.write("{}: {}\n".format(datetime.datetime.now(),
    #                              history.history["val_dice_coef"]))
    """
    Step 4: Evaluate the best model
    """
    print("-" * 30)
    print("Loading the best trained model ...")
    print("-" * 30)

    model = evaluate_model(model_filename, imgs_validation, msks_validation)
    """
    Step 5: Save the best model for inference
    """

    print("-" * 30)
    print("Saving the model for inference ...")
    print("-" * 30)
    save_inference_model(model, imgs_train.shape, msks_train.shape)
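A note on the fit call above: shuffle="batch" is the Keras option intended for HDF5-backed arrays; it shuffles in batch-sized chunks rather than element-wise, which plain HDF5 indexing cannot do efficiently. The get_callbacks() helper itself is not shown in the snippet; below is only a minimal sketch of a version with this return signature, assuming standard Keras callbacks and a hypothetical ./output checkpoint path.

import os
from tensorflow import keras

def get_callbacks():
    """Sketch: return the checkpoint filename plus a standard callback list."""
    os.makedirs("./output", exist_ok=True)  # hypothetical output directory
    model_filename = os.path.join("./output", "unet_model.hdf5")
    checkpoint = keras.callbacks.ModelCheckpoint(model_filename,
                                                 monitor="val_loss",
                                                 save_best_only=True,
                                                 verbose=1)
    tensorboard = keras.callbacks.TensorBoard(log_dir="./tensorboard_logs")
    return model_filename, [checkpoint, tensorboard]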
Example #2
def set_up_model(model_name, input_shape, model_dir):
    if not os.path.isdir(model_dir):
        logger.info("Create directory {}".format(model_dir))
        os.makedirs(model_dir)

    model = get_model(input_shape, model_name, nclass=2)

    # callbacks
    callbacks = get_callbacks(os.path.join(model_dir, model_name))

    return model, callbacks
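Here get_callbacks receives the checkpoint path prefix directly. Its body is not shown, so the following is only a plausible sketch assuming a ModelCheckpoint/EarlyStopping pair keyed on validation loss.

from tensorflow import keras

def get_callbacks(checkpoint_path):
    """Sketch: save the best weights and stop early when val_loss plateaus."""
    return [
        keras.callbacks.ModelCheckpoint(checkpoint_path + ".h5",
                                        monitor="val_loss",
                                        save_best_only=True),
        keras.callbacks.EarlyStopping(monitor="val_loss", patience=10),
    ]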
Example #3
def train_model(train_filelist, config, classes, rare_classes):
    batch_size = config['train_params']['batch_size']
    different_classes_per_batch = config['train_params']['different_classes_per_batch']
    nb_epoch = config['train_params']['num_epochs']
    steps_per_epoch = config['train_params']['steps_per_epoch']
    img_size = config['img']['img_size']

    model = prepare_model(config)
    callbacks = get_callbacks(config)
    model.fit_generator(data_generator(train_filelist, classes, rare_classes, img_size, batch_size, different_classes_per_batch),
                        steps_per_epoch=steps_per_epoch, epochs=nb_epoch, callbacks=callbacks)
    return model
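In this example get_callbacks(config) presumably derives its callbacks from the same config dict. A sketch under that assumption; the checkpoint_path key is hypothetical, and plain 'loss' is monitored because this run passes no validation data.

from tensorflow import keras

def get_callbacks(config):
    """Sketch: build the callback list from the training config (keys are assumptions)."""
    ckpt_path = config['train_params'].get('checkpoint_path', './weights.h5')
    return [
        keras.callbacks.ModelCheckpoint(ckpt_path, monitor='loss', save_best_only=True),
        keras.callbacks.ReduceLROnPlateau(monitor='loss', factor=0.5, patience=3),
    ]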
Example #4
def train_model():
    arguments = parcero()
    (x_train, y_train), (x_test, y_test) = dataprep.get_and_pad_imdb_dataset(vocab_size=arguments.vocab_size,
                                                                            maxlen=arguments.max_seq_len)
    imdb_word_index = dataprep.get_imdb_word_index()
    max_index_value = max(imdb_word_index.values()) 

    classifier = model.get_model(vocab_size=max_index_value)
    callbacks = model.get_callbacks(arguments.job_dir)

    history = classifier.fit(x_train, y_train, epochs=arguments.epochs, batch_size=arguments.batch_size,
                             validation_data=(x_test, y_test),
                             callbacks=callbacks)
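model.get_callbacks(arguments.job_dir) is likewise not shown; a minimal sketch that writes TensorBoard logs and a checkpoint under the job directory (the file names are assumptions).

import os
import tensorflow as tf

def get_callbacks(job_dir):
    """Sketch: log and checkpoint under the job directory."""
    return [
        tf.keras.callbacks.TensorBoard(log_dir=os.path.join(job_dir, "logs")),
        tf.keras.callbacks.ModelCheckpoint(os.path.join(job_dir, "model.h5"),
                                           save_best_only=True),
    ]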
Example #5
def train_and_predict(data_path, data_filename, batch_size, n_epoch):
    """
    Create a model, load the data, and train it.
    """
    """
    Step 1: Load the data
    """
    hdf5_filename = os.path.join(data_path, data_filename)
    print("-" * 30)
    print("Loading the data from HDF5 file ...")
    print("-" * 30)

    imgs_train, msks_train, imgs_validation, msks_validation = \
        load_data(hdf5_filename, args.batch_size,
                  [args.crop_dim, args.crop_dim])

    print("-" * 30)
    print("Creating and compiling model ...")
    print("-" * 30)
    """
    Step 2: Define the model
    """
    model = load_model(imgs_train.shape, msks_train.shape)

    model_filename, model_callbacks = get_callbacks()
    """
    Step 3: Train the model on the data
    """
    print("-" * 30)
    print("Fitting model with training data ...")
    print("-" * 30)

    model.fit(imgs_train,
              msks_train,
              batch_size=batch_size,
              epochs=n_epoch,
              validation_data=(imgs_validation, msks_validation),
              verbose=1,
              shuffle="batch",
              callbacks=model_callbacks)
    """
    Step 4: Evaluate the best model
    """
    print("-" * 30)
    print("Loading the best trained model ...")
    print("-" * 30)

    model = evaluate_model(model_filename, imgs_validation, msks_validation)
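evaluate_model presumably reloads the best checkpoint written during training and scores it on the validation set; a minimal sketch under that assumption (a custom loss or metric such as a Dice coefficient would additionally need custom_objects).

from tensorflow import keras

def evaluate_model(model_filename, imgs_validation, msks_validation):
    """Sketch: reload the best checkpoint and report validation metrics."""
    model = keras.models.load_model(model_filename)
    scores = model.evaluate(imgs_validation, msks_validation, verbose=1)
    print(dict(zip(model.metrics_names, scores)))
    return model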
Example #6
def train():
    generator_train = data_generator(train_tfrecord_dir, "train")
    generator_test = data_generator(test_tfrecord_dir, "test")
    images, masks = next(generator_train)
    # save_img(images, masks)

    input_shape = (512, 512, 1)
    model = uent2d_model(input_shape=input_shape,
                         loss_function=keras.losses.binary_crossentropy)
    # Print the model architecture
    model.summary()

    # # Save a diagram of the model
    # from keras.utils import plot_model
    # plot_model(model, to_file="model.png")

    tensorboard = keras.callbacks.TensorBoard(
        log_dir='./logs',  # log directory
        histogram_freq=0,  # how often (in epochs) to compute activation histograms; 0 disables them
        batch_size=5,  # how much data to use when computing the histograms
        write_graph=True,  # whether to store the network graph
        write_grads=True,  # whether to visualize gradient histograms
        write_images=True,  # whether to visualize model weights as images
        embeddings_freq=0,
        embeddings_layer_names=None,
        embeddings_metadata=None)
    if not os.path.exists("./ckpt"):
        os.mkdir("./ckpt")
    filepath = "./ckpt/{epoch:03d}-{val_loss:.4f}.h5"
    callbacks = get_callbacks(filepath)
    callbacks.append(tensorboard)
    model.fit_generator(generator_train,
                        validation_data=generator_test,
                        steps_per_epoch=steps_per_epoch,
                        callbacks=callbacks,
                        epochs=epochs,
                        verbose=1,
                        validation_steps=1)
    model.save("./ckpt/save_model.h5")
    return None
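data_generator is not shown; fit_generator only requires an iterator that yields (images, masks) batches indefinitely. A placeholder sketch with the 512x512 single-channel shape used above (the actual TFRecord parsing is elided).

import numpy as np

def data_generator(tfrecord_dir, split, batch_size=5):
    """Sketch: yield (images, masks) batches forever."""
    while True:
        images = np.zeros((batch_size, 512, 512, 1), dtype=np.float32)
        masks = np.zeros((batch_size, 512, 512, 1), dtype=np.float32)
        # ... real code would decode records from tfrecord_dir here ...
        yield images, masks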
Example #7
TEST_SAMPLES = len(all_test_label)

num_steps_train = int(tf.math.ceil(float(TRAIN_SAMPLES) / BATCH_SIZE))
num_steps_val = int(tf.math.ceil(float(VALID_SAMPLES) / BATCH_SIZE))
num_steps_test = int(tf.math.ceil(float(TEST_SAMPLES) / BATCH_SIZE))

# outs = flor(input_size, 143, 0.001)
outs = minh_model(input_size, 143)

inputs, outputs, optimizer = outs
model = Model(inputs=inputs, outputs=outputs)
model.compile(optimizer=optimizer, loss=ctc_loss_lambda_func)
model.summary()

checkpoint = './models/checkpoint_1.hdf5'
callback = get_callbacks(checkpoint)

start_time = datetime.datetime.now()

history = model.fit(train_ds,
                    steps_per_epoch=num_steps_train,
                    epochs=20,
                    validation_data=valid_ds,
                    callbacks=callback,
                    validation_steps=num_steps_val)

total_time = datetime.datetime.now() - start_time

loss = history.history['loss']
val_loss = history.history['val_loss']
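ctc_loss_lambda_func is not included in the snippet. A common shape for such a wrapper around the built-in Keras CTC loss, assuming zero-padded integer labels and inputs that span the full time axis.

import tensorflow as tf

def ctc_loss_lambda_func(y_true, y_pred):
    """Sketch: CTC loss for y_pred of shape (batch, time, classes) and zero-padded y_true labels."""
    batch = tf.shape(y_pred)[0]
    input_length = tf.fill([batch, 1], tf.shape(y_pred)[1])  # every sample uses all time steps
    label_length = tf.math.count_nonzero(y_true, axis=-1, keepdims=True, dtype=tf.int32)
    return tf.keras.backend.ctc_batch_cost(y_true, y_pred, input_length, label_length)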
Example #8
    parser.add_argument("--train", action="store_true", default=False)
    parser.add_argument("--test", action="store_true", default=False)
    parser.add_argument("--path", type=str,
                        required=False)  # path to test data
    args = parser.parse_args()

    # ------------- TRAIN -------------
    if args.train:
        train_ds, num_steps_train, _ = build_dataset('train',
                                                     cache=True,
                                                     augment=True)
        test_ds, num_steps_val, _ = build_dataset('test', training=False)
        model = mb.build_model(input_size=INPUT_SIZE,
                               d_model=vocab_size + 1,
                               learning_rate=0.001)
        callbacks = mb.get_callbacks('weights_1.hdf5', 'val_loss', 1)
        batch_stats_callback = mb.CollectBatchStats()
        start_time = datetime.datetime.now()

        h = model.fit(train_ds,
                      steps_per_epoch=num_steps_train,
                      epochs=100,
                      validation_data=test_ds,
                      validation_steps=num_steps_val,
                      callbacks=callbacks)

        total_time = datetime.datetime.now() - start_time

        loss = h.history['loss']
        val_loss = h.history['val_loss']
        min_val_loss = min(val_loss)
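mb.CollectBatchStats is presumably a small custom callback that records per-batch metrics, in the style of the TensorFlow tutorials; a sketch under that assumption. Note that the snippet constructs batch_stats_callback but never adds it to the callbacks passed to fit, so as written it collects nothing.

import tensorflow as tf

class CollectBatchStats(tf.keras.callbacks.Callback):
    """Sketch: collect the loss after every training batch."""
    def __init__(self):
        super().__init__()
        self.batch_losses = []

    def on_train_batch_end(self, batch, logs=None):
        self.batch_losses.append(logs["loss"])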