예제 #1
0
def create_and_fit_model():
    """Build, compile, and train the CNN in two phases.

    Phase 1 trains with the backbone frozen as configured by
    ``create_cnn``; phase 2 unfreezes layers at/above
    ``block14_sepconv1`` and fine-tunes for ``CONFIG.TUNE_EPOCHS``.

    Relies on module-level names: CONFIG, DATA_DIR, MODEL_CKPT_FILENAME,
    dataset_training, dataset_validation, run, logger, create_cnn,
    set_trainable_below_layers, AzureLogCallback,
    create_tensorboard_callback, setup_wandb, wandb, WandbCallback.
    """
    input_shape = (CONFIG.IMAGE_TARGET_HEIGHT, CONFIG.IMAGE_TARGET_WIDTH, 3)
    model = create_cnn(input_shape, dropout=True)
    model.summary()
    logger.info(len(model.trainable_weights))

    # Persist the best model (lowest validation loss) seen during training.
    best_model_path = str(DATA_DIR / f'outputs/{MODEL_CKPT_FILENAME}')
    checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
        filepath=best_model_path,
        monitor="val_loss",
        save_best_only=True,
        verbose=1)

    # Batch once and reuse across both fit() phases (the original re-batched
    # the same datasets for every fit() call).
    dataset_batches = dataset_training.batch(CONFIG.BATCH_SIZE)
    validation_batches = dataset_validation.batch(CONFIG.BATCH_SIZE)

    training_callbacks = [
        AzureLogCallback(run),
        create_tensorboard_callback(),
        checkpoint_callback,
    ]
    if getattr(CONFIG, 'USE_WANDB', False):
        setup_wandb()
        wandb.init(project="ml-project", entity="cgm-team")
        wandb.config.update(CONFIG)
        training_callbacks.append(
            WandbCallback(log_weights=True,
                          log_gradients=True,
                          training_data=dataset_batches))

    optimizer = tf.keras.optimizers.RMSprop(learning_rate=CONFIG.LEARNING_RATE)

    # Compile the model.
    model.compile(optimizer=optimizer,
                  loss="binary_crossentropy",
                  metrics=["accuracy"])

    # Phase 1: train the model with the initial trainable-layer setup.
    model.fit(dataset_batches,
              validation_data=validation_batches,
              epochs=CONFIG.EPOCHS,
              callbacks=training_callbacks,
              verbose=2)

    # Phase 2: unfreeze the top convolution layers and fine-tune.
    set_trainable_below_layers('block14_sepconv1', model)
    model.fit(dataset_batches,
              validation_data=validation_batches,
              epochs=CONFIG.TUNE_EPOCHS,
              callbacks=training_callbacks,
              verbose=2)
예제 #2
0
def train_network(file='input/cnews.train.txt'):
    """Train the text CNN with early stopping on validation accuracy.

    Trains for up to TOTAL_ITERATIONS batches, logging per-batch loss and
    accuracy to TRAIN_LOG. Every VALID_ITERATION steps it evaluates on the
    validation set; when validation accuracy improves, the session is
    checkpointed to SAVER_DIC, otherwise a patience counter is incremented
    and training stops once it reaches DELAY_NUM.

    :param file: path of the training-data file fed to model.get_some_date.
    """
    acc_value = 0.0   # best validation accuracy seen so far
    iterations = 0    # validation checks since the last improvement

    loss, optimizer, acc = model.create_cnn()

    # Saver used both to resume from and to write checkpoints.
    saver = tf.train.Saver()

    init = tf.global_variables_initializer()
    # `with` ensures the session is closed even on early exit
    # (the original leaked it).
    with tf.Session() as sess:
        sess.run(init)

        # Resume from a previous checkpoint if one exists.
        if os.path.isfile(SAVER_DIC + '.index'):
            saver.restore(sess, SAVER_DIC)

        for i in range(TOTAL_ITERATIONS):
            view_bar('训练进度:', i, TOTAL_ITERATIONS)
            # Fetch one training batch.
            x_data, y_data = model.get_some_date(file, model.BATCH_SIZE)
            feed_dict = {model.x: x_data, model.y_: y_data, model.dropout_keep_prob: 0.5}
            _, loss_train_value, acc_train_value = sess.run([optimizer, loss, acc], feed_dict=feed_dict)
            # Log training loss and accuracy.
            with open(TRAIN_LOG, 'a') as f:
                f.write(str(i) + ',' + str(loss_train_value) + ',' + str(acc_train_value) + '\n')

            # Periodically evaluate on the validation set.
            if i % VALID_ITERATION == 0:
                model.USE_L2 = False  # disable L2 regularization for evaluation
                x_valid_data, y_valid_data = model.get_some_date(VALID_DIC, model.BATCH_SIZE)
                feed_dict = {model.x: x_valid_data, model.y_: y_valid_data, model.dropout_keep_prob: 1}
                # BUG FIX: do NOT run `optimizer` here — the original executed
                # the training op on validation batches, leaking the
                # validation set into training. Only evaluate loss/accuracy.
                loss_valid_value, acc_valid_value = sess.run([loss, acc], feed_dict=feed_dict)
                if acc_valid_value > acc_value:
                    # New best: checkpoint and reset the patience counter.
                    acc_value = acc_valid_value
                    saver.save(sess, SAVER_DIC)
                    iterations = 0
                    # Log validation loss and accuracy.
                    with open(VALID_LOG, 'a') as f:
                        f.write(str(i) + ',' + str(loss_valid_value) + ',' + str(acc_valid_value) + '\n')
                else:
                    iterations += 1
                    # Stop when accuracy has not improved for DELAY_NUM checks.
                    if iterations >= DELAY_NUM:
                        break
                model.USE_L2 = True  # re-enable L2 regularization for training
예제 #3
0
def test(file='input/cnews.test.txt'):
    """Evaluate the trained network on a test file.

    Restores the latest checkpoint when one exists, then runs ITERATIONS
    batches through the graph, appending per-batch loss and accuracy to
    TEST_LOG as ``step,loss,accuracy`` lines.

    :param file: path of the test-data file fed to model.get_some_date.
    """
    loss, optimizer, acc = model.create_cnn()

    # Saver is only used to restore learned parameters here.
    saver = tf.train.Saver()

    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)

    # Load learned parameters when a checkpoint file is present.
    if os.path.isfile(SAVER_DIC + '.index'):
        saver.restore(sess, SAVER_DIC)

    for step in range(ITERATIONS):
        view_bar('测试进度:', step, ITERATIONS)
        batch_x, batch_y = model.get_some_date(file, model.BATCH_SIZE)
        feed = {model.x: batch_x, model.y_: batch_y, model.dropout_keep_prob: 1}
        loss_value, acc_value = sess.run([loss, acc], feed_dict=feed)
        # Append this batch's loss and accuracy to the test log.
        with open(TEST_LOG, 'a') as f:
            f.write(str(step) + ',' + str(loss_value) + ',' + str(acc_value) + '\n')
예제 #4
0
from keras.optimizers import Adam, SGD
import numpy as np
import random
import os
import model as model_arch
from keras.callbacks import ReduceLROnPlateau, TensorBoard, ModelCheckpoint, EarlyStopping
from ImageBatchGenerator import ImageBatchGenerator
from config import EPOCHS, INIT_LR, BS, IMG_HEIGTH_WIDTH, NUM_MATCH_NOT_MATCH_PAIR_CNT, TRAIN_IMAGE_PATH, TEST_IMAGE_PATH

if __name__ == '__main__':
    random.seed(42)

    print("[INFO] compiling model...")
    model = model_arch.create_cnn(IMG_HEIGTH_WIDTH, IMG_HEIGTH_WIDTH, 3)

    # sgd = SGD(lr=0.01, clipvalue=0.5)
    opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
    model.compile(loss="binary_crossentropy",
                  optimizer=opt,
                  metrics=["accuracy"])

    # train the network
    print("[INFO] training network...")

    reduce_lr_on_plateau = ReduceLROnPlateau(monitor='val_acc',
                                             factor=0.2,
                                             patience=2,
                                             verbose=0,
                                             mode='auto',
                                             min_delta=0.0001,
                                             cooldown=0,
예제 #5
0
File: train.py  Project: pankaja0285/cgm-ml
# Build the activation dataset: load each pickle, normalize, then cache
# and prefetch for the activation pass.
paths = paths_activate
dataset = tf.data.Dataset.from_tensor_slices(paths)
dataset_activation = (
    dataset
    .map(lambda path: tf_load_pickle(path, CONFIG.NORMALIZATION_VALUE))
    .cache()
    .prefetch(tf.data.experimental.AUTOTUNE)
)

# Datasets are prepared; now build the model.
input_shape = (CONFIG.IMAGE_TARGET_HEIGHT, CONFIG.IMAGE_TARGET_WIDTH, 1)
model = create_cnn(input_shape, dropout=True)
model.summary()

# Checkpoint the best model (lowest validation loss) under outputs/.
best_model_path = str(DATA_DIR / f'outputs/{MODEL_CKPT_FILENAME}')
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
    filepath=best_model_path,
    monitor="val_loss",
    save_best_only=True,
    verbose=1)

training_callbacks = [
    AzureLogCallback(run),
    create_tensorboard_callback(),
    checkpoint_callback,
]

optimizer = tf.keras.optimizers.Nadam(learning_rate=CONFIG.LEARNING_RATE)
예제 #6
0
# Encode labels: 0 = match, 1 = not_match.
labels = lb.fit_transform(labels)

# Split the pairs: 80% of the data for training, the remaining 20%
# for testing.
print(len(data_list1))
(trainX, testX, trainXX, testXX, trainY, testY) = train_test_split(
    data_list1, data_list2, labels, test_size=0.2, random_state=42)

# Image generator for on-the-fly data augmentation.
aug = ImageDataGenerator(rotation_range=30, width_shift_range=0.1,
                         height_shift_range=0.1, shear_range=0.2,
                         zoom_range=0.2, horizontal_flip=True,
                         fill_mode="nearest")

print("[INFO] compiling model...")
# Two identical CNN branches, one per image of a pair.
cnn1 = model.create_cnn(56, 56, 3)
cnn2 = model.create_cnn(56, 56, 3)

# Merge the two branch outputs into a single feature vector.
combinedInput = concatenate([cnn1.output, cnn2.output])
x = Dense(16, activation="relu")(combinedInput)
# Single sigmoid unit for the binary match / not-match prediction.
y = Dense(1, activation="sigmoid")(x)

# NOTE: this rebinds the module-level name `model` (previously the model
# module) to the combined Keras model.
model = Model(inputs=[cnn1.input, cnn2.input], outputs=y)

opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"])
예제 #7
0
    def prepare_nn(self):
        """Construct and return the CNN defined by this instance's settings.

        Delegates entirely to ``create_cnn`` with the shape, class count,
        loss, optimizer and summary flag stored on this object.
        """
        return create_cnn(self.input_shape, self.n_classes, self.loss,
                          self.optimizer, self.summary)
def gen_flow_for_two_inputs(X1, X2, y):
    """Yield synchronized batches ``([x1_batch, x2_batch], y_batch)`` forever.

    Both Keras generators share the same seed, so their shuffling stays in
    lockstep and the label arrays of the two streams are identical.
    """
    stream_a = image_generator.flow(X1, y, batch_size=BS, seed=666)
    stream_b = image_generator.flow(X2, y, batch_size=BS, seed=666)
    while True:
        batch_a = stream_a.next()
        batch_b = stream_b.next()
        # Labels match by construction (same seed), so either batch's
        # labels can be yielded. An assert_array_equal check here was
        # removed by the original author because it slowed training.
        yield [batch_a[0], batch_b[0]], batch_a[1]


gen_flow = gen_flow_for_two_inputs(trainX, trainXX, trainY)

# initialize the model
print("[INFO] compiling model...")
cnn1 = model.create_cnn(img_heigth_width, img_heigth_width, 3)
cnn2 = model.create_cnn(img_heigth_width, img_heigth_width, 3)

# combining tensor output of the two cnn models
combinedInput = concatenate([cnn1.output, cnn2.output])

x = Dense(16, activation="relu")(combinedInput)

# binary predictor
y = Dense(1, activation="sigmoid")(x)

model = Model(inputs=[cnn1.input, cnn2.input], outputs=y)
sgd = SGD(lr=0.01, clipvalue=0.5)
opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"])