Example no. 1
def train(model=model(), checkpoint="latest.hdf5"):
    dataset = build_dataset()
    callback = TensorBoard("_tensor_board")
    callback.set_model(model)
    labels = interested_words
    for i in range(10000):
        save_model(model, "checkpoint.model")
        print("Chunk "+str(i)+" of 10000...")
        X,Y = get_batch(dataset,batchsize=1000)
        for j in range(10):
            logs = model.train_on_batch(np.array(X),np.array(Y))
            print("loss:",logs)
            write_log(callback, ["training loss"], [logs], i*10+j)
        X,Y = get_batch(dataset,batchsize=100,batchtype="test")
        results = model.predict(X)
        accuracy = 0
        for result,actual in zip(results,Y):
            #print("running test")
            x = np.argmax(result)
            j = np.argmax(actual)
            try:
                print("expected " + labels[j], " got " + labels[x])
            except IndexError:
                pass
            if x == j: accuracy += 1
        write_log(callback,["test accuracy"],[accuracy],i)
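
The snippet above calls a write_log helper that is not shown. A minimal sketch, assuming the common pattern of writing scalars through the TensorBoard callback's summary writer (TF 1.x-style API, where the callback still exposes a writer attribute):

import tensorflow as tf

def write_log(callback, names, logs, batch_no):
    # write each named scalar into the TensorBoard callback's event file
    for name, value in zip(names, logs):
        summary = tf.compat.v1.Summary()
        summary_value = summary.value.add()
        summary_value.simple_value = value
        summary_value.tag = name
        callback.writer.add_summary(summary, batch_no)
        callback.writer.flush()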
Example no. 2
    os.mkdir("logs")
if not os.path.isdir("data"):
    os.mkdir("data")

data = load_data(ticker)

data["df"].to_csv(os.path.join("data", f"{ticker}_{date}.csv"))
model_name = f"{date}_{ticker}"
model = create_model(100)

# Tensorflow callbacks
checkpoint = ModelCheckpoint(os.path.join("results", model_name + ".h5"),
                             save_weights_only=True,
                             save_best_only=True,
                             verbose=1)
tensorboard = TensorBoard(log_dir=os.path.join("logs", model_name))

history = model.fit(data["x_train"],
                    data["y_train"],
                    batch_size=64,
                    epochs=400,
                    validation_data=(data["x_test"], data["y_test"]),
                    callbacks=[checkpoint, tensorboard],
                    verbose=1)

model_path = os.path.join("results", model_name) + ".h5"
model.save(model_path)
model.load_weights(model_path)

# evaluate the model
mse, mae = model.evaluate(data["x_test"], data["y_test"], verbose=0)
def train(args, model, input_shape):
    log_dir = 'logs'

    # callbacks for training process
    checkpoint = ModelCheckpoint(os.path.join(
        log_dir,
        'ep{epoch:03d}-val_loss{val_loss:.3f}-val_acc{val_acc:.3f}-val_top_k_categorical_accuracy{val_top_k_categorical_accuracy:.3f}.h5'
    ),
                                 monitor='val_acc',
                                 mode='max',
                                 verbose=1,
                                 save_weights_only=False,
                                 save_best_only=True,
                                 period=1)
    logging = TensorBoard(log_dir=log_dir,
                          histogram_freq=0,
                          write_graph=False,
                          write_grads=False,
                          write_images=False,
                          update_freq='batch')
    terminate_on_nan = TerminateOnNaN()
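    # Note: learn_rates below has 5 entries and is indexed with epoch // 30, so the
    # schedule only covers epochs 0-149; clamping the index with
    # min(epoch // 30, len(learn_rates) - 1) would avoid an IndexError on longer runs.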
    learn_rates = [0.05, 0.01, 0.005, 0.001, 0.0005]
    lr_scheduler = LearningRateScheduler(
        lambda epoch: learn_rates[epoch // 30])

    # data generator
    train_datagen = ImageDataGenerator(
        preprocessing_function=preprocess,
        zoom_range=0.25,
        width_shift_range=0.05,
        height_shift_range=0.05,
        brightness_range=[0.5, 1.5],
        rotation_range=30,
        shear_range=0.2,
        channel_shift_range=0.1,
        #rescale=1./255,
        vertical_flip=True,
        horizontal_flip=True)

    test_datagen = ImageDataGenerator(preprocessing_function=preprocess)

    train_generator = train_datagen.flow_from_directory(
        args.train_data_path,
        target_size=input_shape,
        batch_size=args.batch_size)

    test_generator = test_datagen.flow_from_directory(
        args.val_data_path,
        target_size=input_shape,
        batch_size=args.batch_size)

    # get optimizer
    optimizer = get_optimizer(args.optim_type, args.learning_rate)

    # start training
    model.compile(optimizer=optimizer,
                  metrics=['accuracy', 'top_k_categorical_accuracy'],
                  loss='categorical_crossentropy')

    print('Train on {} samples, val on {} samples, with batch size {}.'.format(
        train_generator.samples, test_generator.samples, args.batch_size))
    model.fit_generator(
        train_generator,
        steps_per_epoch=train_generator.samples // args.batch_size,
        epochs=args.total_epoch,
        workers=cpu_count() - 1,  # parallelize image feeding but leave one CPU core idle
        initial_epoch=args.init_epoch,
        use_multiprocessing=True,
        max_queue_size=10,
        validation_data=test_generator,
        validation_steps=test_generator.samples // args.batch_size,
        callbacks=[logging, checkpoint, lr_scheduler, terminate_on_nan])

    # Finally store model
    model.save(os.path.join(log_dir, 'trained_final.h5'))
Example no. 4
train_data = train_data.shuffle(batch_size*4).padded_batch(batch_size, padded_shapes=data_shapes, padding_values=padding_values)
val_data = val_data.padded_batch(batch_size, padded_shapes=data_shapes, padding_values=padding_values)
#
ssd_model = get_model(hyper_params)
ssd_custom_losses = CustomLoss(hyper_params["neg_pos_ratio"], hyper_params["loc_loss_alpha"])
ssd_model.compile(optimizer=Adam(learning_rate=1e-3),
                  loss=[ssd_custom_losses.loc_loss_fn, ssd_custom_losses.conf_loss_fn])
init_model(ssd_model)
#
ssd_model_path = io_utils.get_model_path(backbone)
if load_weights:
    ssd_model.load_weights(ssd_model_path)
ssd_log_path = io_utils.get_log_path(backbone)
# Prior boxes are calculated once and reused for all operations, because all images have the same size
prior_boxes = bbox_utils.generate_prior_boxes(hyper_params["feature_map_shapes"], hyper_params["aspect_ratios"])
ssd_train_feed = train_utils.generator(train_data, prior_boxes, hyper_params)
ssd_val_feed = train_utils.generator(val_data, prior_boxes, hyper_params)

checkpoint_callback = ModelCheckpoint(ssd_model_path, monitor="val_loss", save_best_only=True, save_weights_only=True)
tensorboard_callback = TensorBoard(log_dir=ssd_log_path)
learning_rate_callback = LearningRateScheduler(train_utils.scheduler, verbose=0)

step_size_train = train_utils.get_step_size(train_total_items, batch_size)
step_size_val = train_utils.get_step_size(val_total_items, batch_size)
ssd_model.fit(ssd_train_feed,
              steps_per_epoch=step_size_train,
              validation_data=ssd_val_feed,
              validation_steps=step_size_val,
              epochs=epochs,
              callbacks=[checkpoint_callback, tensorboard_callback, learning_rate_callback])
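
train_utils.scheduler is passed to LearningRateScheduler above, but its body is not part of this excerpt. A hypothetical step-decay schedule of the kind typically used here:

def scheduler(epoch):
    # hypothetical step decay; the actual train_utils.scheduler may differ
    if epoch < 100:
        return 1e-3
    elif epoch < 125:
        return 1e-4
    return 1e-5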
Example no. 5
from tensorflow.keras.callbacks import TensorBoard
#Training Settings

validation_split = 0.2  # Fraction of the dataset reserved for testing (values range from 0 to 1)
batch_size = 32
epochs = 5
verbose = 1
callbacks = [TensorBoard(log_dir='./logs', histogram_freq=0, batch_size=batch_size, write_graph=True)]
random_dataset_loc=['testing/dataset/main_input.npy','testing/dataset/main_output.npy', 'testing/dataset/aux_input.npy','testing/dataset/aux_output.npy']
dataset_loc=['dataset/main_input.npy','dataset/main_output.npy', 'dataset/aux_input.npy','dataset/aux_output.npy']
random_save_dir = 'testing/models'
model_name = 'Haggis_Aero_Img_Rec.h5'
Example no. 6
              loss="sparse_categorical_crossentropy",
              metrics=["acc"])
stop = EarlyStopping(monitor='val_loss',
                     patience=20,
                     restore_best_weights=True,
                     verbose=1)
lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=10, verbose=1)
mcpath = 'C:/nmb/nmb_data/h5/speechvgg_mels_del4.h5'
mc = ModelCheckpoint(mcpath,
                     monitor='val_loss',
                     verbose=1,
                     save_best_only=True,
                     save_weights_only=True)
tb = TensorBoard(log_dir='C:/nmb/nmb_data/graph/' +
                 start.strftime("%Y%m%d-%H%M%S") + "/",
                 histogram_freq=0,
                 write_graph=True,
                 write_images=True)
history = model.fit(x_train,
                    y_train,
                    epochs=5000,
                    batch_size=8,
                    validation_split=0.2,
                    callbacks=[stop, lr, mc, tb])

# --------------------------------------
# Evaluation and prediction
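# The checkpoint above stored weights only (save_weights_only=True), so the model
# architecture built earlier in the script must exist before load_weights is called.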
model.load_weights('C:/nmb/nmb_data/h5/speechvgg_mels_del4.h5')

# _loss, _acc, _f1score = model.evaluate(x_test, y_test, batch_size=8)
# print('loss: {:.4f}, accuracy: {:.4f}, f1score: {:.4f}'.format(_loss, _acc, _f1score))
Example no. 7
run_params = {
    'n_layers': 4,
    'pool': 'max',
    "layers_n_channels": [16, 32, 64, 128],
    'layers_n_non_lins': 2,
}
n_epochs = 300
run_id = f'unet_af{AF}_{int(time.time())}'
chkpt_path = f'checkpoints/{run_id}' + '-{epoch:02d}.hdf5'

chkpt_cback = ModelCheckpoint(chkpt_path, period=100)
log_dir = op.join('logs', run_id)
tboard_cback = TensorBoard(
    log_dir=log_dir,
    profile_batch=0,
    histogram_freq=0,
    write_graph=True,
    write_images=False,
)
tqdm_cb = TQDMCallback(metric_format="{name}: {value:e}")
tqdm_cb.on_train_batch_begin = tqdm_cb.on_batch_begin
tqdm_cb.on_train_batch_end = tqdm_cb.on_batch_end

model = unet(input_size=(320, 320, 1), lr=1e-3, **run_params)
print(model.summary())

model.fit_generator(
    train_gen,
    steps_per_epoch=n_volumes_train,
    epochs=n_epochs,
    validation_data=val_gen,
)

model.add(tf.contrib.keras.layers.Dense(100, activation='relu'))
# Fully connected (Dense) hidden layers
model.add(tf.contrib.keras.layers.Dense(100, activation='relu'))
model.add(tf.contrib.keras.layers.Dense(100, activation='relu'))
# Fully connected layer followed by a softmax activation
model.add(
    tf.contrib.keras.layers.Dense(units=category, activation=tf.nn.softmax))

model.summary()

# Configure the model's loss function, optimizer, and the metrics used to judge model quality
model.compile(loss=tf.contrib.keras.losses.categorical_crossentropy,
              optimizer=tf.contrib.keras.optimizers.Adadelta(),
              metrics=['accuracy'])

tensorboard = TensorBoard(log_dir="logs")
# Train the model

gen = tf.keras.preprocessing.image.ImageDataGenerator(rotation_range=8,
                                                      width_shift_range=0.08,
                                                      shear_range=0.3,
                                                      height_shift_range=0.08,
                                                      zoom_range=0.08)

train_generator = gen.flow(x_train, y_train2, batch_size=64)

# Train the model
""" 
history=model.fit(x_train, y_train2,
          batch_size=10000,
          epochs=400,
model.add(LSTM(128, input_shape=train_x.shape[1:], activation="tanh"))
model.add(Dropout(0.2))
model.add(BatchNormalization())

model.add(Dense(32, activation="relu"))
model.add(Dropout(0.2))

model.add(Dense(2, activation='softmax'))

opt = tf.keras.optimizers.Adam(lr=1e-3, decay=1e-6)

model.compile(loss='sparse_categorical_crossentropy',
              optimizer=opt,
              metrics=["accuracy"])

tensorboard = TensorBoard(log_dir=f"logs/{NAME}")

filepath = "RNN_Final-{epoch:02d}-{val_acc:.3f}"
checkpoint = ModelCheckpoint("models/{}.model".format(filepath,
                                                      monitor='val_acc',
                                                      verbose=1,
                                                      save_best_only=True,
                                                      mode='max'))

history = model.fit(train_x,
                    train_y,
                    batch_size=BATCH_SIZE,
                    epochs=EPOCHS,
                    validation_data=(val_x, val_y),
                    callbacks=[tensorboard, checkpoint])
Example no. 10
def main():
    args = cmd_parser()

    physical_devices = tf.config.list_physical_devices('GPU')
    tf.config.set_visible_devices(physical_devices[args.gpu:], 'GPU')

    if_fast_run = False

    print(f"TensorFlow version: {tf.__version__}.")  # Keras backend
    print(f"Keras version: {keras.__version__}.")
    print("If in eager mode: ", tf.executing_eagerly())
    assert tf.__version__[0] == "2"

    # Prepare model
    n = 2  # order of ResNetv2, 2 or 6
    version = 2
    depth = model_depth(n, version)
    model_type = "two_conv2d_net"
    model_type = 'ResNet%dv%d' % (depth, version)  # "ResNet20v2"

    # or model_type = "keras.applications.ResNet50V2"
    model_type = "keras.applications.ResNet50V2"

    # data path
    competition_name = "dogs-vs-cats-redux-kernels-edition"
    data_dir = os.path.expanduser(f"~/.kaggle/competitions/{competition_name}")

    # experiment time
    date_time = datetime.now().strftime("%Y%m%d-%H%M%S")

    prefix = os.path.join("~", "Documents", "DeepLearningData",
                          competition_name)
    subfix = os.path.join(
        model_type, '-'.join((date_time, "pretrain", str(args.pretrain))))
    ckpt_dir = os.path.expanduser(os.path.join(prefix, "ckpts", subfix))
    log_dir = os.path.expanduser(os.path.join(prefix, "logs", subfix))
    makedir_exist_ok(ckpt_dir)
    makedir_exist_ok(log_dir)

    # Input parameters
    IMAGE_WIDTH = IMAGE_HEIGHT = 128
    image_size = (IMAGE_WIDTH, IMAGE_HEIGHT)
    IMAGE_CHANNELS = 3
    input_shape = (IMAGE_WIDTH, IMAGE_HEIGHT, IMAGE_CHANNELS)
    num_classes = 2

    # Data loaders
    train_generator, validation_generator = data_generators(
        data_dir, target_size=image_size, batch_size=args.batch_size)

    # Create model
    model = create_model(model_type,
                         input_shape,
                         num_classes,
                         pretrain=args.pretrain)

    # Compile model
    from tensorflow.keras.optimizers import Adam
    from tensorflow.keras.losses import BinaryCrossentropy
    from tensorflow.keras.metrics import Recall, Precision, TruePositives, FalsePositives, TrueNegatives, FalseNegatives, BinaryAccuracy, AUC
    metrics = [
        Recall(name='recall'),
        Precision(name='precision'),
        TruePositives(name='tp'),  # thresholds=0.5
        FalsePositives(name='fp'),
        TrueNegatives(name='tn'),
        FalseNegatives(name='fn'),
        BinaryAccuracy(name='accuracy'),
        # AUC0(name='auc_cat_0'),  # AUC with 'cat' as the positive class
        AUC(name='auc_dog_1')  # AUC with 'dog' as the positive class
    ]
    model.compile(loss=BinaryCrossentropy(),
                  optimizer=Adam(learning_rate=lr_schedule(args.start_epoch)),
                  metrics=metrics)

    # Resume training
    # model_ckpt_file = MODEL_CKPT
    # if os.path.exists(model_ckpt_file):
    #     print("Model ckpt found! Loading...:%s" % model_ckpt_file)
    #     model.load_weights(model_ckpt_file)

    # define callbacks
    from tensorflow.keras.callbacks import CSVLogger, LearningRateScheduler, TensorBoard, ModelCheckpoint
    # model_name = "%s.start-%d-epoch-{epoch:03d}-val_loss-{val_loss:.4f}.h5" % (
    #     model_type, args.start_epoch)
    model_name = "%s-epoch-{epoch:03d}-val_loss-{val_loss:.4f}.h5" % (
        model_type)
    # Prepare model model saving directory.
    filepath = os.path.join(ckpt_dir, model_name)
    checkpoint = ModelCheckpoint(filepath=filepath,
                                 monitor='val_loss',
                                 verbose=1)

    file_writer = tf.summary.create_file_writer(
        os.path.join(log_dir, "metrics"))  # custom scalars
    file_writer.set_as_default()
    csv_logger = CSVLogger(os.path.join(log_dir, "training.log.csv"),
                           append=True)
    tensorboard_callback = TensorBoard(log_dir, histogram_freq=1)
    lr_scheduler = LearningRateScheduler(lr_schedule, verbose=1)
    callbacks = [csv_logger, tensorboard_callback, lr_scheduler, checkpoint]

    # Fit model
    epochs = 3 if if_fast_run else args.epochs
    model.fit(
        train_generator,
        epochs=epochs,
        validation_data=validation_generator,
        callbacks=callbacks,
        initial_epoch=args.start_epoch,
        verbose=1  # 2 for notebook
    )
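
lr_schedule is used both for the initial Adam learning rate and by LearningRateScheduler, but it is not defined in this excerpt. A sketch assuming a step-decay schedule like the one in the standard Keras ResNet example:

def lr_schedule(epoch):
    # step decay: start at 1e-3 and shrink at fixed epoch milestones (assumed values)
    lr = 1e-3
    if epoch > 180:
        lr *= 0.5e-3
    elif epoch > 160:
        lr *= 1e-3
    elif epoch > 120:
        lr *= 1e-2
    elif epoch > 80:
        lr *= 1e-1
    return lr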
Example no. 11
                                          render=False)
else:
    env = football_env.create_environment(env_name='academy_run_to_score',
                                          representation='simple115',
                                          logdir='./tmp1/football',
                                          write_video=False,
                                          render=False)

state = env.reset()
state_dims = env.observation_space.shape
n_actions = env.action_space.n

dummy_n = np.zeros((1, 1, n_actions))
dummy_1 = np.zeros((1, 1, 1))

tensor_board = TensorBoard(log_dir='./logs')

if image_based:
    model_actor = get_model_actor_image(input_dims=state_dims,
                                        output_dims=n_actions)
    model_critic = get_model_critic_image(input_dims=state_dims)
else:
    model_actor = get_model_actor_simple(input_dims=state_dims,
                                         output_dims=n_actions)
    model_critic = get_model_critic_simple(input_dims=state_dims)

ppo_steps = 128
target_reached = False
best_reward = 0
iters = 0
max_iters = 100
Example no. 12
                            'anchors': anchors,
                            'num_classes': num_classes,
                            'ignore_thresh': 0.5,
                            'label_smoothing': label_smoothing
                        })(loss_input)

    model = Model([model_body.input, *y_true], model_loss)

    #-------------------------------------------------------------------------------#
    #   Training parameter settings
    #   logging is the directory where TensorBoard logs are saved
    #   checkpoint controls how weights are saved; period sets how many epochs pass between saves
    #   reduce_lr sets how the learning rate is reduced
    #   early_stopping enables early stopping: training ends automatically once val_loss
    #   stops improving, which indicates the model has essentially converged
    #-------------------------------------------------------------------------------#
    logging = TensorBoard(log_dir=log_dir)
    checkpoint = ModelCheckpoint(
        log_dir + "/ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5",
        save_weights_only=True,
        save_best_only=False,
        period=1)
    early_stopping = EarlyStopping(min_delta=0, patience=10, verbose=1)

    #----------------------------------------------------------------------#
    #   The validation split is done here in train.py
    #   It is normal for 2007_test.txt and 2007_val.txt to be empty; they are not used during training.
    #   With the current split, the validation-to-training ratio is 1:9
    #----------------------------------------------------------------------#
    val_split = 0.1
    with open(annotation_path) as f:
        lines = f.readlines()
Example no. 13
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)

model = Sequential()
model.add(LSTM(10, input_shape=(row, 1)))
model.add(Dense(100))
model.add(Dense(200))
model.add(Dense(100))
model.add(Dense(10, activation='softmax'))

model.summary()

model.compile(loss='categorical_crossentropy', metrics=['acc'], optimizer='adam')
early_stopping = EarlyStopping(monitor='loss', patience=20, mode='auto')
to_hist = TensorBoard(log_dir='graph', write_graph=True, write_images=True, histogram_freq=0)
model.fit(x_train, y_train, epochs=300, batch_size=512, validation_split=0.2, callbacks=[early_stopping, to_hist])

loss, acc=model.evaluate(x_test, y_test, batch_size=512)

x_predict = x_test[20:30]
y_answer = y_test[20:30]


y_predict = model.predict(x_predict)

y_predict = np.argmax(y_predict, axis=1)
y_answer = np.argmax(y_answer, axis=1)

print("acc",acc)
print("loss",loss)
def train(data_dir,
          model_output_dir,
          epochs=100,
          name=None,
          batch_size=16,
          gpus=1,
          learning_rate=0.1,
          nb_slices=1,
          threshold=10.0,
          load_weights=None,
          initial_epoch=0,
          nb_layers_per_block=4,
          nb_blocks=4,
          nb_initial_filters=16,
          growth_rate=12,
          compression_rate=0.5,
          activation='relu',
          initializer='glorot_uniform',
          batch_norm=True):

    args = locals()

    # Set up dataset
    train_image_dir = os.path.join(data_dir, 'images/train')
    val_image_dir = os.path.join(data_dir, 'images/val')
    train_meta_file = os.path.join(data_dir, 'meta/train.csv')
    val_meta_file = os.path.join(data_dir, 'meta/val.csv')
    train_labels = pd.read_csv(train_meta_file)['ZOffset'].values
    val_labels = pd.read_csv(val_meta_file)['ZOffset'].values

    train_generator = SliceSelectionSequence(train_labels,
                                             train_image_dir,
                                             batch_size,
                                             1000,
                                             jitter=True,
                                             sigmoid_scale=threshold)
    val_generator = SliceSelectionSequence(val_labels,
                                           val_image_dir,
                                           batch_size,
                                           50,
                                           sigmoid_scale=threshold)

    # Directories and files to use
    if name is None:
        name = 'untitled_model_' + datetime.datetime.now().strftime(
            '%Y_%m_%d_%H_%M_%S')
    output_dir = os.path.join(model_output_dir, name)
    tflow_dir = os.path.join(output_dir, 'tensorboard_log')
    weights_path = os.path.join(output_dir,
                                'weights-{epoch:02d}-{val_loss:.4f}.hdf5')
    architecture_path = os.path.join(output_dir, 'architecture.json')
    tensorboard = TensorBoard(log_dir=tflow_dir,
                              histogram_freq=0,
                              write_graph=False,
                              write_images=False)

    if load_weights is None:
        os.mkdir(output_dir)
        os.mkdir(tflow_dir)

        args_path = os.path.join(output_dir, 'args.json')
        with open(args_path, 'w') as json_file:
            json.dump(args, json_file, indent=4)

        # Create the model
        print('Compiling model')
        with tf.device('/cpu:0'):
            model = DenseNet(img_dim=(256, 256, 1),
                             nb_layers_per_block=nb_layers_per_block,
                             nb_dense_block=nb_blocks,
                             growth_rate=growth_rate,
                             nb_initial_filters=nb_initial_filters,
                             compression_rate=compression_rate,
                             sigmoid_output_activation=True,
                             activation_type=activation,
                             initializer=initializer,
                             output_dimension=nb_slices,
                             batch_norm=batch_norm)

        # Save the architecture
        with open(architecture_path, 'w') as json_file:
            json_file.write(model.to_json())

    else:
        with open(architecture_path, 'r') as json_file:
            model = model_from_json(json_file.read())

        # Load the weights
        model.load_weights(load_weights)

    # Move to multi GPUs
    # Use multiple devices
    if gpus > 1:
        parallel_model = multi_gpu_model(model, gpus)
        model_checkpoint = MultiGPUModelCheckpoint(weights_path,
                                                   monitor='val_loss',
                                                   save_best_only=False)
    else:
        parallel_model = model
        model_checkpoint = ModelCheckpoint(weights_path,
                                           monitor='val_loss',
                                           save_best_only=False)

    # Set up the learning rate scheduler
    def lr_func(e):
        print("Learning Rate Update at Epoch", e)
        if e > 0.75 * epochs:
            return 0.01 * learning_rate
        elif e > 0.5 * epochs:
            return 0.1 * learning_rate
        else:
            return learning_rate

    lr_scheduler = LearningRateScheduler(lr_func)

    # Compile multi-gpu model
    loss = 'mean_absolute_error'
    parallel_model.compile(optimizer=Adam(lr=learning_rate), loss=loss)

    print('Starting training...')

    parallel_model.fit_generator(
        train_generator,
        epochs=epochs,
        shuffle=False,
        validation_data=val_generator,
        callbacks=[model_checkpoint, tensorboard, lr_scheduler],
        use_multiprocessing=True,
        workers=16,
        initial_epoch=initial_epoch)

    return model
Example no. 15
x_test /= 255
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# convert class vectors to binary class matrices
y_train = to_categorical(y_train, num_classes)
y_test = to_categorical(y_test, num_classes)

model = Sequential()
model.add(Dense(512, activation='relu', input_shape=(784,)))
model.add(Dropout(0.2))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(num_classes, activation='softmax'))

model.summary()

model.compile(loss='categorical_crossentropy',
              optimizer=RMSprop(),
              metrics=['accuracy'])

history = model.fit(x_train, y_train,
                    batch_size=batch_size,
                    epochs=epochs,
                    verbose=1,
                    validation_data=(x_test, y_test),
                    callbacks=[TensorBoard(".")])
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
Example no. 16
    bbox_util = BBoxUtility(NUM_CLASSES, priors)

    model.load_weights(model_path, by_name=True, skip_mismatch=True)

    # 0.1 for validation, 0.9 for training
    val_split = 0.1
    with open(annotation_path) as f:
        lines = f.readlines()
    np.random.seed(10101)
    np.random.shuffle(lines)
    np.random.seed(None)
    num_val = int(len(lines) * val_split)
    num_train = len(lines) - num_val

    # Training parameter settings
    logging = TensorBoard(log_dir="logs")
    reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                  factor=0.5,
                                  patience=2,
                                  verbose=1)
    checkpoint = ModelCheckpoint(
        'logs/ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
        monitor='val_loss',
        save_weights_only=True,
        save_best_only=False,
        period=1)
    early_stopping = EarlyStopping(monitor='val_loss',
                                   min_delta=0,
                                   patience=6,
                                   verbose=1)
Example no. 17
    def train(self):
        self.logger.info("DnnOptimizer::train")

        training_generator = FluctuationDataGenerator(
            self.partition['train'],
            data_dir=self.dirinput_train,
            **self.params)
        validation_generator = FluctuationDataGenerator(
            self.partition['validation'],
            data_dir=self.dirinput_test,
            **self.params)
        model = u_net(
            (self.grid_phi, self.grid_r, self.grid_z, self.dim_input),
            depth=self.depth,
            batchnorm=self.batch_normalization,
            pool_type=self.pooling,
            start_channels=self.filters,
            dropout=self.dropout)
        model.compile(loss=self.lossfun,
                      optimizer=Adam(lr=self.adamlr),
                      metrics=[self.metrics])  # Mean squared error

        model.summary()
        plot_model(model,
                   to_file='plots/model_%s_nEv%d.png' %
                   (self.suffix, self.train_events),
                   show_shapes=True,
                   show_layer_names=True)

        #log_dir = "logs/" + datetime.datetime.now(datetime.timezone.utc).strftime("%Y%m%d_%H%M%S")
        log_dir = 'logs/' + '%s_nEv%d' % (self.suffix, self.train_events)
        tensorboard_callback = TensorBoard(log_dir=log_dir, histogram_freq=1)

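        # Workaround for a known TF 2.1 issue where the TensorBoard callback fails with
        # "'Model' object has no attribute '_get_distribution_strategy'" when fit() is
        # given a generator/Sequence.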
        model._get_distribution_strategy = lambda: None
        his = model.fit(training_generator,
                        validation_data=validation_generator,
                        use_multiprocessing=False,
                        epochs=self.epochs,
                        callbacks=[tensorboard_callback])

        plt.style.use("ggplot")
        plt.figure()
        plt.yscale('log')
        plt.plot(np.arange(0, self.epochs),
                 his.history["loss"],
                 label="train_loss")
        plt.plot(np.arange(0, self.epochs),
                 his.history["val_loss"],
                 label="val_loss")
        plt.plot(np.arange(0, self.epochs),
                 his.history[self.metrics],
                 label="train_" + self.metrics)
        plt.plot(np.arange(0, self.epochs),
                 his.history["val_" + self.metrics],
                 label="val_" + self.metrics)
        plt.title("Training Loss and Accuracy on Dataset")
        plt.xlabel("Epoch #")
        plt.ylabel("Loss/Accuracy")
        plt.legend(loc="lower left")
        plt.savefig("plots/plot_%s_nEv%d.png" %
                    (self.suffix, self.train_events))

        model_json = model.to_json()
        with open("%s/model_%s_nEv%d.json" % (self.dirmodel, self.suffix, self.train_events), "w") \
            as json_file:
            json_file.write(model_json)
        model.save_weights("%s/model_%s_nEv%d.h5" %
                           (self.dirmodel, self.suffix, self.train_events))
        self.logger.info("Saved trained model to disk")
Example no. 18
model.compile(loss="mae", optimizer=sgd, metrics=[
    "accuracy"
])  # mae = Mean absolute Error = L1 Loss  mean(abs(T - P))

startEpoch = 0
if args.checkpoint is not None:
    infoPrint("Loading checkpoint...")
    model.load_weights(args.checkpoint)
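    # Checkpoint files are named "checkpoints/{epoch}-fast-depth-cp.h5" (see the
    # ModelCheckpoint below), so the epoch number sits between the "/" and the first "-".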
    startEpoch = int(args.checkpoint.split("-")[0].split("/")[1])
    infoPrint("Starting from epoch {}".format(startEpoch))

# Training of the actual network
infoPrint("Training network...")
logdir = "logs/fit/" + datetime.now().strftime(
    "%Y%m%d-%H%M%S")  # Directory where to save the TensorBoard logs to.
tensorboard_callback = TensorBoard(
    log_dir=logdir)  # Define the TensorBoard callback
earlystop_callback = EarlyStopping(
    monitor="val_loss", patience=15
)  # Define EarlyStopping callback to make sure the network stops when it stagnates.
checkpoint_callback = ModelCheckpoint(  # checkpoint callback, mostly for Colab runs that get disconnected
    "checkpoints/{epoch}-fast-depth-cp.h5",
    save_weights_only=True,  # only save the weights of the network, not the whole model
    save_freq=10 * (train_len // args.batch_size))  # save every 10 epochs (when 10x all data has passed through)

# Fit the model to the training data
H = model.fit(
    x=train_ds,  # The dataset with the training images
    validation_data=test_ds,  # The dataset with the validation images
    validation_steps=test_len // args.
log_dir = './logs_16_2'
checkpoint_models_path = './checkpoints_16_2/cp-{epoch:04d}-{loss:.4f}-{val_loss:.4f}.ckpt'
checkpoint_dir = os.path.dirname(checkpoint_models_path)

if __name__ == '__main__':
    # Parse arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-p",
                    "--pretrained",
                    help="path to save pretrained model files")
    args = vars(ap.parse_args())
    pretrained_path = args["pretrained"]

    # Callbacks
    tensor_board = TensorBoard(log_dir=log_dir,
                               histogram_freq=0,
                               write_graph=True,
                               write_images=True)
    # model_names = checkpoint_models_path + 'final.{epoch:02d}-{val_loss:.4f}.hdf5'
    model_checkpoint = ModelCheckpoint(filepath=checkpoint_models_path,
                                       monitor='val_loss',
                                       verbose=1,
                                       save_weights_only=True)
    early_stop = EarlyStopping('val_loss', patience=patience)
    reduce_lr = ReduceLROnPlateau('val_loss',
                                  factor=0.1,
                                  patience=int(patience / 4),
                                  verbose=1)

    class MyCbk(keras.callbacks.Callback):
        def __init__(self, model):
            keras.callbacks.Callback.__init__(self)
Example no. 20
def main(_argv):
    physical_devices = tf.config.experimental.list_physical_devices('GPU')
    for physical_device in physical_devices:
        tf.config.experimental.set_memory_growth(physical_device, True)
    if FLAGS.training_source == 'weight':
        model = YoloV3Tiny(classes=FLAGS.num_classes, training=True)
        model_pretrained = YoloV3Tiny(FLAGS.size,
                                      training=True,
                                      classes=FLAGS.num_classes)
        model_pretrained.load_weights(weigths_path)
        model.get_layer("yolo_darknet").set_weigths(
            model_pretrained.get_layer("yolo_darknet").get_weigths())

    elif FLAGS.training_source == 'model':
        model = tf.keras.models.load_model(FLAGS.source_path, compile=False)

    anchors = yolo_tiny_anchors
    anchor_masks = yolo_tiny_anchor_masks
    #model.load_weights(weights_path).expect_partial()
    model.summary()

    train_dataset = dataset.load_fake_dataset()
    classes_names = [c.strip() for c in open(FLAGS.classes).readlines()]
    train_dataset = dataset.load_tfrecord_dataset(FLAGS.datasets,
                                                  FLAGS.classes, FLAGS.size)
    train_dataset = train_dataset.shuffle(buffer_size=512)
    train_dataset = train_dataset.batch(FLAGS.batch_size)
    train_dataset = train_dataset.map(lambda x, y: (
        dataset.transform_images(x, FLAGS.size),
        dataset.transform_targets(y, anchors, anchor_masks, FLAGS.size)))
    train_dataset = train_dataset.prefetch(
        buffer_size=tf.data.experimental.AUTOTUNE)

    val_dataset = dataset.load_fake_dataset()
    val_dataset = val_dataset.batch(FLAGS.batch_size)
    val_dataset = val_dataset.map(lambda x, y: (
        dataset.transform_images(x, FLAGS.size),
        dataset.transform_targets(y, anchors, anchor_masks, FLAGS.size)))

    optimizer = tf.keras.optimizers.Adam(lr=FLAGS.learning_rate)
    loss = [
        YoloLoss(anchors[mask], classes=FLAGS.num_classes)
        for mask in anchor_masks
    ]

    model.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])
    callbacks = [
        ReduceLROnPlateau(verbose=1),
        EarlyStopping(patience=5, verbose=1),
        ModelCheckpoint('./checkpoints/yolov3_train_test_' + FLAGS.method +
                        '.h5',
                        verbose=1,
                        save_best_only=True),
        TensorBoard(log_dir='logs')
    ]

    history = model.fit(train_dataset,
                        epochs=FLAGS.epochs,
                        callbacks=callbacks,
                        validation_data=val_dataset)
    """
Example no. 21
def create_and_train_LSTM_lyrics(
    train_data,
    log_dir='logs/fit',
    model=None,
    batch_size=4096,
    lr=1e-2,
    epochs=100,
    validation_split=0.2,
    patience=3,
    min_delta=.001,
    loss='sparse_categorical_crossentropy',
    optimizer=None,
    metrics=['accuracy'],
    prefix='',
    verbose=1,
    embedding_dim=300,
    PAD=' <PAD>',
    maxlen=6630,
    embedding=None,
    shuffle=True,
    random_state=2,
):
    if optimizer is None:
        optimizer = Adam(lr=lr)

    run_time = datetime.datetime.now().strftime('%m%d-%H%M%S')
    run_name = f'{prefix}_{run_time}_lr{lr}_b{batch_size}'

    df_train, le_train = load_and_preprocess(csv_path=train_data,
                                             PAD=PAD,
                                             maxlen=maxlen)

    train_text = np.concatenate(
        df_train.encoded if PAD is None else df_train.encoded_padded)

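    # Next-token prediction: every token in X is paired with the token that follows it in y.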
    X = train_text[:-1]
    y = train_text[1:]

    X_train, X_val, y_train, y_val = train_test_split(
        X, y, test_size=validation_split, random_state=random_state)

    ds_train = create_tf_dataset(X_train, y_train, batch_size=batch_size)
    ds_val = create_tf_dataset(X_val, y_val, batch_size=batch_size)

    vocab_size = len(le_train.classes_)

    if embedding is not None:
        word_vectors = get_weight_matrix(embedding, le_train.classes_)
        embedding_layer = Embedding(
            vocab_size,
            embedding_dim,
            weights=[word_vectors],
            trainable=False,
        )
    else:
        embedding_layer = Embedding(
            vocab_size,
            embedding_dim,
        )

    # define model
    if model is None:
        model = Sequential([
            embedding_layer,
            LSTM(embedding_dim),
            Dense(vocab_size, activation='softmax')
        ])
        print(model.summary())

    callbacks = [
        EarlyStopping(patience=patience, min_delta=min_delta, verbose=verbose),
        ModelCheckpoint(f'{run_name}.h5',
                        verbose=0,
                        save_best_only=True,
                        save_weights_only=True)
    ]

    if tf.__version__[0] == '2':
        callbacks.append(TensorBoard(log_dir=f'{log_dir}/{run_name}'))

    model.compile(loss=loss, optimizer=optimizer, metrics=metrics)

    history = model.fit(
        ds_train,
        epochs=epochs,
        callbacks=callbacks,
        verbose=verbose,
        shuffle=True,
        steps_per_epoch=math.ceil(len(X_train) / batch_size),
        #       batch_size=batch_size,
        validation_data=ds_val,
        validation_steps=math.ceil(len(X_val) / batch_size),
        #       validation_split=validation_split
    )
    return model, le_train, callbacks
output_folder = 'models'
if not os.path.exists(output_folder):
    os.makedirs(output_folder)

model.load_weights(os.path.join(output_folder, model_name + '.hdf5'))

# training parameters
batch_size = 128
nb_epoch = 400

# callbacks
checkpointer = ModelCheckpoint(filepath=os.path.join(output_folder,
                                                     model_name + '.hdf5'),
                               save_best_only=True)
early_stopping = EarlyStopping(patience=10)
tensorboard = TensorBoard()

# training loop
model.fit(
    RotNetDataGenerator(train_filenames,
                        input_shape=input_shape,
                        batch_size=batch_size,
                        one_hot=False,
                        preprocess_func=preprocess_input,
                        crop_center=True,
                        crop_largest_rect=True,
                        shuffle=True),
    steps_per_epoch=len(train_filenames) / batch_size,
    epochs=nb_epoch,
    validation_data=RotNetDataGenerator(test_filenames,
                                        input_shape=input_shape,
Example no. 23
X = pickle.load(open('X.pickle', 'rb'))
y = pickle.load(open('y.pickle', 'rb'))

X = X / 255.00

# Naming the models
dense_layers = [0]
layer_sizes = [64]
conv_layers = [3]

for dense_layer in dense_layers:
    for layer_size in layer_sizes:
        for conv_layer in conv_layers:
            NAME = f"{conv_layer}-conv-{layer_size}-nodes-{dense_layer}-dense-512DO"
            # Adding TensorBoard
            tensorboard = TensorBoard(log_dir=f'logss/{NAME}')
            # Building the Model
            model = Sequential()

            model.add(Conv2D(layer_size, (3, 3), input_shape=X.shape[1:]))
            model.add(Activation('relu'))
            model.add(MaxPooling2D(pool_size=(2, 2)))

            for l in range(conv_layer - 1):
                model.add(Conv2D(layer_size, (3, 3)))
                model.add(Activation('relu'))
                model.add(MaxPooling2D(pool_size=(2, 2)))

            model.add(Flatten())

            for l in range(dense_layer):
                     "rb"))

X = X / 255

#%%
dense_layers = [0, 1, 2]
layer_sizes = [32, 64, 128]
conv_layers = [1, 2, 3]

for dense_layer in dense_layers:
    for layer_size in layer_sizes:
        for conv_layer in conv_layers:
            NAME = f"{conv_layer}-conv-{layer_size}-nodes-{dense_layer}-dnes-{int(time.time())}"
            print(NAME)

            tensorboard = TensorBoard(log_dir='logs/{}'.format(NAME))

            model = Sequential()
            model.add(Conv2D(64, (3, 3), input_shape=X.shape[1:]))
            model.add(Activation("relu"))
            model.add(MaxPooling2D(pool_size=(2, 2)))

            for l in range(conv_layer - 1):
                model.add(Conv2D(64, (3, 3)))
                model.add(Activation("relu"))
                model.add(MaxPooling2D(pool_size=(2, 2)))

            model.add(Flatten())
            for l in range(dense_layer):
                model.add(Dense(layer_size))
                model.add(Activation("relu"))
Example no. 25
model.add(Dense(32, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.2))

model.add(Dense(10, activation='softmax'))

opt = tf.keras.optimizers.Adam(lr=_learning_rate, decay=_decay_rate)

model.compile(loss='sparse_categorical_crossentropy',
              optimizer=opt,
              metrics=['accuracy'])

model.summary()

tensorboard = TensorBoard(log_dir='./logs',
                          histogram_freq=0,
                          write_graph=True,
                          write_images=False)

filepath = "models/weights-improvement-{epoch:02d}-{val_acc:.2f}.hdf5"

checkpoint = ModelCheckpoint(filepath,
                             monitor='val_acc',
                             verbose=1,
                             save_best_only=True,
                             mode='max')

callbacks_list = [tensorboard, checkpoint]

model.fit(x_train,
          y_train,
          validation_split=_validation_split,
Example no. 26
from numpy import sin, newaxis, linspace, float32, squeeze
from numpy.random import seed, rand, randn
from tensorflow.keras.models import Sequential
from tensorflow.keras.callbacks import TensorBoard
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import Dense
from tensorflow.keras.backend import variable
import tensorflow_probability as tfp
tfpl = tfp.layers
tfpd = tfp.distributions

tensorboard_callback = TensorBoard(log_dir="./regression-1-logs", histogram_freq=1)

def load_dataset(w0, b0, x_range, n=150, n_tst=150):
  seed(43)
  def s(x):
    g = (x - x_range[0]) / (x_range[1] - x_range[0])
    return 3 * (0.25 + g**2.)
  x = (x_range[1] - x_range[0]) * rand(n) + x_range[0]
  eps = randn(n) * s(x)
  y = (w0 * x * (1. + sin(x)) + b0) + eps
  x = x[..., newaxis]
  x_tst = linspace(*x_range, num=n_tst).astype(float32)
  x_tst = x_tst[..., newaxis]
  return y, x, x_tst

output_train, input_train, input_test = load_dataset(0.125, 5.0, [-20, 60])

def evaluate(truth, prediction):
  return -prediction.log_prob(truth)
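
evaluate is the negative log-likelihood loss for a model whose outputs are tensorflow_probability distributions. A minimal sketch of such a model, following the standard TFP probabilistic-regression pattern (this model is an assumption, not part of the original snippet):

import tensorflow as tf

model = Sequential([
    Dense(1 + 1),  # one unit for the mean, one for the (unconstrained) scale
    tfpl.DistributionLambda(
        lambda t: tfpd.Normal(loc=t[..., :1],
                              scale=1e-3 + tf.math.softplus(0.05 * t[..., 1:]))),
])
model.compile(optimizer=Adam(learning_rate=0.01), loss=evaluate)
model.fit(input_train, output_train, epochs=500, verbose=False,
          callbacks=[tensorboard_callback])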
def train(base_dir):
    #%% Init model
    encoder = keras_applications.MobileNet(input_shape=(224, 224, 3), include_top=False, pooling="avg")
    support_layer = CentroidsMatrix(
        kernel={
            "name": "MixedNorms",
            "init": {
                "norms": [
                    lambda x: 1 - tf.nn.l2_normalize(x[0]) * tf.nn.l2_normalize(x[1]),
                    lambda x: tf.math.abs(x[0] - x[1]),
                    lambda x: tf.nn.softmax(tf.math.abs(x[0] - x[1])),
                    lambda x: tf.square(x[0] - x[1]),
                ],
                "use_bias": True,
            },
        },
        activation="linear",
    )

    #%% Init training
    callbacks = [
        TensorBoard(base_dir, write_images=True, histogram_freq=1),
        ModelCheckpoint(str(base_dir / "best_loss.h5"), save_best_only=True),
        ReduceLROnPlateau(),
    ]

    #%% Init data
    @tf.function(input_signature=(tf.TensorSpec(shape=[None, None, 3], dtype=tf.uint8),))
    def preprocessing(input_tensor):
        output_tensor = tf.cast(input_tensor, dtype=tf.float32)
        output_tensor = tf.image.resize_with_pad(output_tensor, target_height=224, target_width=224)
        output_tensor = keras_applications.mobilenet.preprocess_input(output_tensor, data_format="channels_last")
        return output_tensor

    @tf.function(input_signature=(tf.TensorSpec(shape=[None, None, 3], dtype=tf.float32),))
    def data_augmentation(input_tensor):
        output_tensor = tf.image.random_flip_left_right(input_tensor)
        output_tensor = tf.image.random_flip_up_down(output_tensor)
        output_tensor = tf.image.random_brightness(output_tensor, max_delta=0.25)
        return output_tensor

    all_annotations = pd.read_csv(base_dir / "annotations" / "all_annotations.csv").assign(
        label_code=lambda df: df.label.astype("category").cat.codes
    )
    class_count = all_annotations.groupby("split").apply(lambda group: group.label.value_counts())

    #%% Train model
    k_shot = 4
    cache = base_dir / "cache"
    datasets = all_annotations.groupby("split").apply(
        lambda group: (
            ToKShotDataset(
                k_shot=k_shot,
                preprocessing=compose(preprocessing, data_augmentation),
                cache=str(cache / group.name),
                reset_cache=True,
                dataset_mode="with_cache",
                label_column="label_code",
            )(group)
        )
    )

    y_true = Input(shape=(None,), name="y_true")
    output = support_layer([encoder.output, y_true])
    model = Model([encoder.inputs, y_true], output)

    batch_size = 64
    batched_datasets = datasets.map(
        lambda dataset: dataset.batch(batch_size, drop_remainder=True)
        .map(lambda x, y: (x, get_dummies(y)[0]), num_parallel_calls=tf.data.experimental.AUTOTUNE)
        .map(lambda x, y: ((x, y), y), num_parallel_calls=tf.data.experimental.AUTOTUNE)
        .repeat()
    )

    encoder.trainable = False
    optimizer = Adam(lr=1e-4)
    model.compile(
        optimizer=optimizer, loss="binary_crossentropy", metrics=["categorical_accuracy", "categorical_crossentropy"]
    )
    model.fit(
        datasets["train"].batch(batch_size).repeat(),
        steps_per_epoch=len(class_count["train"]) * k_shot // batch_size * 150,
        validation_data=datasets["val"].batch(batch_size).repeat(),
        validation_steps=max(len(class_count["val"]) * k_shot // batch_size, 100),
        initial_epoch=0,
        epochs=3,
        callbacks=callbacks,
    )

    encoder.trainable = True
    optimizer = Adam(lr=1e-5)
    model.compile(
        optimizer=optimizer, loss="binary_crossentropy", metrics=["categorical_accuracy", "categorical_crossentropy"]
    )
    model.fit(
        datasets["train"].batch(batch_size).repeat(),
        steps_per_epoch=len(class_count["train"]) * k_shot // batch_size * 150,
        validation_data=datasets["val"].batch(batch_size).repeat(),
        validation_steps=max(len(class_count["val"]) * k_shot // batch_size, 100),
        initial_epoch=3,
        epochs=10,
        callbacks=callbacks,
    )

    #%% Evaluate on test set. Each batch is a k_shot, n_way=batch_size / k_shot task
    model.load_weights(str(base_dir / "best_loss.h5"))
    model.evaluate(batched_datasets["test"], steps=max(len(class_count["test"]) * k_shot // batch_size, 100))
Example no. 28
model.add(Conv2D(64, (3, 3), input_shape=X.shape[1:]))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Flatten())  # this converts our 3D feature maps to 1D feature vectors
model.add(Dense(64))
model.add(Activation('relu'))

model.add(Dense(1))
model.add(Activation('sigmoid'))

tensorboard = TensorBoard(log_dir="logs/{}".format(NAME))

model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'],
              )
model.fit(X, y,
          batch_size=32,
          epochs=10,
          validation_split=0.3,
          callbacks=[tensorboard])




                        for hidden_dense_layer_size in hidden_dense_layer_sizes:
                            for optimazer in optimazers:
                                for learning_rate in learning_rates:
                                    for batch_size in batch_sizes:
                                        
                                        model = build_model(train_X.shape[1:],
                                                            rnn_size, 
                                                            dropout_level, 
                                                            batch_normalization, 
                                                            hidden_dense_layer_size,
                                                            optimazer,
                                                            learning_rate, 
                                                            batch_size)
                                        
                                        NAME = f"{MODEL_TYPE}-{sequence_size}-W_LEN-{scaler_name}-SCL-{rnn_size}-RNN_S-{int(time.time())}"
                                        tensorboard = TensorBoard(log_dir=f"logs_{diff_function}/{NAME}")
                                    
                                        model.fit(train_X, train_y, 
                                                    epochs=EPOCHS, batch_size=batch_size, 
                                                    validation_data=(val_X, val_y), 
                                                    callbacks=[tensorboard])
                                        model.save(f"models_{diff_function}/{NAME}")

                                        # score model
                                        pred_y = scaler.inverse_transform(model.predict(train_X))
                                        true_y = scaler.inverse_transform(train_y)
                                        print(f"RMSE_train = {mse(true_y, pred_y) ** (1/2)}")


                                        pred_y = scaler.inverse_transform(model.predict(val_X))
                                        true_y = scaler.inverse_transform(val_y)
Example no. 30
op = Adadelta(lr=1e-3)
batch_size = 32

model.compile(optimizer=op,
              loss="sparse_categorical_crossentropy",
              metrics=['acc'])
es = EarlyStopping(monitor='val_loss',
                   patience=20,
                   restore_best_weights=True,
                   verbose=1)
lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=10, verbose=1)
path = 'C:/nmb/nmb_data/h5/5s_last/Conv2D_adadelta_mms.h5'
mc = ModelCheckpoint(path, monitor='val_loss', verbose=1, save_best_only=True)
tb = TensorBoard(log_dir='C:/nmb/nmb_data/graph/' + 'Conv2D_adadelta_mms' +
                 "/",
                 histogram_freq=0,
                 write_graph=True,
                 write_images=True)
# history = model.fit(x_train, y_train, epochs=5000, batch_size=batch_size, validation_split=0.2, callbacks=[es, lr, mc, tb])

# Evaluation and prediction
model.load_weights('C:/nmb/nmb_data/h5/5s_last/Conv2D_adadelta_mms.h5')
result = model.evaluate(x_test, y_test, batch_size=batch_size)
print("loss : {:.5f}".format(result[0]))
print("acc : {:.5f}".format(result[1]) + '\n')

############################################ PREDICT ####################################
pred = [
    'C:/nmb/nmb_data/predict/5s_last/F', 'C:/nmb/nmb_data/predict/5s_last/M'
]
Example no. 31
def main(args):
    InputDir = args.dir_train

    if args.val_folds:
        val_folds = args.val_folds
        InputdirTrain = [os.path.join(InputDir,fold,'Scans') for fold in os.listdir(InputDir) if fold not in val_folds and not fold.startswith(".")]
        InputdirLabel = [os.path.join(InputDir,fold,'Segs') for fold in os.listdir(InputDir) if fold not in val_folds and not fold.startswith(".")]
        InputdirValTrain = [os.path.join(InputDir,fold,'Scans') for fold in val_folds]
        InputdirValLabel = [os.path.join(InputDir,fold,'Segs') for fold in val_folds]
    else:
        val_dir = args.val_dir
        InputdirTrain = [os.path.join(InputDir,'Scans')]
        InputdirLabel = [os.path.join(InputDir,'Segs')]
        InputdirValTrain = [os.path.join(val_dir,'Scans')]
        InputdirValLabel = [os.path.join(val_dir,'Segs')]

    number_epochs = args.epochs
    save_frequence = args.save_frequence
    width = args.width
    height = args.height
    batch_size = args.batch_size
    NumberFilters = args.number_filters
    dropout = args.dropout
    lr = args.learning_rate

    savedModel = os.path.join(args.save_model, args.model_name+"_{epoch}.hdf5")
    logPath = args.log_dir

    # GPUs Initialization
    gpus = tf.config.list_physical_devices('GPU')
    if gpus:
        try:
            # Currently, memory growth needs to be the same across GPUs
            for gpu in gpus:
                tf.config.experimental.set_memory_growth(gpu, True)
            logical_gpus = tf.config.experimental.list_logical_devices('GPU')
            print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
        except RuntimeError as e:
            # Memory growth must be set before GPUs have been initialized
            print(e)

    print("Loading paths...")
    # Input files and labels
    input_paths = sorted([file for file in [os.path.join(dir, fname) for dir in InputdirTrain for fname in os.listdir(dir)] if not os.path.basename(file).startswith(".")])
    label_paths = sorted([file for file in [os.path.join(dir, fname) for dir in InputdirLabel for fname in os.listdir(dir)] if not os.path.basename(file).startswith(".")])

    # Folder with the validations scans and labels
    ValInput_paths = sorted([file for file in [os.path.join(dir, fname) for dir in InputdirValTrain for fname in os.listdir(dir)] if not os.path.basename(file).startswith(".")])
    ValLabel_paths = sorted([file for file in [os.path.join(dir, fname) for dir in InputdirValLabel for fname in os.listdir(dir)] if not os.path.basename(file).startswith(".")])

    print("Processing training dataset...")
    # Read and process the input files
    x_train = np.array([ProcessDataset(path, label=False) for path in input_paths])
    y_train = np.array([ProcessDataset(path, label=True) for path in label_paths])
    
    x_train, y_train = remove_empty_slices(x_train, y_train, ratio=args.ratio)
    x_train, y_train = shuffle(x_train, y_train)
    
    x_train = np.reshape(x_train, x_train.shape+(1,))
    y_train = np.reshape(y_train, y_train.shape+(1,))
    
    dataset_training = create_dataset(x_train, y_train, batch_size)
    del(x_train)
    del(y_train)

    print("Processing validation dataset...")
    x_val = np.array([ProcessDataset(path,label=False) for path in ValInput_paths])
    y_val = np.array([ProcessDataset(path, label=True) for path in ValLabel_paths])

    # x_val, y_val = remove_empty_slices(x_val, y_val, ratio=args.ratio)
    x_val, y_val = shuffle(x_val, y_val)

    x_val = np.reshape(x_val, x_val.shape+(1,))
    y_val = np.reshape(y_val, y_val.shape+(1,))
    
    dataset_validation = create_dataset(x_val, y_val, batch_size)
    del(x_val)
    del(y_val)
  
    print("Dataset info...")
    for images, labels in dataset_training.take(1):
        numpy_images = images.numpy()
        numpy_labels = labels.numpy()

    print("=====================================================================")
    print()
    print("Inputs shape: ", np.shape(numpy_images), "min:", np.amin(numpy_images), "max:", np.amax(numpy_images), "unique:", len(np.unique(numpy_images)))
    print("Labels shape: ", np.shape(numpy_labels), "min:", np.amin(numpy_labels), "max:", np.amax(numpy_labels), "unique:", len(np.unique(numpy_labels)))
    print()
    print("=====================================================================")


    model = unet_2D(width, height, NumberFilters, dropout, lr)

    model_checkpoint = ModelCheckpoint(savedModel, monitor='loss', verbose=1, period=save_frequence)
    log_dir = os.path.join(logPath, args.model_name + "_" + datetime.datetime.now().strftime("%Y_%d_%m-%H:%M:%S"))
    tensorboard_callback = TensorBoard(log_dir=log_dir, histogram_freq=1)
    
    if args.learning_rate_schedular:
        LR_callback = tf.keras.callbacks.LearningRateScheduler(scheduler)
        callbacks_list = [model_checkpoint, tensorboard_callback, LR_callback]
    else:
        callbacks_list = [model_checkpoint, tensorboard_callback]

    model.fit(
        dataset_training,
        epochs=number_epochs,
        validation_data=dataset_validation,
        verbose=2,
        callbacks=callbacks_list,
    )