Example #1
                                                            random_state=SEED)

        if opt.function == 'train':
            print('>> Preparing for Training ########')
            model = lstm_count_3()
            filepath = opt.working_dir + "/saved-model-{epoch:02d}-{loss:.2f}-" + str(
                opt.window) + ".hdf5"
            checkpoint = ModelCheckpoint(filepath,
                                         monitor='loss',
                                         verbose=1,
                                         save_best_only=True,
                                         mode='auto',
                                         period=1)
            logdir = "../tf_logs/lstm/" + opt.task + str(
                opt.window) + datetime.now().strftime("%Y%m%d-%H%M%S")
            tensorboard_callback = TensorBoard(log_dir=logdir)
            model.fit(X_train,
                      y_train,
                      epochs=opt.epochs,
                      batch_size=32,
                      verbose=1,
                      shuffle=False,
                      callbacks=[checkpoint, tensorboard_callback])
        else:
            print('>> Loading a Pre-trained Model ########')
            model = load_model(opt.model)
        print('>> Running Tests ########')

        if opt.plot:
            if 'hist' in opt.plot:
                plot_distributions(model, X_test, y_test, scaler_l)
Example #2
    def create_model(self, batchsize=False):
        optimal_lay = None

        total_result = []
        max_bs_list = []
        max_ls_list = []
        optimal_lay_list = []
        return_per_racehorse_list = []

        kf = KFold(n_splits=6, random_state=1, shuffle=True)

        n = 0
        for self.train_idx, self.test_idx in kf.split(range(len(self.X))):
            n += 1

            self.trainX = self.X[self.train_idx, :]
            self.trainY = self.Y[self.train_idx]
            self.train_payoff = self.Y[self.train_idx, 1].astype('float')
            self.testX = self.X[self.test_idx, :]
            self.testY = self.Y[self.test_idx]
            self.test_payoff = self.Y[self.test_idx, 1].astype('float')

            # self.weights = abs(self.weights_original[self.train_idx])

            # maximum number of hidden neurons to avoid overfitting
            # https://stats.stackexchange.com/questions/181/how-to-choose-the-number-of-hidden-layers-and-nodes-in-a-feedforward-neural-netw
            samples = self.trainX.shape[0]
            input_neurons = self.trainX.shape[1]
            output_neurons = 1
            alpha = 2
            max_hidden_neurons = 1000
            hidden_neurons = min(
                max_hidden_neurons,
                int(samples / (alpha * (input_neurons + output_neurons))))
            log.info("Hidden neurons: {}".format(hidden_neurons))

            self.normalize_data()

            epochs = 100
            # default batch size
            if not batchsize:
                batchsize = 1
            log.info("Start training with batch size: {}".format(batchsize))

            self.model = Sequential()
            self.model.add(
                Dense(hidden_neurons,
                      activation='relu',
                      input_shape=(input_neurons, )))
            self.model.add(Dropout(0.2))
            self.model.add(
                Dense(hidden_neurons,
                      activation='relu',
                      input_shape=(input_neurons, )))
            self.model.add(Dropout(0.2))
            self.model.add(Dense(output_neurons, activation='sigmoid'))

            self.model.compile(loss=self.cp.custom_cross_entropy,
                               optimizer=Adam(),
                               metrics=[
                                   self.cp.acc, self.cp.profit, self.cp.tp,
                                   self.cp.fp, self.cp.tn, self.cp.fn
                               ])

            timestr = time.strftime("%Y%m%d-%H%M%S") + "_" + str(n)
            self.tbCallBack = TensorBoard(log_dir='./Graph/{}'.format(timestr),
                                          histogram_freq=0,
                                          write_graph=True,
                                          write_images=False)

            self.early_stop = EarlyStopping(monitor='val_loss',
                                            min_delta=1e-7,
                                            patience=2,
                                            verbose=2,
                                            mode='auto')

            self.model.fit(self.trainX,
                           self.trainY,
                           validation_data=(self.testX, self.testY),
                           epochs=epochs,
                           batch_size=int(batchsize),
                           verbose=1,
                           callbacks=[self.tbCallBack, self.early_stop])

            scores = self.model.evaluate(self.testX, self.testY)
            log.info("\n%s: %.2f%%" %
                     (self.model.metrics_names[1], scores[1] * 100))

            average_profit_per_bet, cumulative_returns, cumulative_all_in = \
                self.backtest(strategy=self.strategy)

            return_per_racehorse_list.append(average_profit_per_bet)
            total_result.append(average_profit_per_bet)
            plt.plot(cumulative_returns, label='strategy, kfold {}'.format(n))
            if self.strategy == 'lay':
                plt.plot(cumulative_all_in,
                         'b:',
                         label='bet all, kfold {}'.format(n))
            plt.title("")
            plt.legend()

            if len(total_result) == 1 or average_profit_per_bet > max(
                    total_result[:-1]):
                self.save_model()

        self.save_hyperparams(max_bs_list, max_ls_list, features_per_horse,
                              batchsize, return_per_racehorse_list,
                              optimal_lay_list, self.norm)

        log.info("Mean of mean returns: {0:.2f}".format(np.mean(total_result)))

        ranksum_stat, p_stat = ranksums(total_result,
                                        np.zeros(len(total_result)))
        log.info(
            "Statistically significantly different to zero p-value: {0:.2f}".
            format(p_stat))
        log.info(
            "statistically significantly different to zero on 1% level: {}".
            format(p_stat < 0.01))
        log.info(
            "statistically significantly different to zero on 5% level: {}".
            format(p_stat < 0.05))

        timestr = time.strftime("%Y%m%d-%H%M%S")
        plt.savefig(
            os.path.join(dir_path, 'plots/training-{}.png'.format(timestr)))
Example #3
                        arguments={
                            'anchors': anchors,
                            'num_classes': num_classes,
                            'ignore_thresh': 0.5
                        })(loss_input)

    model = Model([model_body.input, *y_true], model_loss)

    freeze_layers = 249  ### Load the pre-trained weights so the darknet53 layers can be reused, since those weights are well suited to extracting image features; after freezing, only the last four layers are trained
    for i in range(freeze_layers):
        model_body.layers[i].trainable = False
    print('Freeze the first {} layers of total {} layers.'.format(
        freeze_layers, len(model_body.layers)))

    # Training parameter setup
    logging = TensorBoard(log_dir=log_dir)
    checkpoint = ModelCheckpoint(
        log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
        monitor='val_loss',
        save_weights_only=True,
        save_best_only=False,
        period=5)
    reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                  factor=0.5,
                                  patience=2,
                                  verbose=1)
    early_stopping = EarlyStopping(monitor='val_loss',
                                   min_delta=0,
                                   patience=6,
                                   verbose=1)
Example #4
def main(logging, params, trainops):
    # load model architecture and weights
    model = trainops.model(params)

    model.compile(loss='categorical_crossentropy',
                  optimizer=SGD(lr=params.learning_rate,
                                momentum=params.momentum),
                  metrics=['accuracy'])

    # print model structure
    model.summary()

    # get standard configured data generators
    train_generator, valid_generator, test_generator = i.create_generators(
        params.data_path)

    # get the number of samples for training, validation and test
    num_training_images, num_validation_images, num_test_images = i.get_data_statistics(
        params.data_path)
    '''callbacks'''
    lr_scheduler = LearningRateScheduler(trainops.step_decay)
    csv_logger = CSVLogger(filename=logging.model_directory + '/history.csv',
                           append=True,
                           separator=",")

    print("save directory is", logging.model_directory)
    checkpoint = ModelCheckpoint(filepath=logging.model_directory +
                                 "/weights.hdf5",
                                 monitor='val_acc',
                                 save_best_only=True,
                                 verbose=1,
                                 save_weights_only=True)

    tb = TensorBoard(log_dir=logging.tensorboard_directory,
                     histogram_freq=0,
                     write_graph=True,
                     write_images=True,
                     embeddings_freq=0,
                     embeddings_layer_names=None,
                     embeddings_metadata=None)

    rlr = ReduceLROnPlateau(monitor='val_acc',
                            factor=0.1,
                            patience=20,
                            verbose=0,
                            mode='auto',
                            min_delta=0.0001,
                            cooldown=0,
                            min_lr=0)

    # saving model config file to model output dir
    logging.save_dict_to_json(logging.model_directory + "/config.json")

    history = model.fit_generator(
        generator=train_generator,
        steps_per_epoch=int(num_training_images / (params.batch_size * 1)),
        epochs=params.num_epochs,
        validation_data=valid_generator,
        use_multiprocessing=False,
        workers=8,
        validation_steps=int(num_validation_images / (1)),
        callbacks=[checkpoint, lr_scheduler, tb, csv_logger, rlr])

    pd.DataFrame(history.history).to_csv(logging.model_directory +
                                         "/loss_files.csv")

    print(
        "###################### inititing predictions and evaluations ######################"
    )
    pred = model.predict_generator(generator=test_generator,
                                   steps=int(num_test_images / (1)),
                                   verbose=1,
                                   use_multiprocessing=False,
                                   workers=1)

    # get predictions and labels in list format
    preds = np.argmax(pred, axis=1).tolist()
    lbls = test_generator.labels.tolist()[:len(preds)]

    # instantiate the evaluation class
    evaluation = Evaluation(history=pd.DataFrame(history.history),
                            labels=lbls,
                            predictions=preds,
                            softmax_output=pred,
                            model_dir=logging.model_directory,
                            filenames=test_generator.filenames,
                            params=params)

    # get and save 5 example fundus images for each class in "./predictions" and assemble them into a canvas
    #evaluation.plot_examples()
    evaluation.write_plot_evaluation()
import os
import logging
from random import choice
from keras import backend as K

stopping_criteria = EarlyStopping(monitor='acc',
                                  min_delta=0,
                                  patience=5,
                                  verbose=0,
                                  mode='auto',
                                  baseline=None,
                                  restore_best_weights=True)
batch_size = 400
training_batch_size = 50

visualizer_128 = TensorBoard(log_dir='logs/{}'.format(time()), update_freq=100)
visualizer_512 = TensorBoard(log_dir='./logs/model_512/',
                             histogram_freq=1,
                             batch_size=batch_size,
                             write_graph=True,
                             write_grads=False,
                             write_images=False,
                             update_freq=100)
visualizer_128_reg = TensorBoard(log_dir='./logs/model_128_reg/',
                                 histogram_freq=1,
                                 batch_size=batch_size,
                                 write_graph=True,
                                 write_grads=False,
                                 write_images=True,
                                 update_freq=100)
                                              batch_size=batch_size,
                                              class_mode='categorical')

test_generator = datagen.flow_from_directory(test_data_dir,
                                             target_size=(img_width, img_height),
                                             batch_size=batch_size,
                                             class_mode='categorical',
                                             shuffle=False)

validation_generator = datagen.flow_from_directory(val_data_dir,
                                                   target_size=(img_width, img_height),
                                                   batch_size=batch_size,
                                                   class_mode='categorical')

tbCallBack = TensorBoard(log_dir=log_dir + 'tboriginal',
                         histogram_freq=10,
                         write_graph=True,
                         profile_batch=0)

save_callback = ModelCheckpoint(filepath=weights_dir + 'weights.{epoch:02d}-{val_loss:.2f}.hdf5',
                                monitor='val_acc',
                                verbose=1,
                                save_best_only=True,
                                mode='max')

earlyCallBack = EarlyStopping(monitor='val_loss',
                              min_delta=0,
                              patience=4,
                              verbose=1,
                              mode='min',
                              baseline=None,
                              restore_best_weights=True)
Example #7
model.add(advanced_activations.ELU(alpha=1.0))
model.add(Dropout(0.6))

model.add(Dense(HIDDEN4_SIZE, W_regularizer=l2(0.001), init='uniform'))
model.add(advanced_activations.ELU(alpha=1.0))

model.add(Dense(OUTPUT, init='uniform', activation='softmax'))
'''
 * TensorBoard and checkpoint saver callbacks
 * The Keras TensorBoard graph is not prettier than the original TensorFlow graph, but it is much simpler.
 * The checkpoint callback reports weight improvements on the output console.
'''
checkpointer = ModelCheckpoint(filepath="/tmp/weights.hdf5",
                               verbose=1,
                               save_best_only=True)
tensorboard = TensorBoard(log_dir='./logs',
                          histogram_freq=0,
                          write_graph=True,
                          write_images=False)

adam = Adam(lr=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-06, decay=0.0)
model.compile(loss='categorical_crossentropy',
              optimizer=adam,
              metrics=['accuracy'])
model.fit(X_train,
          y_train,
          nb_epoch=MAX_RANGE,
          batch_size=100,
          validation_data=(X_test, y_test),
          callbacks=[checkpointer, tensorboard])
Example #8
# Setup ===========================================
base_model_name = "InceptionV3"   # InceptionV3, InceptionV4, InceptionResnet
base_folder = "Z:/PaperResearch/VFbySS-OCT"
vf_file = "/train_data.xlsm"
weight_save_folder = "/Weights/" + base_model_name
pretrained_weights = ""   # if no pretrained weight, just leave ""
tensorboard_log_folder = "/Logs"
# ==================================================

# Data loading ===============================
print("Data loading...")
x_train, y_train, pids = LoadData(base_folder, base_folder + vf_file, False, "Train", "Train")

# model build ================================
model = GetModel(base_model_name)
if pretrained_weights != "":
    model.load_weights(base_folder + weight_save_folder + pretrained_weights)

# compile the model (should be done *after* setting layers to non-trainable)
model.compile(optimizer='rmsprop', loss='mean_squared_error')

# checkpoint
filepath = base_folder + weight_save_folder + "/" + base_model_name + "_24-2-improvement-{epoch:02d}-{loss:.2f}-{val_loss:.2f}.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
tensorboard = TensorBoard(log_dir=tensorboard_log_folder, histogram_freq=1, write_graph=True, write_images=True)
callbacks_list = [checkpoint, tensorboard]

# Train ===========================================
model.fit(x_train, y_train, batch_size=32, validation_split=0.1, epochs=10000, callbacks=callbacks_list)
Example #9
                                             shuffle=True)  #does this matter?

test_batches = test_datagen.flow_from_directory(test_path,
                                                target_size=(48, 48),
                                                classes=['nothappy', 'happy'],
                                                color_mode="grayscale",
                                                batch_size=batch_size,
                                                shuffle=True)

for dense_layer in dense_layers:
    for layer_size in layer_sizes:
        for conv_layer in conv_layers:
            NAME = 'Conv-layers{}-num-filters{}-denselayers{}-SingleEmotionFIXED-{}'.format(
                conv_layer, layer_size, dense_layer, int(time.time()))
            print('\n', NAME)
            tensorboard = TensorBoard(log_dir='conv/logs/{}'.format(NAME))

            model = Sequential()

            model.add(
                Conv2D(layer_size, (3, 3),
                       padding='same',
                       activation='relu',
                       input_shape=(48, 48, 1)))
            model.add(MaxPooling2D(pool_size=(2, 2)))

            for l in range(conv_layer - 1):
                model.add(
                    Conv2D(layer_size, (3, 3),
                           padding='same',
                           activation='relu'))
Example #10
    sampling_rate /= sampling_rate.sum()
    sample_indices = np.transpose(position_indices)[np.random.choice(
        np.arange(len(sampling_rate)),
        min(len(sampling_rate), 2048),
        p=sampling_rate,
        replace=False)]

    replay_input = np.array(
        [stack_to_input(stacks[i], [j, k]) for i, j, k in sample_indices])
    replay_target = moves[tuple(sample_indices.T)]

    training_input.append(replay_input.astype(np.float32))
    training_target.append(replay_target.astype(np.float32))

now = datetime.datetime.now()
tensorboard = TensorBoard(log_dir='./logs/' + now.strftime('%Y.%m.%d %H.%M'))
training_input = np.concatenate(training_input, axis=0)
training_target = np.concatenate(training_target, axis=0)
indices = np.arange(len(training_input))
np.random.shuffle(
    indices)  #  shuffle our training samples to help avoid overfitting
training_input = training_input[indices]
training_target = training_target[indices]

# this is where we fit our model
# couple of key points, we cross validate on 20% of our data that we put into memory - avoids overfitting
# if we don't improve in 10 epochs we give up and stop
# max num of epochs is 1000
model.fit(training_input,
          training_target,
          validation_split=0.2,
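          # The call is truncated in the original snippet. A plausible
          # completion based on the comments above (an illustration, not the
          # author's exact code; assumes EarlyStopping is imported from
          # keras.callbacks):
          epochs=1000,
          callbacks=[EarlyStopping(monitor='val_loss', patience=10, verbose=1)])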
Example #11
        # save_dir = './models/cifar-Res-8/10/client_%d'%count
        save_dir = './models/mnist/mobilenet/1/client_%d' % count
        model_name = 'best_model.h5'
        if not os.path.isdir(save_dir):
            os.makedirs(save_dir)
        filepath = os.path.join(save_dir, model_name)

        # Prepare callbacks for model saving and for learning rate adjustment.
        checkpoint = ModelCheckpoint(filepath=filepath,
                                     monitor='val_accuracy',
                                     verbose=1,
                                     save_best_only=True)

        # set callback
        cbks = [
            TensorBoard(log_dir='./Mobilenet/client_{:d}/'.format(count),
                        histogram_freq=0), checkpoint
        ]

        # dump checkpoint if you need.(add it to cbks)
        # ModelCheckpoint('./checkpoint-{epoch}.h5', save_best_only=False, mode='auto', period=10)

        # set data augmentation
        print("== USING REAL-TIME DATA AUGMENTATION, START TRAIN... ==")
        train_datagen = ImageDataGenerator(rotation_range=15,
                                           width_shift_range=0.1,
                                           height_shift_range=0.1,
                                           shear_range=0.2,
                                           zoom_range=0.1)

        # model.fit_generator(train_datagen.flow(x_train, y_train, batch_size = batch_size),
        #                      steps_per_epoch=11188 // batch_size,
Example #12
# Set up the decoder, using `encoder_states` as initial state.
decoder_inputs = Input(shape=(None, utils.num_decoder_tokens))
# We set up our decoder to return full output sequences,
# and to return internal states as well. We don't use the
# return states in the training model, but we will use them in inference.
decoder_lstm = LSTM(utils.latent_dim, return_sequences=True, return_state=True)
decoder_outputs, _, _ = decoder_lstm(decoder_inputs,
                                     initial_state=encoder_states)
decoder_dense = Dense(utils.num_decoder_tokens, activation='softmax')
decoder_outputs = decoder_dense(decoder_outputs)

# Define the model that will turn
# `encoder_input_data` & `decoder_input_data` into `decoder_target_data`
model = Model([encoder_inputs, decoder_inputs], decoder_outputs)

# Run training
tensorboard = TensorBoard(log_dir="logs2/{}".format(time()))

model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=["accuracy"])
model.fit([utils.encoder_input_data, utils.decoder_input_data],
          utils.decoder_target_data,
          batch_size=batch_size,
          epochs=epochs,
          validation_split=0.2,
          callbacks=[tensorboard])
# Save model
model.save(utils.MODEL_PATH)
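
# The decoder states returned above are discarded during training but, as the
# comments note, they are what inference would use. A brief illustrative
# sketch of the usual seq2seq sampling models (not part of the original
# snippet; it reuses the layers defined above):
encoder_model = Model(encoder_inputs, encoder_states)

decoder_state_input_h = Input(shape=(utils.latent_dim,))
decoder_state_input_c = Input(shape=(utils.latent_dim,))
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
decoder_outputs, state_h, state_c = decoder_lstm(
    decoder_inputs, initial_state=decoder_states_inputs)
decoder_states = [state_h, state_c]
decoder_outputs = decoder_dense(decoder_outputs)
decoder_model = Model([decoder_inputs] + decoder_states_inputs,
                      [decoder_outputs] + decoder_states)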
Example #13
            averaged = self.alpha * K.repeat_elements(summed, f, axis=3)
        denom = K.pow(self.k + averaged, self.beta)
        return x / denom

    def get_output_shape_for(self, input_shape):
        return input_shape


filepath = "weights/cnn/one_fourth_alexnet/weights@epoch-{epoch:03d}-{val_acc:.2f}.hdf5"

value_shift = 0.1
value_range = 0.2
rotation_degree = 20

tensorboard = TensorBoard(log_dir='./Graph/one_fourth_alexnet',
                          histogram_freq=0,
                          write_graph=True,
                          write_images=True)
checkpoint = ModelCheckpoint(filepath,
                             monitor='val_loss',
                             verbose=1,
                             save_best_only=True)
early_stopping = EarlyStopping(monitor='val_loss', verbose=1, patience=5)

callbacks = [tensorboard, checkpoint]

#callbacks = [tensorboard, checkpoint, early_stopping]

train_set = '../dataset/image/train/'

test_set = '../dataset/image/test/'
Example #14
    train_features,  # x: training features
    train_targets,  # y: training targets
    batch_size=32,  # how many samples to process together in one batch
    epochs=1,  # number of training epochs
    verbose=1,  # whether to print the loss and accuracy of each epoch
    # callbacks=None, # whether to use additional callback functions
    # validation_split=0.0, # fraction of the training set to hold out for validation; 0.2 means the last 20% is used for validation
    validation_data=(valid_features,
                     valid_targets),  # or use a separate validation set; None means no extra data
    shuffle=True,  # whether to shuffle the samples before each epoch
    class_weight=None,  # weights for the target classes
    sample_weight=None,  # weights for individual samples
    initial_epoch=0,  # which epoch to start training from
    callbacks=[
        TensorBoard(
            histogram_freq=1,
            #   log_dir="E:/Deep Learning/Comm/OutPut"), # where the plots/logs are saved
            log_dir="/Users/Natsume/Desktop/log_comm"),  # where the plots/logs are saved
        ModelCheckpoint(filepath=best_model_in_training,
                        save_best_only=True,
                        mode='min')
    ])  # save the best model seen during training, not the one from the final epoch

# model.summary()
# load the saved model
model = load_model(best_model_in_training)

# Evaluate the model: check the final loss and accuracy
out = model.evaluate(valid_features,
                     valid_targets,
                     batch_size=32,
                     verbose=1,
                      all_zeros_baseline
                  ])

    # print(model.summary())
    # import sys
    # sys.exit()

    return model


if __name__ == '__main__':
    model = get_model()

    callbacks = [
        TensorBoard(log_dir=EXPERIMENT_NAME + '_logs',
                    histogram_freq=0,
                    write_graph=True,
                    write_images=True),
        CustomCallback(batch_generator(), batch_generator())
    ]

    history = model.fit_generator(generator=batch_generator(),
                                  epochs=config.N_EPOCHS,
                                  steps_per_epoch=100,
                                  validation_data=batch_generator(),
                                  validation_steps=10,
                                  verbose=1,
                                  shuffle=False,
                                  callbacks=callbacks)

    model.save(EXPERIMENT_NAME + '.h5')
Example #16
                                                                img_height),
                                                   batch_size=batch_size,
                                                   class_mode='categorical')
#Callbacks definition:
checkpoint = ModelCheckpoint(WEIGHTS_FNAME,
                             monitor='val_loss',
                             verbose=0,
                             save_best_only=True,
                             save_weights_only=True,
                             mode='auto',
                             period=1)
tb = TensorBoard(log_dir='./logs/week4/' + experiment_name + '/',
                 histogram_freq=0,
                 batch_size=batch_size,
                 write_graph=True,
                 write_grads=False,
                 write_images=True,
                 embeddings_freq=0,
                 embeddings_layer_names=None,
                 embeddings_metadata=None)
reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                              factor=0.1,
                              patience=5,
                              verbose=0,
                              mode='auto',
                              epsilon=0.0001,
                              cooldown=0,
                              min_lr=0)

history = model.fit_generator(train_generator,
                              steps_per_epoch=400 // batch_size,
Example #17
def _main():
    annotation_path = 'train_list.txt'
    log_dir = 'logs/000/'
    classes_path = 'model_data/hat_classes.txt'
    anchors_path = 'model_data/yolo_anchors.txt'
    class_names = get_classes(classes_path)
    num_classes = len(class_names)
    anchors = get_anchors(anchors_path)

    input_shape = (416, 416)  # multiple of 32, hw

    is_tiny_version = len(anchors) == 6  # default setting
    if is_tiny_version:
        model = create_tiny_model(input_shape,
                                  anchors,
                                  num_classes,
                                  load_pretrained=False)
    else:
        model = create_model(input_shape, anchors, num_classes, freeze_body=1)
        # make sure you know what you freeze

    logging = TensorBoard(log_dir=log_dir)
    checkpoint = ModelCheckpoint(
        log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
        monitor='val_loss',
        save_weights_only=True,
        save_best_only=True,
        period=3)
    reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                  factor=0.1,
                                  patience=10,
                                  verbose=1)
    early_stopping = EarlyStopping(monitor='val_loss',
                                   min_delta=0,
                                   patience=10,
                                   verbose=1)

    val_split = 0.2
    with open(annotation_path) as f:
        lines = f.readlines()
    np.random.seed(10101)
    np.random.shuffle(lines)
    np.random.seed(None)
    num_val = int(len(lines) * val_split)
    num_train = len(lines) - num_val

    # Train with frozen layers first, to get a stable loss.
    # Adjust the number of epochs to your dataset. This step is enough to obtain a reasonably good model.
    if True:
        model.compile(
            optimizer=Adam(lr=1e-3),
            loss={
                # use custom yolo_loss Lambda layer.
                'yolo_loss': lambda y_true, y_pred: y_pred
            })

        batch_size = 32
        print('Train on {} samples, val on {} samples, with batch size {}.'.
              format(num_train, num_val, batch_size))
        model.fit_generator(data_generator_wrapper(lines[:num_train],
                                                   batch_size, input_shape,
                                                   anchors, num_classes),
                            steps_per_epoch=max(1, num_train // batch_size),
                            validation_data=data_generator_wrapper(
                                lines[num_train:], batch_size, input_shape,
                                anchors, num_classes),
                            validation_steps=max(1, num_val // batch_size),
                            epochs=50,
                            initial_epoch=0,
                            callbacks=[logging, checkpoint])
        model.save_weights(log_dir + 'trained_weights_stage_1.h5')

    # Unfreeze all layers and continue training
    if True:
        for i in range(len(model.layers)):
            model.layers[i].trainable = True
        model.compile(optimizer=Adam(lr=1e-4),
                      loss={
                          'yolo_loss': lambda y_true, y_pred: y_pred
                      })
        # recompile to apply the change
        print('Unfreeze all of the layers.')

        batch_size = 8  # note that more GPU memory is required after unfreezing the body
        print('Train on {} samples, val on {} samples, with batch size {}.'.
              format(num_train, num_val, batch_size))
        model.fit_generator(
            data_generator_wrapper(lines[:num_train], batch_size, input_shape,
                                   anchors, num_classes),
            steps_per_epoch=max(1, num_train // batch_size),
            validation_data=data_generator_wrapper(lines[num_train:],
                                                   batch_size, input_shape,
                                                   anchors, num_classes),
            validation_steps=max(1, num_val // batch_size),
            epochs=300,
            initial_epoch=50,
            callbacks=[logging, checkpoint, reduce_lr, early_stopping])
        model.save_weights(log_dir + 'trained_weights_fina3.h5')
    train_dataset_info[valid_indexes],
    batch_size, (SIZE, SIZE, 3),
    augument=val_aug,
    oversample_factor=0)
#validation_generator2 = data_generator.create_train(train_dataset_info[valid_indexes],
#                                                   batch_size, (SIZE, SIZE, 3), augument=val_aug, oversample_factor=0)

#checkpoint = ModelCheckpoint(MODEL_PATH + 'model_f1all{}.h5'.format(exp_suffix), monitor='val_f1_all', verbose=1,
#                             save_best_only=True, mode='max', save_weights_only=True)
checkpoint = ModelCheckpoint(MODEL_PATH + 'model_loss{}.h5'.format(exp_suffix),
                             monitor='val_loss',
                             verbose=1,
                             save_best_only=True,
                             mode='min',
                             save_weights_only=True)
tensorboard = TensorBoard(MODEL_PATH + 'logs{}'.format(fold_id) +
                          '{}'.format(exp_suffix) + '/')
# reduceLROnPlat = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3,
#                                   verbose=1, mode='auto', epsilon=0.0001)
# early = EarlyStopping(monitor="val_loss",
#                      mode="min",
#                      patience=6)
#f1_metric = F1Metric(validation_generator2,2*len(valid_indexes)//batch_size,batch_size,28) #2 times val because of val_aug

nb_epochs = epochs[1]
nb_cycles = 15
init_lr = 0.001


def _cosine_anneal_schedule(t):

    cos_inner = np.pi * (t % (nb_epochs // nb_cycles))
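        # The function is truncated in this snippet. The usual cosine-annealing
        # completion (an assumption, not the original lines) would be:
        #     cos_inner /= nb_epochs // nb_cycles
        #     cos_out = np.cos(cos_inner) + 1
        #     return float(init_lr / 2 * cos_out)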
Example #19
        # Make the discriminator network as non-trainable
        discriminator.trainable = False

        # Get the probability of generated high-resolution images
        probs = discriminator(generated_high_resolution_images)

        # Create and compile an adversarial model
        adversarial_model = Model(
            [input_low_resolution, input_high_resolution], [probs, features])
        adversarial_model.compile(loss=['binary_crossentropy', 'mse'],
                                  loss_weights=[1e-3, 1],
                                  optimizer=common_optimizer)

        # Add Tensorboard
        tensorboard = TensorBoard(log_dir="logs/{}".format(time.time()))
        tensorboard.set_model(generator)
        tensorboard.set_model(discriminator)

        for epoch in range(epochs):
            print("Epoch:{}".format(epoch))
            """
            Train the discriminator network
            """

            # Sample a batch of images
            high_resolution_images, low_resolution_images = sample_images(
                data_dir=data_dir,
                batch_size=batch_size,
                low_resolution_shape=low_resolution_shape,
                high_resolution_shape=high_resolution_shape)
x = UpSampling2D((2, 2))(x)
decoded = Conv2D(8, (2, 2), activation='sigmoid', name='conv_7')(x)

# this model maps an input to its reconstruction
autoencoder = Model(input_img, decoded)

# First, we'll configure our model to use a per-pixel binary crossentropy loss, and the Adadelta optimizer:
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')

autoencoder.fit(x_train,
                x_train,
                epochs=50,
                batch_size=128,
                shuffle=True,
                validation_data=(x_test, x_test),
                callbacks=[TensorBoard(log_dir='/tmp/autoencoder')])

encoded_imgs = encoder.predict(x_test)
decoded_imgs = autoencoder.predict(x_test)

# Save encoded images with their labels
encoded_imgs_reshaped_processed = []
for row in [en_img.reshape(1, 4 * 8) for en_img in encoded_imgs]:
    encoded_imgs_reshaped_processed.append(row[0])

print('processed images to reshaped list')

with open("encoded_imgs.csv", "w", newline="") as f:
    writer = csv.writer(f)
    writer.writerows(encoded_imgs_reshaped_processed)
Example #21
valid_generator = generate_arrays_from_file('VALIDATION', validdf)

# Use TensorBoard to log training. To view the training log with the
# TensorBoard GUI, run tensorboard to start a web server and view the
# results in your browser.
#
# Note: you may need to move the log file to your
# local desktop and run tensorboard there.
#
#  tensorboard --logdir=./logs
tensorboard = TensorBoard(log_dir='./logs',
                          histogram_freq=0,
                          batch_size=BS * GPUS,
                          write_graph=True,
                          write_grads=False,
                          write_images=False,
                          embeddings_freq=0,
                          embeddings_layer_names=None,
                          embeddings_metadata=None,
                          embeddings_data=None,
                          update_freq='epoch')

#-----------------------------------------------------
# class checkpointModel
#-----------------------------------------------------
# There is a bug in keras that causes an error when trying to save a model
# trained on multiple GPUs. The workaround is to save the original model
# at the end of every epoch using a callback. See
#    https://github.com/keras-team/keras/issues/8694
if not os.path.exists('model_checkpoints'): os.mkdir('model_checkpoints')
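
# The checkpointModel class referenced above is not included in this snippet.
# A minimal sketch of such a workaround callback, illustrative only and not
# the original author's code (assumes keras.callbacks.Callback is imported),
# could look like this:
class checkpointModel(Callback):
    """Save the original (single-GPU) model at the end of every epoch to work
    around the multi-GPU saving issue described above."""

    def __init__(self, original_model):
        super(checkpointModel, self).__init__()
        self.original_model = original_model

    def on_epoch_end(self, epoch, logs=None):
        self.original_model.save(
            os.path.join('model_checkpoints', 'model_{:03d}.h5'.format(epoch)))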
Example #22
        class_mode='categorical')

validation_generator = val_datagen.flow_from_directory(
        validation_dir,
        target_size=config['input_shape'][:-1],
        batch_size=config['valbatch_size'],
        class_mode='categorical')

test_generator = test_datagen.flow_from_directory(
        test_dir,
        target_size=config['input_shape'][:-1],
        batch_size=config['tsbatch_size'],
        class_mode='categorical')

#%%
tensorboard = TensorBoard(log_dir=f'./{folder_name}', histogram_freq=0, write_graph=True, write_images=True)
#%%
start = time()
history = model.fit_generator(
      train_generator,
      steps_per_epoch=ceil(train_generator.samples /
                           config['trbatch_size']),
      epochs=config['epochs'],
      validation_data=validation_generator,
      validation_steps=ceil(validation_generator.samples /
                            config['valbatch_size']),
      callbacks=[tensorboard])
print()
print('Train duration:', time()-start)

start = time()
Example #23
    if FLAGS.is_tiny:
        model = create_tiny_model(input_shape,
                                  anchors,
                                  num_classes,
                                  freeze_body=2,
                                  weights_path=weights_path)
    else:
        model = create_model(
            input_shape,
            anchors,
            num_classes,
            freeze_body=2,
            weights_path=weights_path)  # make sure you know what you freeze

    log_dir_time = os.path.join(log_dir, "{}".format(int(float(time()))))
    logging = TensorBoard(log_dir=log_dir_time)
    checkpoint = ModelCheckpoint(
        os.path.join(log_dir, "checkpoint.h5"),
        monitor="val_loss",
        save_weights_only=True,
        save_best_only=True,
        period=5,
    )
    reduce_lr = ReduceLROnPlateau(monitor="val_loss",
                                  factor=0.1,
                                  patience=3,
                                  verbose=1)
    early_stopping = EarlyStopping(monitor="val_loss",
                                   min_delta=0,
                                   patience=10,
                                   verbose=1)
Example #24
def main(args):
    epochs = args.epochs
    batch_size = args.batch_size

    print("Loading data...")
    X_train, y_train, X_val, y_val, X_test, y_test = ld.load_final_data(args)

    print("X_train shape:", X_train.shape)
    print("y_train shape:", y_train.shape)
    print("X_val shape:", X_val.shape)
    print("y_val shape:", y_val.shape)
    print("X_test shape:", X_test.shape)
    print("y_test shape:", y_test.shape)

    print("Build model...")
    if (args.model_name == 'model1'):
        model = bm.build_model1()
    elif (args.model_name == 'model2'):
        model = bm.build_model2()
    elif (args.model_name == 'model3'):
        model = bm.build_model3()
    elif (args.model_name == 'model4'):
        model = bm.build_model4()
    elif (args.model_name == 'deepcfp'):
        model = bm.build_DeepCFP()

    model.summary()

    print("Model compiling...")
    opt = keras.optimizers.Adam(lr=args.learning_rate)
    model.compile(loss='binary_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])

    class get_result(Callback):
        def on_epoch_end(self, epoch, logs={}):
            # print('train_loss:',logs.get('loss'))
            # print('train_acc:',logs.get('acc'))
            print('val_loss:', logs.get('val_loss'))
            print('val_acc', logs.get('val_acc'))
            print('')

    result = get_result()

    checkpoint_path = os.path.join(
        args.path, 'weights', args.cell_line,
        args.cell_line + '_' + args.model_name + '.h5df')
    checkpoint = ModelCheckpoint(filepath=checkpoint_path,
                                 save_best_only=True,
                                 save_weights_only=True,
                                 monitor='val_acc',
                                 mode='max')

    tb = TensorBoard(log_dir=os.path.join(
        args.path, 'logs', args.cell_line, args.cell_line + '_' +
        args.model_name))

    callbacks = [result, checkpoint, tb]

    print("Training...")
    history = model.fit(X_train,
                        y_train,
                        epochs=epochs,
                        batch_size=batch_size,
                        validation_data=(X_val, y_val),
                        shuffle=True,
                        verbose=2,
                        callbacks=callbacks)
Example #25
model = Sequential()
model.add(Flatten(input_shape=(1, ) + env.observation_space.shape))
model.add(Dense(30))
model.add(Activation('relu'))
model.add(Dense(30))
model.add(Activation('relu'))
model.add(Dense(nb_actions))
model.add(Activation('linear'))
# print(model.summary())

memory = SequentialMemory(limit=50000, window_length=1)
policy = LinearAnnealedPolicy(NewEpsGreedyQPolicy(),
                              attr='eps',
                              value_max=1.,
                              value_min=.01,
                              value_test=.05,
                              nb_steps=100000)
tb_callback = TensorBoard(log_dir='./logs/weeklyR4')
test_callback = TestCallback(tb_callback)
callbacks = [tb_callback, test_callback]
dqn = NewDQNAgent(model=model,
                  nb_actions=nb_actions,
                  memory=memory,
                  policy=policy,
                  target_model_update=10000)
dqn.compile(Adam(lr=.001), metrics=['mae'])
dqn.fit(env, nb_steps=1747200, visualize=True, verbose=2, callbacks=callbacks)
# hist = dqn.fit(env, nb_steps=1680, visualize=True, verbose=2, callbacks=[tensorboard])
#print(hist.history.keys())
Example #26
def main(job_dir, **args):

    # Reset everything to rerun in jupyter
    tf.reset_default_graph()

    # Setting up the path for saving logs
    logs_path = job_dir + '/logs/'

    # Create the model using Keras functional API
    # input is 2D tensor of L* (lightness) grayscale values
    input_img = Input(shape=(IMAGE_SIZE, IMAGE_SIZE, 1))
    conv_1 = Conv2D(64, (3, 3), activation='relu', padding='same')(input_img)
    conv_1_downsample = Conv2D(64, (3, 3),
                               activation='relu',
                               padding='same',
                               strides=2)(conv_1)
    conv_2 = Conv2D(128, (3, 3), activation='relu',
                    padding='same')(conv_1_downsample)
    conv_2_downsample = Conv2D(128, (3, 3),
                               activation='relu',
                               padding='same',
                               strides=2)(conv_2)
    conv_3 = Conv2D(256, (3, 3), activation='relu',
                    padding='same')(conv_2_downsample)
    conv_3_downsample = Conv2D(256, (3, 3),
                               activation='relu',
                               padding='same',
                               strides=2)(conv_3)
    conv_4 = Conv2D(512, (3, 3), activation='relu',
                    padding='same')(conv_3_downsample)
    conv_5 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv_4)
    conv_6 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv_5)
    conv_6_upsample = UpSampling2D((2, 2))(conv_6)
    conv_7 = Conv2D(64, (3, 3), activation='relu',
                    padding='same')(conv_6_upsample)
    conv_7_upsample = UpSampling2D((2, 2))(conv_7)
    conv_8 = Conv2D(32, (3, 3), activation='relu',
                    padding='same')(conv_7_upsample)
    conv_9 = Conv2D(2, (3, 3), activation='tanh', padding='same')(conv_8)
    conv_9_upsample = UpSampling2D((2, 2))(conv_9)
    model = Model(input_img, conv_9_upsample)

    print "*** Compile Model ***"
    model.compile(optimizer='rmsprop', loss='mse')

    print "*** Get Training Images: ***"

    # Training image URIs provided in a text file on Google Cloud
    image_paths_file = 'gs://mars-vr-3/image_URIs.txt'
    input_file = file_io.FileIO(image_paths_file, mode='r')
    contents = input_file.read()
    paths = contents.split('\n')

    # Read training image data and load it into NumPy array
    X = []
    for filepath in paths:
        if filepath != "":
            file = file_io.FileIO(filepath, mode='r').read()
            X.append(
                np.array(
                    tf.image.decode_jpeg(file).eval(session=tf.Session())))
            print(filepath)
    X = np.array(X, dtype=float)

    # Scale RGB train data to the range 0.0 to 1.0
    split = int(0.95 * len(X))
    Xtrain = X[:split]
    Xtrain = 1.0 / 255 * Xtrain

    datagen = ImageDataGenerator(shear_range=0.2,
                                 zoom_range=0.2,
                                 rotation_range=20,
                                 horizontal_flip=True)

    # Generator function that yields training data (inputs, targets) in lab colourspace
    def image_a_b_gen(batch_size):
        for batch in datagen.flow(Xtrain, batch_size=batch_size):
            lab_batch = rgb2lab(batch)

            # select grayscale layer
            X_batch = lab_batch[:, :, :, 0]

            # select the a (green-red) and b (blue-yellow) layers
            Y_batch = lab_batch[:, :, :, 1:] / 128

            # yield a tuple of (inputs, targets)
            yield (X_batch.reshape(X_batch.shape + (1, )), Y_batch)

    # Print the model summary
    model.summary()

    # Add the callback for TensorBoard and History
    tensorboard = TensorBoard(log_dir=logs_path,
                              histogram_freq=0,
                              write_graph=True,
                              write_images=True)

    print "*** Train Model ***\n"

    # Train the model with TensorFlow
    model.fit_generator(image_a_b_gen(BATCH_SIZE),
                        callbacks=[tensorboard],
                        epochs=EPOCHS,
                        steps_per_epoch=STEPS_PER_EPOCH)

    # Save the model weights
    model_json = model.to_json()
    with open("model.json", "w") as json_file:
        json_file.write(model_json)
    model.save_weights("model.h5")

    # Print the loss value and metrics values for the model in test mode
    Xtest = rgb2lab(1.0 / 255 * X[split:])[:, :, :, 0]
    Xtest = Xtest.reshape(Xtest.shape + (1, ))
    Ytest = rgb2lab(1.0 / 255 * X[split:])[:, :, :, 1:]
    Ytest = Ytest / 128
    print(model.evaluate(Xtest, Ytest, batch_size=BATCH_SIZE))

    # Save model.h5 to specified job directory
    with file_io.FileIO('model.h5', mode='r') as input_f:
        with file_io.FileIO(job_dir + '/model.h5', mode='w+') as output_f:
            output_f.write(input_f.read())

    print "*** Model Saved as model.h5 ***\n"
Example #27
def train(batch, epochs, num_classes, size, weights, tclasses):
    """Train the model.

    # Arguments
        batch: Integer, The number of train samples per batch.
        epochs: Integer, The number of train iterations.
        num_classes: Integer, The number of classes of dataset.
        size: Integer, image size.
        weights: String, The pre_trained model weights.
        tclasses: Integer, The number of classes of pre-trained model.
    """

    train_generator, validation_generator, count1, count2 = generate(batch, size)

    train_generator = ImageDataGenerator(
        rescale=1. / 255,
        shear_range=0.2,
        zoom_range=0.2,
        rotation_range=90,
        width_shift_range=0.2,
        height_shift_range=0.2,
        brightness_range=(1, 1.3),
        horizontal_flip=True)
    
    directory="/home/gnss/Desktop/garbage_train"
    train_generator = MixupImageDataGenerator(generator=train_generator,
                                          directory=directory,
                                          batch_size=32,
                                          img_height=224,
                                          img_width=224,
                                          subset='training')

    if weights:
        model = MobileNetv2((size, size, 3), tclasses)
        model = fine_tune(num_classes, weights, model)
        print(num_classes)
    else:
        model = MobileNetv2((size, size, 3), num_classes)
        print(num_classes)

    opt = Adam(1e-2)
    # earlystop = EarlyStopping(monitor='val_acc', patience=30, verbose=0, mode='auto')
    tensorboard = TensorBoard('/home/gnss/Desktop/MobileNetV2/logs',write_images=True)
    # reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5,patience=3, verbose=0, mode='auto', epsilon=0.0001, cooldown=0, min_lr=0)

    warmup_epoch = 5
    
    warm_up_lr = WarmUpCosineDecayScheduler(learning_rate_base=1e-2,
                                            total_steps=count1 // batch)

    checkpointer = ModelCheckpoint(filepath='/home/gnss/Desktop/MobileNetV2/mobilenet.h5',verbose=1,save_best_only=True)

    model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])

    

    # lr=XTensorBoard('/home/gnss/Desktop/MobileNetV2/logslr')

    hist = model.fit_generator(
        train_generator,
        validation_data=validation_generator,
        steps_per_epoch=count1 // batch,
        validation_steps=count2 // batch,
        epochs=epochs,
        callbacks=[warm_up_lr,tensorboard,checkpointer])
    '''
    learning_rate_base=1e-2
    total_steps=count1 // batch
    plt.plot(warm_up_lr.learning_rates)
    plt.xlabel('Step', fontsize=20)
    plt.ylabel('lr', fontsize=20)
    plt.axis([0, total_steps, 0, learning_rate_base*1.1])
    plt.xticks(np.arange(0, total_steps, 50))
    plt.grid()
    plt.title('Cosine decay with warmup', fontsize=20)
    plt.show()
    '''
    if not os.path.exists('model'):
        os.makedirs('model')

    df = pd.DataFrame.from_dict(hist.history)
    df.to_csv('model/hist.csv', encoding='utf-8', index=False)
    model.save_weights('model/weights.h5')
def model(train_gen, test_gen):
    '''
    Model providing function:

    Create Keras model with double curly brackets dropped-in as needed.
    Return value has to be a valid python dictionary with two customary keys:
        - loss: Specify a numeric evaluation metric to be minimized
        - status: Just use STATUS_OK and see hyperopt documentation if not feasible
    The last one is optional, though recommended, namely:
        - model: specify the model just created so that we can later use it again.
    '''

    #numpy.random.seed(8994)
    nb_classes = 11
    sequence_length = 5
    img_rows, img_cols, img_depth = 64, 64, 3
    input_shape = (img_rows, img_cols, img_depth)

    nb_epoch = 12
    nb_filters = {{choice([32, 64, 128])}}
    pool_size = (2, 2)
    kernel_size = (3, 3)
    cls_weights = [1., 1., 1., 1., 1., 1., 2*{{uniform(0, 1)}}]
    #optimizer = {{choice(['rmsprop', 'adam', 'sgd', 'adadelta'])}} # TODO: leads to inf loss
    optimizer = 'adadelta'

    main_input = Input(shape=input_shape, dtype='float32', name='main_input')
    shared = Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
                           border_mode='valid', input_shape=input_shape)(main_input)
    shared = Activation('relu')(shared)
    shared = Convolution2D(nb_filters, kernel_size[0], kernel_size[1])(shared)
    shared = Activation('relu')(shared)
    shared = MaxPooling2D(pool_size=pool_size)(shared)

    # Conditional extra convolutional layer
    if conditional({{choice(['two', 'three'])}}) == 'three':
        shared = Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
                               border_mode='valid', input_shape=input_shape)(main_input)
        shared = Activation('relu')(shared)
        shared = MaxPooling2D(pool_size=pool_size)(shared)

    dropout_coef = {{uniform(0, 1)}}
    shared = Dropout(dropout_coef)(shared)
    shared = Flatten()(shared)
    shared = Dense(128)(shared)
    shared = Activation('relu')(shared)
    shared = Dropout(dropout_coef)(shared)

    length_cls = Dense((sequence_length + 1))(shared)  # to account for sequence length + 1
    length_cls = Activation('softmax', name="length_cls")(length_cls)

    # 2. First digit classifier
    first_cls = Dense(nb_classes)(shared)
    first_cls = Activation('softmax', name="first_cls")(first_cls)

    # 3. Second digit classifier
    second_cls = Dense(nb_classes)(shared)
    second_cls = Activation('softmax', name="second_cls")(second_cls)

    # 4. Third digit classifier
    third_cls = Dense(nb_classes)(shared)
    third_cls = Activation('softmax', name="third_cls")(third_cls)

    # 5. Fourth digit classifier
    forth_cls = Dense(nb_classes)(shared)
    forth_cls = Activation('softmax', name="forth_cls")(forth_cls)

    # 6. Fifth digit classifier
    fifth_cls = Dense(nb_classes)(shared)
    fifth_cls = Activation('softmax', name="fifth_cls")(fifth_cls)

    # 7. Digit box coordinate regression
    coord_regr = Dense(20, name="coord_regr")(shared)

    # model compilation and training
    model = Model(input=[main_input], output=[length_cls, first_cls, second_cls, third_cls, forth_cls, fifth_cls, coord_regr])

    model.compile(loss={'length_cls': 'categorical_crossentropy', 'first_cls': 'categorical_crossentropy',
                        'second_cls': 'categorical_crossentropy', 'third_cls': 'categorical_crossentropy',
                        'forth_cls': 'categorical_crossentropy', 'fifth_cls': 'categorical_crossentropy',
                        'coord_regr': 'mean_squared_error'},
                  optimizer=optimizer,
                  metrics={'length_cls': 'accuracy', 'first_cls': 'accuracy',
                           'second_cls': 'accuracy', 'third_cls': 'accuracy',
                           'forth_cls': 'accuracy', 'fifth_cls': 'accuracy',
                           'coord_regr': iou_metric_func}, loss_weights=cls_weights)

    directory = "output/" + str(datetime.datetime.now())
    if not os.path.exists(directory):
        os.makedirs(directory)

    checkpointer = ModelCheckpoint(filepath=directory + "/weights.hdf5", verbose=1, save_best_only=True)
    tensorboard = TensorBoard(log_dir=directory, histogram_freq=0, write_graph=True, write_images=False)
    # Todo: P2 add EarlyStopping callback
    # Todo: P3 add LearningRateSchedule callback
    # TODO: P2 batch size are not randomized
    model.fit_generator(train_gen, samples_per_epoch=33401, nb_epoch=nb_epoch, verbose=1, validation_data=test_gen,
                        nb_val_samples=13068, callbacks=[checkpointer, tensorboard])


    score = model.evaluate_generator(test_gen, val_samples=13068)

    print('Test accuracy:', score)
    return {'loss': score[0], 'status': STATUS_OK, 'model': model}
Example #29
               str(current_epoch) + '.h5')
    print('(SIGTERM) terminating the process')

    message = job_name + ' checkpoint'
    send_signal.send(args.node, 10002, message)

    sys.exit()


signal.signal(signal.SIGTERM, terminateProcess)

#################################################################################

logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name

tensorboard_callback = TensorBoard(log_dir=logdir)  #, update_freq='batch')

first_epoch_start = 0


class PrintEpoch(keras.callbacks.Callback):
    def on_epoch_begin(self, epoch, logs=None):
        global current_epoch, first_epoch_start
        #remaining_epochs = epochs - epoch
        current_epoch = epoch
        print('current epoch ' + str(current_epoch))
        global epoch_begin_time
        epoch_begin_time = time.time()
        if epoch == starting_epoch and args.resume:
            first_epoch_start = time.time()
            message = job_name + ' d_end'
Example #30

if __name__ == '__main__':
    toolkit_file.purge_folder('logs')
    shape = X_dataset.shape
    model = buildModel(shape)

    callback = EarlyStopping(monitor="val_loss",
                             patience=30,
                             verbose=1,
                             mode="auto")
    tbCallBack = TensorBoard(
        log_dir='./logs',  # log directory
        histogram_freq=1,  # how often (in epochs) to compute histograms; 0 disables them
        #                  batch_size=batch_size,     # how much data to use when computing histograms
        write_graph=True,  # whether to store the network graph
        write_grads=True,  # whether to visualize gradient histograms
        write_images=True,  # whether to visualize the weights as images
        embeddings_freq=0,
        embeddings_layer_names=None,
        embeddings_metadata=None)

    # model.fit(X_dataset, Y_dataset, epochs=1000, shuffle=True, batch_size=batch_size,
    #           validation_split=0.1, callbacks=[callback, tbCallBack])
    model.fit(X_dataset,
              Y_dataset,
              epochs=1000,
              shuffle=True,
              batch_size=batch_size,
              validation_split=0.1,
              callbacks=[tbCallBack])
    model.save(model_name)