Example #1
def main(_argv):
    physical_devices = tf.config.experimental.list_physical_devices('GPU')
    for physical_device in physical_devices:
        tf.config.experimental.set_memory_growth(physical_device, True)

    if FLAGS.tiny:
        model = YoloV3Tiny(FLAGS.size,
                           training=True,
                           classes=FLAGS.num_classes)
        anchors = yolo_tiny_anchors
        anchor_masks = yolo_tiny_anchor_masks
    else:
        model = YoloV3(FLAGS.size, training=True, classes=FLAGS.num_classes)
        anchors = yolo_anchors
        anchor_masks = yolo_anchor_masks

    post_process_outputs = post_process_block(model.outputs,
                                              classes=FLAGS.num_classes)
    post_process_model = Model(model.inputs, post_process_outputs)

    train_dataset = dataset.load_fake_dataset()
    if FLAGS.dataset:
        train_dataset = dataset.load_tfrecord_dataset(FLAGS.dataset,
                                                      FLAGS.classes,
                                                      FLAGS.size)
    train_dataset = train_dataset.shuffle(buffer_size=512)
    train_dataset = train_dataset.batch(FLAGS.batch_size)
    train_dataset = train_dataset.map(
        lambda x, y: (dataset.transform_images(x, FLAGS.size), y))
    # dataset.transform_targets(y, anchors, anchor_masks, FLAGS.size)))
    train_dataset = train_dataset.prefetch(
        buffer_size=tf.data.experimental.AUTOTUNE)

    val_dataset = dataset.load_fake_dataset()
    if FLAGS.val_dataset:
        val_dataset = dataset.load_tfrecord_dataset(FLAGS.val_dataset,
                                                    FLAGS.classes, FLAGS.size)
    val_dataset = val_dataset.batch(FLAGS.batch_size)
    val_dataset = val_dataset.map(lambda x, y:
                                  (dataset.transform_images(x, FLAGS.size), y))
    # dataset.transform_targets(y, anchors, anchor_masks, FLAGS.size)))

    # Configure the model for transfer learning
    if FLAGS.transfer == 'none':
        pass  # Nothing to do
    elif FLAGS.transfer in ['darknet', 'no_output']:
        # Darknet transfer is a special case that works
        # with an incompatible number of classes

        # reset top layers
        if FLAGS.tiny:
            model_pretrained = YoloV3Tiny(FLAGS.size,
                                          training=True,
                                          classes=FLAGS.weights_num_classes
                                          or FLAGS.num_classes)
        else:
            model_pretrained = YoloV3(FLAGS.size,
                                      training=True,
                                      classes=FLAGS.weights_num_classes
                                      or FLAGS.num_classes)
        model_pretrained.load_weights(FLAGS.weights)

        if FLAGS.transfer == 'darknet':
            model.get_layer('yolo_darknet').set_weights(
                model_pretrained.get_layer('yolo_darknet').get_weights())
            freeze_all(model.get_layer('yolo_darknet'))

        elif FLAGS.transfer == 'no_output':
            for l in model.layers:
                if not l.name.startswith('yolo_output'):
                    l.set_weights(
                        model_pretrained.get_layer(l.name).get_weights())
                    freeze_all(l)
    else:
        # All other transfer modes require matching classes
        model.load_weights(FLAGS.weights)
        if FLAGS.transfer == 'fine_tune':
            # freeze darknet and fine tune other layers
            darknet = model.get_layer('yolo_darknet')
            freeze_all(darknet)
        elif FLAGS.transfer == 'frozen':
            # freeze everything
            freeze_all(model)

    optimizer = tf.keras.optimizers.Adam(lr=FLAGS.learning_rate)
    loss = [
        YoloLoss(anchors[mask], classes=FLAGS.num_classes)
        for mask in anchor_masks
    ]

    # (batch_size, grid, grid, anchors, (x, y, w, h, obj, ...cls))
    # model.outputs shape: [[N, 13, 13, 3, 85], [N, 26, 26, 3, 85], [N, 52, 52, 3, 85]]
    # labels shape: ([N, 13, 13, 3, 6], [N, 26, 26, 3, 6], [N, 52, 52, 3, 6])
    if FLAGS.mode == 'eager_tf':
        # Eager mode is great for debugging
        # Non eager graph mode is recommended for real training
        avg_loss = tf.keras.metrics.Mean('loss', dtype=tf.float32)
        avg_val_loss = tf.keras.metrics.Mean('val_loss', dtype=tf.float32)

        for epoch in range(1, FLAGS.epochs + 1):
            for batch, (images, labels) in enumerate(train_dataset):
                with tf.GradientTape() as tape:
                    outputs = model(images, training=True)
                    regularization_loss = tf.reduce_sum(model.losses)
                    pred_loss = []
                    transf_labels = dataset.transform_targets(
                        labels, anchors, anchor_masks, FLAGS.size)
                    for output, label, loss_fn in zip(outputs, transf_labels,
                                                      loss):
                        pred_loss.append(loss_fn(label, output))
                    total_loss = tf.reduce_sum(pred_loss,
                                               axis=None) + regularization_loss

                grads = tape.gradient(total_loss, model.trainable_variables)
                optimizer.apply_gradients(zip(grads,
                                              model.trainable_variables))

                log_batch(logging, epoch, batch, total_loss, pred_loss)
                avg_loss.update_state(total_loss)

                if batch >= 100:
                    break

            true_pos_total = np.zeros(FLAGS.num_classes)
            false_pos_total = np.zeros(FLAGS.num_classes)
            n_pos_total = np.zeros(FLAGS.num_classes)
            for batch, (images, labels) in enumerate(val_dataset):
                # get losses
                outputs = model(images)
                regularization_loss = tf.reduce_sum(model.losses)
                pred_loss = []
                transf_labels = dataset.transform_targets(
                    labels, anchors, anchor_masks, FLAGS.size)
                for output, label, loss_fn in zip(outputs, transf_labels,
                                                  loss):
                    pred_loss.append(loss_fn(label, output))
                total_loss = tf.reduce_sum(pred_loss) + regularization_loss
                log_batch(logging, epoch, batch, total_loss, pred_loss)
                avg_val_loss.update_state(total_loss)

                # get true positives, false positives, and positive labels
                preds = post_process_model(images)
                true_pos, false_pos, n_pos = batch_true_false_positives(
                    preds.numpy(), labels.numpy(), FLAGS.num_classes)
                true_pos_total += true_pos
                false_pos_total += false_pos
                n_pos_total += n_pos

                if batch >= 20:
                    break

            # precision-recall by class
            precision, recall = batch_precision_recall(true_pos_total,
                                                       false_pos_total,
                                                       n_pos_total)
            for c in range(FLAGS.num_classes):
                print('Class {} - Prec: {}, Rec: {}'.format(
                    c, precision[c], recall[c]))
            # total precision-recall
            print('Total - Prec: {}, Rec: {}'.format(
                calc_precision(np.sum(true_pos_total),
                               np.sum(false_pos_total)),
                calc_recall(np.sum(true_pos_total), np.sum(n_pos_total))))
            import pdb
            pdb.set_trace()

            # log losses
            logging.info("{}, train: {}, val: {}".format(
                epoch,
                avg_loss.result().numpy(),
                avg_val_loss.result().numpy()))

            # reset loop and save weights
            avg_loss.reset_states()
            avg_val_loss.reset_states()
            model.save_weights(
                os.path.join(FLAGS.checkpoint_dir, 'yolov3_train_{}.tf'\
                    .format(epoch)))
    else:
        model.compile(optimizer=optimizer,
                      loss=loss,
                      run_eagerly=(FLAGS.mode == 'eager_fit'))

        callbacks = [
            ReduceLROnPlateau(verbose=1),
            EarlyStopping(patience=3, verbose=1),
            ModelCheckpoint(os.path.join(FLAGS.checkpoint_dir,
                                         'yolov3_train_{epoch}.tf'),
                            verbose=1,
                            save_weights_only=True),
            TensorBoard(log_dir=FLAGS.log_dir)
        ]

        history = model.fit(train_dataset,
                            epochs=FLAGS.epochs,
                            callbacks=callbacks,
                            validation_data=val_dataset)
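
The transfer-learning branches above rely on a `freeze_all` helper that is not shown in the snippet. A minimal sketch of what it is assumed to do, i.e. recursively mark a layer or nested model as non-trainable:

import tensorflow as tf

def freeze_all(model_or_layer, frozen=True):
    # Mark this layer/model non-trainable, then recurse into nested models.
    model_or_layer.trainable = not frozen
    if isinstance(model_or_layer, tf.keras.Model):
        for layer in model_or_layer.layers:
            freeze_all(layer, frozen)
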
Example #2
        model = ki.RT_training_net(id_dim=args.iden_dimension, loop_constant=2)

        file = 'RT_data_dim=' + str(args.iden_dimension**3) + '.npy'

        data1 = np.load(file)[:50000]

        model.compile(
            optimizer=tf.keras.optimizers.Adam(lr=args.learning_rate),
            loss=ki.RT_loss(args.iden_dimension))

        model_name = "knot_model_dim=" + str(args.iden_dimension) + ".h5"

        checkpoint = ModelCheckpoint(model_name,
                                     monitor='loss',
                                     verbose=1,
                                     save_best_only=True,
                                     mode='min')

        callbacks_list = [checkpoint]

        history = model.fit(data1,
                            data1[:, :args.iden_dimension *
                                  args.iden_dimension],
                            batch_size=args.batch_size,
                            epochs=args.epoch,
                            shuffle=True,
                            verbose=1,
                            callbacks=callbacks_list)

    else:
model = Sequential()
model.add(Dense(6 * o, input_dim=d, activation='relu'))
model.add(BatchNormalization())
model.add(Dense(4 * o, activation='relu'))
model.add(BatchNormalization())
model.add(Dense(2 * o, activation='relu'))
model.add(BatchNormalization())
model.add(Dense(o, activation='softmax'))

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['acc'])

cp = ModelCheckpoint('model_dense_softmax.hdf5',
                     monitor='val_acc',
                     verbose=1,
                     save_best_only=True)

history = model.fit(x_train,
                    y_train,
                    epochs=10,
                    batch_size=16,
                    validation_data=(x_val, y_val),
                    callbacks=[cp])

#score = model.evaluate(x_test, y_test, batch_size=32)


def predic(probabilities):
    y = np.zeros(len(probabilities))
    y[np.argmax(probabilities)] += 1
    return y
Example #4
matrix = vectorizer.fit_transform(corpus)
# x = tfidf_data
x = matrix.toarray()
y = df3.iloc[:,1].values


x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=.25, random_state=42)
# the first hidden layer has to have an input dimension matching a row of x

checkpointer = ModelCheckpoint(filepath="dnn/best_weights.hdf5", verbose=0, save_best_only=True)

# loop to re-initialize the model several times to avoid a poor local optimum

for i in range(5):
    model = Sequential()
    model.add(Dense(25, input_dim = x.shape[1], activation='relu'))
    model.add(Dense(10, activation='relu'))
    model.add(Dense(1))
    model.compile(loss='mean_squared_error', optimizer='adam')
    monitor = EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=2, verbose=2, mode='auto')
    model.fit(x_train, y_train, validation_data=(x_test, y_test), callbacks=[monitor, checkpointer], verbose=2, epochs=10)


# now predict on the test set
pred = model.predict(x_test)
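
Because the checkpoint above uses `save_best_only=True` (with the default `save_weights_only=False`), it keeps the best full model seen across all five restarts, while `model` after the loop only holds the last run. A minimal sketch of reloading the overall best model for prediction:

from tensorflow.keras.models import load_model

best_model = load_model("dnn/best_weights.hdf5")
pred = best_model.predict(x_test)
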
Example #5
    #Resize image to match training data
    img = resize(img, (IMG_HEIGHT, IMG_WIDTH),
                 mode='constant',
                 preserve_range=True)

    #Append image to numpy array for test dataset
    X_test[n] = img

print('Done!')

# train the model
inputs = Input((IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS))
output = u_net_output(inputs)

model = Model(inputs=[inputs], outputs=[output])
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])
model.summary()

# Fit model
earlystopper = EarlyStopping(patience=15, verbose=1)
checkpointer = ModelCheckpoint('model_unet_checkpoint.h5',
                               verbose=1,
                               save_best_only=True)
results = model.fit(X_train,
                    Y_train,
                    validation_split=0.1,
                    batch_size=16,
                    epochs=100,
                    callbacks=[earlystopper, checkpointer])
Example #6
CNN.add(
    TimeDistributed(
        Conv2D(1, (3, 3), activation='relu', padding='same', strides=(1, 1))))
CNN.add(TimeDistributed(Dropout(0.4)))
CNN.add(TimeDistributed(MaxPool2D(pool_size=(3, 3), strides=(2, 2))))
CNN.add(TimeDistributed(Dropout(0.5)))
CNN.add(TimeDistributed(Flatten()))
CNN.add(LSTM(128, return_sequences=False))
CNN.add(Dense(2, activation='softmax'))
CNN.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
CNN.summary()

CNN.save('ConvLSTM_improved_pen.h5')

callback_list = [
    ModelCheckpoint(filepath='ConvLSTM_improved_pen_checkpoint.h5',
                    monitor='val_loss',
                    save_best_only=True),
    TensorBoard(log_dir="logs".format(time.asctime()))
]
history = CNN.fit(X_train,
                  y_train,
                  batch_size=batch,
                  epochs=epoch,
                  shuffle=True,
                  validation_data=(X_valid, y_valid),
                  callbacks=callback_list)

epochs = np.arange(1, epoch + 1)
plt.plot(epochs, history.history['loss'])
plt.plot(epochs, history.history['val_loss'])
plt.xlabel('epochs')
save_dir = os.path.join(os.getcwd(), 'saved_models')
model_name = 'colorized_ae_model.{epoch:03d}.h5'
if not os.path.isdir(save_dir):
    os.makedirs(save_dir)
filepath = os.path.join(save_dir, model_name)

# reduce learning rate by sqrt(0.1) if the loss does not improve in 5 epochs
lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1),
                               cooldown=0,
                               patience=5,
                               verbose=1,
                               min_lr=0.5e-6)

# save weights for future use (e.g. reload parameters w/o training)
checkpoint = ModelCheckpoint(filepath=filepath,
                             monitor='val_loss',
                             verbose=1,
                             save_best_only=True)

# Mean Square Error (MSE) loss function, Adam optimizer
autoencoder.compile(loss='mse', optimizer='adam')

# called every epoch
callbacks = [lr_reducer, checkpoint]

# train the autoencoder
autoencoder.fit(x_train_gray,
                x_train,
                validation_data=(x_test_gray, x_test),
                epochs=30,
                batch_size=batch_size,
                callbacks=callbacks)
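
A worked example of the ReduceLROnPlateau schedule above: each plateau multiplies the learning rate by sqrt(0.1), so two consecutive plateaus cut it by a full decade, floored at min_lr. The starting rate of 1e-3 below is an assumption (Adam's default); it is not set explicitly above.

import numpy as np

lr = 1e-3  # assumed starting learning rate (Adam default)
for plateau in range(1, 5):
    lr = max(lr * np.sqrt(0.1), 0.5e-6)
    print('after plateau {}: lr = {:.2e}'.format(plateau, lr))
# after plateau 1: lr = 3.16e-04
# after plateau 2: lr = 1.00e-04
# after plateau 3: lr = 3.16e-05
# after plateau 4: lr = 1.00e-05
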
Example #8
def main(args):
    # Fixed parameters
    age_classes = 101
    gender_classes = 1

    data_dir = args.data_dir
    dataset = args.dataset
    model_type = args.model
    image_size = args.image_size
    batch_size = args.batch_size
    epochs = args.epochs
    lr = args.lr

    # Data preparation
    ((train_paths, train_ages, train_genders),
     (val_paths, val_ages, val_genders)) = utils.load_data(data_dir, dataset)

    num_train = len(train_paths)
    num_val = len(val_paths)
    print(f'Model type: {model_type}')
    print(f'Number of training examples: {len(train_paths)}')
    print(f'Number of validation examples: {len(val_paths)}')

    train_data = tf.data.Dataset.from_tensor_slices(
        (train_paths, train_ages, train_genders))
    train_data = train_data.shuffle(1000) \
        .map(lambda x, y, z: parse_fn(x, y, z, image_size),
             num_parallel_calls=tf.data.experimental.AUTOTUNE) \
        .batch(batch_size) \
        .repeat() \
        .prefetch(tf.data.experimental.AUTOTUNE)

    val_data = tf.data.Dataset.from_tensor_slices(
        (val_paths, val_ages, val_genders))
    val_data = val_data.map(
        lambda x, y, z: parse_fn(x, y, z, image_size, False),
        num_parallel_calls=tf.data.experimental.AUTOTUNE) \
        .batch(batch_size) \
        .prefetch(tf.data.experimental.AUTOTUNE)

    # from matplotlib import pyplot as plt
    # for batch in train_data.take(1):
    #     pass
    #
    # images = batch[0]
    # aug_images = batch[1]
    # ages = batch[2]['age']
    # for image, aug_image, age in zip(images[:10], aug_images[:10], ages[:10]):
    #     image = image.numpy() * 255.
    #     image = image.astype('uint8')
    #     aug_image = aug_image.numpy() * 255.
    #     aug_image = aug_image.astype('uint8')
    #     print(np.argmax(age.numpy()))
    #
    #     plt.subplot(121)
    #     plt.imshow(image)
    #     plt.subplot(122)
    #     plt.imshow(aug_image)
    #     plt.show()

    # Build the model
    input_shape = (image_size, image_size, 3)
    base_model_fn = getattr(sys.modules['models'], model_type)
    base_model = base_model_fn(input_shape, include_top=False)

    age_out = layers.Dense(units=age_classes, activation='softmax',
                           name='age')(base_model.output)
    gender_out = layers.Dense(units=gender_classes,
                              activation='sigmoid',
                              name='gender')(base_model.output)
    model = Model(base_model.input, [age_out, gender_out])

    opt = Adam(learning_rate=lr)
    opt = tf.train.experimental.enable_mixed_precision_graph_rewrite(opt)
    model.compile(loss={
        'age': 'categorical_crossentropy',
        'gender': 'binary_crossentropy'
    },
                  optimizer=opt,
                  metrics={
                      'age': mae,
                      'gender': 'acc'
                  })
    model.summary()

    # Prepare model saving directory.
    save_dir = os.path.join(os.getcwd(), args.save_dir)
    model_name = 'age_gender_%s.{epoch:03d}.{val_loss:.4f}.h5' % model_type
    if not os.path.isdir(save_dir):
        os.makedirs(save_dir)
    filepath = os.path.join(save_dir, model_name)

    # Prepare callbacks for saving the model and learning rate schedule
    checkpoint = ModelCheckpoint(filepath=filepath,
                                 monitor='val_loss',
                                 verbose=1,
                                 save_best_only=True)

    lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1),
                                   cooldown=0,
                                   patience=3,
                                   verbose=1,
                                   min_lr=0.5e-6)
    early_stopping = EarlyStopping(monitor='val_loss',
                                   mode='auto',
                                   patience=5,
                                   verbose=1,
                                   restore_best_weights=True)
    callbacks = [checkpoint, lr_reducer, early_stopping]

    # Train the model
    hist = model.fit(train_data,
                     steps_per_epoch=num_train // batch_size,
                     validation_data=val_data,
                     validation_steps=num_val // batch_size,
                     epochs=epochs,
                     callbacks=callbacks)

    if args.save_history:
        history_path = os.path.join(save_dir, 'history.npy')
        np.save(history_path, hist.history)
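
The `tf.train.experimental.enable_mixed_precision_graph_rewrite` call used above is deprecated in newer TF 2.x releases. A sketch of the Keras-native replacement (not part of the original script; Keras applies loss scaling automatically once the global policy is 'mixed_float16'):

from tensorflow.keras import mixed_precision
from tensorflow.keras.optimizers import Adam

mixed_precision.set_global_policy('mixed_float16')
opt = Adam(learning_rate=1e-3)  # illustrative rate; the script takes it from args.lr
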
    def train(self,
              batch_size,
              num_epochs,
              steps_per_epoch=None,
              lr_decay_type='plateau',
              init_learn_rate=None,
              verbose=0,
              data_grouper=None,
              additional_callbacks=[],
              val_batch_size=64,
              hook_tensorbord=True,
              chkpt_monitor=('val_loss', 'auto'),
              prefetch_data=False):
        """

        :param batch_size:
        :param num_epochs:
        :param steps_per_epoch:
        :param lr_decay_type: 'interval' or 'plateau'
        :return:
        """

        assert lr_decay_type == 'plateau' or lr_decay_type == 'interval', 'invalid option for lr_decay_type'

        ds_train_ = BatchData(self.train_ds, batch_size, remainder=False)
        if (steps_per_epoch is None):
            steps_per_epoch = ds_train_.size()

        if (data_grouper is not None):
            ds_train_ = data_grouper(ds_train_)

        #for parallel loading
        if (prefetch_data):
            ds_train_ = MultiProcessRunnerZMQ(ds_train_, num_proc=15)
        #ds_train_ = BatchData(ds_train_, 256)

        ds_train_ = RepeatedData(ds_train_, -1)

        ds_train_.reset_state()
        batcher_train = ds_train_.get_data()

        ds_val_ = BatchData(self.val_ds, val_batch_size, remainder=True)
        if (data_grouper is not None):
            ds_val_ = data_grouper(ds_val_)

        # ds_val_ = FixedSizeData(ds_val_ , ds_val_.size()/1) #only evaluate on the first 50% of the data
        val_steps = ds_val_.size()
        ds_val_ = RepeatedData(ds_val_, -1)

        ds_val_.reset_state()
        batcher_val = ds_val_.get_data()
        # val_steps =  20#ds_val_.size()/2  # only evaluate on 50% of data

        if (init_learn_rate is not None):
            K.set_value(self.model.model_train.optimizer.lr, init_learn_rate)

        print("Training with:  ")
        print('    nepochs', num_epochs)
        print('    number of iterations/epoch', steps_per_epoch)

        print('lr before for loop',
              K.get_value(self.model.model_train.optimizer.lr))

        if (lr_decay_type == 'plateau'):
            reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                          mode='auto',
                                          factor=0.25,
                                          patience=5,
                                          min_lr=1e-6)
        else:
            reduce_lr = get_interval_lrscheduler_callback(
                self.model.model_train, epoch_interval=18, factor=0.1)

        if (not os.path.exists(self.model_save_dir)):
            os.mkdir(self.model_save_dir)

        model_filepath = os.path.join(self.model_save_dir,
                                      self.prefix + '.hd5')

        monitor, mode = chkpt_monitor
        if (self.model.multigpu_train):
            model_checkpoint = CustomModelCheckpointCallback(
                model_filepath,
                self.model.model_main,
                monitor=monitor,
                verbose=1,
                save_best_only=True,
                save_weights_only=True,
                mode=mode,
                period=1)
        else:
            model_checkpoint = ModelCheckpoint(model_filepath,
                                               monitor=monitor,
                                               verbose=1,
                                               save_best_only=True,
                                               save_weights_only=True,
                                               mode=mode,
                                               period=1)

        lr_printer = LearningRatePrinter()

        tensor_board = TensorBoard(log_dir=self.model_save_dir)

        callbacks = [reduce_lr, lr_printer, model_checkpoint]

        callbacks.extend(additional_callbacks)
        if (hook_tensorbord):
            callbacks.append(tensor_board)

        self.save_model_json()

        def new_batcher(b):
            for d in b:
                yield tuple(d)  #(d[0], d[1])

        tfv = tf.__version__.split('.')[0]
        if (tfv == '1'):
            self.model.model_train.fit_generator(
                new_batcher(batcher_train),
                steps_per_epoch=steps_per_epoch,
                epochs=num_epochs,
                verbose=verbose,
                callbacks=callbacks,
                validation_data=batcher_val,
                validation_steps=val_steps)
        else:

            # for TensorFlow 2.0.2 and greater
            # (does not work for 2.x versions below 2.0.2)
            self.model.model_train.fit(
                new_batcher(batcher_train),
                steps_per_epoch=steps_per_epoch,
                epochs=num_epochs,
                verbose=verbose,
                callbacks=callbacks,
                validation_data=new_batcher(batcher_val),
                validation_steps=val_steps)
Example #10
        lr *= 1e-1

    print('Learning rate: ', lr)

    return lr


LRScheduler = LearningRateScheduler(lrSchedule)

# Create checkpoint for the training
# This checkpoint performs model saving when
# an epoch gives highest testing accuracy
filepath = modelname + ".hdf5"
checkpoint = ModelCheckpoint(filepath,
                             monitor='val_acc',
                             verbose=0,
                             save_best_only=True,
                             mode='max')

# Log the epoch detail into csv
csv_logger = CSVLogger(modelname + '.csv')
callbacks_list = [checkpoint, csv_logger, LRScheduler]

# Fit the model
datagen = ImageDataGenerator(width_shift_range=0.1,
                             height_shift_range=0.1,
                             rotation_range=20,
                             horizontal_flip=True,
                             vertical_flip=False)

model.fit_generator(
Example #11
def _main(args):
    # TODO: training data
    annotation_train_file = args.train_file
    assert os.path.exists(annotation_train_file), "train file {} does not exist".format(annotation_train_file)
    annotation_val_file = args.val_file
    assert os.path.exists(annotation_val_file), "val file {} does not exist".format(annotation_val_file)
    model_file = args.model_file
    assert model_file.endswith('.h5'), '{} is not a .h5 file'.format(model_file)
    # make sure the logs directory exists
    logs_path = args.logs_path
    if not os.path.exists(logs_path):
        os.mkdir(logs_path)

    # TODO: classes and anchors data
    classes_file = args.classes_file
    assert os.path.exists(classes_file), "classes file {} does not exist".format(classes_file)
    anchors_file = args.anchors_file
    assert os.path.exists(anchors_file), "anchor file {} does not exist".format(anchors_file)

    batch_size = int(args.batch_size)
    if batch_size <= 0:
        batch_size = 128

    epochs = int(args.epochs)
    if epochs <= 0:
        epochs = 100

    # TODO: load classes and anchors
    class_names = get_classes(classes_file)
    num_classes = len(class_names)
    anchors = get_anchors(anchors_file)

    # todo model
    # multiple of 32, hw
    input_shape = (416, 416)

    # todo train
    model = create_model(input_shape, anchors, num_classes, freeze_body=2,
                         weights_path=model_file)
    logging = TensorBoard(log_dir=logs_path)
    checkpoint = ModelCheckpoint(logs_path + "{epoch:02d}.h5",
                                 monitor="val_loss",
                                 save_weights_only=True, save_best_only=True, period=3)
    # change the learning-rate schedule
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=10, verbose=1)
    early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=15, verbose=1)

    # TODO: load the data
    train_sequence = SequenceData(annotation_train_file, input_shape, anchors, num_classes, batch_size)
    val_sequence = SequenceData(annotation_val_file, input_shape, anchors, num_classes, batch_size)

    # Train with frozen layers first, to get a stable loss.
    # Adjust the number of epochs to your dataset. This step is enough to obtain a reasonably good model.
    if True:
        model.compile(optimizer=Adam(lr=1e-3), loss={'yolo_loss': lambda y_true, y_pred: y_pred})
        model.summary()
        model.fit(
            train_sequence,
            batch_size=batch_size,
            epochs=epochs,
            callbacks=[logging, checkpoint],
            validation_data=val_sequence,
            initial_epoch=0,
            steps_per_epoch=train_sequence.get_epochs(),
            validation_steps=val_sequence.get_epochs(),
            # validation_batch_size=batch_size,
            max_queue_size=20,
            workers=4)

        model.save_weights(logs_path + 'trained_weights_stage_1.h5')
        model.save(logs_path + "yolov3-model-1.h5")

    if True:
        for i in range(int(len(model.layers)/2)):
            model.layers[i].trainable = True
        model.compile(optimizer=Adam(lr=1e-4),
                      loss={'yolo_loss': lambda y_true, y_pred: y_pred})  # recompile to apply the change
        print('Unfreeze the first half of the layers.')

        model.fit(
            train_sequence,
            batch_size=batch_size,
            epochs=epochs*3,
            callbacks=[logging, checkpoint, reduce_lr, early_stopping],
            validation_data=val_sequence,
            initial_epoch=epochs,
            steps_per_epoch=train_sequence.get_epochs(),
            validation_steps=val_sequence.get_epochs(),
            #validation_batch_size=batch_size,
            max_queue_size=20,
            workers=4)

        model.save_weights(logs_path + 'trained_weights_Unfreeze.h5')
        model.save(logs_path + "yolov3-model-2.h5")
Example #12
                y_train = y_train[0:-train_trailing_samples]
            if test_trailing_samples != 0:
                X_test_ = X_test_[0:-test_trailing_samples]
                y_test_one_hot = y_test_one_hot[0:-test_trailing_samples]
                y_test = y_test[0:-test_trailing_samples]

            print(y_train.shape, y_test.shape)

            rnn_model = model(x_train = X_train_, num_labels = NUM_LABELS, LSTM_units = LSTM_UNITS, \
                dropout = DROPOUT, num_conv_filters = CNN_FILTERS, batch_size = BATCH_SIZE)

            model_filename = SAVE_DIR + '/best_model_baseline_' + str(
                DATA_FILE[0:-4]) + '_fold_' + str(i) + '.h5'
            callbacks = [
                ModelCheckpoint(filepath=model_filename,
                                monitor='val_acc',
                                save_weights_only=True,
                                save_best_only=True),
                EarlyStopping(monitor='val_acc', patience=PATIENCE)
            ]  #, LearningRateScheduler()]

            opt = optimizers.Adam(clipnorm=1.)

            rnn_model.compile(optimizer=opt,
                              loss='categorical_crossentropy',
                              metrics=['accuracy'])

            history = rnn_model.fit(X_train_,
                                    y_train_one_hot,
                                    epochs=EPOCH,
                                    batch_size=BATCH_SIZE,
                                    verbose=1,
Example #13
def train(input_ckpt,output_ckpt,tfrec_dir,tboard_dir,input_height,input_width, \
          input_chan,batchsize,epochs,learnrate,target_acc):
    '''
    tf.data pipelines
    '''
    # train and test folders
    train_dataset = input_fn_trn(tfrec_dir, batchsize)
    test_dataset = input_fn_test(tfrec_dir, batchsize)
    '''
    Call backs
    '''
    tb_call = TensorBoard(log_dir=tboard_dir)

    chkpt_call = ModelCheckpoint(filepath=output_ckpt,
                                 monitor='val_acc',
                                 verbose=1,
                                 save_best_only=True,
                                 save_weights_only=True)

    early_stop_call = EarlyStoponAcc(target_acc)

    callbacks_list = [tb_call, chkpt_call, early_stop_call]

    # if required, tf.set_pruning_mode must be set before defining the model
    if (input_ckpt != ''):
        tf.set_pruning_mode()
    '''
    Define the model
    '''
    model = mobilenetv2(input_shape=(input_height, input_width, input_chan),
                        classes=2,
                        alpha=1.0,
                        incl_softmax=False)
    '''
    Compile model
    Adam optimizer to change weights & biases
    Loss function is categorical crossentropy
    '''
    model.compile(optimizer=tf.train.AdamOptimizer(learning_rate=learnrate),
                  loss=SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'])
    '''
    If an input checkpoint is specified then assume we are fine-tuning a pruned model,
    so load the weights into the model, otherwise we are training from scratch
    '''
    if (input_ckpt != ''):
        print('Loading checkpoint - fine-tuning from', input_ckpt)
        model.load_weights(input_ckpt)
    else:
        print('Training from scratch..')

        print('\n' + DIVIDER)
        print(' Model Summary')
        print(DIVIDER)
        print(model.summary())
        print("Model Inputs: {ips}".format(ips=(model.inputs)))
        print("Model Outputs: {ops}".format(ops=(model.outputs)))
    '''
    Training
    '''
    print('\n' + DIVIDER)
    print(' Training model with training set..')
    print(DIVIDER)

    # make folder for saving trained model checkpoint
    os.makedirs(os.path.dirname(output_ckpt), exist_ok=True)

    # run training
    train_history = model.fit(train_dataset,
                              epochs=epochs,
                              steps_per_epoch=20000 // batchsize,
                              validation_data=test_dataset,
                              validation_steps=5000 // batchsize,
                              callbacks=callbacks_list,
                              verbose=1)
    '''
    save just the model architecture (no weights) to a JSON file
    '''
    with open(os.path.join(os.path.dirname(output_ckpt), 'baseline_arch.json'),
              'w') as f:
        f.write(model.to_json())

    print(
        "\nTensorBoard can be opened with the command: tensorboard --logdir={dir} --host localhost --port 6006"
        .format(dir=tboard_dir))

    return
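
`EarlyStoponAcc` used in the callback list above is not defined in the snippet; a minimal sketch of a callback that stops training once the monitored validation accuracy reaches `target_acc` (an assumption about its behavior based on the name):

import tensorflow as tf

class EarlyStoponAcc(tf.keras.callbacks.Callback):
    def __init__(self, target_acc):
        super().__init__()
        self.target_acc = target_acc

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        val_acc = logs.get('val_acc', logs.get('val_accuracy', 0.0))
        if val_acc >= self.target_acc:
            print('Reached target accuracy of {}, stopping training..'.format(self.target_acc))
            self.model.stop_training = True
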
Example #14
def main(args):

    mpi = False
    if 'sourcedir.tar.gz' in args.tensorboard_dir:
        tensorboard_dir = re.sub('source/sourcedir.tar.gz', 'model',
                                 args.tensorboard_dir)
    else:
        tensorboard_dir = args.tensorboard_dir
    logging.info("Writing TensorBoard logs to {}".format(tensorboard_dir))
    if 'sagemaker_mpi_enabled' in args.fw_params:
        if args.fw_params['sagemaker_mpi_enabled']:
            import horovod.tensorflow.keras as hvd
            mpi = True
            # Horovod: initialize Horovod.
            hvd.init()

            # Horovod: pin GPU to be used to process local rank (one GPU per process)
            config = tf.ConfigProto()
            config.gpu_options.allow_growth = True
            config.gpu_options.visible_device_list = str(hvd.local_rank())
            K.set_session(tf.Session(config=config))
    else:
        hvd = None

    logging.info("Running with MPI={}".format(mpi))
    logging.info("getting data")
    train_dataset = train_input_fn()
    eval_dataset = eval_input_fn()
    validation_dataset = validation_input_fn()

    logging.info("configuring model")
    model = keras_model_fn(args.learning_rate, args.weight_decay,
                           args.optimizer, args.momentum, mpi, hvd)
    callbacks = []
    if mpi:
        callbacks.append(hvd.callbacks.BroadcastGlobalVariablesCallback(0))
        callbacks.append(hvd.callbacks.MetricAverageCallback())
        callbacks.append(
            hvd.callbacks.LearningRateWarmupCallback(warmup_epochs=5,
                                                     verbose=1))
        callbacks.append(
            tf.keras.callbacks.ReduceLROnPlateau(patience=10, verbose=1))
        if hvd.rank() == 0:
            callbacks.append(
                ModelCheckpoint(args.output_dir + '/checkpoint-{epoch}.h5'))
            callbacks.append(
                CustomTensorBoardCallback(log_dir=tensorboard_dir))
    else:
        callbacks.append(
            ModelCheckpoint(args.output_dir + '/checkpoint-{epoch}.h5'))
        callbacks.append(CustomTensorBoardCallback(log_dir=tensorboard_dir))
    logging.info("Starting training")
    size = 1
    if mpi:
        size = hvd.size()
    model.fit(
        x=train_dataset[0],
        y=train_dataset[1],
        steps_per_epoch=(num_examples_per_epoch('train') // args.batch_size) //
        size,
        epochs=args.epochs,
        validation_data=validation_dataset,
        validation_steps=(num_examples_per_epoch('validation') //
                          args.batch_size) // size,
        callbacks=callbacks)

    score = model.evaluate(eval_dataset[0],
                           eval_dataset[1],
                           steps=num_examples_per_epoch('eval') //
                           args.batch_size,
                           verbose=0)

    logging.info('Test loss:{}'.format(score[0]))
    logging.info('Test accuracy:{}'.format(score[1]))

    # Horovod: Save model only on worker 0 (i.e. master)
    if mpi:
        if hvd.rank() == 0:
            return save_model(model, args.model_output_dir)
    else:
        return save_model(model, args.model_output_dir)
Example #15
#save_graph_json(model1, project_paths["weights"] + "/reg_model.json")

checkpoint_path = project_paths["checkpoints"] + "/weights_epoch-{epoch}.ckpt"

project_paths = get_project_paths(sys.argv[0], to_tmp=False)

history1 = model1.fit(x_train,
                      x_train,
                      epochs=epochs - skip_epoch,
                      shuffle=True,
                      validation_data=(x_test, x_test),
                      callbacks=[
                          csv_logger,
                          ModelCheckpoint(filepath=checkpoint_path,
                                          save_weights_only=False,
                                          period=1,
                                          weight_pred_ind=1,
                                          skip_info=skip_info)
                      ])

model1.summary()
predicted_images_reg = model1(x_test)
print("<-------------------Reinitializing the Orig model ----------------->")
logs2 = project_paths["weights"] + "/orig_model_history_log.csv"
csv_logger2 = CSVLogger(logs2, append=True)
load_model = project_paths["checkpoints"] + "/weights_epoch-" + str(
    skip_from
) + ".ckpt"  # -1 is adjusted to pick the checkpoint just before reg , the epoch counter starts from 0 is adjusted with ckpt stored from 1
print("Loading model ")
print(load_model)
model2 = tf.keras.models.load_model(load_model)
        if (swap_dim):
            input_dim = (sample_length, 2)
        else:
            input_dim = (2, sample_length)

        # load data
        filename = 'pkl_data/' + str(sample_length) + '.pkl'
        x_train, y_train, x_val, y_val, x_test, y_test, val_SNRs, test_SNRs = utils.radioml_IQ_data(
            filename, mod_name, swap_dim=swap_dim)

        # callbacks
        early_stopping = EarlyStopping(monitor='val_loss', patience=patience)
        best_model_path = 'result/models/LSTM/' + str(
            sample_length) + '/' + str(mod_name) + 'best.h5'
        checkpointer = ModelCheckpoint(best_model_path,
                                       verbose=1,
                                       save_best_only=True)
        TB_dir = 'result/TB/' + str(mod_name) + '_' + str(sample_length)
        tensorboard = TensorBoard(TB_dir)

        model = utils.lstm(lr, input_dim)

        history = model.fit(
            x_train,
            y_train,
            epochs=max_epoch,
            batch_size=batch_size,
            verbose=1,
            shuffle=True,
            validation_data=(x_val, y_val),
            callbacks=[early_stopping, checkpointer, tensorboard])
# checkpoint
checkpoint_path = 'C:/cpi_image_test2/model/checkpoint_Inception-based model.ckpt'

from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau

patient = 5
callback_list = [
    ReduceLROnPlateau(monitor='val_loss',
                      factor=0.1,
                      patience=patient,
                      min_lr=0.00001,
                      mode='auto'),
    ModelCheckpoint(checkpoint_path,
                    monitor='val_loss',
                    verbose=1,
                    mode='auto',
                    save_weights_only=True,
                    save_best_only=True),
    EarlyStopping(monitor='val_loss', patience=patient)
]

model.fit(X_train,
          y_train,
          batch_size=64,
          epochs=40,
          validation_split=0.15,
          callbacks=callback_list)

model.load_weights(checkpoint_path)

print("정확도 : %.2f " % (model.evaluate(X_test, y_test)[1]))
Example #18
NN_model.add(Dense(32, kernel_initializer='normal', activation='relu'))
NN_model.add(Dense(32, kernel_initializer='normal', activation='relu'))
NN_model.add(Dense(32, kernel_initializer='normal', activation='relu'))

# The Output Layer :
# TODO: add S_LEN here too
NN_model.add(Dense(1125, kernel_initializer='normal', activation='linear'))

# Compile the network :
NN_model.compile(loss='mean_absolute_error',
                 optimizer='adam',
                 metrics=['mean_absolute_error'])
NN_model.summary()

# CallBacks:
checkpoint_name = 'Weights-{epoch:03d}--{val_loss:.5f}.hdf5'
checkpoint = ModelCheckpoint(checkpoint_name,
                             monitor='val_loss',
                             verbose=1,
                             save_best_only=True,
                             mode='auto')
callbacks_list = [checkpoint]

# Train the model:
NN_model.fit(train,
             target,
             epochs=500,
             batch_size=32,
             validation_split=0.2,
             callbacks=callbacks_list)
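
Since the checkpoint files above encode the validation loss in their names ('Weights-{epoch:03d}--{val_loss:.5f}.hdf5'), the best one can be located and reloaded after training. A sketch, assuming the checkpoints were written to the working directory:

import glob

def val_loss_of(path):
    # filename looks like 'Weights-012--0.01234.hdf5'
    return float(path.split('--')[1].replace('.hdf5', ''))

best_file = min(glob.glob('Weights-*--*.hdf5'), key=val_loss_of)
NN_model.load_weights(best_file)
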
from tensorflow.keras import regularizers
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau


def bird_model1():
    input1 = Input(shape=(data1.shape[1], data1.shape[2],))
    # conv1 = Conv2D()
    gru1 = GRU(64, return_sequences=True)(input1)
    gru2 = GRU(64, return_sequences=False)(gru1)
    # flatten1 = Flatten()(input1)
    dense1 = Dense(10, activation='relu')(gru2)
    sigmoid = Dense(1, activation='sigmoid')(dense1)

    model = Model(inputs=input1, outputs=sigmoid)
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model

model = bird_model1()
model.summary()

callbacks = [
    EarlyStopping(patience=2),
    ModelCheckpoint(filepath='model.{epoch:02d}-{val_loss:.2f}.h5', monitor='val_loss'),
    ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=5, verbose=1, min_lr=0.000001),
]

history = model.fit(X_train, y_train, batch_size=128, epochs=150, callbacks=callbacks, validation_data=(X_test, y_test))



                                                                    
Example #20
print('RNN Model (Decoder) Summary : ')
print(model.summary())

"""
    Train the model, saving after each epoch
"""
num_of_epochs = config['num_of_epochs']
batch_size = config['batch_size']
steps_train = len(X2train)//batch_size
if len(X2train)%batch_size!=0:
    steps_train = steps_train+1
steps_val = len(X2val)//batch_size
if len(X2val)%batch_size!=0:
    steps_val = steps_val+1
model_save_path = config['model_data_path']+"model_"+str(config['model_type'])+"_epoch-{epoch:02d}_train_loss-{loss:.4f}_val_loss-{val_loss:.4f}.hdf5"
checkpoint = ModelCheckpoint(model_save_path, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
callbacks = [checkpoint]

print('steps_train: {}, steps_val: {}'.format(steps_train,steps_val))
print('Batch Size: {}'.format(batch_size))
print('Total Number of Epochs = {}'.format(num_of_epochs))

# Shuffle train data
ids_train = list(X2train.keys())
random.shuffle(ids_train)
X2train_shuffled = {_id: X2train[_id] for _id in ids_train}
X2train = X2train_shuffled

# Create the train data generator
# returns [[img_features, text_features], out_word]
generator_train = data_generator(X1train, X2train, tokenizer, max_length, batch_size, config['random_seed'])
Example #21
model = tf.keras.Model(inputs, outputs)

print("compiling the model...")
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.008),
              loss=tf.keras.losses.CategoricalCrossentropy(from_logits=False),
              metrics=['accuracy'])

#number_of_epochs = 30
number_of_epochs = 10

# callbacks to implement early stopping and saving the model
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=10)
mc = ModelCheckpoint(
    monitor='val_accuracy',
    mode='max',
    verbose=1,
    save_freq='epoch',
    filepath='MobileNetV2_Handwashing_dataset.{epoch:02d}-{val_accuracy:.2f}.h5'
)

print("fitting the model...")
history = model.fit(train_ds,
                    epochs=number_of_epochs,
                    validation_data=val_ds,
                    class_weight=weights_dict,
                    callbacks=[es, mc])

# visualise accuracy
train_acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
def main():

    dataDir = 'D:/SYNS_Dataset_ForDNN3/'
    noEpochs = 50000
    batch_size = 22
    EarlyStopping_patience = 20

    #load pretrained resnet50 backbone
    model = ResNet50_Pretrained(input_shape=(300, 383, 3))

    #load Resnet50 with dropout
    #model = ResNet50_manual(input_shape = (300, 383, 3), dropoutRate = 0.4)

    model = multi_gpu_model(model, gpus=2)

    # optimizer
    optimizer_adam = tf.keras.optimizers.Adam(lr=0.00001, decay=0.01)
    #    optimizer_Nadam= tf.keras.optimizers.Nadam()

    model.compile(optimizer=optimizer_adam,
                  loss=reverseHuber,
                  metrics=[
                      metrics.mean_absolute_error, reverseHuber,
                      coeff_determination
                  ])

    print(model.metrics_names)

    #tp_X = np.random.randn(5,300,383,3)
    #tp_Y = np.random.randn(5,194,258,3)
    #y_pred = model.evaluate(tp_X,tp_Y)

    # load SYNS data (train, val and test)
    pickle_in = open(dataDir + "train_X.pickle", "rb")
    X_train = pickle.load(pickle_in)

    pickle_in = open(dataDir + "train_Y.pickle", "rb")
    Y_train = pickle.load(pickle_in)
    Y_train = np.expand_dims(Y_train, axis=3)

    pickle_in = open(dataDir + "test_X.pickle", "rb")
    X_test = pickle.load(pickle_in)

    pickle_in = open(dataDir + "test_Y.pickle", "rb")
    Y_test = pickle.load(pickle_in)
    Y_test = np.expand_dims(Y_test, axis=3)

    pickle_in = open(dataDir + "val_X.pickle", "rb")
    X_val = pickle.load(pickle_in)

    pickle_in = open(dataDir + "val_Y.pickle", "rb")
    Y_val = pickle.load(pickle_in)
    Y_val = np.expand_dims(Y_val, axis=3)

    print("number of training examples = " + str(X_train.shape[0]))
    print("number of test examples = " + str(X_test.shape[0]))
    print("X_train shape: " + str(X_train.shape))
    print("Y_train shape: " + str(Y_train.shape))
    print("X_test shape: " + str(X_test.shape))
    print("Y_test shape: " + str(Y_test.shape))

    tensorboard = TensorBoard(log_dir="logs/{}".format(time()))

    # Save the checkpoint in the /output folder
    filepath = "output/FCRN-SYNS_FlipAug_Resnet_Pretrained_optimizer_adam3-best.hdf5"

    # Keep only a single checkpoint, the best over test accuracy.
    checkpoint = ModelCheckpoint(filepath,
                                 monitor='val_mean_absolute_error',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='min')

    preds = model.evaluate(X_val, Y_val)
    preds = model.evaluate(X_test, Y_test)

    # early stopping
    es = EarlyStopping(monitor='val_mean_absolute_error',
                       mode='min',
                       verbose=1,
                       patience=EarlyStopping_patience)

    #model.fit(X_train, Y_train, epochs = 60, batch_size = 22, verbose=1, callbacks=[tensorboard, checkpoint])
    model.fit(X_train,
              Y_train,
              validation_data=(X_val, Y_val),
              epochs=noEpochs,
              batch_size=batch_size,
              shuffle=True,
              verbose=1,
              callbacks=[tensorboard, checkpoint, es])

    #evaluate on test set
    preds = model.evaluate(X_test, Y_test)
Example #23
def trainModel(model_path, train_generator, epochs, epoch_steps, output_path=None, strategy=None, loss_history_path='loss_history.csv'):
    # If no strategy is specified, create a mirrored strategy by default
    if not strategy:
        strategy = tf.distribute.MirroredStrategy()
        print(f'Number of devices: {strategy.num_replicas_in_sync}')

    # If no output path is given, overwrite the input model file with trained model
    if not output_path:
        output_path = model_path

    # Open a strategy scope and load the model
    with strategy.scope():
        model = tf.keras.models.load_model(model_path, compile=True)

    # Train the model
    model_checkpoint = ModelCheckpoint(output_path, monitor='loss', verbose=1, save_best_only=True)
    history = model.fit(train_generator, steps_per_epoch=epoch_steps, epochs=epochs, callbacks=[model_checkpoint]) # Defaults: epoch_steps=100, epochs=10
    
    # Save loss history
    loss_history = np.array(history.history['loss'])
    np.savetxt(loss_history_path, loss_history, delimiter=",")



if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-m', '--model_path', help='Path to model file', required=True)
    parser.add_argument('-i', '--image_path', help='Path to images', required=True)
    parser.add_argument('-c', '--class_path', help='Path to masks', required=True)
    parser.add_argument('-e', '--epochs', help='Number of epochs', type=int, default=10)
    parser.add_argument('-s', '--epoch_steps', help='Number of steps per epoch', type=int, default=100)
input_shape = (HEIGHT, WIDTH, NUM_CHANNELS)
img_input = tf.keras.layers.Input(shape=input_shape)

model = resnet.resnet56(img_input=img_input, classes=NUM_CLASSES)

# define optimizer
sgd = tf.keras.optimizers.SGD(lr=0.1)
model.compile(optimizer=sgd, loss='sparse_categorical_crossentropy', metrics=['accuracy'])

# checkpoint
outputFolder = './output-cifar'
if not os.path.exists(outputFolder):
    os.makedirs(outputFolder)
filepath=outputFolder+"/model-{epoch:02d}-{val_accuracy:.2f}.hdf5"
checkpoint_callback = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, \
                             save_best_only=False, save_weights_only=False, \
                             mode='auto', save_freq='epoch')

# train the model for the first time
model.fit(train_dataset,
          epochs=NUM_EPOCHS_1, callbacks=[checkpoint_callback],
          validation_data=test_dataset,
          validation_freq=1)

# resume training from the checkpoint
model_info = model.fit(train_dataset,
                       epochs=NUM_EPOCHS_2, callbacks=[checkpoint_callback],
                       validation_data=test_dataset,
                       validation_freq=1,
                       initial_epoch = INIT_EPOCH_2)
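
To genuinely resume from a saved checkpoint rather than from the in-memory weights, the saved .hdf5 can be reloaded before the second fit. A sketch; the exact filename below is hypothetical and just follows the 'model-{epoch:02d}-{val_accuracy:.2f}.hdf5' pattern used above:

model = tf.keras.models.load_model(outputFolder + "/model-10-0.85.hdf5")  # hypothetical checkpoint name
model_info = model.fit(train_dataset,
                       epochs=NUM_EPOCHS_2, callbacks=[checkpoint_callback],
                       validation_data=test_dataset,
                       validation_freq=1,
                       initial_epoch=INIT_EPOCH_2)
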
# split into samples
X, y = split_sequence(raw_seq, n_steps_in, n_steps_out)
# summarize the data
#for i in range(len(X)):
#	print(X[1], y[1])

# reshape from [samples, timesteps] into [samples, timesteps, features]
n_features = 1
X = X.reshape((X.shape[0], X.shape[1], n_features))
y = y.reshape(y.shape[0], y.shape[1])

model = Sequential()
model.add(
    LSTM(100,
         activation='relu',
         return_sequences=True,
         input_shape=(n_steps_in, n_features)))
model.add(LSTM(100, activation='relu'))
model.add(Dense(n_steps_out))
model.compile(optimizer='adam', loss='mse')

# checkpoint
filepath = "saturweights/text-gen-best.hdf5"

# Keep only a single checkpoint, the best over test accuracy.
checkpoint = ModelCheckpoint(filepath,
                             monitor='loss',
                             verbose=1,
                             save_best_only=True,
                             mode='min')
model.fit(X, y, epochs=100, verbose=1, callbacks=[checkpoint])
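
`split_sequence` used at the top of this snippet is not shown; a minimal sketch of the multi-step windowing it is assumed to perform, producing `n_steps_in` input steps and `n_steps_out` output steps per sample:

import numpy as np

def split_sequence(sequence, n_steps_in, n_steps_out):
    X, y = [], []
    for i in range(len(sequence)):
        in_end = i + n_steps_in
        out_end = in_end + n_steps_out
        if out_end > len(sequence):
            break
        # input window followed by the output window
        X.append(sequence[i:in_end])
        y.append(sequence[in_end:out_end])
    return np.array(X), np.array(y)
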
IMG_SHAPE = (IMG_WIDTH, IMG_HEIGHT, 3)
model = build_model(IMG_SHAPE)
model.summary()
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# tensorboard
#logs="C:/Users/bhaga/Desktop/logs"
log_file_name = f'brain_tumor_detection_cnn_{int(time.time())}/'
logs_log_dir = os.path.join("C:/Users/bhaga/Desktop/6th sem project/logs/",log_file_name)
tensorboard = TensorBoard(log_dir=logs_log_dir, profile_batch = 100000000)
# checkpoint
# unique file name that will include the epoch and the validation (development) accuracy
filepath="cnn-parameters-improvement-{epoch:02d}-{val_accuracy:.2f}"
print(filepath)
# save the model with the best validation (development) accuracy till now

checkpoint = ModelCheckpoint("C:/Users/bhaga/Desktop/6th sem project/models/{}.model/".format(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max'))
start_time = time.time()

model.fit(x=X_train, y=y_train, batch_size=32, epochs=10, validation_data=(X_val, y_val), callbacks=[tensorboard, checkpoint])

end_time = time.time()
execution_time = (end_time - start_time)
print(f"Elapsed time: {hms_string(execution_time)}")

start_time = time.time()

model.fit(x=X_train, y=y_train, batch_size=32, epochs=7, validation_data=(X_val, y_val), callbacks=[tensorboard, checkpoint])

end_time = time.time()
execution_time = (end_time - start_time)
print(f"Elapsed time: {hms_string(execution_time)}")
    shuffle=True,
    seed=42)


tmp = pd.DataFrame(columns=['ClassId', 'ModelId', 'SignName'])
csv_data = pd.read_csv(CSV_PATH)
for i, item in csv_data.iterrows():
    tmp.loc[i] = [item['ClassId'], train_generator.class_indices[str(item['ClassId'])], item['SignName']]
tmp.to_csv(CSV_PATH, sep=',', index = False)

model = call_model.build_cnn((WIDTH, HEIGHT, 3), 43)

steps_per_epoch=train_generator.n//train_generator.batch_size
val_steps=validation_generator.n//validation_generator.batch_size+1

modelCheckpoint = ModelCheckpoint(MODEL_PATH, monitor='val_loss', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', period=1)
earlyStop = EarlyStopping(monitor='val_loss', min_delta=0.001, patience=6, verbose=0, mode='auto')

callbacks_list = [modelCheckpoint, earlyStop]

history = model.fit_generator(
    train_generator,
    workers=6,
    epochs=EPOCHS,
    verbose=1,
    steps_per_epoch=steps_per_epoch,
    validation_steps=val_steps,
    validation_data=validation_generator,
    callbacks=callbacks_list,
    shuffle=True)
# model.add(Dropout(0.2))
model.add(Activation('relu'))
model.add(Dense(256))
model.add(Dense(10, activation='softmax'))

# 3. Compile and train
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['acc'])

es = EarlyStopping(monitor='val_loss', mode='min', patience=6)

modelpath = './model/cancer-{epoch:02d}-{val_loss:.4f}.hdf5'
cp = ModelCheckpoint(filepath=modelpath,
                     save_weights_only=True,
                     save_best_only=True,
                     monitor='val_loss',
                     verbose=1)

reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                              patience=3,
                              factor=0.5,
                              verbose=1)

model.fit(x_train,
          y_train,
          epochs=30,
          batch_size=32,
          verbose=1,
          validation_split=0.5,
          callbacks=[es, cp])
Example #29
model = Sequential()
model.add(Dense(1024, activation='relu', input_shape=(2048, )))
model.add(Dropout(0.5))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(3, activation='softmax'))

# define a checkpoint callback that saves the weights of the best model
from tensorflow.keras.callbacks import ModelCheckpoint

mcp_save = ModelCheckpoint('weight_80_90000.hdf5',
                           save_best_only=True,
                           monitor='val_loss',
                           mode='min')

# compiling the model
model.compile(loss='categorical_crossentropy',
              optimizer='Adam',
              metrics=['accuracy'])

y_train = y_train.to_numpy(copy=False)
y_test = y_test.to_numpy(copy=False)

y_train = y_train.astype('float32')
y_test = y_test.astype('float32')

# training the model
history = model.fit(X_train,
Example #30
x_train = x_train.reshape(60000, 28, 28, 1).astype('float32') / 255.
x_test = x_test.reshape(10000, 28, 28, 1).astype('float32') / 255.

from tensorflow.keras.utils import to_categorical
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)

model = ak.ImageClassifier(overwrite=True,
                           max_trials=2,
                           loss='mse',
                           metrics=['acc'])

from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
es = EarlyStopping(patience=6, verbose=1)
lr = ReduceLROnPlateau(factor=0.5, patience=3, verbose=1)
mc = ModelCheckpoint('c:/data/modelcheckpoint/auto_checkpoint1.hdf5',
                     save_best_only=True,
                     save_weights_only=True,
                     verbose=1)

model.fit(x_train,
          y_train,
          epochs=20,
          validation_split=0.2,
          callbacks=[es, lr, mc])
results = model.evaluate(x_test, y_test)

print(results)

# [0.03433186933398247, 0.989799976348877]
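
AutoKeras can export the best pipeline found during the search as a plain Keras model; a short sketch (the output path is illustrative):

best_model = model.export_model()
best_model.save('c:/data/modelcheckpoint/autokeras_best', save_format='tf')
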