Example #1
                                              patience=10,
                                              verbose=1)
early_stopping = keras.callbacks.EarlyStopping(monitor='val_loss',
                                               min_delta=0,
                                               patience=20,
                                               verbose=1)

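# The YOLO loss is computed inside the graph and returned as the model output
# ('yolo_loss'), so the compiled loss below is simply an identity on y_pred.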
model = keras.models.Model([model_yolo.input, *y_true], model_loss)
model.compile(optimizer=keras.optimizers.Adam(1e-4),
              loss={
                  'yolo_loss': lambda y_true, y_pred: y_pred
              })

g_train = data_generator(label_lines=train_lines,
                         batch_size=config.batch_size,
                         input_shape=config.image_input_shape,
                         anchors=config.anchors,
                         num_classes=config.num_classes)

g_valid = data_generator(label_lines=valid_lines,
                         batch_size=config.batch_size,
                         input_shape=config.image_input_shape,
                         anchors=config.anchors,
                         num_classes=config.num_classes)
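# For reference only: a hypothetical sketch of the generator contract the fit()
# call below assumes. The function name, shapes and helper logic here are
# assumptions for illustration, not taken from the original data_generator.
def _data_generator_sketch(label_lines, batch_size, input_shape, anchors, num_classes):
    import numpy as np
    h, w = input_shape  # assumed (height, width), e.g. (416, 416)
    grid_shapes = [(h // s, w // s) for s in (32, 16, 8)]  # one grid per detection scale
    while True:
        images = np.zeros((batch_size, h, w, 3), dtype=np.float32)
        y_true = [np.zeros((batch_size, gh, gw, 3, 5 + num_classes), dtype=np.float32)
                  for gh, gw in grid_shapes]
        # A real generator would fill images and y_true from label_lines and anchors,
        # then pair them with a dummy zero target for the identity 'yolo_loss'.
        yield [images, *y_true], np.zeros(batch_size)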
print('fire!')
model.fit(g_train,
          validation_data=g_valid,
          steps_per_epoch=len(label_lines) // config.batch_size,
          validation_steps=int(
              len(label_lines) * config.validation_split * 0.2),
          epochs=config.epochs,
Example #2
model.add(Convolution2D(300, (3, 3), activation='relu', padding='same'))

model.add(Dropout(0.1))
model.add(Convolution2D(2, (1, 1)))  # 1x1 conv produces the 2-channel per-class score maps (the FCN "score" layer)
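# The Conv2DTranspose below is the FCN "upscore" step: it upsamples the score
# maps by a factor of 16 back to input resolution with a per-pixel softmax.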
model.add(
    Conv2DTranspose(2, (31, 31),
                    strides=(16, 16),
                    activation='softmax',
                    padding='same'))

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

csvlogger = CSVLogger('training_0823.log', append=True)
model_check = ModelCheckpoint('model_0823.h5')

model.fit_generator(data_generator(train_set),
                    steps_per_epoch=100,
                    epochs=epochs,
                    validation_data=data_generator(valid_set),
                    validation_steps=20,
                    verbose=1,
                    workers=10,
                    max_q_size=1,
                    callbacks=[csvlogger, model_check],
                    initial_epoch=initial_epoch)

model.save('model_0823.h5')
Example #3
    def get_image_path(self, id):
        return self._images_pathes[id]


if __name__ == '__main__':
    import matplotlib.pyplot as plt
    from generator import data_generator
    from config import Config
    data_path = '../data/First_round_data/jinnan2_round1_train_20190305'

    cfg = Config()

    dataset_tra = JinNan(data_path)
    dataset_tra.load_JinNan(val=False)
    dataset_tra_gen = data_generator(dataset_tra, cfg)

    dataset_val = JinNan(data_path)
    dataset_val.load_JinNan(val=True)
    dataset_val_gen = data_generator(dataset_val, cfg)

    print(len(dataset_tra))
    print(len(dataset_val))
    count = 0
    for i in range(len(dataset_val)):
        # try:
        label = dataset_val.load_label(i)
        count += label
        print(count)
        # except:
        #     print(i)
Example #4
            print("batch_time: %dms - batch_loss: %.4f" % ((batch_end - batch_start)*1000, batch_loss))

        epoch_end = time.time()
        # Print the epoch summary
        print("batchs: %d - epoch_time: %ds %dms/batch - loss: %.4f" %
              (batchs, epoch_end - epoch_start,
               (epoch_end - epoch_start) * 1000 / batchs, total_loss / batchs))
        
        # Save a checkpoint every save_interval epochs, as set in the config JSON
        if epoch % save_interval == 0:
            manager.save()


if __name__ == "__main__":
    configs = get_config()

    epochs = configs["train"]["train_epochs"]
    data_path = configs["train"]["data_path"]
    num_examples = configs["train"]["num_examples"]
    dataset_name = configs["preprocess"]["dataset_name"]
    batch_size = configs["train"]["batch_size"]

    # Load the data and build the data generator
    train_data = load_data(dataset_name, data_path, "train", num_examples)
    batchs = ceil(len(train_data[0]) / batch_size)
    train_data_generator = data_generator(train_data, "train", batchs, batch_size)

    # Load the model
    model = get_ds2_model()
    optimizer = tf.keras.optimizers.Adam()
    # Train
    train(model, optimizer, train_data_generator, batchs, epochs)
    
Example #5
def train(data_dir, hyperparams):
    """Trains a pre-trained CNN by transfer learning and fine-tuning
       Log training metrics for evaluation and model weights to Weights and Biases
    """

    # Init the wandb project
    wandb.init(project="mask_cv_model")

    logging.info("Defining and Building the Conv Net")
    # Define and load the CNN
    cnn = conv_net(
        pre_trained_model=hyperparams["pre_trained"],
        n_classes=hyperparams["n_classes"],
        dropout=hyperparams["dropout"],
        weights=hyperparams["weights"],
    )
    # Define your base pre-trained network
    baseModel = cnn.define_base_network()
    # Define the head network (fully connected layers for the task)
    cnn.define_head_network()

    # get the CNN model
    cnn_model = cnn.get_cnn()

    logging.info("Setting up data generators")
    # Set up data generators
    training_set, validation_set = data_generator(
        data_dir,
        batch_size=hyperparams["batch_size"],
        validation_size=hyperparams["validation_size"],
        horizontal_flip=hyperparams["horizontal_flip"],
        zoom_range=hyperparams["zoom_range"],
        shear_range=hyperparams["shear_range"],
        class_mode=hyperparams["class_mode"],
        target_size=(224, 224),
    )

    # Define early stopping criteria
    earlystop = EarlyStopping(monitor="val_loss",
                              min_delta=0.0001,
                              patience=3,
                              verbose=1,
                              mode="auto")

    # model checkpointing
    checkpoint = ModelCheckpoint(WEIGHTS_PATH,
                                 monitor="val_loss",
                                 verbose=1,
                                 save_best_only=True,
                                 mode="min")  # val_loss should be minimized, so mode is "min"

    logging.info("Fitting dense layers - transfer learning")
    # Freeze the base model (pre-trained network) weights and only train the dense layers
    for layer in baseModel.layers:
        layer.trainable = False

    # compile the model
    cnn_model.compile(
        optimizer=Adam(lr=hyperparams["lr_dense"],
                       decay=hyperparams["decay_dense"]),
        loss=hyperparams["loss"],
        metrics=["accuracy"],
    )

    # fit the generator and set callbacks
    cnn_model.fit_generator(
        training_set,
        steps_per_epoch=training_set.__len__(),
        epochs=hyperparams["epochs_dense"],
        validation_data=validation_set,
        validation_steps=validation_set.__len__(),
        use_multiprocessing=hyperparams["multiprocessing"],
        workers=8,
        callbacks=[earlystop, checkpoint,
                   WandbCallback()],
    )

    logging.info("Fitting and fine-tuning the model")
    # Unfreeze the upper layers of the base model (from layer 15 onward) to fine-tune it for the task
    for layer in baseModel.layers[15:]:
        layer.trainable = True

    # compile the model
    cnn_model.compile(
        optimizer=Adam(lr=hyperparams["lr_finetune"],
                       decay=hyperparams["decay_finetune"]),
        loss=hyperparams["loss"],
        metrics=["accuracy"],
    )

    # fit the generator and set callbacks
    cnn_model.fit_generator(
        training_set,
        epochs=hyperparams["epochs_finetune"] + hyperparams["epochs_dense"],
        steps_per_epoch=training_set.__len__(),
        validation_data=validation_set,
        validation_steps=validation_set.__len__(),
        initial_epoch=hyperparams["epochs_dense"],
        use_multiprocessing=hyperparams["multiprocessing"],
        workers=8,
        callbacks=[earlystop, checkpoint,
                   WandbCallback()],
    )

    logging.info("Logging final and the best model to wandb")
    # Save and log the model to wandb
    cnn_model.save(os.path.join(wandb.run.dir, MODEL_PATH))
    K.clear_session()
Example #6
x = GlobalAveragePooling2D()(x)
x = Dense(FC_SIZE, activation='relu')(x)  # new FC layer, random init
predictions = Dense(classes, activation='softmax')(x)  # new softmax layer
model = Model(inputs=base_model.input, outputs=predictions)

# Transfer learning: freeze the pre-trained base layers and train only the new head
for layer in base_model.layers:
    layer.trainable = False
model.compile(optimizer=SGD(lr=0.05, momentum=0.9, decay=0.5),
              loss='categorical_crossentropy',
              metrics=['accuracy'])

csvlogger = CSVLogger('training_0823.log', append=True)
model_check = ModelCheckpoint('model_0823.h5')

model.fit_generator(data_generator(train_set, batch=8),
                    steps_per_epoch=10,
                    epochs=epochs,
                    validation_data=data_generator(valid_set, batch=8),
                    validation_steps=2,
                    verbose=1,
                    workers=20,
                    max_q_size=1,
                    callbacks=[csvlogger, model_check],
                    initial_epoch=initial_epoch)

model.save('model_0823.h5')

# Fine-tuning: freeze the bottom NB_IV3_LAYERS_TO_FREEZE layers
for layer in model.layers[:NB_IV3_LAYERS_TO_FREEZE]:
    layer.trainable = False
Example #7

if __name__ == '__main__':
    from data import JinNan
    from generator import data_generator
    from config import Config
    # we need to recompile the model for these modifications to take effect
    # we use SGD with a low learning rate

    from keras.optimizers import SGD
    data_path = '../data/First_round_data/jinnan2_round1_train_20190305'
    dataset = JinNan(data_path)
    dataset.load_JinNan()

    cfg = Config()
    data_gen = data_generator(dataset, cfg)

    ceptionV3 = inception_V3(2)

    # let's visualize layer names and layer indices to see how many layers
    # we should freeze:
    for i, layer in enumerate(ceptionV3.layers):
        print(i, layer.name)

    # we chose to train the top 2 inception blocks, i.e. we will freeze
    # the first 249 layers and unfreeze the rest:
    for layer in ceptionV3.layers[:249]:
        layer.trainable = False
    for layer in ceptionV3.layers[249:]:
        layer.trainable = True
    ceptionV3.compile(optimizer=SGD(lr=0.0001, momentum=0.9),
Example #8
    period=1)
reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                              factor=0.1,
                              patience=3,
                              verbose=1)
early_stopping = EarlyStopping(monitor='val_loss',
                               min_delta=0,
                               patience=10,
                               verbose=1)

model = Model([model_yolo.input, *y_true], model_loss)
model.compile(optimizer=Adam(1e-3),
              loss={
                  'yolo_loss': lambda y_true, y_pred: y_pred
              })
model.fit_generator(
    generator=data_generator(label_lines=train_lines,
                             batch_size=config.batch_size,
                             input_shape=config.image_input_shape,
                             anchors=anchors,
                             num_classes=num_classes),
    validation_data=data_generator(label_lines=valid_lines,
                                   batch_size=config.batch_size,
                                   input_shape=config.image_input_shape,
                                   anchors=anchors,
                                   num_classes=num_classes),
    steps_per_epoch=len(label_lines) // config.batch_size,
    validation_steps=int(len(label_lines) * config.validation_split),
    epochs=config.epochs,
    callbacks=[logging, checkpoint, reduce_lr, early_stopping])
model_yolo.save('model_yolo.h5')
Example #9
    model.load_weights('pit_model/mobile-032-0.88355.hdf5')
    print("...Previous weight data...")
except:
    print("...New weight data...")
    pass

model.summary()

# Define callbacks
os.makedirs('model', exist_ok=True)
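# The checkpoint filename embeds the epoch number and validation IoU via ModelCheckpoint's name formatting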
weight_path = "model/pitmodel_1line-{epoch:03d}-{val_iou:.5f}.hdf5"
checkpoint = ModelCheckpoint(filepath=weight_path,
                             verbose=0,
                             monitor='val_iou',
                             save_best_only=False,
                             save_weights_only=False,
                             mode='auto',
                             period=1)

tensorboard = TensorBoard(log_dir="/logs/pitmodel_1line{}".format(time.time()),
                          batch_size=TRAIN_BATCH,
                          write_images=True)

train_check = TrainCheck(output_path='./img', model_name=model_name)

# training
history = model.fit_generator(data_generator('../city/data_mix1_line.h5', TRAIN_BATCH, 'train'),
                              steps_per_epoch=65934 // TRAIN_BATCH,
                              validation_data=data_generator('../city/data_mix1_line.h5', VAL_BATCH, 'val'),
                              validation_steps=1500 // VAL_BATCH,
                              callbacks=[checkpoint, train_check, tensorboard],
                              epochs=2000,
                              verbose=1,
                              initial_epoch=1)
Example #10
    manager = tf.train.CheckpointManager(
        checkpoint,
        directory=configs["checkpoint"]['directory'],
        max_to_keep=configs["checkpoint"]['max_to_keep'])
    if manager.latest_checkpoint:
        checkpoint.restore(manager.latest_checkpoint)

    test_data_path = configs["test"]["data_path"]
    num_examples = configs["test"]["num_examples"]
    dataset_name = configs["preprocess"]["dataset_name"]
    batch_size = configs["test"]["batch_size"]

    # Build the data generator for the test set
    test_data = load_data(dataset_name, test_data_path, "test", num_examples)
    batchs = ceil(len(test_data[0]) / batch_size)
    test_data_generator = data_generator(test_data, "test", batchs, batch_size)

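    # Running totals for word error rate (WER), label error rate (LER), and normalized LER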
    aver_wers = 0
    aver_lers = 0
    aver_norm_lers = 0

    # Build the character set (index-to-word mapping)
    index_word = get_index_word()

    for batch, (input_tensor, labels_list) in zip(range(1, batchs + 1),
                                                  test_data_generator):
        originals = labels_list
        results = []
        y_pred = model(input_tensor)
        output = tf.keras.backend.ctc_decode(
            y_pred=y_pred,
Example #11
print("Processing video.")
fname = "data/train.mp4"
try:
    f, ext = splitext(fname)
    with np.load(f + '_op.npz') as data:
        video = data['arr_0']
except:
    print("Could not find preprocessed video, creating it now")
    video = denseflow(fname, 4)

width = video.shape[2]
height = video.shape[1]
video_size = len(video)

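# Split the optical-flow frames and matching speed labels into train/validation sets by index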
train_gen = data_generator(video[:int(video_size * split)],
                           speeds[:int(video_size * split)],
                           batch_size, sequence_length)
val_gen = data_generator(video[int(video_size * split):],
                         speeds[int(video_size * split):],
                         batch_size, sequence_length)
pred_gen = prediction_generator(video, sequence_length)

# Will return a feature and label set.
# Features are a list of image sequences in the form: (sequence_length, img_height, img_width, dimensions)
inputs = Input((sequence_length, height, width, 3))

# A convolution applied to each frame separately (temporal kernel size 1)
x = Conv3D(32, (1, 3, 3), strides=(1, 2, 2), activation=None)(inputs)
x = LeakyReLU(alpha=0.1)(x)
x = BatchNormalization()(x)
x = Conv3D(32, (3, 3, 3), strides=(2, 2, 2), activation=None)(x)
x = LeakyReLU(alpha=0.1)(x)
x = BatchNormalization()(x)
x = Conv3D(32, (3, 3, 3), strides=(2, 2, 2), activation=None)(x)