Example #1
        # Attach an epoch-indexed learning-rate scheduler built from get_lr
        callback = LearningRateScheduler(get_lr)
        callbacks.append(callback)

    return callbacks


model = Unet(input_shape, num)
model.summary()
if os.path.exists(model_path):  # resume training from saved weights
    model.load_weights(model_path)
callbacks = make_callbacks()
path = r'F:\cmm\yumi\pic'
train_set, val_set = get_train_val(path, 'tif')
train = train_data(train_set, batch_size)
val = val_data(val_set, batch_size)
alltrain = len(train_set)
allval = len(val_set)
metrics = [loss.mean_iou]  # track mean IoU as the validation metric
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=metrics)
history = model.fit_generator(generator=train,
                              steps_per_epoch=alltrain // batch_size,
                              epochs=epochs,
                              verbose=1,
                              validation_data=val,
                              validation_steps=allval // batch_size,
                              callbacks=callbacks,
                              max_queue_size=1)
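
Example #1 leaves the get_lr schedule function to the surrounding code. A minimal sketch of what it might look like, assuming a simple step decay; the base rate and the decay epochs below are illustrative placeholders, not values from the original source:

# Hypothetical step-decay schedule for LearningRateScheduler
# (illustrative values only, not from the original code).
def get_lr(epoch):
    base_lr = 1e-3           # assumed starting rate
    if epoch < 20:
        return base_lr
    elif epoch < 40:
        return base_lr * 0.1  # drop by 10x after 20 epochs
    return base_lr * 0.01     # and again after 40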
Example #2
    def train_model(train_data,
                    val_data,
                    num_train,
                    num_val,
                    epochs,
                    callback=True):  # callback: whether to log the run to TensorBoard
        if model_name == "deeplabv3":
            model = Deeplabv3((HEIGHT, WIDTH, 3), NCLASSES, bone_name)
        elif model_name == 'unet':
            model = Unet((HEIGHT, WIDTH, 3), NCLASSES, bone_name)
        elif model_name == 'pspnet':
            model = PSPnet((HEIGHT, WIDTH, 3), NCLASSES, bone_name)
        else:
            raise ValueError('Unknown model_name: %s' % model_name)

        epoch = 0
        if init_with == 'first':  # init_with in ['first', 'last']: how to initialize the model
            print('-' * 100)
            # model.load_weights(weights_path, by_name=True, skip_mismatch=True)  # load pretrained weights
            print('Training the model from scratch...')
        else:
            model.load_weights(find_last(log_path), by_name=True)
            epoch = int(
                os.path.basename(find_last(log_path))[:-3].split('_')[-1])
            epochs = epoch + epochs  # so that epochs counts the total rounds actually trained after a resume
            print('-' * 100)
            print('Loaded the latest checkpoint; resuming training from epoch %s...' % epoch)

        # Log the training run for TensorBoard
        tbCallBack = TensorBoard(log_dir=os.path.join(log_path, "records"),
                                 histogram_freq=0,
                                 write_graph=True,
                                 write_images=True)

        # Checkpointing: evaluate every epoch, keep only weights that improve val_loss
        checkpoint_path = os.path.join(
            log_path, '%s_%s_*epoch*.h5' % (model_name, bone_name))
        checkpoint_path = checkpoint_path.replace("*epoch*", "{epoch:04d}")
        checkpoint_period = ModelCheckpoint(checkpoint_path,
                                            monitor='val_loss',
                                            save_weights_only=True,
                                            save_best_only=True,
                                            period=1)

        # LR schedule: halve the learning rate after val_loss plateaus for 5 epochs
        reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                      factor=0.5,
                                      patience=5,
                                      verbose=1)

        # Early stopping: a long val_loss plateau means the model is essentially trained, so stop
        early_stopping = EarlyStopping(monitor='val_loss',
                                       min_delta=0,
                                       patience=15,
                                       verbose=1)

        callbacks = [checkpoint_period, reduce_lr, early_stopping]
        if callback:
            tbCallBack.set_model(model)
            callbacks.append(tbCallBack)

        print('Training on {} images, validating on {} images.'.format(num_train, num_val))
        print('Batch size: {} images, image size: {}'.format(batch_size, (HEIGHT, WIDTH)))
        print('Starting to train the %s model with a %s backbone; please be patient and watch the logs...' %
              (model_name, bone_name))  # start training
        print('-' * 100)

        model.compile(loss=focal_loss,
                      optimizer=Adam(lr=1e-3),
                      metrics=['accuracy'])
        model.fit_generator(generate_arrays_from_file(train_data, batch_size),
                            steps_per_epoch=max(1, num_train // batch_size),
                            validation_data=generate_arrays_from_file(
                                val_data, batch_size),
                            validation_steps=max(1, num_val // batch_size),
                            epochs=epochs,
                            initial_epoch=epoch,
                            shuffle=True,  # only applies to Sequence inputs; no effect with a plain generator
                            callbacks=callbacks)
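
Example #2 depends on a find_last helper to locate the newest checkpoint under log_path. A minimal sketch, assuming the checkpoints are the {epoch:04d}-suffixed .h5 files written by the ModelCheckpoint above, so that the zero-padded epoch makes lexicographic order match training order; this helper is an assumption, not the original implementation:

import os

# Hypothetical helper: return the newest .h5 checkpoint in log_path.
# Relies on the ..._{epoch:04d}.h5 naming, so sorting by filename
# sorts by epoch number.
def find_last(log_path):
    checkpoints = sorted(f for f in os.listdir(log_path) if f.endswith('.h5'))
    if not checkpoints:
        raise FileNotFoundError('no .h5 checkpoints in %s' % log_path)
    return os.path.join(log_path, checkpoints[-1])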