Exemplo n.º 1
0
def test_densenet(modelpath, batch_size):
    """Evaluate a saved DenseNet weight file on a batch of test data.

    Args:
        modelpath: path to an .h5 weights file produced by training.
        batch_size: number of test samples to fetch and evaluate.
    """
    loader = DataLoader()
    model = DenseNet.build_densenet()
    model.compile(loss='sparse_categorical_crossentropy', metrics=['accuracy'])
    # Create the model's variables at the expected input shape so that
    # the weights can be loaded into them.
    model.build((1, 64, 64, 3))
    model.load_weights(modelpath)
    images, labels = loader.get_batch_test(batch_size)
    model.evaluate(images, labels, verbose=2)
Exemplo n.º 2
0
def train_densenet(batch_size, epoch):
    """Train a DenseNet classifier with on-the-fly image augmentation.

    Args:
        batch_size: mini-batch size fed by the augmentation generator.
        epoch: total number of training epochs.

    Side effects:
        Writes '{epoch}_epoch_densenet_weight.h5' (weights only) at the
        end of every epoch via a ModelCheckpoint callback.
    """
    dataLoader = DataLoader()
    # build callbacks: save weights (only) once per epoch.
    checkpoint = tf.keras.callbacks.ModelCheckpoint(
        '{epoch}_epoch_densenet_weight.h5',
        save_weights_only=True,
        verbose=1,
        save_freq='epoch')
    # build model. 'learning_rate' replaces the deprecated 'lr' alias.
    net = DenseNet.build_densenet()
    net.compile(
        optimizer=tf.keras.optimizers.Adam(learning_rate=0.001, decay=1e-6),
        loss='sparse_categorical_crossentropy',
        metrics=['accuracy'])

    # Augmentation configuration; full parameter reference:
    # https://tensorflow.google.cn/api_docs/python/tf/keras/preprocessing/image/ImageDataGenerator?hl=en
    data_generate = ImageDataGenerator(
        featurewise_center=False,             # don't zero-center over the whole dataset
        samplewise_center=False,              # don't zero-center each sample
        featurewise_std_normalization=False,  # no dataset-wide std normalization
        samplewise_std_normalization=False,   # no per-sample std normalization
        zca_epsilon=1e-6,                     # epsilon for ZCA whitening (default 1e-6)
        zca_whitening=False,                  # ZCA whitening disabled
        rotation_range=10,                    # random rotation up to 10 degrees
        width_shift_range=0.1,                # horizontal shift as fraction of width (>1 means pixels)
        height_shift_range=0.1,               # vertical shift as fraction of height (>1 means pixels)
        shear_range=0.,                       # no shearing
        zoom_range=0.1,                       # random zoom up to 10%
        channel_shift_range=0.,               # no random channel shifts
        fill_mode='nearest',                  # fill outside pixels with nearest value (also: constant/reflect/wrap)
        cval=0.,                              # fill value, used only when fill_mode='constant'
        horizontal_flip=True,                 # random horizontal flips
        vertical_flip=False,                  # no vertical flips
        rescale=None,                         # no rescaling (None or 0 disables it)
        preprocessing_function=None,          # optional per-input function
        data_format='channels_last',          # image data format
        validation_split=0.0
      )
    # Adapted from: https://www.jianshu.com/p/1576da1abd71

    train_images, train_labels = dataLoader.get_batch_train(60000)
    # NOTE: Model.fit ignores 'shuffle' when x is a generator, so it is not
    # passed here; shuffling is done by the generator itself (shuffle=True).
    net.fit(
        data_generate.flow(train_images, train_labels,
            batch_size=batch_size,
            shuffle=True,
        ),
        steps_per_epoch=len(train_images) // batch_size,
        epochs=epoch,
        callbacks=[checkpoint])
Exemplo n.º 3
0
def train_densenet(batch_size, epoch):
    """Train a DenseNet classifier with augmentation, checkpointing the best weights.

    Args:
        batch_size: mini-batch size fed by the augmentation generator.
        epoch: total number of training epochs.

    Side effects:
        Writes './weight/{epoch}_epoch_densenet_weight.h5' (weights only)
        whenever the monitored training loss improves.
    """
    dataLoader = DataLoader()
    # build callbacks. monitor='loss' is required: there is no validation
    # data in fit() below, so the default monitor ('val_loss') would never
    # be available and save_best_only=True would never save anything.
    checkpoint = tf.keras.callbacks.ModelCheckpoint(
        f'./weight/{epoch}_epoch_densenet_weight.h5',
        monitor='loss',
        save_best_only=True,
        save_weights_only=True,
        verbose=1,
        save_freq='epoch')
    # build model. 'learning_rate' replaces the deprecated 'lr' alias.
    net = DenseNet.build_densenet()
    net.compile(
        tf.keras.optimizers.Adam(learning_rate=0.001, decay=1e-6),
        loss='sparse_categorical_crossentropy',
        metrics=['accuracy'])

    # Standard light augmentation: small rotations, shifts, zooms and
    # horizontal flips; no normalization or whitening.
    data_generate = ImageDataGenerator(
        featurewise_center=False,
        samplewise_center=False,
        featurewise_std_normalization=False,
        samplewise_std_normalization=False,
        zca_epsilon=1e-6,
        zca_whitening=False,
        rotation_range=10,
        width_shift_range=0.1,
        height_shift_range=0.1,
        shear_range=0.,
        zoom_range=0.1,
        channel_shift_range=0,
        fill_mode='nearest',
        cval=0.,
        horizontal_flip=True,
        vertical_flip=False,
        rescale=None,
        preprocessing_function=None,
        data_format='channels_last',
        validation_split=0.0)

    train_images, train_labels = dataLoader.get_batch_train(60000)
    # NOTE: Model.fit ignores 'shuffle' when x is a generator, so it is not
    # passed here; shuffling is done by the generator itself (shuffle=True).
    net.fit(data_generate.flow(train_images, train_labels,
                               batch_size=batch_size, shuffle=True),
            steps_per_epoch=len(train_images) // batch_size,
            epochs=epoch,
            callbacks=[checkpoint])