Example No. 1
def main():
    im_height = 224
    im_width = 224

    # load image
    # img_path = "../tulip.jpg"
    img_path = ".\\tensorflow_classification\\Test2_alexnet\\11.jpg"
    assert os.path.exists(img_path), "file: '{}' does not exist.".format(
        img_path)
    img = Image.open(img_path)  # open the image

    # resize image to 224x224
    img = img.resize((im_width, im_height))  # resize to 224x224
    plt.imshow(img)

    # scaling pixel value to (0-1)
    img = np.array(img) / 255.  # scale pixel values to the range 0-1

    # Add the image to a batch where it's the only member.
    img = np.expand_dims(img, 0)  # add a batch dimension: B x H x W x C

    # read class_indict
    json_path = './class_indices.json'
    assert os.path.exists(json_path), "file: '{}' does not exist.".format(
        json_path)

    with open(json_path, "r") as json_file:
        class_indict = json.load(json_file)  # read the class-index mapping

    # create model
    model = AlexNet_v1(num_classes=5)  # instantiate the model
    weights_path = "./save_weights/myAlex.h5"
    assert os.path.exists(weights_path), "file: '{}' does not exist.".format(
        weights_path)
    model.load_weights(weights_path)  # load the trained weights

    # prediction
    result = np.squeeze(
        model.predict(img))  # predict, then squeeze out the batch dimension to get the probability distribution
    predict_class = np.argmax(result)  # index of the most probable class

    print_res = "class: {}   prob: {:.3}".format(
        class_indict[str(predict_class)],  # map the index to a class name via the json file
        result[predict_class])
    plt.title(print_res)
    print(print_res)
    plt.show()
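The script assumes a class_indices.json file produced by the training examples further below (Examples No. 4-6). Below is a minimal sketch of the structure that file is expected to have, assuming the 5-class flower dataset used throughout; the class names are illustrative only.

import json

# Illustrative contents only: stringified class indices mapped to class names,
# i.e. the inverted class_indices dict that the training scripts dump.
example_indices = {
    "0": "daisy",
    "1": "dandelion",
    "2": "roses",
    "3": "sunflowers",
    "4": "tulips",
}
print(json.dumps(example_indices, indent=4))

# The prediction code then looks up class_indict[str(predict_class)].
print(example_indices[str(3)])  # -> "sunflowers"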
Example No. 2
def main():
    im_height = 224
    im_width = 224

    # load image
    img_path = "../tulip.jpg"
    assert os.path.exists(img_path), "file: '{}' does not exist.".format(
        img_path)
    img = Image.open(img_path)

    # resize image to 224x224
    img = img.resize((im_width, im_height))
    plt.imshow(img)

    # scaling pixel value to (0-1)
    img = np.array(img) / 255.

    # Add the image to a batch where it's the only member.
    img = (np.expand_dims(img, 0))

    # read class_indict
    json_path = './class_indices.json'
    assert os.path.exists(json_path), "file: '{}' does not exist.".format(
        json_path)

    with open(json_path, "r") as json_file:
        class_indict = json.load(json_file)

    # create model
    model = AlexNet_v1(num_classes=5)
    weights_path = "./save_weights/myAlex.h5"
    assert os.path.exists(weights_path), "file: '{}' does not exist.".format(
        weights_path)
    model.load_weights(weights_path)

    # prediction
    result = np.squeeze(model.predict(img))
    predict_class = np.argmax(result)

    print_res = "class: {}   prob: {:.3}".format(
        class_indict[str(predict_class)], result[predict_class])
    plt.title(print_res)
    print(print_res)
    plt.show()
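A minimal sketch of predicting several images in one call instead of a one-image batch, reusing AlexNet_v1, the weights file, and class_indices.json from the script above; the extra image paths are placeholders.

import json
import numpy as np
from PIL import Image

# Rebuild the model and reload the trained weights and the class mapping.
model = AlexNet_v1(num_classes=5)
model.load_weights("./save_weights/myAlex.h5")
with open("./class_indices.json", "r") as f:
    class_indict = json.load(f)

paths = ["../tulip.jpg", "../rose.jpg"]  # hypothetical files
batch = np.stack([np.array(Image.open(p).resize((224, 224))) / 255.
                  for p in paths])       # shape: (N, 224, 224, 3)

probs = model.predict(batch)             # shape: (N, 5)
for p, row in zip(paths, probs):
    idx = int(np.argmax(row))
    print("{}: {} ({:.3f})".format(p, class_indict[str(idx)], row[idx]))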
Example No. 3
# load train dataset
train_dataset = tf.data.Dataset.from_tensor_slices(
    (train_image_list, train_label_list))
train_dataset = train_dataset.shuffle(buffer_size=train_num)\
                             .map(process_path, num_parallel_calls=AUTOTUNE)\
                             .repeat().batch(batch_size).prefetch(AUTOTUNE)

# load validation dataset
val_dataset = tf.data.Dataset.from_tensor_slices(
    (val_image_list, val_label_list))
val_dataset = val_dataset.map(process_path, num_parallel_calls=tf.data.experimental.AUTOTUNE)\
                         .repeat().batch(batch_size)

# instantiate the model
model = AlexNet_v1(im_height=im_height, im_width=im_width, class_num=5)
# model = AlexNet_v2(class_num=5)
# model.build((batch_size, 224, 224, 3))  # when using subclass model
model.summary()

# using keras low level api for training
loss_object = tf.keras.losses.CategoricalCrossentropy(from_logits=False)
optimizer = tf.keras.optimizers.Adam(learning_rate=0.0005)

train_loss = tf.keras.metrics.Mean(name='train_loss')
train_accuracy = tf.keras.metrics.CategoricalAccuracy(name='train_accuracy')

test_loss = tf.keras.metrics.Mean(name='test_loss')
test_accuracy = tf.keras.metrics.CategoricalAccuracy(name='test_accuracy')
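This fragment relies on train_image_list, train_label_list, process_path, batch_size, and train_num being defined beforehand (Example No. 5 below shows the full version). Under that assumption, a minimal sketch of pulling one batch out of the pipeline to check shapes and label encoding:

# Take a single batch from the (repeated) training dataset.
for images, labels in train_dataset.take(1):
    print(images.shape)  # (batch_size, 224, 224, 3), float32 in [0, 1]
    print(labels.shape)  # (batch_size, num_classes), one-hot labels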

Example No. 4
def main():
    data_root = os.path.abspath(os.path.join(os.getcwd(),
                                             "../.."))  # get data root path
    image_path = os.path.join(data_root, "data_set",
                              "flower_data")  # flower data set path
    train_dir = os.path.join(image_path, "train")
    validation_dir = os.path.join(image_path, "val")
    assert os.path.exists(train_dir), "cannot find {}".format(train_dir)
    assert os.path.exists(validation_dir), "cannot find {}".format(
        validation_dir)

    # create directory for saving weights
    if not os.path.exists("save_weights"):
        os.makedirs("save_weights")

    im_height = 224
    im_width = 224
    batch_size = 32
    epochs = 10

    # data generator with data augmentation
    train_image_generator = ImageDataGenerator(rescale=1. / 255,
                                               horizontal_flip=True)
    validation_image_generator = ImageDataGenerator(rescale=1. / 255)

    train_data_gen = train_image_generator.flow_from_directory(
        directory=train_dir,
        batch_size=batch_size,
        shuffle=True,
        target_size=(im_height, im_width),
        class_mode='categorical')
    total_train = train_data_gen.n

    # get class dict
    class_indices = train_data_gen.class_indices

    # transform value and key of dict
    inverse_dict = dict((val, key) for key, val in class_indices.items())
    # write dict into json file
    json_str = json.dumps(inverse_dict, indent=4)
    with open('class_indices.json', 'w') as json_file:
        json_file.write(json_str)

    val_data_gen = validation_image_generator.flow_from_directory(
        directory=validation_dir,
        batch_size=batch_size,
        shuffle=False,
        target_size=(im_height, im_width),
        class_mode='categorical')
    total_val = val_data_gen.n
    print("using {} images for training, {} images for validation.".format(
        total_train, total_val))

    # sample_training_images, sample_training_labels = next(train_data_gen)  # label is one-hot coding
    #
    # # This function will plot images in the form of a grid with 1 row
    # # and 5 columns where images are placed in each column.
    # def plotImages(images_arr):
    #     fig, axes = plt.subplots(1, 5, figsize=(20, 20))
    #     axes = axes.flatten()
    #     for img, ax in zip(images_arr, axes):
    #         ax.imshow(img)
    #         ax.axis('off')
    #     plt.tight_layout()
    #     plt.show()
    #
    #
    # plotImages(sample_training_images[:5])

    model = AlexNet_v1(im_height=im_height, im_width=im_width, num_classes=5)
    # model = AlexNet_v2(class_num=5)
    # model.build((batch_size, 224, 224, 3))  # when using subclass model
    model.summary()

    # using keras high level api for training
    model.compile(
        optimizer=tf.keras.optimizers.Adam(learning_rate=0.0005),
        loss=tf.keras.losses.CategoricalCrossentropy(from_logits=False),
        metrics=["accuracy"])

    callbacks = [
        tf.keras.callbacks.ModelCheckpoint(filepath='./save_weights/myAlex.h5',
                                           save_best_only=True,
                                           save_weights_only=True,
                                           monitor='val_loss')
    ]

    # TensorFlow 2.1+ recommends using fit
    history = model.fit(x=train_data_gen,
                        steps_per_epoch=total_train // batch_size,
                        epochs=epochs,
                        validation_data=val_data_gen,
                        validation_steps=total_val // batch_size,
                        callbacks=callbacks)

    # plot loss and accuracy image
    history_dict = history.history
    train_loss = history_dict["loss"]
    train_accuracy = history_dict["accuracy"]
    val_loss = history_dict["val_loss"]
    val_accuracy = history_dict["val_accuracy"]

    # figure 1
    plt.figure()
    plt.plot(range(epochs), train_loss, label='train_loss')
    plt.plot(range(epochs), val_loss, label='val_loss')
    plt.legend()
    plt.xlabel('epochs')
    plt.ylabel('loss')

    # figure 2
    plt.figure()
    plt.plot(range(epochs), train_accuracy, label='train_accuracy')
    plt.plot(range(epochs), val_accuracy, label='val_accuracy')
    plt.legend()
    plt.xlabel('epochs')
    plt.ylabel('accuracy')
    plt.show()
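After training, the ModelCheckpoint callback above has kept only the best weights. A minimal sketch, assuming the model and generators from main() are still in scope, of reloading those weights and measuring validation performance:

# Restore the best weights written by the ModelCheckpoint callback.
model.load_weights('./save_weights/myAlex.h5')

# Evaluate on the validation generator; returns [loss, accuracy] in the
# order given to model.compile(metrics=...).
val_loss, val_acc = model.evaluate(val_data_gen,
                                   steps=total_val // batch_size)
print("val_loss: {:.4f}, val_accuracy: {:.4f}".format(val_loss, val_acc))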
Example No. 5
def main():
    gpus = tf.config.experimental.list_physical_devices("GPU")
    if gpus:
        try:
            for gpu in gpus:
                tf.config.experimental.set_memory_growth(
                    gpu, True)  # let the GPU allocate memory as needed instead of all at once
        except RuntimeError as e:
            print(e)
            exit(-1)

    data_root = os.path.abspath(os.path.join(os.getcwd(),
                                             "../.."))  # get data root path
    image_path = os.path.join(data_root, "data_set",
                              "flower_data")  # flower data set path
    train_dir = os.path.join(image_path, "train")
    validation_dir = os.path.join(image_path, "val")
    assert os.path.exists(train_dir), "cannot find {}".format(train_dir)
    assert os.path.exists(validation_dir), "cannot find {}".format(
        validation_dir)

    # create directory for saving weights
    if not os.path.exists("save_weights"):
        os.makedirs("save_weights")

    im_height = 224
    im_width = 224
    batch_size = 32
    epochs = 10

    # class dict
    data_class = [
        cla for cla in os.listdir(train_dir)
        if os.path.isdir(os.path.join(train_dir, cla))
    ]
    class_num = len(data_class)
    class_dict = dict((value, index) for index, value in enumerate(data_class))

    # reverse value and key of dict
    inverse_dict = dict((val, key) for key, val in class_dict.items())
    # write dict into json file
    json_str = json.dumps(inverse_dict, indent=4)
    with open('class_indices.json', 'w') as json_file:
        json_file.write(json_str)

    # load train images list
    train_image_list = glob.glob(train_dir + "/*/*.jpg")
    random.shuffle(train_image_list)
    train_num = len(train_image_list)
    assert train_num > 0, "cannot find any .jpg file in {}".format(train_dir)
    train_label_list = [
        class_dict[path.split(os.path.sep)[-2]] for path in train_image_list
    ]

    # load validation images list
    val_image_list = glob.glob(validation_dir + "/*/*.jpg")
    random.shuffle(val_image_list)
    val_num = len(val_image_list)
    assert val_num > 0, "cannot find any .jpg file in {}".format(
        validation_dir)
    val_label_list = [
        class_dict[path.split(os.path.sep)[-2]] for path in val_image_list
    ]

    print("using {} images for training, {} images for validation.".format(
        train_num, val_num))

    def process_path(img_path, label):
        label = tf.one_hot(label, depth=class_num)
        image = tf.io.read_file(img_path)
        image = tf.image.decode_jpeg(image)
        image = tf.image.convert_image_dtype(image, tf.float32)
        image = tf.image.resize(image, [im_height, im_width])
        return image, label

    AUTOTUNE = tf.data.experimental.AUTOTUNE

    # Note: this is the main difference from the CPU version. Instead of an
    # ImageDataGenerator, the input pipeline is built with tf.data.Dataset,
    # which can load and preprocess images in parallel.
    # load train dataset
    train_dataset = tf.data.Dataset.from_tensor_slices(
        (train_image_list, train_label_list))
    train_dataset = train_dataset.shuffle(buffer_size=train_num)\
                                 .map(process_path, num_parallel_calls=AUTOTUNE)\
                                 .repeat().batch(batch_size).prefetch(AUTOTUNE)

    # load validation dataset
    val_dataset = tf.data.Dataset.from_tensor_slices(
        (val_image_list, val_label_list))
    val_dataset = val_dataset.map(process_path, num_parallel_calls=tf.data.experimental.AUTOTUNE)\
                             .repeat().batch(batch_size)

    # instantiate the model
    model = AlexNet_v1(im_height=im_height, im_width=im_width, num_classes=5)
    # model = AlexNet_v2(class_num=5)
    # model.build((batch_size, 224, 224, 3))  # when using subclass model
    model.summary()

    # using keras low level api for training
    loss_object = tf.keras.losses.CategoricalCrossentropy(from_logits=False)
    optimizer = tf.keras.optimizers.Adam(learning_rate=0.0005)

    train_loss = tf.keras.metrics.Mean(name='train_loss')
    train_accuracy = tf.keras.metrics.CategoricalAccuracy(
        name='train_accuracy')

    test_loss = tf.keras.metrics.Mean(name='test_loss')
    test_accuracy = tf.keras.metrics.CategoricalAccuracy(name='test_accuracy')

    @tf.function
    def train_step(images, labels):
        with tf.GradientTape() as tape:
            predictions = model(images, training=True)
            loss = loss_object(labels, predictions)
        gradients = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))

        train_loss(loss)
        train_accuracy(labels, predictions)

    @tf.function
    def test_step(images, labels):
        predictions = model(images, training=False)
        t_loss = loss_object(labels, predictions)

        test_loss(t_loss)
        test_accuracy(labels, predictions)

    best_test_loss = float('inf')
    train_step_num = train_num // batch_size
    val_step_num = val_num // batch_size
    for epoch in range(1, epochs + 1):
        train_loss.reset_states()  # clear history info
        train_accuracy.reset_states()  # clear history info
        test_loss.reset_states()  # clear history info
        test_accuracy.reset_states()  # clear history info

        t1 = time.perf_counter()
        for index, (images, labels) in enumerate(train_dataset):
            train_step(images, labels)
            if index + 1 == train_step_num:
                break
        print(time.perf_counter() - t1)

        for index, (images, labels) in enumerate(val_dataset):
            test_step(images, labels)
            if index + 1 == val_step_num:
                break

        template = 'Epoch {}, Loss: {}, Accuracy: {}, Test Loss: {}, Test Accuracy: {}'
        print(
            template.format(epoch, train_loss.result(),
                            train_accuracy.result() * 100, test_loss.result(),
                            test_accuracy.result() * 100))
        if test_loss.result() < best_test_loss:
            best_test_loss = test_loss.result()
            model.save_weights("./save_weights/myAlex.ckpt",
                               save_format='tf')
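The loop above saves TensorFlow-format checkpoints (.ckpt), while the prediction scripts in Examples No. 1, 2, and 7 load the .h5 file produced by the high-level API. A minimal sketch, assuming the same AlexNet_v1 definition, of restoring the checkpoint and running a shape check with a dummy input:

import numpy as np

# Rebuild the architecture, then restore the TF-format checkpoint.
model = AlexNet_v1(im_height=224, im_width=224, num_classes=5)
model.load_weights("./save_weights/myAlex.ckpt")

# Dummy all-zeros image just to confirm input/output shapes.
dummy = np.zeros((1, 224, 224, 3), dtype=np.float32)
probs = np.squeeze(model.predict(dummy))
print(probs.shape)  # (5,) -- one probability per class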
Example No. 6
def main():
    # define where the training and validation data live
    # data_root = os.path.abspath(os.path.join(os.getcwd(), "../.."))  # get data root path
    data_root = os.path.abspath(os.getcwd())  # get data root path
    image_path = os.path.join(data_root, "data_set",
                              "flower_data")  # flower data set path
    train_dir = os.path.join(image_path, "train")
    validation_dir = os.path.join(image_path, "val")
    assert os.path.exists(train_dir), "cannot find {}".format(train_dir)
    assert os.path.exists(validation_dir), "cannot find {}".format(
        validation_dir)

    # create directory for saving weights
    if not os.path.exists("save_weights"):
        os.makedirs("save_weights")

    # basic training parameters: input image height/width, batch size, epochs
    im_height = 224
    im_width = 224
    batch_size = 32
    epochs = 10

    # data generator with data augmentation
    # ImageDataGenerator (from tensorflow.keras.preprocessing.image) offers many
    # preprocessing options; see its documentation for the full list.
    # Define separate generators for the training and validation sets.
    train_image_generator = ImageDataGenerator(
        rescale=1. / 255,  # rescale pixel values from 0-255 to 0-1
        horizontal_flip=True)  # random horizontal flip
    validation_image_generator = ImageDataGenerator(rescale=1. / 255)

    # read images with .flow_from_directory
    train_data_gen = train_image_generator.flow_from_directory(
        directory=train_dir,
        batch_size=batch_size,
        shuffle=True,
        target_size=(im_height, im_width),
        class_mode='categorical')  # labels are returned one-hot encoded
    # .n gives the number of training samples
    total_train = train_data_gen.n

    # get class dict; .class_indices maps class names to indices
    class_indices = train_data_gen.class_indices

    # transform value and key of dict (index -> class name)
    inverse_dict = dict((val, key) for key, val in class_indices.items())

    # write the inverted dict into a json file
    json_str = json.dumps(inverse_dict, indent=4)
    with open('class_indices.json', 'w') as json_file:
        json_file.write(json_str)

    # set up the validation generator in the same way
    val_data_gen = validation_image_generator.flow_from_directory(
        directory=validation_dir,
        batch_size=batch_size,
        shuffle=False,
        target_size=(im_height, im_width),
        class_mode='categorical')
    total_val = val_data_gen.n
    print("using {} images for training, {} images for validation.".format(
        total_train, total_val))

    # # Optional: inspect a batch of loaded images.
    # sample_training_images, sample_training_labels = next(train_data_gen)  # label is one-hot coding
    # # Note: the generator returns labels already one-hot encoded.

    # # This function will plot images in the form of a grid with 1 row
    # # and 5 columns where images are placed in each column.
    # def plotImages(images_arr):
    #     fig, axes = plt.subplots(1, 5, figsize=(20, 20))
    #     axes = axes.flatten()
    #     for img, ax in zip(images_arr, axes):
    #         ax.imshow(img)
    #         ax.axis('off')
    #     plt.tight_layout()
    #     plt.show()

    # plotImages(sample_training_images[:5])   # show the first 5 images

    model = AlexNet_v1(im_height=im_height, im_width=im_width, num_classes=5)

    # model = AlexNet_v2(class_num=5)
    # model.build((batch_size, 224, 224, 3))  # when using subclass model
    # If the network is built as a subclassed model, model.build() must be called
    # before model.summary(); only after build() is the model actually instantiated.

    model.summary()  # print the model's layers and parameter counts

    # using keras high level api for training
    # First compile the model with model.compile(), which defines the optimizer,
    # the loss function, and the metrics to report.
    model.compile(
        optimizer=tf.keras.optimizers.Adam(learning_rate=0.0005),
        loss=tf.keras.losses.CategoricalCrossentropy(from_logits=False),
        # from_logits=False because the model already applies softmax to its output.
        # CategoricalCrossentropy expects labels in a one-hot representation;
        # to provide integer labels, use SparseCategoricalCrossentropy instead.
        metrics=["accuracy"])  # metrics to monitor during training

    # define the list of callbacks
    # tf.keras.callbacks.ModelCheckpoint configures how the model is saved
    callbacks = [
        tf.keras.callbacks.ModelCheckpoint(
            filepath='./save_weights/myAlex.h5',
            # Two save formats are available: the Keras .h5 format and the
            # TensorFlow ckpt format; switching the file extension is enough.
            save_best_only=True,  # only keep the best weights
            # save_weights_only=True saves just the weights, which keeps the file
            # small, but the model must be rebuilt before loading them; saving the
            # full model avoids having to rebuild it.
            save_weights_only=True,
            monitor='val_loss')
    ]  # monitor the validation loss to decide which weights are the best

    # TensorFlow 2.1+ recommends using fit; the training history is returned in `history`.
    # Since TF 2.1, model.fit also accepts generators (replacing model.fit_generator).
    history = model.fit(
        x=train_data_gen,  # training input: the training data generator
        steps_per_epoch=total_train // batch_size,  # number of steps per epoch
        epochs=epochs,
        validation_data=val_data_gen,
        validation_steps=total_val // batch_size,
        callbacks=callbacks)  # callbacks: the checkpoint rule defined above

    # plot loss and accuracy image
    history_dict = history.history  # dict of per-epoch metrics
    train_loss = history_dict["loss"]
    train_accuracy = history_dict["accuracy"]
    val_loss = history_dict["val_loss"]
    val_accuracy = history_dict["val_accuracy"]

    # figure 1
    plt.figure()
    plt.plot(range(epochs), train_loss, label='train_loss')
    plt.plot(range(epochs), val_loss, label='val_loss')
    plt.legend()
    plt.xlabel('epochs')
    plt.ylabel('loss')

    # figure 2
    plt.figure()
    plt.plot(range(epochs), train_accuracy, label='train_accuracy')
    plt.plot(range(epochs), val_accuracy, label='val_accuracy')
    plt.legend()
    plt.xlabel('epochs')
    plt.ylabel('accuracy')
    plt.show()

    # Before TF 2.1, model.fit_generator was used when the dataset was too large
    # to load into memory at once, and model.fit only for small in-memory data.
    # Since TF 2.1, model.fit accepts generators directly, so the call below is
    # the legacy equivalent of the model.fit call above.

    # history = model.fit_generator(generator=train_data_gen,
    #                               steps_per_epoch=total_train // batch_size,
    #                               epochs=epochs,
    #                               validation_data=val_data_gen,
    #                               validation_steps=total_val // batch_size,
    #                               callbacks=callbacks)
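The save_weights_only comment above notes that weight-only files require rebuilding the model before loading. A minimal sketch, assuming AlexNet_v1 is a functional model built from standard Keras layers, of saving and restoring the full model instead:

# Save architecture + weights in a single HDF5 file.
model.save('./save_weights/myAlex_full.h5')

# Later (or in another script) the model can be restored without
# re-defining AlexNet_v1.
restored = tf.keras.models.load_model('./save_weights/myAlex_full.h5')
restored.summary()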
Example No. 7
im_height = 224
im_width = 224

# load image
img = Image.open("../tulip.jpg")
# resize image to 224x224
img = img.resize((im_width, im_height))
plt.imshow(img)

# scaling pixel value to (0-1)
img = np.array(img) / 255.

# Add the image to a batch where it's the only member.
img = (np.expand_dims(img, 0))

# read class_indict
try:
    json_file = open('./class_indices.json', 'r')
    class_indict = json.load(json_file)
except Exception as e:
    print(e)
    exit(-1)

model = AlexNet_v1(class_num=5)
model.load_weights("./save_weights/myAlex.h5")
result = np.squeeze(model.predict(img))
predict_class = np.argmax(result)
print(class_indict[str(predict_class)], result[predict_class])
plt.show()
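A minimal sketch, reusing `result` and `class_indict` from the snippet above, of printing the whole probability distribution rather than only the top class:

# Sort class indices by descending probability and print each one.
for idx in np.argsort(result)[::-1]:
    print("{:12s} {:.3f}".format(class_indict[str(idx)], result[idx]))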