Code Example #1
def main():

    model = GoogLeNet(num_classes=num_clazz,
                      aux_logits=True,
                      init_weights=True).to(device)
    optimizer = optim.Adam(model.parameters(), lr=lr)
    scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9)
    criterion = nn.CrossEntropyLoss().to(device)

    validation_acc = []
    best_acc, best_epoch = 0, 0
    global_step = 0

    for epoch in range(epochs):
        model.train()  # training mode
        total_batch_loss = 0
        print('start')
        # print(train_loader[:1])
        for step, data in enumerate(train_loader):
            x, y = data
            x, y = x.to(device), y.to(device)
            logits, aux_logits2, aux_logits1 = model(x)
            loss0 = criterion(logits, y)
            loss1 = criterion(aux_logits1, y)
            loss2 = criterion(aux_logits2, y)
            loss = loss0 + loss1 * 0.3 + loss2 * 0.3
            total_batch_loss += loss.item()
            # zero the gradients
            optimizer.zero_grad()
            # compute the gradients
            loss.backward()
            # update the parameters
            optimizer.step()

            if step % 200 == 0:
                print('Step {}/{} \t loss: {}'.format(step, len(train_loader),
                                                      loss))

        # eval mode
        model.eval()
        val_acc = evalute(model, val_loader)
        if val_acc > best_acc:
            best_epoch = epoch
            best_acc = val_acc
            torch.save(model.state_dict(), 'best.mdl')

        scheduler.step()  # adjust the learning rate
        print("epoch: ", epoch, "epoch_loss: ", total_batch_loss, "epoch_acc:",
              val_acc)

    print('best acc:', best_acc, 'best epoch:', best_epoch)

    model.load_state_dict(torch.load('best.mdl'))
    print('loaded from ckpt!')

    test_acc = evalute(model, test_loader)
    print('test acc:', test_acc)
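
Note on Code Example #1: the snippet calls an evalute() helper (spelled that way in the source) and relies on module-level names such as device, lr, epochs, and the data loaders, none of which are shown. A minimal sketch of what such an accuracy helper might look like, assuming the loaders yield (images, labels) batches and the model returns only the main logits in eval mode, is:

def evalute(model, loader):
    # hypothetical helper matching the call sites above; not part of the original source
    model.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for x, y in loader:
            x, y = x.to(device), y.to(device)
            logits = model(x)
            correct += (logits.argmax(dim=1) == y).sum().item()
            total += y.size(0)
    return correct / total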
Code Example #2
def main():
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    data_transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    # load image
    img_path = "../tulip.jpg"
    assert os.path.exists(img_path), "file: '{}' does not exist.".format(
        img_path)
    img = Image.open(img_path)
    plt.imshow(img)
    # [N, C, H, W]
    img = data_transform(img)
    # expand batch dimension
    img = torch.unsqueeze(img, dim=0)

    # read class_indict
    json_path = './class_indices.json'
    assert os.path.exists(json_path), "file: '{}' does not exist.".format(
        json_path)

    json_file = open(json_path, "r")
    class_indict = json.load(json_file)

    # create model
    model = GoogLeNet(num_classes=5, aux_logits=False).to(device)
    # * at construction time aux_logits=False, so the auxiliary classifiers do not need to be built

    # load model weights
    weights_path = "./googleNet.pth"
    assert os.path.exists(weights_path), "file: '{}' does not exist.".format(
        weights_path)
    missing_keys, unexpected_keys = model.load_state_dict(torch.load(
        weights_path, map_location=device),
                                                          strict=False)
    # * strict defaults to True, which requires an exact structural match between the current model and the weights being loaded
    # * strict=False here, because the checkpoint was saved with the two auxiliary classifiers, which are not needed now

    model.eval()
    with torch.no_grad():
        # predict class
        output = torch.squeeze(model(img.to(device))).cpu()
        predict = torch.softmax(output, dim=0)
        predict_cla = torch.argmax(predict).numpy()

    print_res = "class: {}   prob: {:.3}".format(
        class_indict[str(predict_cla)], predict[predict_cla].numpy())
    plt.title(print_res)
    print(print_res)
    plt.show()
Code Example #3
File: JM1.py    Project: yuanandfang/flower_recognition
    def predict(self):
        im_height = 224
        im_width = 224

        # load image
        img = Image.open(fname)
        # resize image to 224x224
        img = img.resize((im_width, im_height))
        plt.imshow(img)

        # scaling pixel value and normalize
        img = ((np.array(img) / 255.) - 0.5) / 0.5

        # Add the image to a batch where it's the only member.
        img = (np.expand_dims(img, 0))

        # read class_indict
        try:
            json_file = open('./class_indices.json', 'r')
            class_indict = json.load(json_file)
        except Exception as e:
            print(e)
            exit(-1)

        model = GoogLeNet(class_num=5, aux_logits=False)
        model.summary()
        model.load_weights("./save_weights/myGoogLenet.h5", by_name=True)  # h5 format
        # model.load_weights("./save_weights/myGoogLeNet.ckpt")  # ckpt format
        result = model.predict(img)
        predict_class = np.argmax(result)
        print(class_indict[str(predict_class)])
        result1 = class_indict[str(predict_class)]
        self.textBrowser.setPlainText(result1)
        plt.show()
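
Note on Code Example #3: predict() is a method of a GUI class in JM1.py and uses an fname variable and self.textBrowser defined elsewhere in that file. A hedged guess at how fname might be obtained, assuming a PyQt5 widget with an open-file dialog (the class and method names below are hypothetical), is:

from PyQt5.QtWidgets import QFileDialog

class FlowerWindow:  # hypothetical stand-in for the widget class in JM1.py
    def open_image(self):
        # store the chosen path in the module-level fname that predict() reads
        global fname
        fname, _ = QFileDialog.getOpenFileName(None, "Choose an image", ".",
                                               "Images (*.jpg *.jpeg *.png)")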
Code Example #4
def _main(model_name):
    # data_sets path
    train_dir = '/home/lst/datasets/cifar-10-images_train/'
    val_dir = '/home/lst/datasets/cifar-10-images_test/'

    # model
    model = GoogLeNet(input_shape=(224, 224, 3))
    # parallel model
    parallel_model = multi_gpu_model(model, gpus=2)

    # optimizers setting
    from keras import optimizers
    optimizer = optimizers.adamax(lr=0.002, decay=1e-06)
    parallel_model.compile(loss=[
        'categorical_crossentropy', 'categorical_crossentropy',
        'categorical_crossentropy'
    ],
                           loss_weights=[1, 0.3, 0.3],
                           optimizer=optimizer,
                           metrics=["accuracy"])
    # load data by batch
    train_generator, validation_generator, num_train, num_val = load_batch_data(
        train_dir, val_dir)

    # Callbacks settings
    from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard

    early_stop = EarlyStopping(monitor='val_loss',
                               min_delta=0.001,
                               patience=30,
                               mode='min',
                               verbose=1)

    checkpoint = ModelCheckpoint(f'{model_name}.h5',
                                 monitor='val_loss',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='min',
                                 period=1)

    tensorboard = TensorBoard(log_dir=f'./logs/{model_name}',
                              histogram_freq=0,
                              write_graph=True,
                              write_images=False)
    # fit
    parallel_model.fit_generator(
        train_generator,
        validation_data=validation_generator,
        steps_per_epoch=math.ceil(num_train / batch_size),
        validation_steps=math.ceil(num_val / batch_size),
        epochs=epochs,
        callbacks=[tensorboard, early_stop, checkpoint],
    )
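
Note on Code Example #4: load_batch_data(), batch_size, and epochs are defined elsewhere in that project. A plausible sketch of the data helper, assuming flow_from_directory generators whose one-hot labels are repeated three times to match the model's main and two auxiliary outputs, is:

from keras.preprocessing.image import ImageDataGenerator

batch_size = 64  # assumed values, not taken from the original file
epochs = 100

def load_batch_data(train_dir, val_dir, target_size=(224, 224)):
    train_gen = ImageDataGenerator(rescale=1. / 255, horizontal_flip=True)
    val_gen = ImageDataGenerator(rescale=1. / 255)

    train_flow = train_gen.flow_from_directory(train_dir, target_size=target_size,
                                               batch_size=batch_size,
                                               class_mode='categorical')
    val_flow = val_gen.flow_from_directory(val_dir, target_size=target_size,
                                           batch_size=batch_size,
                                           class_mode='categorical')

    def triple(flow):
        # repeat the labels so they match the three-output GoogLeNet
        while True:
            x, y = next(flow)
            yield x, [y, y, y]

    return triple(train_flow), triple(val_flow), train_flow.n, val_flow.n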
Code Example #5
File: predict.py    Project: KDD2018/Machine-Learning
def predict(img_path, weights_path, class_indices_path):
    """
    对图像进行预测分类
    :param img_path: 待预测图像路径
    :param weights_path: 模型权重路径
    :param class_indices_path: 标签类别索引
    :return: 待预测图像类别
    """
    img_height = img_width = 224

    # load the image to classify
    img = Image.open(img_path)
    # resize the image
    img = img.resize((img_width, img_height))
    plt.imshow(img)

    # normalize
    img = (np.array(img) / 255. - 0.5) / 0.5

    # add the batch dimension
    img = (np.expand_dims(img, 0))

    # load the class index file
    try:
        json_file = open(class_indices_path, 'r')
        class_indict = json.load(json_file)
    except Exception as e:
        print(e)
        exit(-1)

    # predict
    model = GoogLeNet(class_num=5, aux_logits=False)
    # the model built here has no auxiliary classifiers while the saved training model does, so by_name is needed to match the weights by name
    model.load_weights(weights_path, by_name=True)
    result = np.squeeze(model.predict(img))
    predict_class = np.argmax(result)
    label = class_indict[str(predict_class)], result[predict_class]
    plt.title(label)
    plt.show()
Code Example #6
def main():
    im_height = 224
    im_width = 224

    # load image
    img_path = "../tulip.jpg"
    assert os.path.exists(img_path), "file: '{}' does not exist.".format(
        img_path)
    img = Image.open(img_path)
    # resize image to 224x224
    img = img.resize((im_width, im_height))
    plt.imshow(img)

    # scaling pixel value and normalize
    img = ((np.array(img) / 255.) - 0.5) / 0.5

    # Add the image to a batch where it's the only member.
    img = (np.expand_dims(img, 0))

    # read class_indict
    json_path = './class_indices.json'
    assert os.path.exists(json_path), "file: '{}' does not exist.".format(
        json_path)

    json_file = open(json_path, "r")
    class_indict = json.load(json_file)

    model = GoogLeNet(class_num=5,
                      aux_logits=False)  # without the auxiliary classifiers the parameter count drops by almost half, because they are all fully connected layers
    model.summary()
    # model.load_weights("./save_weights/myGoogLenet.h5", by_name=True)  # h5 format
    #  weights saved in .h5 format need by_name=True; they are loaded according to the variable names of the current model
    # because training used the auxiliary classifiers, while prediction here does not
    weights_path = "./save_weights/myGoogLeNet.ckpt"
    assert len(glob.glob(weights_path +
                         "*")), "cannot find {}".format(weights_path)
    model.load_weights(weights_path)

    result = np.squeeze(model.predict(img))
    predict_class = np.argmax(result)

    print_res = "class: {}   prob: {:.3}".format(
        class_indict[str(predict_class)], result[predict_class])
    plt.title(print_res)
    print(print_res)
    plt.show()
Code Example #7
def load_model():
    model = GoogLeNet()
    model.load_state_dict(torch.load(PRETRAINED_MODEL))
    return model
Code Example #8
File: train.py    Project: HCS12/DeepLearning
def main():
    data_root = os.path.abspath(os.path.join(os.getcwd(),
                                             "../.."))  # get data root path
    image_path = os.path.join(data_root, "data_set",
                              "flower_data")  # flower data set path
    train_dir = os.path.join(image_path, "train")
    validation_dir = os.path.join(image_path, "val")
    assert os.path.exists(train_dir), "cannot find {}".format(train_dir)
    assert os.path.exists(validation_dir), "cannot find {}".format(
        validation_dir)

    # create direction for saving weights
    if not os.path.exists("save_weights"):
        os.makedirs("save_weights")

    im_height = 224
    im_width = 224
    batch_size = 32
    epochs = 30

    def pre_function(img):
        # img = im.open('test.jpg')
        # img = np.array(img).astype(np.float32)
        img = img / 255.
        img = (img - 0.5) * 2.0

        return img

    # data generator with data augmentation
    train_image_generator = ImageDataGenerator(
        preprocessing_function=pre_function, horizontal_flip=True)
    validation_image_generator = ImageDataGenerator(
        preprocessing_function=pre_function)

    train_data_gen = train_image_generator.flow_from_directory(
        directory=train_dir,
        batch_size=batch_size,
        shuffle=True,
        target_size=(im_height, im_width),
        class_mode='categorical')
    total_train = train_data_gen.n

    # get class dict
    class_indices = train_data_gen.class_indices

    # transform value and key of dict
    inverse_dict = dict((val, key) for key, val in class_indices.items())
    # write dict into json file
    json_str = json.dumps(inverse_dict, indent=4)
    with open('class_indices.json', 'w') as json_file:
        json_file.write(json_str)

    val_data_gen = validation_image_generator.flow_from_directory(
        directory=validation_dir,
        batch_size=batch_size,
        shuffle=False,
        target_size=(im_height, im_width),
        class_mode='categorical')
    total_val = val_data_gen.n
    print("using {} images for training, {} images for validation.".format(
        total_train, total_val))

    model = GoogLeNet(im_height=im_height,
                      im_width=im_width,
                      class_num=5,
                      aux_logits=True)
    # model.build((batch_size, 224, 224, 3))  # when using subclass model
    model.summary()

    # using keras low level api for training
    loss_object = tf.keras.losses.CategoricalCrossentropy(from_logits=False)
    optimizer = tf.keras.optimizers.Adam(learning_rate=0.0003)

    train_loss = tf.keras.metrics.Mean(name='train_loss')
    train_accuracy = tf.keras.metrics.CategoricalAccuracy(
        name='train_accuracy')

    val_loss = tf.keras.metrics.Mean(name='val_loss')
    val_accuracy = tf.keras.metrics.CategoricalAccuracy(name='val_accuracy')

    @tf.function
    def train_step(images, labels):
        with tf.GradientTape() as tape:
            aux1, aux2, output = model(images, training=True)
            loss1 = loss_object(labels, aux1)
            loss2 = loss_object(labels, aux2)
            loss3 = loss_object(labels, output)
            loss = loss1 * 0.3 + loss2 * 0.3 + loss3
        gradients = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))

        train_loss(loss)
        train_accuracy(labels, output)

    @tf.function
    def val_step(images, labels):
        _, _, output = model(images, training=False)
        loss = loss_object(labels, output)

        val_loss(loss)
        val_accuracy(labels, output)

    best_val_acc = 0.
    for epoch in range(epochs):
        train_loss.reset_states()  # clear history info
        train_accuracy.reset_states()  # clear history info
        val_loss.reset_states()  # clear history info
        val_accuracy.reset_states()  # clear history info

        # train
        train_bar = tqdm(range(total_train // batch_size))
        for step in train_bar:
            images, labels = next(train_data_gen)
            train_step(images, labels)

            # print train process
            train_bar.desc = "train epoch[{}/{}] loss:{:.3f}, acc:{:.3f}".format(
                epoch + 1, epochs, train_loss.result(),
                train_accuracy.result())

        # validate
        val_bar = tqdm(range(total_val // batch_size))
        for step in val_bar:
            val_images, val_labels = next(val_data_gen)
            val_step(val_images, val_labels)

            # print val process
            val_bar.desc = "valid epoch[{}/{}] loss:{:.3f}, acc:{:.3f}".format(
                epoch + 1, epochs, val_loss.result(), val_accuracy.result())

        # only save best weights
        if val_accuracy.result() > best_val_acc:
            best_val_acc = val_accuracy.result()
            model.save_weights("./save_weights/myGoogLeNet.ckpt")
Code Example #9
# load image
img = Image.open("../tulip.jpg")
plt.imshow(img)
# [N, C, H, W]
img = data_transform(img)
# expand batch dimension
img = torch.unsqueeze(img, dim=0)

# read class_indict
try:
    json_file = open('./class_indices.json', 'r')
    class_indict = json.load(json_file)
except Exception as e:
    print(e)
    exit(-1)

# create model
model = GoogLeNet(num_classes=5, aux_logits=False)
# load model weights
model_weight_path = "./googleNet.pth"
missing_keys, unexpected_keys = model.load_state_dict(torch.load(model_weight_path), strict=False)
model.eval()
with torch.no_grad():
    # predict class
    output = torch.squeeze(model(img))
    predict = torch.softmax(output, dim=0)
    predict_cla = torch.argmax(predict).numpy()
print(predict.numpy()[predict_cla])
print(class_indict[str(predict_cla)])
plt.show()
Code Example #10
def run():
    # prepare the data
    train_image_generator = ImageDataGenerator(
        preprocessing_function=pre_function, horizontal_flip=True)
    validation_image_generator = ImageDataGenerator(
        preprocessing_function=pre_function)

    train_data_gen = train_image_generator.flow_from_directory(
        directory=train_dir,
        batch_size=batch_size,
        shuffle=True,
        target_size=(img_height, img_width),
        class_mode='categorical')
    val_data_gen = validation_image_generator.flow_from_directory(
        directory=validation_dir,
        batch_size=batch_size,
        shuffle=False,
        target_size=(img_height, img_width),
        class_mode='categorical')

    total_train, total_val = train_data_gen.n, val_data_gen.n

    # get class dict
    class_indices = train_data_gen.class_indices

    # transform value and key of dict
    inverse_dict = dict((val, key) for key, val in class_indices.items())
    # write dict into json file
    json_str = json.dumps(inverse_dict, indent=4)
    with open('../class_indices.json', 'w') as json_file:
        json_file.write(json_str)

    # build the GoogLeNet model
    model = GoogLeNet(img_height=img_height,
                      img_width=img_width,
                      class_num=5,
                      aux_logits=True)
    # model.build((batch_size, 224, 224, 3))  # when using subclass model
    # model.load_weights("pretrain_weights.ckpt")
    model.summary()

    # custom training loop using the low-level Keras API
    loss_object = tf.keras.losses.CategoricalCrossentropy(from_logits=False)
    optimizer = tf.keras.optimizers.Adam(learning_rate=0.0005)

    train_loss = tf.keras.metrics.Mean(name='train_loss')
    train_accuracy = tf.keras.metrics.CategoricalAccuracy(
        name='train_accuracy')

    test_loss = tf.keras.metrics.Mean(name='test_loss')
    test_accuracy = tf.keras.metrics.CategoricalAccuracy(name='test_accuracy')

    @tf.function
    def train_step(images, labels):
        with tf.GradientTape() as tape:
            aux1, aux2, output = model(images, training=True)
            loss1 = loss_object(labels, aux1)
            loss2 = loss_object(labels, aux2)
            loss3 = loss_object(labels, output)
            loss = loss1 * 0.3 + loss2 * 0.3 + loss3
        gradients = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))

        train_loss(loss)
        train_accuracy(labels, output)

    @tf.function
    def test_step(images, labels):
        _, _, output = model(images, training=False)
        t_loss = loss_object(labels, output)

        test_loss(t_loss)
        test_accuracy(labels, output)

    best_test_loss = float('inf')
    for epoch in range(1, epochs + 1):
        train_loss.reset_states()  # clear history info
        train_accuracy.reset_states()  # clear history info
        test_loss.reset_states()  # clear history info
        test_accuracy.reset_states()  # clear history info

        for step in range(total_train // batch_size):
            images, labels = next(train_data_gen)
            train_step(images, labels)

        for step in range(total_val // batch_size):
            test_images, test_labels = next(val_data_gen)
            test_step(test_images, test_labels)

        template = 'Epoch {}, Loss: {}, Accuracy: {}, Test Loss: {}, Test Accuracy: {}'
        print(
            template.format(epoch, train_loss.result(),
                            train_accuracy.result() * 100, test_loss.result(),
                            test_accuracy.result() * 100))
        if test_loss.result() < best_test_loss:
            best_test_loss = test_loss.result()
            model.save_weights("../save_weights/GoogLeNet.h5")
Code Example #11
File: predict.py    Project: huangjunxiong11/TF2
import matplotlib.pyplot as plt

im_height = 224
im_width = 224

# read the image
img = Image.open(
    "E:/DeepLearning/GoogLeNet/daisy_test.jpg")  # this is my path; change it to match your own directory layout
# resize to 224x224
img = img.resize((im_width, im_height))
plt.imshow(img)
# normalize the original image
img = ((np.array(img) / 255.) - 0.5) / 0.5
# Add the image to a batch where it's the only member.
img = (np.expand_dims(img, 0))
# read the class_indict file
try:
    json_file = open('./class_indices.json', 'r')
    class_indict = json.load(json_file)
except Exception as e:
    print(e)
    exit(-1)
model = GoogLeNet(class_num=5, aux_logits=False)  # rebuild the network
model.summary()
model.load_weights("./save_weights/myGoogLenet.h5", by_name=True)  # 加载模型参数
# model.load_weights("./save_weights/myGoogLeNet.ckpt")  # ckpt format
result = model.predict(img)
predict_class = np.argmax(result)
print('Predicted class:', class_indict[str(predict_class)])  # print the predicted class
plt.show()
Code Example #12
def main():
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    data_transform = transforms.Compose(
        [transforms.Resize((224, 224)),
         transforms.ToTensor(),
         transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

    # load image
    img_path = "./a.jpg"
    assert os.path.exists(img_path), "file: '{}' does not exist.".format(img_path)
    img = Image.open(img_path)
    plt.imshow(img)
    # [N, C, H, W]
    img = data_transform(img)
    # expand batch dimension
    img = torch.unsqueeze(img, dim=0)

    # read class_indict
    json_path = './class_indices.json'
    assert os.path.exists(json_path), "file: '{}' does not exist.".format(json_path)

    json_file = open(json_path, "r")
    class_indict = json.load(json_file)

    # create model
    model = GoogLeNet(num_classes=5, aux_logits=False).to(device)

    # load model weights
    weights_path = "./weights/googlenet.pth"
    assert os.path.exists(weights_path), "file: '{}' does not exist.".format(weights_path)
    missing_keys, unexpected_keys = model.load_state_dict(torch.load(weights_path, map_location=device),
                                                          strict=False)

    model.eval()
    print('=================================')
    dummy_input = torch.randn(1, 3, 224, 224).to(device)
    torch.onnx.export(
        model,
        dummy_input,
        'googlenet.onnx',
        dynamic_axes={'image': {0: 'B'}, 'outputs': {0: 'B'}},
        input_names=['image'],
        output_names=['outputs'],
        opset_version=12
    )
    print('=================================')

    print('---------------------------------')
    traced_script_module = torch.jit.trace(model, dummy_input)
    traced_script_module.save("googlenet.pt")
    with torch.no_grad():
        # predict class
        output = torch.squeeze(model(img.to(device))).cpu()
        predict = torch.softmax(output, dim=0)
        predict_cla = torch.argmax(predict).numpy()

    print_res = "class: {}   prob: {:.3}".format(class_indict[str(predict_cla)],
                                                 predict[predict_cla].numpy())
    plt.title(print_res)
    print(print_res)
    plt.show()
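
Note on Code Example #12: after exporting, the ONNX graph can be sanity-checked against the PyTorch output computed above. A minimal check, assuming the onnxruntime package is installed, might be:

import numpy as np
import onnxruntime as ort

# run the exported graph on the same preprocessed image tensor
sess = ort.InferenceSession("googlenet.onnx")
onnx_out = sess.run(None, {"image": img.cpu().numpy()})[0]

# compare against the PyTorch logits computed in main()
print("max abs difference:", np.abs(onnx_out.squeeze() - output.numpy()).max())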
Code Example #13
                                              shuffle=False,
                                              num_workers=0)

# test_data_iter = iter(validate_loader)
# test_image, test_label = test_data_iter.next()

# net = torchvision.models.googlenet(num_classes=5)
# model_dict = net.state_dict()
# pretrain_model = torch.load("googlenet.pth")
# del_list = ["aux1.fc2.weight", "aux1.fc2.bias",
#             "aux2.fc2.weight", "aux2.fc2.bias",
#             "fc.weight", "fc.bias"]
# pretrain_dict = {k: v for k, v in pretrain_model.items() if k not in del_list}
# model_dict.update(pretrain_dict)
# net.load_state_dict(model_dict)
net = GoogLeNet(num_classes=5, aux_logits=True, init_weights=True)
net.to(device)
loss_function = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=0.0003)

best_acc = 0.0
save_path = './googleNet.pth'
for epoch in range(30):
    # train
    net.train()
    running_loss = 0.0
    for step, data in enumerate(train_loader, start=0):
        images, labels = data
        optimizer.zero_grad()
        logits, aux_logits2, aux_logits1 = net(images.to(device))
        loss0 = loss_function(logits, labels.to(device))
Code Example #14
    json_str = json.dumps(inverse_dict,
                          indent=4)  # write the inverted dict to class_indices.json
    with open('class_indices.json', 'w') as json_file:
        json_file.write(json_str)

    # use the image generator to read samples from the validation set validation_dir
    val_data_gen = train_image_generator.flow_from_directory(
        directory=validation_dir,
        batch_size=batch_size,
        shuffle=True,
        target_size=(im_height, im_width),
        class_mode='categorical')
    # number of validation samples
    total_val = val_data_gen.n
    model = GoogLeNet(im_height=im_height,
                      im_width=im_width,
                      class_num=5,
                      aux_logits=True)  # instantiate the model
    # model.build((batch_size, 224, 224, 3))  # when using subclass model
    model.summary()

    # train the network with the low-level Keras API
    loss_object = tf.keras.losses.CategoricalCrossentropy(
        from_logits=False)  # define the loss function (this form expects one-hot labels)
    optimizer = tf.keras.optimizers.Adam(lr=0.0001)  # define the optimizer

    train_loss = tf.keras.metrics.Mean(name='train_loss')
    train_accuracy = tf.keras.metrics.CategoricalAccuracy(
        name='train_accuracy')  # define the mean accuracy metric

    test_loss = tf.keras.metrics.Mean(name='test_loss')
    test_accuracy = tf.keras.metrics.CategoricalAccuracy(
Code Example #15
inverse_dict = dict((val, key) for key, val in class_indices.items())
# write dict into json file
json_str = json.dumps(inverse_dict, indent=4)
with open('class_indices.json', 'w') as json_file:
    json_file.write(json_str)

val_data_gen = train_image_generator.flow_from_directory(
    directory=validation_dir,
    batch_size=batch_size,
    shuffle=True,
    target_size=(im_height, im_width),
    class_mode='categorical')
total_val = val_data_gen.n

model = GoogLeNet(im_height=im_height,
                  im_width=im_width,
                  class_num=5,
                  aux_logits=True)
# model.build((batch_size, 224, 224, 3))  # when using subclass model
model.summary()

# using keras low level api for training
loss_object = tf.keras.losses.CategoricalCrossentropy(from_logits=False)
optimizer = tf.keras.optimizers.Adam(learning_rate=0.0003)

train_loss = tf.keras.metrics.Mean(name='train_loss')
train_accuracy = tf.keras.metrics.CategoricalAccuracy(name='train_accuracy')

test_loss = tf.keras.metrics.Mean(name='test_loss')
test_accuracy = tf.keras.metrics.CategoricalAccuracy(name='test_accuracy')

Code Example #16
def main():
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print(device)

    data_transform = {
        "train": transforms.Compose([transforms.RandomResizedCrop(224),
                                     transforms.RandomHorizontalFlip(),
                                     transforms.ToTensor(),
                                     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]),
        "val": transforms.Compose([transforms.Resize((224, 224)),
                                   transforms.ToTensor(),
                                   transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])}

    data_root = os.path.abspath(os.path.join(os.getcwd(), "../.."))  # get data root path
    image_path = data_root + "/data_set/flower_data/"  # flower data set path

    train_dataset = datasets.ImageFolder(root=image_path + "train",
                                         transform=data_transform["train"])
    train_num = len(train_dataset)

    # {'daisy':0, 'dandelion':1, 'roses':2, 'sunflower':3, 'tulips':4}
    flower_list = train_dataset.class_to_idx
    cla_dict = dict((val, key) for key, val in flower_list.items())
    # write dict into json file
    json_str = json.dumps(cla_dict, indent=4)
    with open('class_indices.json', 'w') as json_file:
        json_file.write(json_str)

    batch_size = 32
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=batch_size, shuffle=True,
                                               num_workers=0)

    validate_dataset = datasets.ImageFolder(root=image_path + "val",
                                            transform=data_transform["val"])
    val_num = len(validate_dataset)
    validate_loader = torch.utils.data.DataLoader(validate_dataset,
                                                  batch_size=batch_size, shuffle=False,
                                                  num_workers=0)

    # test_data_iter = iter(validate_loader)
    # test_image, test_label = test_data_iter.next()

    # net = torchvision.models.googlenet(num_classes=5)
    # model_dict = net.state_dict()
    # pretrain_model = torch.load("googlenet.pth")
    # del_list = ["aux1.fc2.weight", "aux1.fc2.bias",
    #             "aux2.fc2.weight", "aux2.fc2.bias",
    #             "fc.weight", "fc.bias"]
    # pretrain_dict = {k: v for k, v in pretrain_model.items() if k not in del_list}
    # model_dict.update(pretrain_dict)
    # net.load_state_dict(model_dict)
    net = GoogLeNet(num_classes=5, aux_logits=True, init_weights=True)
    net.to(device)
    loss_function = nn.CrossEntropyLoss()
    optimizer = optim.Adam(net.parameters(), lr=0.0003)

    best_acc = 0.0
    save_path = './googleNet.pth'
    for epoch in range(30):
        # train
        net.train()
        running_loss = 0.0
        for step, data in enumerate(train_loader, start=0):
            images, labels = data
            optimizer.zero_grad()
            logits, aux_logits2, aux_logits1 = net(images.to(device))
            loss0 = loss_function(logits, labels.to(device))
            loss1 = loss_function(aux_logits1, labels.to(device))
            loss2 = loss_function(aux_logits2, labels.to(device))
            loss = loss0 + loss1 * 0.3 + loss2 * 0.3
            loss.backward()
            optimizer.step()

            # print statistics
            running_loss += loss.item()
            # print train process
            rate = (step + 1) / len(train_loader)
            a = "*" * int(rate * 50)
            b = "." * int((1 - rate) * 50)
            print("\rtrain loss: {:^3.0f}%[{}->{}]{:.3f}".format(int(rate * 100), a, b, loss), end="")
        print()

        # validate
        net.eval()
        acc = 0.0  # accumulate accurate number / epoch
        with torch.no_grad():
            for val_data in validate_loader:
                val_images, val_labels = val_data
                outputs = net(val_images.to(device))  # in eval mode the model returns only the final output layer
                predict_y = torch.max(outputs, dim=1)[1]
                acc += (predict_y == val_labels.to(device)).sum().item()
            val_accurate = acc / val_num
            if val_accurate > best_acc:
                best_acc = val_accurate
                torch.save(net.state_dict(), save_path)
            print('[epoch %d] train_loss: %.3f  test_accuracy: %.3f' %
                  (epoch + 1, running_loss / step, val_accurate))

    print('Finished Training')
Code Example #17
def main():
    gpus = tf.config.experimental.list_physical_devices("GPU")
    if gpus:
        try:
            for gpu in gpus:
                tf.config.experimental.set_memory_growth(gpu, True)
        except RuntimeError as e:
            print(e)
            exit(-1)

    data_root = os.path.abspath(os.path.join(os.getcwd(),
                                             "../.."))  # get data root path
    image_path = os.path.join(data_root, "data_set",
                              "flower_data")  # flower data set path
    train_dir = os.path.join(image_path, "train")
    validation_dir = os.path.join(image_path, "val")
    assert os.path.exists(train_dir), "cannot find {}".format(train_dir)
    assert os.path.exists(validation_dir), "cannot find {}".format(
        validation_dir)

    # create direction for saving weights
    if not os.path.exists("save_weights"):
        os.makedirs("save_weights")

    im_height = 224
    im_width = 224
    batch_size = 32
    epochs = 30

    # class dict
    data_class = [
        cla for cla in os.listdir(train_dir)
        if os.path.isdir(os.path.join(train_dir, cla))
    ]
    class_num = len(data_class)
    class_dict = dict((value, index) for index, value in enumerate(data_class))

    # reverse value and key of dict
    inverse_dict = dict((val, key) for key, val in class_dict.items())
    # write dict into json file
    json_str = json.dumps(inverse_dict, indent=4)
    with open('class_indices.json', 'w') as json_file:
        json_file.write(json_str)

    # load train images list
    train_image_list = glob.glob(train_dir + "/*/*.jpg")
    random.shuffle(train_image_list)
    train_num = len(train_image_list)
    assert train_num > 0, "cannot find any .jpg file in {}".format(train_dir)
    train_label_list = [
        class_dict[path.split(os.path.sep)[-2]] for path in train_image_list
    ]

    # load validation images list
    val_image_list = glob.glob(validation_dir + "/*/*.jpg")
    random.shuffle(val_image_list)
    val_num = len(val_image_list)
    assert val_num > 0, "cannot find any .jpg file in {}".format(
        validation_dir)
    val_label_list = [
        class_dict[path.split(os.path.sep)[-2]] for path in val_image_list
    ]

    print("using {} images for training, {} images for validation.".format(
        train_num, val_num))

    def process_train_img(img_path, label):
        label = tf.one_hot(label, depth=class_num)
        image = tf.io.read_file(img_path)
        image = tf.image.decode_jpeg(image)
        image = tf.image.convert_image_dtype(image, tf.float32)
        image = tf.image.resize(image, [im_height, im_width])
        image = tf.image.random_flip_left_right(image)
        image = (image - 0.5) / 0.5
        return image, label

    def process_val_img(img_path, label):
        label = tf.one_hot(label, depth=class_num)
        image = tf.io.read_file(img_path)
        image = tf.image.decode_jpeg(image)
        image = tf.image.convert_image_dtype(image, tf.float32)
        image = tf.image.resize(image, [im_height, im_width])
        image = (image - 0.5) / 0.5
        return image, label

    AUTOTUNE = tf.data.experimental.AUTOTUNE

    # load train dataset
    train_dataset = tf.data.Dataset.from_tensor_slices(
        (train_image_list, train_label_list))
    train_dataset = train_dataset.shuffle(buffer_size=train_num)\
                                 .map(process_train_img, num_parallel_calls=AUTOTUNE)\
                                 .repeat().batch(batch_size).prefetch(AUTOTUNE)

    # load validation dataset
    val_dataset = tf.data.Dataset.from_tensor_slices(
        (val_image_list, val_label_list))
    val_dataset = val_dataset.map(process_val_img, num_parallel_calls=tf.data.experimental.AUTOTUNE)\
                             .repeat().batch(batch_size)

    # instantiate the model
    model = GoogLeNet(im_height=224,
                      im_width=224,
                      class_num=5,
                      aux_logits=True)
    model.summary()

    # using keras low level api for training
    loss_object = tf.keras.losses.CategoricalCrossentropy(from_logits=False)
    optimizer = tf.keras.optimizers.Adam(learning_rate=0.0003)

    train_loss = tf.keras.metrics.Mean(name='train_loss')
    train_accuracy = tf.keras.metrics.CategoricalAccuracy(
        name='train_accuracy')

    test_loss = tf.keras.metrics.Mean(name='test_loss')
    test_accuracy = tf.keras.metrics.CategoricalAccuracy(name='test_accuracy')

    @tf.function
    def train_step(images, labels):
        with tf.GradientTape() as tape:
            aux1, aux2, output = model(images, training=True)
            loss1 = loss_object(labels, aux1)
            loss2 = loss_object(labels, aux2)
            loss3 = loss_object(labels, output)
            loss = loss1 * 0.3 + loss2 * 0.3 + loss3
        gradients = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))

        train_loss(loss)
        train_accuracy(labels, output)

    @tf.function
    def test_step(images, labels):
        _, _, output = model(images, training=False)
        t_loss = loss_object(labels, output)

        test_loss(t_loss)
        test_accuracy(labels, output)

    best_test_loss = float('inf')
    train_step_num = train_num // batch_size
    val_step_num = val_num // batch_size
    for epoch in range(1, epochs + 1):
        train_loss.reset_states()  # clear history info
        train_accuracy.reset_states()  # clear history info
        test_loss.reset_states()  # clear history info
        test_accuracy.reset_states()  # clear history info

        t1 = time.perf_counter()
        for index, (images, labels) in enumerate(train_dataset):
            train_step(images, labels)
            if index + 1 == train_step_num:
                break
        print(time.perf_counter() - t1)

        for index, (images, labels) in enumerate(val_dataset):
            test_step(images, labels)
            if index + 1 == val_step_num:
                break

        template = 'Epoch {}, Loss: {}, Accuracy: {}, Test Loss: {}, Test Accuracy: {}'
        print(
            template.format(epoch, train_loss.result(),
                            train_accuracy.result() * 100, test_loss.result(),
                            test_accuracy.result() * 100))
        if test_loss.result() < best_test_loss:
            best_test_loss = test_loss.result()
            model.save_weights("./save_weights/myGoogLeNet.ckpt",
                               save_format='tf')
Code Example #18
img = img.resize((im_width, im_height))
plt.imshow(img)

# scaling pixel value and normalize
img = ((np.array(img) / 255.) - 0.5) / 0.5

# Add the image to a batch where it's the only member.
img = (np.expand_dims(img, 0))

# read class_indict
try:
    json_file = open('./class_indices.json', 'r')
    class_indict = json.load(json_file)
except Exception as e:
    print(e)
    exit(-1)

model = GoogLeNet(class_num=5, aux_logits=False)
# During training the model was built with the Sequential and Functional APIs, so it is a static graph; at validation time the auxiliary output branches cannot be toggled dynamically, but the model built for inference does not contain the auxiliary branches
model.summary()

# two ways to load weights: for the h5 format the weights must be loaded by layer name; for the ckpt format no flag is needed, parameters are assigned by name automatically
# model.load_weights("./save_weights/myGoogLenet.h5", by_name=True)  # h5 format
model.load_weights("./save_weights/myGoogLeNet.ckpt")  # ckpt format
result = model.predict(img)
# the prediction should really be:
# result = np.squeeze(model.predict(img))
predict_class = np.argmax(result)
print(class_indict[str(predict_class)])
plt.show()
Code Example #19
# We again use the ResNet model introduced in the earlier project, with simple data augmentation and MixUp turned on, to see how well the SMOTE method improves the imbalanced training-data distribution.
#

# In[1]:

# model training
from model import GoogLeNet
from train import train

if __name__ == '__main__':
    # train on the imbalanced dataset
    # model = GoogLeNet("GoogLeNet", num_classes=3)
    # train(model, augment='userDef')  # training with custom data augmentation
    # model = GoogLeNet("GoogLeNet", num_classes=3)
    # train(model, augment='samplePairing')  # use SamplePairing
    model = GoogLeNet("GoogLeNet", num_classes=3)
    train(model, augment='mixup')  # use mixup

    # train on the balanced dataset
    # model = GoogLeNet("GoogLeNet", num_classes=3)
    # train(model, augment='userDef', bal='balance')  # training with custom data augmentation
    # model = GoogLeNet("GoogLeNet", num_classes=3)
    # train(model, augment='samplePairing', bal='balance')  # use SamplePairing
    model = GoogLeNet("GoogLeNet", num_classes=3)
    train(model, augment='mixup', bal='balance')  # use mixup

# In[32]:

# inspect the training results (loss)
import numpy as np
import matplotlib.pyplot as plt
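
Note on Code Example #19: the last notebook cell above shows only the imports; the plotting code itself is cut off. A generic sketch of such a cell, assuming train() wrote the per-epoch losses to a NumPy file (loss_log.npy is a hypothetical name), could be:

losses = np.load('loss_log.npy')  # hypothetical log file written by train()
plt.plot(np.arange(1, len(losses) + 1), losses)
plt.xlabel('epoch')
plt.ylabel('training loss')
plt.show()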
Code Example #20
File: predict.py    Project: niuxinzan/tf20_dl
    im_width = 224
    # read the image
    img = Image.open('daisy_test.jpg')
    img = img.resize((im_height, im_width))
    plt.imshow(img)
    plt.show()
    # preprocess the original image
    img = (np.array(img) / 255. - 0.5) / 0.5
    # when there is only a single image, a batch dimension must be added
    img = np.expand_dims(img, 0)
    # read the class label file
    try:
        json_file = open('class_indices.json', 'r')
        class_indict = json.load(json_file)
    except Exception as e:
        print(e)
        exit(-1)
    # build the network
    # for a model saved as a SavedModel, use the following:
    # model = tf.keras.models.load_model('./saved_model/')
    # for a model saved as weights, use the following method:
    model = GoogLeNet(class_num=5,
                      im_height=im_height,
                      im_width=im_width,
                      aux_logits=False)
    model.load_weights('save_weights/myGoogLeNet.h5', by_name=True)  # load the model weights
    model.summary()
    result = model.predict(img)
    predict_class = np.argmax(result)
    print('Predicted class: {0}'.format(class_indict[str(predict_class)]))
Code Example #21
def main():
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print("using {} device.".format(device))

    data_transform = {
        "train": transforms.Compose([transforms.RandomResizedCrop(224),
                                     transforms.RandomHorizontalFlip(),
                                     transforms.ToTensor(),
                                     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]),
        "val": transforms.Compose([transforms.Resize((224, 224)),
                                   transforms.ToTensor(),
                                   transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])}

    data_root = os.path.abspath(os.path.join(os.getcwd(), "../.."))  # get data root path
    image_path = os.path.join(data_root, "data_set", "flower_data")  # flower data set path
    assert os.path.exists(image_path), "{} path does not exist.".format(image_path)
    train_dataset = datasets.ImageFolder(root=os.path.join(image_path, "train"),
                                         transform=data_transform["train"])
    train_num = len(train_dataset)

    # {'daisy':0, 'dandelion':1, 'roses':2, 'sunflower':3, 'tulips':4}
    flower_list = train_dataset.class_to_idx
    cla_dict = dict((val, key) for key, val in flower_list.items())
    # write dict into json file
    json_str = json.dumps(cla_dict, indent=4)
    with open('class_indices.json', 'w') as json_file:
        json_file.write(json_str)

    batch_size = 32
    nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8])  # number of workers
    print('Using {} dataloader workers every process'.format(nw))

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=batch_size, shuffle=True,
                                               num_workers=nw)

    validate_dataset = datasets.ImageFolder(root=os.path.join(image_path, "val"),
                                            transform=data_transform["val"])
    val_num = len(validate_dataset)
    validate_loader = torch.utils.data.DataLoader(validate_dataset,
                                                  batch_size=batch_size, shuffle=False,
                                                  num_workers=nw)

    print("using {} images for training, {} images fot validation.".format(train_num,
                                                                           val_num))

    # test_data_iter = iter(validate_loader)
    # test_image, test_label = test_data_iter.next()

    # net = torchvision.models.googlenet(num_classes=5)
    # model_dict = net.state_dict()
    # pretrain_model = torch.load("googlenet.pth")
    # del_list = ["aux1.fc2.weight", "aux1.fc2.bias",
    #             "aux2.fc2.weight", "aux2.fc2.bias",
    #             "fc.weight", "fc.bias"]
    # pretrain_dict = {k: v for k, v in pretrain_model.items() if k not in del_list}
    # model_dict.update(pretrain_dict)
    # net.load_state_dict(model_dict)
    net = GoogLeNet(num_classes=5, aux_logits=True, init_weights=True)
    net.to(device)
    loss_function = nn.CrossEntropyLoss()
    optimizer = optim.Adam(net.parameters(), lr=0.0003)

    epochs = 30
    best_acc = 0.0
    save_path = './googleNet.pth'
    train_steps = len(train_loader)
    for epoch in range(epochs):
        # train
        net.train()
        running_loss = 0.0
        train_bar = tqdm(train_loader)
        for step, data in enumerate(train_bar):
            images, labels = data
            optimizer.zero_grad()
            logits, aux_logits2, aux_logits1 = net(images.to(device))
            loss0 = loss_function(logits, labels.to(device))
            loss1 = loss_function(aux_logits1, labels.to(device))
            loss2 = loss_function(aux_logits2, labels.to(device))
            loss = loss0 + loss1 * 0.3 + loss2 * 0.3
            loss.backward()
            optimizer.step()

            # print statistics
            running_loss += loss.item()

            train_bar.desc = "train epoch[{}/{}] loss:{:.3f}".format(epoch + 1,
                                                                     epochs,
                                                                     loss)

        # validate
        net.eval()
        acc = 0.0  # accumulate accurate number / epoch
        with torch.no_grad():
            val_bar = tqdm(validate_loader, colour='green')
            for val_data in val_bar:
                val_images, val_labels = val_data
                outputs = net(val_images.to(device))  # in eval mode the model returns only the final output layer
                predict_y = torch.max(outputs, dim=1)[1]
                acc += torch.eq(predict_y, val_labels.to(device)).sum().item()

        val_accurate = acc / val_num
        print('[epoch %d] train_loss: %.3f  val_accuracy: %.3f' %
              (epoch + 1, running_loss / train_steps, val_accurate))

        if val_accurate > best_acc:
            best_acc = val_accurate
            torch.save(net.state_dict(), save_path)

    print('Finished Training')
Code Example #22
def main():
    data_root = os.path.abspath(os.path.join(os.getcwd(), "../.."))  # get data root path
    image_path = data_root + "/data_set/flower_data/"  # flower data set path
    train_dir = image_path + "train"
    validation_dir = image_path + "val"

    # create direction for saving weights
    if not os.path.exists("save_weights"):
        os.makedirs("save_weights")

    im_height = 224
    im_width = 224
    batch_size = 32
    epochs = 30

    def pre_function(img):
        # img = im.open('test.jpg')
        # img = np.array(img).astype(np.float32)
        img = img / 255.
        img = (img - 0.5) * 2.0

        return img

    # data generator with data augmentation
    train_image_generator = ImageDataGenerator(preprocessing_function=pre_function,
                                               horizontal_flip=True)
    validation_image_generator = ImageDataGenerator(preprocessing_function=pre_function)

    train_data_gen = train_image_generator.flow_from_directory(directory=train_dir,
                                                               batch_size=batch_size,
                                                               shuffle=True,
                                                               target_size=(im_height, im_width),
                                                               class_mode='categorical')
    total_train = train_data_gen.n

    # get class dict
    class_indices = train_data_gen.class_indices

    # transform value and key of dict
    inverse_dict = dict((val, key) for key, val in class_indices.items())
    # write dict into json file
    json_str = json.dumps(inverse_dict, indent=4)
    with open('class_indices.json', 'w') as json_file:
        json_file.write(json_str)

    val_data_gen = validation_image_generator.flow_from_directory(directory=validation_dir,
                                                                  batch_size=batch_size,
                                                                  shuffle=False,
                                                                  target_size=(im_height, im_width),
                                                                  class_mode='categorical')
    total_val = val_data_gen.n

    model = GoogLeNet(im_height=im_height, im_width=im_width, class_num=5, aux_logits=True)
    # model.build((batch_size, 224, 224, 3))  # when using subclass model
    model.summary()

    # using keras low level api for training
    loss_object = tf.keras.losses.CategoricalCrossentropy(from_logits=False)
    optimizer = tf.keras.optimizers.Adam(learning_rate=0.0003)

    train_loss = tf.keras.metrics.Mean(name='train_loss')
    train_accuracy = tf.keras.metrics.CategoricalAccuracy(name='train_accuracy')

    test_loss = tf.keras.metrics.Mean(name='test_loss')
    test_accuracy = tf.keras.metrics.CategoricalAccuracy(name='test_accuracy')

    @tf.function
    def train_step(images, labels):
        with tf.GradientTape() as tape:
            aux1, aux2, output = model(images, training=True)
            loss1 = loss_object(labels, aux1)
            loss2 = loss_object(labels, aux2)
            loss3 = loss_object(labels, output)
            loss = loss1 * 0.3 + loss2 * 0.3 + loss3
        gradients = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))

        train_loss(loss)
        train_accuracy(labels, output)

    @tf.function
    def test_step(images, labels):
        _, _, output = model(images, training=False)
        t_loss = loss_object(labels, output)

        test_loss(t_loss)
        test_accuracy(labels, output)

    best_test_loss = float('inf')
    for epoch in range(1, epochs + 1):
        train_loss.reset_states()  # clear history info
        train_accuracy.reset_states()  # clear history info
        test_loss.reset_states()  # clear history info
        test_accuracy.reset_states()  # clear history info

        for step in range(total_train // batch_size):
            images, labels = next(train_data_gen)
            train_step(images, labels)

        for step in range(total_val // batch_size):
            test_images, test_labels = next(val_data_gen)
            test_step(test_images, test_labels)

        template = 'Epoch {}, Loss: {}, Accuracy: {}, Test Loss: {}, Test Accuracy: {}'
        print(template.format(epoch,
                              train_loss.result(),
                              train_accuracy.result() * 100,
                              test_loss.result(),
                              test_accuracy.result() * 100))
        if test_loss.result() < best_test_loss:
            best_test_loss = test_loss.result()
            model.save_weights("./save_weights/myGoogLeNet.h5")
Code Example #23
AUTOTUNE = tf.data.experimental.AUTOTUNE

# load train dataset
train_dataset = tf.data.Dataset.from_tensor_slices((train_image_list, train_label_list))
train_dataset = train_dataset.shuffle(buffer_size=train_num)\
                             .map(process_train_img, num_parallel_calls=AUTOTUNE)\
                             .repeat().batch(batch_size).prefetch(AUTOTUNE)

# load validation dataset
val_dataset = tf.data.Dataset.from_tensor_slices((val_image_list, val_label_list))
val_dataset = val_dataset.map(process_val_img, num_parallel_calls=tf.data.experimental.AUTOTUNE)\
                         .repeat().batch(batch_size)

# 实例化模型
model = GoogLeNet(im_height=224, im_width=224, class_num=5, aux_logits=True)
model.summary()

# using keras low level api for training
loss_object = tf.keras.losses.CategoricalCrossentropy(from_logits=False)
optimizer = tf.keras.optimizers.Adam(learning_rate=0.0003)

train_loss = tf.keras.metrics.Mean(name='train_loss')
train_accuracy = tf.keras.metrics.CategoricalAccuracy(name='train_accuracy')

test_loss = tf.keras.metrics.Mean(name='test_loss')
test_accuracy = tf.keras.metrics.CategoricalAccuracy(name='test_accuracy')


@tf.function
def train_step(images, labels):
Code Example #24
im_width = 224

# load image
img = Image.open("../yjh.jpg")
# resize image to 224x224
img = img.resize((im_width, im_height))
plt.imshow(img)

# scaling pixel value and normalize
img = ((np.array(img) / 255.) - 0.5) / 0.5

# Add the image to a batch where it's the only member.
img = (np.expand_dims(img, 0))

# read class_indict
try:
    json_file = open('./class_indices.json', 'r')
    class_indict = json.load(json_file)
except Exception as e:
    print(e)
    exit(-1)

model = GoogLeNet(class_num=5, aux_logits=False)
model.summary()
model.load_weights("./save_weights/myGoogLenet.h5", by_name=True)  # h5 format
# model.load_weights("./save_weights/myGoogLeNet.ckpt")  # ckpt format
result = model.predict(img)
predict_class = np.argmax(result)
print(class_indict[str(predict_class)])
plt.show()
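
Note on Code Example #24: the snippet prints only the arg-max class. To also display the per-class probabilities, a small extension using the result and class_indict already computed above (and assuming the model ends in a softmax layer, as the other Keras examples on this page do) is:

probs = np.squeeze(result)
for idx in np.argsort(probs)[::-1]:
    print("class: {:12s} prob: {:.3f}".format(class_indict[str(idx)], probs[idx]))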