예제 #1
0
import tensorflow as tf
import dataset_util
import numpy as np

# Smoke-test script: classify one VOC2012 batch with a stock ImageNet
# ResNet50, then exit. The feature-extraction code after sys.exit(0) is
# intentionally disabled debug code.
model = tf.keras.applications.ResNet50(include_top=True, weights="imagenet")
# Fix: Model.summary() prints the table itself and returns None, so the
# previous print(model.summary()) emitted a stray "None" line.
model.summary()

ds = dataset_util.VOC2012_Dataset("dataset/VOC2012", "/train_list.txt", 2)
images, gt_boxes, gt_classes, gt_masks, img_sizes = ds.next_batch()

# Top-1 ImageNet class index for each image in the batch.
print(np.argmax(model(images), axis=1))
import sys
sys.exit(0)  # everything below is unreachable by design

# Disabled: build a sub-model exposing two intermediate conv4 block
# outputs of the ResNet50 backbone and print them for the same batch.
last_layer = tf.keras.models.Model(
    inputs=model.input,
    outputs=[
        model.get_layer('conv4_block6_out').output,
        model.get_layer('conv4_block5_out').output
    ])
res = last_layer(images, training=True)
print(res)
예제 #2
0
if __name__ == "__main__":
    # Anchors for a single 16x16 C5 feature map with area 128^2, one per
    # configured aspect ratio per cell.
    anchors = anchor_utils.get_all_anchors_for_c5([16, 16], 128**2,
                                                  config.ANCHOR_RATIOS)

    # RPN head (3x3 conv window) on top of a ResNet34 FPN backbone.
    backbone2 = backbone.Resnet34_FPN()
    model = rpn.RPN(backbone2, 3)

    # NOTE(review): `lr` and `decay` are legacy tf.keras optimizer argument
    # names (current spelling is `learning_rate`; `decay` was removed in
    # newer Keras) -- confirm against the pinned TF version before changing.
    optimizer = tf.keras.optimizers.SGD(lr=0.0005, momentum=0.9, decay=1e-4)
    checkpoint = tf.train.Checkpoint(optimizer=optimizer,
                                     net=model,
                                     step=tf.Variable(1))
    manager = tf.train.CheckpointManager(checkpoint,
                                         config.WEIGHTS_DIR,
                                         max_to_keep=4)

    # Train/valid readers, batch size 20 (commented lines are local-path,
    # small-batch variants for debugging).
    train_dataset = dataset_util.VOC2012_Dataset("DATASET/VOC2012/VOC2012",
                                                 "/train_list.txt", 20)
    # train_dataset = dataset_util.VOC2012_Dataset("dataset/VOC2012", "/train_list.txt", 2)
    valid_dataset = dataset_util.VOC2012_Dataset("DATASET/VOC2012/VOC2012",
                                                 "/valid_list.txt", 20)
    # valid_dataset = dataset_util.VOC2012_Dataset("dataset/VOC2012", "/valid_list.txt", 2)

    if manager.latest_checkpoint:
        print("Restoring...", manager.latest_checkpoint)
        # Run one full training step BEFORE restoring: the forward/backward
        # pass forces creation of all model variables and optimizer slot
        # variables so that restore(...).assert_consumed() can match every
        # checkpointed value. The weight update it applies is then
        # overwritten by the restore.
        images, gt_boxes, gt_classes, gt_masks, img_sizes = train_dataset.next_batch(
        )
        rpn_classes, rpn_bbox_deltas = anchor_utils.get_rpn_classes_and_bbox_deltas(
            len(images), anchors, gt_boxes)
        l1, l2 = train_step(
            model, optimizer, [images, img_sizes],
            [gt_boxes, gt_classes, img_sizes, rpn_classes, rpn_bbox_deltas])
        checkpoint.restore(manager.latest_checkpoint).assert_consumed()
예제 #3
0
import rpn
import backbone
import dataset_util
import anchor_utils
import tensorflow as tf
import config
import mask_rcnn
import numpy as np
import image_util
import metrics

# Inference/evaluation script: build Mask R-CNN and restore weights.
# Validation split of VOC2012, batches of 2.
ds = dataset_util.VOC2012_Dataset("dataset/VOC2012", "/valid_list.txt", 2)
#ds = dataset_util.VOC2012_Dataset("dataset/TEST", "/test_list.txt", 2)
# Anchor boxes for all configured scales/ratios over the fixed image size.
anchors = anchor_utils.get_all_anchors(config.IMAGE_SIZE, config.ANCHOR_SCALES,
                                       config.ANCHOR_RATIOS)

# Mask R-CNN = ResNet34-FPN backbone -> RPN (3x3 window) -> heads.
backbone2 = backbone.Resnet34_FPN()
rpn2 = rpn.RPN(backbone2, 3)
model = mask_rcnn.Mask_RCNN(rpn2, anchors, len(config.CLASSES))

# Checkpoint tracks only the network (no optimizer) -- inference-only use.
checkpoint = tf.train.Checkpoint(net=model, step=tf.Variable(1))
manager = tf.train.CheckpointManager(checkpoint,
                                     config.WEIGHTS_DIR,
                                     max_to_keep=4)
if manager.latest_checkpoint:
    print("Restoring...", manager.latest_checkpoint)
    # Dummy forward pass (random image + fake original size) so all
    # variables are created before the checkpoint is restored.
    # NOTE(review): the matching checkpoint.restore(...) call is not
    # visible in this chunk -- presumably it follows; verify.
    model([
        np.random.rand(1, config.IMAGE_SIZE[0], config.IMAGE_SIZE[1], 3),
        np.array([[500, 500]])
    ],
          training=False)
예제 #4
0
    # Overlay the second interleaved softmax channel (values at indices
    # 1, 4, 7, ...) on the input image and save it as "<name>-2.png".
    # assumes softmax is a flat array with 3 interleaved class channels
    # and image is 0-255 float -- TODO confirm with the (unseen) caller.
    plt.imshow(image/255)
    a = np.reshape(softmax[1::3], shape)
    a = skt.resize(a, (512, 512))  # upsample heatmap to display resolution
    plt.imshow(a, cmap='Greens', interpolation='nearest', alpha=0.4)
    plt.savefig(name + "-2.png")

    # Same overlay for the third channel (indices 2, 5, 8, ...),
    # saved as "<name>-3.png".
    plt.imshow(image/255)
    a = np.reshape(softmax[2::3], shape)
    a = skt.resize(a, (512, 512))
    plt.imshow(a, cmap='Blues', interpolation='nearest', alpha=0.4)
    plt.savefig(name + "-3.png")

if __name__ == "__main__":
    #tf.executing_eagerly()
    #train_dataset = dataset_util.VOC2012_Dataset("DATASET/VOC2012/VOC2012", "/train_list.txt", 2)
    # Local VOC2012 train/valid readers, batch size 5 (commented lines are
    # the server-path variants).
    train_dataset = dataset_util.VOC2012_Dataset("dataset/VOC2012", "/train_list.txt", 5)
    #valid_dataset = dataset_util.VOC2012_Dataset("DATASET/VOC2012/VOC2012", "/valid_list.txt", 2)
    valid_dataset = dataset_util.VOC2012_Dataset("dataset/VOC2012", "/valid_list.txt", 5)
    anchors = anchor_utils.get_all_anchors(config.IMAGE_SIZE, config.ANCHOR_SCALES, config.ANCHOR_RATIOS)

    # Mask R-CNN on a ResNet34-FPN backbone with a 3x3-window RPN.
    backbone2 = backbone.Resnet34_FPN()
    rpn2 = rpn.RPN(backbone2, 3)
    model = mask_rcnn.Mask_RCNN(rpn2, anchors, len(config.CLASSES))

    # Network-only checkpoint (no optimizer state tracked here).
    checkpoint = tf.train.Checkpoint(net=model, step=tf.Variable(1))
    manager = tf.train.CheckpointManager(checkpoint, config.WEIGHTS_DIR, max_to_keep=4)
    if manager.latest_checkpoint:
        print("Restoring...", manager.latest_checkpoint)
        # One inference pass on a real batch creates every variable so the
        # restore below can match the checkpoint's contents.
        images, gt_boxes, gt_classes, gt_masks, img_sizes = valid_dataset.next_batch()
        model([images, img_sizes], training=False)
        checkpoint.restore(manager.latest_checkpoint)
예제 #5
0
        anchors_scales.append(anchors_scales_i)

    # NOTE(review): the scale combinations accumulated above are discarded --
    # this hard-coded shortlist overwrites anchors_scales. Presumably an
    # intentional narrowing of the search space; confirm before cleanup.
    anchors_scales = [
        [90, 140, 200, 250, 320],
        [80, 130, 180, 240, 300],
        [85, 150, 210, 260, 340],
        [95, 140, 210, 280, 350],
        [90, 135, 185, 230, 320]
    ]

    #best [95, 145, 200, 260, 350] 44.07723995880536
    # One anchor set per candidate scale list; sums[i] accumulates how many
    # anchors end up labelled positive under configuration i.
    anchors = [get_all_anchors((512, 512), scale, [(1, 1), (1, 2), (2, 1)]) for scale in anchors_scales]
    sums = np.zeros(len(anchors_scales))

    batch_size = 20
    ds = dataset_util.VOC2012_Dataset("DATASET/VOC2012/VOC2012", "/all_list.txt", batch_size)
    #ds = dataset_util.VOC2012_Dataset("dataset/VOC2012", "/train_list.txt", batch_size)

    for j in range(ds.total_batches):
        for i in range(len(anchors)):
            # NOTE(review): next_batch() is called inside the per-config
            # loop, so within one outer iteration each anchor configuration
            # is scored on a *different* batch -- verify this is intended.
            data1, data2_2, data3, d5, d4 = ds.next_batch()
            data2_2, data3_2 = get_rpn_classes_and_bbox_deltas(len(data1), anchors[i], data2_2)

            # Count anchors labelled positive (class == 1) for this config.
            ind = np.where(data2_2 == 1)[0]
            sums[i] += len(ind)

        # Running totals and per-image averages so far.
        print("batch", j, sums, sums / ((j+1) * batch_size))

    # Final average positive-anchor count per image for each candidate.
    for i in range(len(anchors_scales)):
        print(anchors_scales[i], sums[i] / len(ds.data_names))
예제 #6
0
                    # NOTE(review): stop training once `bigger_map_in_row`
                    # reaches a threshold (counter maintained by code not
                    # visible in this chunk) -- early-stopping exit.
                    print("{}. bigger map in row, exiting".format(bigger_map_in_row))
                    sys.exit(0)
if __name__ == "__main__":
    #tf.executing_eagerly()
    # Anchors for all configured scales/ratios over the fixed image size.
    anchors = anchor_utils.get_all_anchors(config.IMAGE_SIZE, config.ANCHOR_SCALES, config.ANCHOR_RATIOS)

    # Mask R-CNN on a ResNet34-FPN backbone with a 3x3-window RPN.
    backbone2 = backbone.Resnet34_FPN()
    rpn2 = rpn.RPN(backbone2, 3)
    model = mask_rcnn.Mask_RCNN(rpn2, anchors, len(config.CLASSES))

    # NOTE(review): `lr` is the legacy name for `learning_rate` in tf.keras
    # optimizers -- confirm against the pinned TF version before changing.
    optimizer = tf.keras.optimizers.SGD(lr=0.004, momentum=0.9)
    checkpoint = tf.train.Checkpoint(optimizer=optimizer, net=model, step=tf.Variable(1))
    manager = tf.train.CheckpointManager(checkpoint, config.WEIGHTS_DIR, max_to_keep=4)

    # Training reader (batch 12) plus two smaller-batch readers used only
    # for mAP evaluation on the train and valid splits.
    train_dataset = dataset_util.VOC2012_Dataset("DATASET/VOC2012/VOC2012", "/train_list.txt", 12)
    td_map = dataset_util.VOC2012_Dataset("DATASET/VOC2012/VOC2012", "/train_list.txt", 4)
    vd_map = dataset_util.VOC2012_Dataset("DATASET/VOC2012/VOC2012", "/valid_list.txt", 4)

    '''train_dataset = dataset_util.AOLP_Dataset("DATASET/AOLP", "/train_list.txt", 10)
    td_map = dataset_util.AOLP_Dataset("DATASET/AOLP", "/train_list.txt", 4)
    vd_map = dataset_util.AOLP_Dataset("DATASET/AOLP", "/valid_list.txt", 4)'''

    if manager.latest_checkpoint:
        print("Restoring...", manager.latest_checkpoint)
        # Run one full training step BEFORE restoring so that every model
        # variable and optimizer slot variable exists; only then can
        # restore(...).assert_consumed() match the whole checkpoint. The
        # update applied here is immediately replaced by the restore.
        images, gt_boxes, gt_classes, gt_masks, img_sizes = train_dataset.next_batch()
        rpn_classes, rpn_bbox_deltas = anchor_utils.get_rpn_classes_and_bbox_deltas(len(images), anchors, gt_boxes)
        l1, l2, l3, l4, l5 = train_step(model, optimizer, [images, img_sizes], [gt_boxes, gt_classes, gt_masks, img_sizes, rpn_classes, rpn_bbox_deltas])
        checkpoint.restore(manager.latest_checkpoint).assert_consumed()

    # Baseline mAP on the training split before the (unseen) training loop.
    mask_map, box_map = calculate_map(td_map, model)