Example No. 1
def valid(model, dataset, anchors):
    print("*" * 10, "STARTING VALIDATION", "*" * 10)

    valid_losses = np.zeros(2)

    for _ in range(dataset.total_batches):
        images, gt_boxes, gt_classes, gt_masks, img_sizes = dataset.next_batch()
        gt_rpn_classes, gt_rpn_bbox_deltas = anchor_utils.get_rpn_classes_and_bbox_deltas(
            len(images), anchors, gt_boxes)

        data = [images, img_sizes]
        rpn_fg_bg_softmaxes, rpn_bbox_deltas = model(data, training=False)

        rpn_object_loss = losses.rpn_object_loss(gt_rpn_classes,
                                                 rpn_fg_bg_softmaxes)
        rpn_bbox_loss = losses.rpn_bbox_loss(gt_rpn_classes,
                                             gt_rpn_bbox_deltas,
                                             rpn_bbox_deltas)

        valid_losses += [rpn_object_loss, rpn_bbox_loss]

    valid_losses /= dataset.total_batches
    print("*" * 15, "VALID", "*" * 15)
    print("rpn_cls_loss={}, rpn_bbox_loss={}".format(*valid_losses))
    print("*" * 40)
    return valid_losses
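
The two loss helpers used above come from the project's losses module and are not shown in this example. A minimal sketch of what they plausibly compute, assuming get_rpn_classes_and_bbox_deltas labels each anchor 1 (foreground), 0 (background) or -1 (ignored):

import tensorflow as tf

def rpn_object_loss(gt_rpn_classes, rpn_fg_bg_softmaxes):
    # Foreground/background cross-entropy, skipping anchors labeled -1.
    used = tf.where(gt_rpn_classes >= 0)             # [N, 2] (batch, anchor) indices
    labels = tf.cast(tf.gather_nd(gt_rpn_classes, used), tf.int32)
    probs = tf.gather_nd(rpn_fg_bg_softmaxes, used)  # [N, 2] softmax outputs
    return tf.reduce_mean(
        tf.keras.losses.sparse_categorical_crossentropy(labels, probs))

def rpn_bbox_loss(gt_rpn_classes, gt_rpn_bbox_deltas, rpn_bbox_deltas):
    # Huber (smooth-L1) regression, penalized on foreground anchors only.
    fg = tf.where(tf.equal(gt_rpn_classes, 1))
    gt_deltas = tf.gather_nd(gt_rpn_bbox_deltas, fg)  # [M, 4]
    pred_deltas = tf.gather_nd(rpn_bbox_deltas, fg)   # [M, 4]
    return tf.reduce_mean(tf.keras.losses.huber(gt_deltas, pred_deltas))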
Example No. 2
def train(num_epochs, optimizer, anchors, train_dataset, valid_dataset):
    print(optimizer.learning_rate)
    # tf.executing_eagerly()

    best_loss = float("inf")  # lowest validation loss seen so far
    bigger_loss_in_row = 0  # consecutive validations without improvement
    for epoch in range(1, num_epochs + 1):
        print("Epoch", epoch)
        epoch_losses = np.zeros(2)
        for i in range(1, train_dataset.total_batches + 1):
            images, gt_boxes, gt_classes, gt_masks, img_sizes = train_dataset.next_batch()
            gt_rpn_classes, gt_rpn_bbox_deltas = anchor_utils.get_rpn_classes_and_bbox_deltas(
                len(images), anchors, gt_boxes)
            l1, l2 = train_step(model, optimizer, [images, img_sizes], [
                gt_boxes, gt_classes, img_sizes, gt_rpn_classes,
                gt_rpn_bbox_deltas
            ])

            l1, l2 = tf.keras.backend.eval(l1), tf.keras.backend.eval(l2)
            epoch_losses += [l1, l2]

            if i % 10 == 0:
                print("Iter {}: rpn_cls_loss={}, rpn_bbox_loss={}".format(
                    i, l1, l2))

        epoch_losses /= train_dataset.total_batches
        print("*" * 50)
        print("Epoch {}: rpn_cls_loss={}, rpn_bbox_loss={}".format(
            epoch, *epoch_losses))
        print("*" * 50)
        with open(config.TRAIN_LOSSES_FILE, "a+") as f1:
            f1.write("{} {}\n".format(*epoch_losses))

        if epoch % 10 == 0:
            checkpoint.step.assign_add(1)
            manager.save()

            valid_losses = valid(model, valid_dataset, anchors)
            valid_loss = np.sum(valid_losses)

            with open(config.VALID_LOSSES_FILE, "a+") as f2:
                f2.write("{} {}\n".format(*valid_losses))

            if valid_loss < best_loss:
                best_loss = valid_loss
                bigger_loss_in_row = 0
            else:
                bigger_loss_in_row += 1

                if bigger_loss_in_row == 1000:
                    print("{} validations without improvement, exiting".format(
                        bigger_loss_in_row))
                    sys.exit(0)
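
train_step is referenced above but not defined in this example. A plausible sketch under tf.GradientTape, assuming the model and losses module behave as in Example No. 1:

@tf.function
def train_step(model, optimizer, data, ground_truth):
    # gt_boxes, gt_classes and img_sizes may feed other heads elsewhere;
    # only the RPN targets are used in this sketch.
    _, _, _, gt_rpn_classes, gt_rpn_bbox_deltas = ground_truth
    with tf.GradientTape() as tape:
        rpn_fg_bg_softmaxes, rpn_bbox_deltas = model(data, training=True)
        rpn_object_loss = losses.rpn_object_loss(gt_rpn_classes, rpn_fg_bg_softmaxes)
        rpn_bbox_loss = losses.rpn_bbox_loss(gt_rpn_classes, gt_rpn_bbox_deltas,
                                             rpn_bbox_deltas)
        total_loss = rpn_object_loss + rpn_bbox_loss
    gradients = tape.gradient(total_loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    return rpn_object_loss, rpn_bbox_loss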
Example No. 3
def train(num_epochs, optimizer, anchors, train_dataset, td_map, vd_map):
    best_map = float("-inf")  # highest validation mask mAP seen so far (higher is better)
    worse_map_in_row = 0  # consecutive validations without improvement
    for epoch in range(1, num_epochs + 1):
        print("Epoch", epoch)
        epoch_losses = np.zeros(5)
        for i in range(1, train_dataset.total_batches + 1):
            images, gt_boxes, gt_classes, gt_masks, img_sizes = train_dataset.next_batch()
            gt_rpn_classes, gt_rpn_bbox_deltas = anchor_utils.get_rpn_classes_and_bbox_deltas(len(images), anchors, gt_boxes)
            l1, l2, l3, l4, l5 = train_step(model, optimizer, [images, img_sizes],
                                           [gt_boxes, gt_classes, gt_masks, img_sizes, gt_rpn_classes, gt_rpn_bbox_deltas])

            l1, l2, l3, l4, l5 = [tf.keras.backend.eval(loss)
                                  for loss in (l1, l2, l3, l4, l5)]
            epoch_losses += [l1, l2, l3, l4, l5]

            if i % 10 == 0:
                print("Iter {}: rpn_cls_loss={}, rpn_bbox_loss={}, mask_cls_loss={}, mask_bbox_loss={}, mask_mask_loss={}".format(i, l1, l2, l3, l4, l5))

        epoch_losses /= train_dataset.total_batches
        print("*" * 50)
        print("Epoch {}: rpn_cls_loss={}, rpn_bbox_loss={}, mask_cls_loss={}, mask_bbox_loss={}, mask_mask_loss={}".format(epoch, *epoch_losses))
        print("*" * 50)
        with open(config.TRAIN_LOSSES_FILE, "a+") as f1:
            f1.write("{} {} {} {} {}\n".format(*epoch_losses))

        if epoch % 10 == 0:
            checkpoint.step.assign_add(1)
            manager.save()

            train_mask_map, train_box_map = calculate_map(td_map, model)
            print("Train mAP: mask", train_mask_map, "bbox", train_box_map)

            valid_mask_map, valid_box_map = calculate_map(vd_map, model)
            print("Valid mAP: mask", valid_mask_map, "bbox", valid_box_map)

            if valid_mask_map > best_map:
                best_map = valid_mask_map
                worse_map_in_row = 0
            else:
                worse_map_in_row += 1

                if worse_map_in_row == 10:
                    print("{} validations without mAP improvement, exiting".format(worse_map_in_row))
                    sys.exit(0)
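
Since mAP is a quality metric where higher is better, the early-stopping state above tracks the best mAP seen and counts validations without improvement. That bookkeeping could be factored into a small reusable helper; a sketch:

class EarlyStopping:
    # Tracks a metric where higher is better and signals a stop after
    # `patience` consecutive checks without improvement.
    def __init__(self, patience):
        self.patience = patience
        self.best = float("-inf")
        self.checks_without_improvement = 0

    def should_stop(self, value):
        if value > self.best:
            self.best = value
            self.checks_without_improvement = 0
        else:
            self.checks_without_improvement += 1
        return self.checks_without_improvement >= self.patience

With stopper = EarlyStopping(patience=10), the validation block above reduces to: if stopper.should_stop(valid_mask_map): sys.exit(0).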
Example No. 4
            # NOTE: snippet truncated above -- these are the trailing arguments of
            # the call that produces `new_rois` (a bilinear 7x7 crop per ROI).
                                                indices[:, 0],
                                                [7, 7], method="bilinear")
            print(new_rois.shape)
            start = 0
            for j in range(proposals.shape[0]):
                count = tf.where(tf.equal(indices[:, 0], j)).shape[0]
                rois_by_images[j].extend(new_rois[start : start + count])
                start += count  # advance the slice past image j's ROIs

        print("kraj")
        rois = tf.ragged.constant(rois_by_images)
        print(rois.shape)
        return
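
The Python loop above splits the pooled ROIs back per image by counting the rows of indices that belong to each image. Assuming the batch size proposals.shape[0] is known statically, the same split can be expressed in one call with tf.dynamic_partition:

# parts[j] holds the ROIs belonging to image j, in input order.
parts = tf.dynamic_partition(new_rois,
                             tf.cast(indices[:, 0], tf.int32),
                             num_partitions=proposals.shape[0])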


if __name__ == "__main__":
    #np.random.seed(100)
    tf.random.set_seed(110)
    anchors = anchor_utils.get_all_anchors((512, 512), [64, 128, 256, 512, 1024], [(1, 1), (1, 2), (2, 1)])
    rpn_model = rpn.RPN(Resnet34_FPN(), 3)
    #weights_path = "weights.ckpt"
    #rpn_model.load_weights(weights_path)
    num_classes = len(config.CLASSES)
    model = Mask_RCNN(rpn_model, anchors, num_classes)

    #ds = dataset_util.VOC2012_Dataset("DATASET/VOC2012/VOC2012", "/train_list.txt", 2)
    ds = dataset_util.VOC2012_Dataset("dataset/VOC2012", "/train_list.txt", 2)
    images, gt_boxes, gt_classes, gt_masks, img_sizes = ds.next_batch()
    gt_rpn_classes, gt_rpn_bbox_deltas = anchor_utils.get_rpn_classes_and_bbox_deltas(len(images), anchors, gt_boxes)

    a, b, c, d, e = model([images, img_sizes], training=True)
Example No. 5
    # NOTE: snippet truncated above -- these are the trailing arguments of what
    # is presumably a tf.train.Checkpoint(...) constructor, given that
    # `checkpoint` is handed to the CheckpointManager below.
                                     net=model,
                                     step=tf.Variable(1))
    manager = tf.train.CheckpointManager(checkpoint,
                                         config.WEIGHTS_DIR,
                                         max_to_keep=4)

    train_dataset = dataset_util.VOC2012_Dataset("DATASET/VOC2012/VOC2012",
                                                 "/train_list.txt", 20)
    # train_dataset = dataset_util.VOC2012_Dataset("dataset/VOC2012", "/train_list.txt", 2)
    valid_dataset = dataset_util.VOC2012_Dataset("DATASET/VOC2012/VOC2012",
                                                 "/valid_list.txt", 20)
    # valid_dataset = dataset_util.VOC2012_Dataset("dataset/VOC2012", "/valid_list.txt", 2)

    if manager.latest_checkpoint:
        print("Restoring...", manager.latest_checkpoint)
        # Run one training step first so the model's variables are created
        # before checkpoint.restore() tries to match them against the checkpoint.
        images, gt_boxes, gt_classes, gt_masks, img_sizes = train_dataset.next_batch()
        rpn_classes, rpn_bbox_deltas = anchor_utils.get_rpn_classes_and_bbox_deltas(
            len(images), anchors, gt_boxes)
        l1, l2 = train_step(
            model, optimizer, [images, img_sizes],
            [gt_boxes, gt_classes, img_sizes, rpn_classes, rpn_bbox_deltas])
        checkpoint.restore(manager.latest_checkpoint).assert_consumed()

    train(50, optimizer, anchors, train_dataset, valid_dataset)

    optimizer.learning_rate = optimizer.learning_rate / 10
    train(50, optimizer, anchors, train_dataset, valid_dataset)

    optimizer.learning_rate = optimizer.learning_rate / 10
    train(50, optimizer, anchors, train_dataset, valid_dataset)
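
The three train() calls implement a manual step decay: 50 epochs at the initial learning rate, then two drops by a factor of 10. The same schedule could be declared up front with a built-in scheduler; a sketch, in which the initial rate of 1e-3 is an assumption:

steps_per_epoch = train_dataset.total_batches
schedule = tf.keras.optimizers.schedules.PiecewiseConstantDecay(
    boundaries=[50 * steps_per_epoch, 100 * steps_per_epoch],
    values=[1e-3, 1e-4, 1e-5])
optimizer = tf.keras.optimizers.SGD(learning_rate=schedule)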