Code example #1
import numpy as np

def __discard_boxes_by_iou(bnd_box, bounding_boxes, threshold):
    #Keep only the boxes whose IoU with bnd_box is below the threshold
    result = []
    for i in range(bounding_boxes.shape[0]):
        bnd_box2 = bounding_boxes[i]
        if intersection_over_union(bnd_box[:4], bnd_box2[:4]) < threshold:
            result.append(bnd_box2)
    return np.array(result)
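
A helper like this typically forms the discard step of non-maximum suppression. The driver below is a hypothetical sketch, not part of the source; the non_max_suppression name and the [x1, y1, x2, y2, confidence] row layout are assumptions (it also reuses the numpy import above).

#Hypothetical NMS driver built around __discard_boxes_by_iou (sketch only)
def non_max_suppression(boxes, threshold=0.5):
    boxes = boxes[boxes[:, 4].argsort()[::-1]]  #highest confidence first
    kept = []
    while boxes.shape[0] > 0:
        best = boxes[0]
        kept.append(best)  #keep the most confident remaining box
        boxes = __discard_boxes_by_iou(best, boxes[1:], threshold)  #drop overlaps
    return np.array(kept)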
Code example #2
def visually_inspect_result(just_trained=False,
                            unseen=False,
                            index=0,
                            parent_dir="./training/"):
    print("Getting prediction for visual inspection")
    rgb = np.array(get_single_image(parent_dir, "rgb", index),
                   dtype=np.float32)  #to display

    y_pred = get_prediction(just_trained, index, parent_dir)

    if not unseen:  #Show ground truth mask and IoU if we are using training data
        mask = get_single_image(parent_dir, "mask", index)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            mask = skimage.color.rgb2gray(mask)
            mask = skimage.exposure.rescale_intensity(mask)
        cv2.imshow('ground truth', mask)

        y_pred2 = np.copy(y_pred)
        iou = util.intersection_over_union(y_pred2, mask)
        print("IoU = %f" % iou)

    rgb = cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)
    cv2.imshow('input image', rgb)
    cv2.imshow('RGB result', y_pred)

    print("Displaying images")
    cv2.waitKey(0)
    return
Code example #3
def visually_inspect_result(just_trained=False, unseen=False, index=0, parent_dir=TRAINING_DATA_DIR):
    print("Getting prediction for visual inspection")

    rgb = np.array(get_single_image(parent_dir, "rgb", index), dtype=np.float32) #Image to display
    y_pred = get_prediction(just_trained, index, parent_dir)
    #rgb[:,:,2] = rgb[:,:,2]+np.squeeze(y_pred)
 
    #Show ground truth mask (if we are using the training set for our sample prediction)
    if not unseen:
        mask = get_single_image(parent_dir, "mask", index)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            mask = skimage.color.rgb2gray(mask)
            mask = skimage.exposure.rescale_intensity(mask)
            mask = np.nan_to_num(mask)
        cv2.imshow('ground truth',mask)

        y_pred2 = np.copy(y_pred)
        iou = util.intersection_over_union(y_pred2, mask)
        print("IoU = %f" % iou)

    rgb = cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)
    cv2.imshow('input image',rgb)
    cv2.imshow('5layer result',y_pred)
    
    print("Displaying images")
    cv2.waitKey(0)
    return
Code example #4
    def next_test_batch(self):
        img, annotations = self.next_test_sample()
        predictions_per_cell = Configuration.ANCHORS.shape[0] * (
            len(Configuration.CLASS_LABELS.keys()) + 5)
        #annotations = annotations[['label','xc','yc','w','h']]
        y_true = pd.DataFrame(
            np.zeros(shape=(Configuration.GRID_SIZE * Configuration.GRID_SIZE,
                            predictions_per_cell)))
        #Size of one grid cell in normalised image coordinates
        cell_size = 1.0 / Configuration.GRID_SIZE
        for i in range(annotations.shape[0]):
            #Step across the grid to find the cell (l, m) that contains the
            #box centre; equivalent to flooring xc / cell_size and yc / cell_size
            l = 0
            m = 0
            cx = 0.0
            cy = 0.0
            gnd_truth_box = annotations.iloc[i]
            while cx <= gnd_truth_box[5]:
                cx += cell_size
                l += 1
            cx -= cell_size
            l -= 1
            while cy <= gnd_truth_box[6]:
                cy += cell_size
                m += 1
            cy -= cell_size
            m -= 1

            xc = gnd_truth_box[5]
            yc = gnd_truth_box[6]
            #            xc = gnd_truth_box[5] - cx
            #            yc = gnd_truth_box[6] - cy
            w = gnd_truth_box[7]
            h = gnd_truth_box[8]
            #One-hot encode the ground-truth class label
            labels = []
            for label in Configuration.CLASS_LABELS.keys():
                if label == gnd_truth_box[0]:
                    labels.append(1)
                else:
                    labels.append(0)
            data = []
            #The confidence target c for each anchor is its IoU with the
            #ground-truth box, with the anchor centred on the ground-truth centre
            for j in range(Configuration.ANCHORS.shape[0]):
                anchor_box = Configuration.ANCHORS[j]
                anchor_box = np.append(gnd_truth_box[5:7], anchor_box, axis=0)
                anchor_box = cvt_coord_to_diagonal(anchor_box)
                c = intersection_over_union(anchor_box, gnd_truth_box[1:5])
                #                c = 1.0
                data = data + [xc, yc, w, h, c] + labels
            y_true.iloc[Configuration.GRID_SIZE * m + l] = pd.Series(data)

        #Each row of y_true holds [xc, yc, w, h, c] + one_hot_labels per anchor
        return img, np.reshape(np.array(y_true),
                               newshape=(Configuration.GRID_SIZE,
                                         Configuration.GRID_SIZE,
                                         predictions_per_cell))
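
cvt_coord_to_diagonal is called here but never shown. Judging from the call sites ([xc, yc, w, h] in, corner coordinates out, fed straight to intersection_over_union), a plausible sketch follows; it is an inference, not the authors' implementation.

import numpy as np

def cvt_coord_to_diagonal(box):
    #Convert a centre-format box [xc, yc, w, h] into diagonal-corner
    #format [x1, y1, x2, y2]
    xc, yc, w, h = box[0], box[1], box[2], box[3]
    return np.array([xc - w / 2.0, yc - h / 2.0,
                     xc + w / 2.0, yc + h / 2.0])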
Code example #5
    def test_iou(self):

        #Partially overlapping boxes; IoU must be symmetric in its arguments
        a = [3, 5, 10, 8]
        b = [4, 6, 8, 15]
        iou_true = 0.1633
        iou = intersection_over_union(a, b)
        self.assertAlmostEqual(iou_true, iou, places=4)
        iou = intersection_over_union(b, a)
        self.assertAlmostEqual(iou_true, iou, places=4)

        #Disjoint boxes: IoU is zero
        a = [3, 5, 10, 8]
        b = [11, 5, 15, 8]
        iou = intersection_over_union(a, b)
        iou_true = 0.0
        self.assertAlmostEqual(iou_true, iou, places=4)

        #One box fully contains the other
        a = [0, 1, 13, 12]
        b = [3, 5, 10, 8]
        iou_true = 0.1469
        iou = intersection_over_union(a, b)
        self.assertAlmostEqual(iou_true, iou, places=4)

        #A tall box crossing a wide box
        a = [6, 1, 8, 12]
        b = [3, 5, 10, 8]
        iou_true = 0.1622
        iou = intersection_over_union(a, b)
        self.assertAlmostEqual(iou_true, iou, places=4)

        #A box against a diagonally shifted copy of itself
        a = [3, 5, 10, 8]
        b = [3.5, 5.5, 10.5, 8.5]
        iou_true = 0.6311
        iou = intersection_over_union(a, b)
        self.assertAlmostEqual(iou_true, iou, places=4)
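
None of the examples on this page define intersection_over_union itself, but the expected values in this test pin down its contract for corner-format boxes [x1, y1, x2, y2]: a plain area ratio with no +1 pixel correction. Below is a minimal sketch that reproduces the numbers above, not necessarily the authors' implementation (other examples apply same-named functions to masks and tensors, which would need different code).

def intersection_over_union(box_a, box_b):
    #Boxes in corner format [x1, y1, x2, y2]; the overlap clamps to zero
    #when the boxes are disjoint
    inter_w = max(0.0, min(box_a[2], box_b[2]) - max(box_a[0], box_b[0]))
    inter_h = max(0.0, min(box_a[3], box_b[3]) - max(box_a[1], box_b[1]))
    intersection = inter_w * inter_h
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    return intersection / (area_a + area_b - intersection)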
Code example #6
    def validation_IOU(self, sess, pred, X, Y, keep_prob, is_training):
        img_loader2 = data_load.ImgLoader('pascal')
        img_loader2.run('val')  # train or val

        batch_num = 10  #number of validation batches to average over

        IOU_sum = 0

        for i in range(batch_num):
            input_batch, label_batch = img_loader2.nextbatch(
                self.input_size, i)
            pred_ = sess.run(pred,
                             feed_dict={
                                 X: input_batch,
                                 Y: label_batch,
                                 keep_prob: 1.0,
                                 is_training: False
                             })
            IOU = util.intersection_over_union(pred_, label_batch)
            IOU_sum += IOU

        return IOU_sum / batch_num
Code example #7
def visually_inspect_result(just_trained,
                            unseen=False,
                            index=0,
                            parent_dir=TRAINING_DATA_DIR):
    print("Commencing visual inspection of result")
    rgb = np.array(get_single_image(parent_dir, "rgb", index),
                   dtype=np.float32)

    file_names = [
        os.path.join(parent_dir + "txt", f)
        for f in os.listdir(parent_dir + "txt") if f.endswith(".txt")
    ]
    file_names = sorted(file_names)
    dx, dy = get_dxy_from_txt(file_names[index])

    y_pred = get_prediction(just_trained, index, parent_dir)

    if not unseen:  #Get mask and IoU if using training data
        mask = get_single_image(parent_dir, "mask", index)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            mask = skimage.color.rgb2gray(mask)
            mask = skimage.exposure.rescale_intensity(mask)
            cv2.imshow('ground truth', mask)

        y_pred2 = np.copy(y_pred)
        iou = util.intersection_over_union(y_pred2, mask)
        print("IoU = %f" % iou)

    rgb = cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)
    #cv2.imshow("rgb", rgb)
    cv2.imshow("input dx", dx)
    cv2.imshow('X/Y result', y_pred)

    print("Displaying images")
    cv2.waitKey(0)
    return
Code example #8
saver = tf.train.Saver(tf.global_variables())
ckpt = tf.train.get_checkpoint_state('./model')
saver.restore(sess, ckpt.model_checkpoint_path)

sample2see, loss_ = sess.run([pred, loss],
                             feed_dict={
                                 X: input_batch,
                                 Y: label_batch_cal_loss,
                                 keep_prob: 1.0,
                                 is_training: False
                             })

mask = masker.make_mask_from_label(sample2see)

IOU = util.intersection_over_union(sample2see, label_batch_cal_loss)

figure()  #figure/subplot/imshow/setp come from matplotlib's pylab interface
for i in range(batch_size):
    ax = subplot(batch_size, 3, 3 * i + 1)
    imshow(input_batch[i] / 255)
    ax.set_title('INPUT')
    setp(ax.get_xticklabels(), visible=False)
    setp(ax.get_yticklabels(), visible=False)

    ax = subplot(batch_size, 3, 3 * i + 2)
    imshow(mask[i])
    ax.set_title('PREDICTION')
    setp(ax.get_xticklabels(), visible=False)
    setp(ax.get_yticklabels(), visible=False)
Code example #9
    def run(self, max_iter, already_done):
        ######already_done = number of epochs already completed########

        img_loader = data_load.ImgLoader()
        img_loader.run('train')  # train or val

        data_size = len(img_loader.class_path)
        batch_num = data_size // self.input_size

        # input_batch, label_batch = img_loader.nextbatch(self.input_size, iter)
        input_batch, label_batch = img_loader.nextbatch(self.input_size,
                                                        0,
                                                        stochastic=True)

        X = tf.placeholder(tf.float32, [
            None, input_batch.shape[1], input_batch.shape[2],
            input_batch.shape[3]
        ])  # h * w * 3
        Y = tf.placeholder(tf.float32, [
            None, label_batch.shape[1], label_batch.shape[2],
            label_batch.shape[3]
        ])  # h * w * (class+1); the +1 is the background class
        keep_prob = tf.placeholder(tf.float32)
        is_training = tf.placeholder(tf.bool)

        train_op, pred, loss, logit = self.train(X, Y, keep_prob, is_training)

        sess = tf.Session()
        saver = tf.train.Saver(tf.global_variables())

        ckpt = tf.train.get_checkpoint_state('./model')
        if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            sess.run(tf.global_variables_initializer())

        for epoch in range(max_iter):
            for itr in range(batch_num):
                input_batch, label_batch = img_loader.nextbatch(
                    self.input_size, itr, stochastic=True)
                _, loss_, pred_ = sess.run(
                    [train_op, loss, pred],
                    feed_dict={
                        X: input_batch,
                        Y: label_batch,
                        keep_prob: 0.5,
                        is_training: True
                    })
                print('iteration :', itr, '  loss :', loss_)

            if (epoch + 1) % 2 == 0:
                IOU_this_batch = util.intersection_over_union(
                    pred_, label_batch)
                print('########################\n',
                      'intersection over union for this batch :',
                      IOU_this_batch, '\n########################')
                IOU_val = self.validation_IOU(sess, pred, X, Y, keep_prob,
                                              is_training)
                print('########################\n',
                      'intersection over union for validation set:', IOU_val,
                      '\n########################')

                model_dir = './model' + str(epoch + 1 +
                                            already_done) + '/model.ckpt'
                with open('loss.txt', 'a') as wf:
                    loss_info = ('\nepoch: ' + '%7d' % (epoch + 1 + already_done)
                                 + '  batch loss: ' + '%7.6f' % loss_
                                 + '  batch IOU: ' + '%7.6f' % IOU_this_batch
                                 + '  validation IOU: ' + '%7.6f' % IOU_val)
                    wf.write(loss_info)
                saver.save(sess, model_dir)
            else:
                model_dir = './model/model.ckpt'
                saver.save(sess, model_dir)
Code example #10
    def train(self, data):
        #Initialization
        iteration = 1
        avg_error = float('inf')  #distortion from the previous iteration

        #Initialize cluster vectors to random points from data
        idx = np.random.randint(low=0, high=data.shape[0], size=self.k)
        self.cluster_vectors = data[idx, :]

        #Cluster Labels for data
        cluster_labels = np.zeros(shape=(data.shape[0]), dtype=np.int32)

        while True:
            #divide the set of training vectors into K clusters using
            #minimum error criteria
            print('Iteration:', iteration)
            for i in range(data.shape[0]):
                bnd_box = data[i]
                bnd_box = np.concatenate(([0, 0], bnd_box))
                bnd_box = cvt_coord_to_diagonal(bnd_box)
                error_iou = 0
                #assign bnd_box a cluster based on iou
                for j in range(self.k):
                    training_vector = np.concatenate(
                        ([0, 0], self.cluster_vectors[j]))

                    training_vector = cvt_coord_to_diagonal(training_vector)

                    e_iou = 1.0 - intersection_over_union(
                        bnd_box, training_vector)
                    if j == 0:
                        error_iou = e_iou
                        cluster_labels[i] = j
                    elif e_iou < error_iou:
                        error_iou = e_iou
                        cluster_labels[i] = j

            #compute average distortion
            error = 0.0
            xc = [0 for _ in range(self.k)]
            yc = [0 for _ in range(self.k)]
            coordinate_counts = [0 for _ in range(self.k)]
            average_iou = 0
            for i in range(data.shape[0]):
                bnd_box = data[i]

                xc[cluster_labels[i]] += bnd_box[0]
                yc[cluster_labels[i]] += bnd_box[1]
                coordinate_counts[cluster_labels[i]] += 1

                bnd_box = np.concatenate(([0, 0], bnd_box))
                bnd_box = cvt_coord_to_diagonal(bnd_box)

                training_vector = self.cluster_vectors[cluster_labels[i]]
                training_vector = np.concatenate(([0, 0], training_vector))
                training_vector = cvt_coord_to_diagonal(training_vector)
                iou = intersection_over_union(bnd_box, training_vector)
                average_iou += iou
                error += (1.0 - iou)

            error = error / data.shape[0]
            average_iou = average_iou / data.shape[0]

            #Make new cluster vectors, i.e. find new centroids
            new_clusters = []
            for i in range(self.k):
                if coordinate_counts[i] == 0:
                    new_clusters.append([
                        self.cluster_vectors[i][0], self.cluster_vectors[i][1]
                    ])
                else:
                    new_clusters.append([
                        xc[i] / coordinate_counts[i],
                        yc[i] / coordinate_counts[i]
                    ])
            self.cluster_vectors = np.array(new_clusters)

            if (np.abs(avg_error - error) / error < self.e
                    or iteration >= self.max_iteration):
                break
            else:
                avg_error = error
                iteration += 1

        print("K: ", self.k, ' AvgError: ',
              np.abs(avg_error - error) / error, 'Iterations: ', iteration,
              'AvgIou: ', average_iou)
        return self.cluster_vectors, cluster_labels, average_iou
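
train() reads self.k, self.e and self.max_iteration and stores self.cluster_vectors, but the owning class is not shown. A minimal shell consistent with those references; the class name BoxKMeans and the default values are assumptions, not taken from the source.

class BoxKMeans:
    def __init__(self, k, e=0.005, max_iteration=100):
        self.k = k                          #number of anchor clusters
        self.e = e                          #relative distortion change treated as converged
        self.max_iteration = max_iteration  #hard cap on clustering iterations
        self.cluster_vectors = None         #(k, 2) array of (w, h) anchors after train()

#Hypothetical usage: anchors, labels, avg_iou = BoxKMeans(k=5).train(boxes_wh)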
Code example #11
    def forward(self, predictions, target):
        #Reshape the flat network output to (N, S, S, C + B*5)
        predictions = predictions.reshape(-1, self.S, self.S, self.C + self.B * 5)

        #IoU of each of the two predicted boxes with the ground-truth box
        iou_b1 = intersection_over_union(predictions[..., 2:6], target[..., 2:6])
        iou_b2 = intersection_over_union(predictions[..., 7:11], target[..., 2:6])
        ious = torch.cat([iou_b1.unsqueeze(0), iou_b2.unsqueeze(0)], dim=0)

        #bestbox picks the responsible predictor; exists_box masks cells with an object
        iou_maxes, bestbox = torch.max(ious, dim=0)
        exists_box = target[..., 1].unsqueeze(3)

        #Keep only the responsible predictor's box in cells that contain an object
        box_predictions = exists_box * (
            bestbox * predictions[..., 7:11]
            + (1 - bestbox) * predictions[..., 2:6]
        )

        box_targets = exists_box * target[..., 2:6]

        #Regress the square root of w/h, as in YOLOv1; the epsilon belongs
        #outside abs so the gradient of sqrt stays finite at zero
        box_predictions[..., 2:4] = torch.sign(box_predictions[..., 2:4]) * torch.sqrt(
            torch.abs(box_predictions[..., 2:4]) + 1e-6
        )
        box_targets[..., 2:4] = torch.sqrt(box_targets[..., 2:4])

        #Coordinate loss over the responsible boxes
        box_loss = self.mse(
            torch.flatten(box_predictions, end_dim=-2),
            torch.flatten(box_targets, end_dim=-2),
        )

        #Confidence of the responsible predictor
        pred_box = (
            bestbox * predictions[..., 1:2] + (1 - bestbox) * predictions[..., 6:7]
        )

        #Object loss: confidence in cells that contain an object
        object_loss = self.mse(
            torch.flatten(exists_box * pred_box),
            torch.flatten(exists_box * target[..., 1:2]),
        )

        #No-object loss: penalise the confidence of both predictors in empty
        #cells (first predictor's confidence at 1:2, second's at 6:7)
        no_object_loss = self.mse(
            torch.flatten((1 - exists_box) * predictions[..., 1:2], start_dim=1),
            torch.flatten((1 - exists_box) * target[..., 1:2], start_dim=1),
        )

        no_object_loss += self.mse(
            torch.flatten((1 - exists_box) * predictions[..., 6:7], start_dim=1),
            torch.flatten((1 - exists_box) * target[..., 1:2], start_dim=1),
        )

        #Classification loss in cells that contain an object
        class_loss = self.mse(
            torch.flatten(exists_box * predictions[..., :1], end_dim=-2),
            torch.flatten(exists_box * target[..., :1], end_dim=-2),
        )

        #Total loss: weighted sum of the four components
        loss = (
            self.lambda_coord * box_loss
            + object_loss
            + self.lambda_noobj * no_object_loss
            + class_loss
        )

        return loss
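
The forward pass references self.S, self.B, self.C, self.mse, self.lambda_coord and self.lambda_noobj, none of which appear in the snippet. Below is a plausible constructor with the usual YOLOv1 settings as assumed defaults; the class name YoloLoss is also an assumption (the slicing above implies C = 1 and B = 2).

import torch.nn as nn

class YoloLoss(nn.Module):
    def __init__(self, S=7, B=2, C=1):
        super().__init__()
        self.mse = nn.MSELoss(reduction="sum")  #summed squared error
        self.S = S                  #grid size (S x S cells)
        self.B = B                  #boxes predicted per cell
        self.C = C                  #number of classes
        self.lambda_coord = 5.0     #weight on the box-coordinate loss
        self.lambda_noobj = 0.5     #down-weight on no-object confidence loss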