import cv2
import numpy as np
import tensorflow as tf

# SiameseNet and data_loader are project-local modules; their import lines are
# omitted in this snippet.


def main():
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.7)
    config = tf.ConfigProto(gpu_options=gpu_options)
    with tf.Session(config=config) as sess:
        model = SiameseNet()

        model.load_weights(
            '/home/tracy/PycharmProjects/SiameseNet/checkpoint/with_reference/my_model'
        )

        img = cv2.imread('/home/tracy/YOLOv3_TensorFlow/temp/2_3.jpg')

        img1, img2 = data_loader.dataloader(img)
        pred, scores, _ = model.prediction(img1, img2)

        pred, scores = sess.run([pred, scores])

        print(pred, scores)
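        # Keep only the classes the network flagged as matches and their scores;
        # label_index picks the single best match (computed but not used in the
        # printout below).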
        pred_labels = np.nonzero(pred)
        scores_ = scores[pred_labels]

        label_index = np.argmax(scores_)

        print('\033[1;32m   Label\t\t\tScore\033[0m')
        for i in pred_labels[0]:
            print('\t', i, '\t\t\t', scores[i], '\n')
Example #2
class SiameseModel():
    def __init__(self, config):
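        # Hold the Siamese net in its own tf.Graph/tf.Session so it can run
        # alongside other models (e.g. the YOLO detector) without graph clashes.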
        self.graph = tf.Graph()
        self.sess = tf.Session(config=config, graph=self.graph)
        with self.sess.as_default():
            with self.graph.as_default():
                self.siamese_model = SiameseNet()
                self.siamese_model.load_weights(
                    '/home/tracy/PycharmProjects/SiameseNet/checkpoint/with_reference/best/my_model'
                )

    def run(self, img1, img2):
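        # Build the prediction ops inside this instance's private graph.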
        with self.graph.as_default():
            return self.siamese_model.prediction(img1, img2)
Example #3
        # Crop the detected traffic signs
        if x1 - x0 > 10 and y1 - y0 > 10 and labels_[j] == 0:
            img_ori_ = cv2.cvtColor(img_ori,
                                    cv2.COLOR_BGR2RGB).astype(np.float32)
            img_cropped = img_ori_[int(y0):int(y1), int(x0):int(x1)]

            tf.reset_default_graph()
            new_graph = tf.Graph()
            with new_graph.as_default():
                with tf.Session(graph=new_graph) as new_sess:
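                    # A fresh SiameseNet is built and its checkpoint reloaded for
                    # every detected crop; this isolates it from the YOLO graph
                    # but is expensive per detection.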
                    siamese_model = SiameseNet()
                    siamese_model.load_weights(
                        '/home/tracy/PycharmProjects/SiameseNet/checkpoint/with_reference/my_model'
                    )
                    img1, img2 = dataloader(img_cropped)
                    label_pred, label_score, _ = siamese_model.prediction(
                        img1, img2)
                    label_pred_, label_score_ = new_sess.run(
                        [label_pred, label_score])

            # with sess_yolo.as_default():
            #     with sess_yolo.graph.as_default():
            #         img1, img2 = dataloader(img_cropped)
            #         # input1_ = sess_yolo.run(img1)
            #         # input2_ = sess_yolo.run(img2)
            #         label_pred_, label_score_ = yolo_model.siamese_forward(img1, img2)
            #         print(label_pred_, label_score_)

            # with sess_siam.as_default():
            #     with sess_siam.graph.as_default():
            #         img1, img2 = dataloader(img_cropped)
            #         label_pred, label_score = siamese_model.prediction(img1, img2)
def test_display_one_img(img_path):
    img_ori = cv2.imread(img_path)
    if args.letterbox_resize:
        img, resize_ratio, dw, dh = letterbox_resize(img_ori, args.new_size[0],
                                                     args.new_size[1])
    else:
        height_ori, width_ori = img_ori.shape[:2]
        img = cv2.resize(img_ori, tuple(args.new_size))
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = np.asarray(img, np.float32)
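    # Add a batch dimension and scale pixel values to [0, 1]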
    img = img[np.newaxis, :] / 255.
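    # Run the YOLO detector inside its own graph/session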
    with sess_yolo.as_default():
        with graph_yolo.as_default():
            boxes_, scores_, labels_ = sess_yolo.run(
                [boxes, scores, labels], feed_dict={input_data: img})

    # rescale the coordinates to the original image
    if args.letterbox_resize:
        boxes_[:, [0, 2]] = (boxes_[:, [0, 2]] - dw) / resize_ratio
        boxes_[:, [1, 3]] = (boxes_[:, [1, 3]] - dh) / resize_ratio
    else:
        boxes_[:, [0, 2]] *= (width_ori / float(args.new_size[0]))
        boxes_[:, [1, 3]] *= (height_ori / float(args.new_size[1]))

    for j in range(len(boxes_)):
        x0, y0, x1, y1 = boxes_[j]
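        # Clip negative box coordinates to the image border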
        x0 = np.maximum(x0, 0)
        y0 = np.maximum(y0, 0)
        x1 = np.maximum(x1, 0)
        y1 = np.maximum(y1, 0)

        label_index = labels_[j]
        # Crop the detected traffic signs

        if x1 - x0 > 10 and y1 - y0 > 10 and labels_[j] == 0:
            img_ori_ = cv2.cvtColor(img_ori,
                                    cv2.COLOR_BGR2RGB).astype(np.float32)
            img_cropped = img_ori_[int(y0):int(y1), int(x0):int(x1)]

            if img_cropped.any():
                tf.reset_default_graph()
                new_graph = tf.Graph()
                with new_graph.as_default():
                    with tf.Session(graph=new_graph) as new_sess:
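                        # As above, a new graph, session, and weight load per crop
                        # keeps the Siamese net separate from the YOLO graph.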
                        siamese_model = SiameseNet()
                        siamese_model.load_weights(
                            '/home/tracy/PycharmProjects/SiameseNet/checkpoint/RGBscaled/best/my_model'
                        )
                        img1, img2 = dataloader(img_cropped)
                        label_pred, label_score, _ = siamese_model.prediction(
                            img1, img2)
                        label_pred_, label_score_ = new_sess.run(
                            [label_pred, label_score])

                # with sess_siam.as_default():
                #     with sess_siam.graph.as_default():
                #         img1, img2 = dataloader(img_cropped)
                #         label_pred, label_score, _ = siamese_model.prediction(img1, img2)
                #         label_pred_, label_score_ = sess_siam.run([label_pred, label_score])

                # cv2.imwrite('/home/tracy/YOLOv3_TensorFlow/temp/' + str(i) + '_' + str(j) + '.jpg', img_cropped)

    #     print("Writting %s"%img)
    #     test_one_img('/home/tracy/data/TrafficSign_test/Images1/' + img)
    #     print('Done writing %s'%img)
    # Choose the one label with highest score
                pred_labels = np.nonzero(label_pred_)
                pred_scores = label_score_[pred_labels]
                # print("pred_scores: ", pred_scores)
                if len(pred_scores) > 0:
                    label_index = np.argmax(pred_scores)
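                    # The +2 offset appears to map the Siamese index into
                    # args.classes_all used for plotting (project-specific).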
                    label_index = pred_labels[0][label_index] + 2
                # labels_[j] = label_index

        plot_one_box(img_ori, [x0, y0, x1, y1],
                     label_index=label_index,
                     label=args.classes_all[label_index] +
                     ', {:.2f}%'.format(scores_[j] * 100),
                     color=color_table[labels_[j]])

    cv2.namedWindow('Detection result', 0)
    cv2.resizeWindow('Detection result', 2400, 1800)
    cv2.imshow('Detection result', img_ori)
    cv2.imwrite('detection_result.jpg', img_ori)
    cv2.waitKey(0)
Example #5
            '/home/tracy/PycharmProjects/SiameseNet/checkpoint/checkpoint_alerted/model_alterd'
        )

        label = sess_yolo.run(label_pred_)

        # print(label_pred_plh)
        print(label)
sess_yolo.close()

with sess_siam.as_default():
    with sess_siam.graph.as_default():
        img1, img2 = dataloader(img)

        # Evaluate the input tensors to numpy arrays (via the default session)
        # before feeding them to prediction()
        img1_, img2_ = img1.eval(), img2.eval()

        label_pred_siam, label_score_siam, distance_siam = siamese_model.prediction(
            img1_, img2_)
        label_pred_siam_, label_score_siam_, distance_siam_ = sess_siam.run(
            [label_pred_siam, label_score_siam, distance_siam])

        # intermediate_layer_model = Model(inputs=siamese_model.input,
        #                                  outputs=siamese_model.get_layer(
        #                                      'encoder/layer_with_weights-1/beta/.ATTRIBUTES/VARIABLE_VALUE').output)
        # intermediate_output = intermediate_layer_model.predict(img1, img2)
        print(
            "\033[1;32m**************** result from siamese net ****************\033[0m"
        )
        # print('layer1: ', intermediate_output)
        print(label_pred_siam_)

        # print('---------------------------------------------------')
        # sess_siam.run(tf.Print(img1, [img1, img1.shape], message='Debug message: '))