Example #1
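# NOTE: these snippets omit their imports and module-level setup. A plausible
# header, assuming baxter and inputProcessor are project-local modules that
# sit next to these scripts, would be:
import time

import cv2
import numpy as np
import tensorflow as tf

import baxter          # project-local module defining BaxterClassifier
import inputProcessor  # project-local helpers for normalization and region proposals

VERBOSE = False        # referenced in callback() below but never defined in the snippets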
    def __init__(self, detection_threshold=5):
        self.classifier = baxter.BaxterClassifier()
        self.top_results = 3
        # IOU threshold above which two overlapping bounding boxes are merged
        self.overlap_threshold = 0.75
        # Logit threshold for a crop to count as a detection
        self.threshold = detection_threshold
        # Coefficient applied when two boxes are merged
        self.overlap_bonus = 1.1
        [self.image_mean, self.image_std] = inputProcessor.getNormalizationData(
            self.classifier.training_file)
        self.classifier.saver = tf.train.Saver()
        self.classifier.saver.restore(
            self.classifier.sess, self.classifier.weights_file)
def main(argvs):

    baxterClassifier = baxter.BaxterClassifier()
    [meanImage, std] = inputProcessor.getNormalizationData(
        baxterClassifier.training_file)

    # Start Tensorflow Session
    with baxterClassifier.sess as sess:

        baxterClassifier.saver = tf.train.Saver()
        print("weight file to restore ... : ", baxterClassifier.weights_file)

        baxterClassifier.saver.restore(
            baxterClassifier.sess, baxterClassifier.weights_file)

        cv2.waitKey(1000)
        print("starting session... ")

        batch = inputProcessor.get_custom_dataset_batch(
            50, "data/spoon_test_data.csv", meanImage, std)
        image_batch = batch[0]
        label_batch = batch[1]
        batch_size = len(label_batch)

        prediction = tf.argmax(baxterClassifier.logits, 1)
        trueLabel = np.argmax(label_batch, 1)

        result = sess.run(prediction, feed_dict={
            baxterClassifier.x: image_batch,
            baxterClassifier.batch_size: batch_size,
            baxterClassifier.dropout_rate: 1})

        print("=============")
        print(result)
        print(trueLabel)
        print("=============\n\n")

        test_accuracy = baxterClassifier.accuracy.eval(feed_dict={
            baxterClassifier.x: image_batch,
            baxterClassifier.y: label_batch,
            baxterClassifier.batch_size: batch_size,
            baxterClassifier.dropout_rate: 1})
        print("\nTest Accuracy %.2f \n\n" % test_accuracy)
    def callback(self, ros_data):
        '''Callback function of subscribed topic. 
        Here images get converted and features detected'''
        if VERBOSE:
            print('received image of type: "%s"' % ros_data.format)
    
        #### direct conversion to IMAGE NP ARRAY  ####
        np_arr = np.frombuffer(ros_data.data, np.uint8)
        image_np = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
        

        top_results = 2  # number of crops to show for detection
        predicting_class = 1
        predictions = []

        # TODO : tensorflow image detection starts here
        [meanImage, std] = inputProcessor.getNormalizationData("src/beginner_tutorials/scripts/data/custom_train_data.csv")
        baxterClassifier = baxter.BaxterClassifier()
        

        # Start Tensorflow Session
        with baxterClassifier.sess as sess:

            baxterClassifier.saver = tf.train.Saver()
            print("weight file to restore ... : ", baxterClassifier.weights_file)

            baxterClassifier.saver.restore(
                baxterClassifier.sess, "src/beginner_tutorials/scripts/"+baxterClassifier.weights_file)

            cv2.waitKey(1000)
            print("starting session... ")

            # GET IMAGE FROM USER INPUT
            batch = inputProcessor.regionProposal(image_np)

            if batch is None:
                print("something went wrong when getting images crops ... ")
                return 

            original_img = batch[0]
            image_batch = batch[1]
            boundingBoxInfo = batch[2]
            batch_size = len(image_batch)

            # CREATE INPUT IMAGE BATCH
            input_image = np.zeros(
                [len(image_batch), baxterClassifier.img_size, baxterClassifier.img_size, 3])

            for x in range(batch_size):
                input_image[x] = (image_batch[x] - meanImage) / std

            # RUN CASCADING DETECTOR
            print("batch size : ", batch_size)
            print("input tensor size : ", input_image.shape)

            prediction = sess.run(baxterClassifier.logits, feed_dict={
                baxterClassifier.x: input_image,
                baxterClassifier.batch_size: batch_size,
                baxterClassifier.dropout_rate: 1})

            # collect the target-class logit and bounding box for every crop
            for y in range(batch_size):
                prob = prediction[y][predicting_class]
                boundingBox = boundingBoxInfo[y]
                predictions.append([prob, boundingBox])

            # sort crops by logit values
            predictions.sort(key=lambda p: p[0], reverse=True)

            for i in range(top_results):
                boundingBoxData = predictions[i]
                print(boundingBoxData)

                x = boundingBoxData[1][0]
                y = boundingBoxData[1][1]
                winW = boundingBoxData[1][2]
                winH = boundingBoxData[1][3]

                # if boundingBoxData[0] > threshold:
                cv2.rectangle(original_img, (x, y),
                              (x + winW, y + winH), (0, 255, 0), 2)

            cv2.imshow("Window", original_img)
            cv2.waitKey(1000)
            time.sleep(1)
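# NOTE: callback() above reads ros_data.format and ros_data.data, so it expects
# a sensor_msgs/CompressedImage. A minimal subscriber that would drive it might
# look like the sketch below; the node name and topic are assumptions.
import rospy
from sensor_msgs.msg import CompressedImage

def listen(detector):
    # detector is an instance of the class that owns callback()
    rospy.init_node('baxter_detector', anonymous=True)
    rospy.Subscriber("/camera/image/compressed", CompressedImage,
                     detector.callback, queue_size=1)
    rospy.spin()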
def main(argvs):
    [meanImage,
     std] = inputProcessor.getNormalizationData("data/custom_train_data.csv")
    baxterClassifier = baxter.BaxterClassifier(argvs)
    top_results = 2  # number of crops to show for detection

    # Start Tensorflow Session
    with baxterClassifier.sess as sess:

        baxterClassifier.saver = tf.train.Saver()
        print("weight file to restore ... : ", baxterClassifier.weights_file)

        baxterClassifier.saver.restore(baxterClassifier.sess,
                                       baxterClassifier.weights_file)

        cv2.waitKey(1000)
        print("starting session... ")

        while True:
            # GET USER INPUT
            predictions = []
            img_filename = raw_input('image location: ')
            predictingClass = int(raw_input('class value: '))

            # GET IMAGE FROM USER INPUT
            batch = inputProcessor.get_sliding_window_img_crops(img_filename)
            if batch is None:
                print("wrong user input regarding image or labels ")
                continue
            original_img = batch[0]
            image_batch = batch[1]
            boundingBoxInfo = batch[2]
            batch_size = len(image_batch)

            # CREATE INPUT IMAGE BATCH
            input_image = np.zeros([
                len(image_batch), baxterClassifier.img_size,
                baxterClassifier.img_size, 3
            ])

            for x in range(batch_size):
                input_image[x] = (image_batch[x] - meanImage) / std

            # RUN CASCADING DETECTOR
            print("batch size : ", batch_size)
            print("input tensor size : ", input_image.shape)

            prediction = sess.run(baxterClassifier.logits,
                                  feed_dict={
                                      baxterClassifier.x: input_image,
                                      baxterClassifier.batch_size: batch_size,
                                      baxterClassifier.dropout_rate: 1
                                  })

            # collect the target-class logit and bounding box for every crop
            for y in range(batch_size):
                prob = prediction[y][predictingClass]
                boundingBox = boundingBoxInfo[y]
                predictions.append([prob, boundingBox])

            # sort crops by logit values
            predictions.sort(key=lambda p: p[0], reverse=True)

            for i in range(top_results):
                boundingBoxData = predictions[i]
                print(boundingBoxData)

                x = boundingBoxData[1][0]
                y = boundingBoxData[1][1]
                winW = boundingBoxData[1][2]
                winH = boundingBoxData[1][3]

                # if boundingBoxData[0] > threshold:
                cv2.rectangle(original_img, (x, y), (x + winW, y + winH),
                              (0, 255, 0), 2)

            cv2.imshow("Window", original_img)
            cv2.waitKey(1000)
            time.sleep(1)
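# NOTE: a conventional entry point for the interactive script above; sys is
# assumed to be available (it is not imported in the original snippets).
if __name__ == '__main__':
    import sys
    main(sys.argv)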