Code example #1
    def __init__(self, detection_threshold=5):
        self.classifier = baxter.BaxterClassifier()
        # Number of top-scoring crops to keep for detection
        self.top_results = 3
        # IoU threshold above which two overlapping bounding boxes are merged
        self.overlap_threshold = 0.75
        # Logit threshold above which a crop counts as a detection
        self.threshold = detection_threshold
        # Coefficient applied when two boxes are merged
        self.overlap_bonus = 1.1
        [self.image_mean, self.image_std] = inputProcessor.getNormalizationData(
            self.classifier.training_file)
        self.classifier.saver = tf.train.Saver()
        self.classifier.saver.restore(
            self.classifier.sess, self.classifier.weights_file)
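
The overlap_threshold and overlap_bonus fields above imply that overlapping detections are later combined by intersection-over-union, but that merging code is not part of this snippet. The following is a minimal sketch of such a step, assuming each detection is a [score, (x, y, w, h)] pair; it is an illustration, not the project's actual implementation.

def iou(box_a, box_b):
    # Intersection-over-union of two (x, y, w, h) boxes.
    ax, ay, aw, ah = box_a
    bx, by, bw, bh = box_b
    ix = max(0, min(ax + aw, bx + bw) - max(ax, bx))
    iy = max(0, min(ay + ah, by + bh) - max(ay, by))
    inter = ix * iy
    union = aw * ah + bw * bh - inter
    return inter / float(union) if union > 0 else 0.0

def merge_detections(detections, overlap_threshold=0.75, overlap_bonus=1.1):
    # detections: list of [score, (x, y, w, h)] pairs.
    merged = []
    for score, box in sorted(detections, reverse=True):
        for kept in merged:
            if iou(box, kept[1]) > overlap_threshold:
                # Fold the weaker box into the stronger one and boost its score.
                kept[0] *= overlap_bonus
                break
        else:
            merged.append([score, box])
    return merged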
Code example #2
def main(argvs):

    baxterClassifier = baxter.BaxterClassifier()
    [meanImage, std] = inputProcessor.getNormalizationData(
        baxterClassifier.training_file)

    # Start Tensorflow Session
    with baxterClassifier.sess as sess:

        baxterClassifier.saver = tf.train.Saver()
        print("weight file to restore ... : ", baxterClassifier.weights_file)

        baxterClassifier.saver.restore(
            baxterClassifier.sess, baxterClassifier.weights_file)

        cv2.waitKey(1000)
        print("starting session... ")

        batch = inputProcessor.get_custom_dataset_batch(
            50, "data/spoon_test_data.csv", meanImage, std)
        image_batch = batch[0]
        label_batch = batch[1]
        batch_size = len(label_batch)

        prediction = tf.argmax(baxterClassifier.logits, 1)
        trueLabel = np.argmax(label_batch, 1)

        result = sess.run(prediction, feed_dict={
            baxterClassifier.x: image_batch,
            baxterClassifier.batch_size: batch_size,
            baxterClassifier.dropout_rate: 1})

        print("=============")
        print(result)
        print(trueLabel)
        print("=============\n\n")

        test_accuracy = baxterClassifier.accuracy.eval(feed_dict={baxterClassifier.x: image_batch,
                                                                  baxterClassifier.y: label_batch,
                                                                  baxterClassifier.batch_size: batch_size,
                                                                  baxterClassifier.dropout_rate: 1})
        print("\nTest Accuracy %.2f \n\n" % test_accuracy)
Code example #3
def main():
    baxterClassifier = BaxterClassifier()
    [meanImage, std] = inputProcessor.getNormalizationData(
        baxterClassifier.training_file)

    # Start Tensorflow Session
    with baxterClassifier.sess as sess:
        baxterClassifier.saver = tf.train.Saver()
        cv2.waitKey(1000)
        print("starting session... ")

        # INITIALIZE VARIABLES
        # merged = tf.summary.merge(["accuracy", "cross_entropy"])
        # train_writer = tf.summary.FileWriter("model/summary/")

        sess.run(tf.global_variables_initializer())
        # START TRAINING
        batch_index = 0
        i = 0
        while batch_index < 30000:

            print("starting  " + str(i) + "th  with batch index :  " +
                  str(batch_index) + "  training iteration..")
            i += 1

            ###################################################
            # GET BATCH (FOR CIFAR DATA SET)
            # batch_size = 50
            # batch = inputProcessor.get_next_cifar(batch_size, batch_index)
            # image_batch = batch[0]
            # label_batch = batch[1]
            # batch_index = batch_index + batch[2]
            # batch_size = len(label_batch)

            ###################################################
            # GET BATCH (FOR IMAGENET DATASET)
            # batch = inputProcessor.get_imagenet_batch(
            #     "data/train_data.csv", 10)
            # image_batch = batch[0]
            # label_batch = batch[1]
            # batch_index = batch_index + 100
            # batch_size = len(label_batch)

            ###################################################
            # GET BATCH FOR CUSTOM DATASET AND (FOR CALTECH DATASET)
            batch = inputProcessor.get_custom_dataset_batch(
                32, baxterClassifier.training_file, meanImage, std)
            image_batch = batch[0]
            label_batch = batch[1]
            batch_index = batch_index + 64
            batch_size = len(label_batch)

            ###################################################

            # PERIODIC PRINT-OUT FOR CHECKING
            if i % 20 == 0:
                prediction = tf.argmax(baxterClassifier.logits, 1)
                trueLabel = np.argmax(label_batch, 1)

                result = sess.run(prediction, feed_dict={
                    baxterClassifier.x: image_batch,
                    baxterClassifier.batch_size: batch_size,
                    baxterClassifier.dropout_rate: 1})

                print("=============")
                print(result)
                print(trueLabel)
                print("=============\n\n")

                # summary, train_accuracy = sess.run(
                #     [merged, baxterClassifier.accuracy], feed_dict={baxterClassifier.x: image_batch,
                #                                                     baxterClassifier.y: label_batch,
                #                                                     baxterClassifier.batch_size: batch_size,
                #                                                     baxterClassifier.dropout_rate: 1})
                # train_writer.add_summary(summary, i)

                train_accuracy = baxterClassifier.accuracy.eval(feed_dict={baxterClassifier.x: image_batch,
                                                                           baxterClassifier.y: label_batch,
                                                                           baxterClassifier.batch_size: batch_size,
                                                                           baxterClassifier.dropout_rate: 1})

                print("\nStep %d, Training Accuracy %.2f \n\n" % (i,
                                                                  train_accuracy))

            # ACTUAL TRAINING PROCESS
            baxterClassifier.train_op.run(feed_dict={baxterClassifier.x: image_batch,
                                                     baxterClassifier.y: label_batch,
                                                     baxterClassifier.batch_size: batch_size,
                                                     baxterClassifier.dropout_rate: 0.5})

        # DONE. SAVE MODEL
        save_path = baxterClassifier.saver.save(
            sess, baxterClassifier.weights_file)
        print("saving model to ", save_path)
Code example #4
    def callback(self, ros_data):
        '''Callback for the subscribed image topic.
        The compressed image is decoded and object detection is run on it.'''
        if VERBOSE:
            print('received image of type: "%s"' % ros_data.format)
    
        #### direct conversion to a NumPy image array ####
        np_arr = np.frombuffer(ros_data.data, np.uint8)
        image_np = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
        

        top_results = 2  # number of crops to show for detection
        predicting_class = 1
        predictions = []

        # TODO : tensorflow image detection starts here
        [meanImage, std] = inputProcessor.getNormalizationData("src/beginner_tutorials/scripts/data/custom_train_data.csv")
        baxterClassifier = baxter.BaxterClassifier()
        

        # Start Tensorflow Session
        with baxterClassifier.sess as sess:

            baxterClassifier.saver = tf.train.Saver()
            print("weight file to restore ... : ", baxterClassifier.weights_file)

            baxterClassifier.saver.restore(
                baxterClassifier.sess, "src/beginner_tutorials/scripts/"+baxterClassifier.weights_file)

            cv2.waitKey(1000)
            print("starting session... ")

            # GET IMAGE FROM USER INPUT
            batch = inputProcessor.regionProposal(image_np)

            if batch is None:
                print("something went wrong when getting images crops ... ")
                return 

            original_img = batch[0]
            image_batch = batch[1]
            boundingBoxInfo = batch[2]
            batch_size = len(image_batch)

            # CREATE INPUT IMAGE BATCH
            input_image = np.zeros(
                [len(image_batch), baxterClassifier.img_size, baxterClassifier.img_size, 3])

            for x in range(batch_size):
                input_image[x] = (image_batch[x] - meanImage) / std

            # RUN CASCADING DETECTOR
            print("batch size : ", batch_size)
            print("input tensor size : ", input_image.shape)

            prediction = sess.run(baxterClassifier.logits, feed_dict={
                baxterClassifier.x: input_image,
                baxterClassifier.batch_size: batch_size,
                baxterClassifier.dropout_rate: 1})

            # collect the target-class score for each crop
            for y in range(batch_size):
                prob = prediction[y][predicting_class]
                boundingBox = boundingBoxInfo[y]
                predictions.append([prob, boundingBox])

            # sort crops by logit values
            predictions.sort(reverse=True)

            for i in range(top_results):
                boundingBoxData = predictions[i]
                print(boundingBoxData)

                x = boundingBoxData[1][0]
                y = boundingBoxData[1][1]
                winW = boundingBoxData[1][2]
                winH = boundingBoxData[1][3]

                # if boundingBoxData[0] > threshold:
                cv2.rectangle(original_img, (x, y),
                              (x + winW, y + winH), (0, 255, 0), 2)

            cv2.imshow("Window", original_img)
            cv2.waitKey(1000)
            time.sleep(1)
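
The callback above reads ros_data.format and ros_data.data, which matches the sensor_msgs/CompressedImage message. The subscriber wiring is not included in the snippet; a minimal setup, with the topic name chosen purely as an example, might be:

import rospy
from sensor_msgs.msg import CompressedImage

def listener(detector):
    # detector is assumed to be the object that owns the callback above.
    rospy.init_node('image_detector', anonymous=True)
    rospy.Subscriber('/camera/image/compressed', CompressedImage,
                     detector.callback, queue_size=1)
    rospy.spin()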
Code example #5
def main(argvs):
    [meanImage,
     std] = inputProcessor.getNormalizationData("data/custom_train_data.csv")
    baxterClassifier = baxter.BaxterClassifier(argvs)
    top_results = 2  # number of crops to show for detection

    # Start Tensorflow Session
    with baxterClassifier.sess as sess:

        baxterClassifier.saver = tf.train.Saver()
        print("weight file to restore ... : ", baxterClassifier.weights_file)

        baxterClassifier.saver.restore(baxterClassifier.sess,
                                       baxterClassifier.weights_file)

        cv2.waitKey(1000)
        print("starting session... ")

        while True:
            # GET USER INPUT
            predictions = []
            img_filename = input('image location: ')
            predictingClass = int(input('class value: '))

            # GET IMAGE FROM USER INPUT
            batch = inputProcessor.get_sliding_window_img_crops(img_filename)
            if batch is None:
                print("wrong user input regarding image or labels")
                continue
            original_img = batch[0]
            image_batch = batch[1]
            boundingBoxInfo = batch[2]
            batch_size = len(image_batch)

            # CREATE INPUT IMAGE BATCH
            input_image = np.zeros([
                len(image_batch), baxterClassifier.img_size,
                baxterClassifier.img_size, 3
            ])

            for x in range(batch_size):
                input_image[x] = (image_batch[x] - meanImage) / std

            # RUN CASCADING DETECTOR
            print("batch size : ", batch_size)
            print("input tensor size : ", input_image.shape)

            prediction = sess.run(baxterClassifier.logits,
                                  feed_dict={
                                      baxterClassifier.x: input_image,
                                      baxterClassifier.batch_size: batch_size,
                                      baxterClassifier.dropout_rate: 1
                                  })

            # collect the target-class score for each crop
            for y in range(batch_size):
                prob = prediction[y][predictingClass]
                boundingBox = boundingBoxInfo[y]
                predictions.append([prob, boundingBox])

            # sort crops by logit values
            predictions.sort(reverse=True)

            for i in range(top_results):
                boundingBoxData = predictions[i]
                print(boundingBoxData)

                x = boundingBoxData[1][0]
                y = boundingBoxData[1][1]
                winW = boundingBoxData[1][2]
                winH = boundingBoxData[1][3]

                # if boundingBoxData[0] > threshold:
                cv2.rectangle(original_img, (x, y), (x + winW, y + winH),
                              (0, 255, 0), 2)

            cv2.imshow("Window", original_img)
            cv2.waitKey(1000)
            time.sleep(1)
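
inputProcessor.get_sliding_window_img_crops is expected to return the original image, a list of crops, and matching (x, y, w, h) bounding boxes. The helper itself is not shown; a simple sliding-window generator with that return shape, written as an illustrative guess rather than the project's actual code, follows.

import cv2

def get_sliding_window_img_crops(img_filename, win=64, step=32):
    # Returns [original image, list of crops, list of (x, y, w, h)] or None.
    img = cv2.imread(img_filename)
    if img is None:
        return None
    crops, boxes = [], []
    h, w = img.shape[:2]
    for y in range(0, h - win + 1, step):
        for x in range(0, w - win + 1, step):
            crops.append(img[y:y + win, x:x + win])
            boxes.append((x, y, win, win))
    return [img, crops, boxes]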