Example #1
    def __init__(self):
        self.node_name = "Object Detection"
        self.active = True

        # Detector Configuration
        rospack = rospkg.RosPack()
        self.configuration = rospy.get_param(
            '~object_detector')  # TODO: On-the-fly reconfiguration
        # TODO: Fix after a place for inference models is established
        self.object_detector = ObjectDetector(
            path.join(rospack.get_path('object_detection'),
                      self.configuration['inference_graph_path']),
            float(self.configuration['score_threshold']),
            bool(self.configuration['denormalize_boundingbox']))

        # Subscribers
        self.sub_image = rospy.Subscriber("~image_compressed",
                                          CompressedImage,
                                          self.on_image_received,
                                          queue_size=1)

        # Publishers
        self.pub_detection = rospy.Publisher("~detections",
                                             ObjectDetectionList,
                                             queue_size=1)
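The excerpt stops before the subscriber callback. A minimal sketch of on_image_received, assuming cv2 and numpy are imported and that ObjectDetector exposes a detect() method whose output fills an ObjectDetectionList (both are assumptions; neither appears in the excerpt):

    def on_image_received(self, msg):
        # Decode the ROS CompressedImage payload into a BGR OpenCV image
        image = cv2.imdecode(np.frombuffer(msg.data, np.uint8),
                             cv2.IMREAD_COLOR)
        detections = self.object_detector.detect(image)  # assumed API
        msg_out = ObjectDetectionList()
        # ... populate msg_out from detections (message fields not shown)
        self.pub_detection.publish(msg_out)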
Example #2
    def __init__(self, config):
        self.config_ = config
        # Load Models
        if (config.model_name == 'fchardnet'):
            self.seg_model_ = FCHarDNetSemanticSegmentation(config.model_path)
        else:
            self.seg_model_ = PSPNetSematicSegmentation(
                config.model_config_file)

        self.object_detector_ = ObjectDetector(config.model_path_yolo)

        # Load perspective transforms
        mtxs = np.load(config.perspective_transform_path)
        self.M_ = mtxs['M']
        self.M_inv_ = mtxs['M_inv']

        # Test Detection Models
        print('Segmentation and Detection Models loaded, Testing the models')
        img = cv2.imread("/usr/src/app/dev_ws/src/vision/vision/dolly.png")
        self.h_orig_, self.w_orig_ = self.config_.original_height, self.config_.original_width
        _, _ = self.seg_model_.process_img_driveable(
            img, [self.config_.original_height, self.config_.original_width],
            self.config_.drivable_idx)
        _ = self.object_detector_.process_frame(img)
        self.im_hw_ = self.object_detector_.im_hw

        print('Imgs tested')
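The perspective matrices are only loaded and smoke-tested here. As a usage sketch, cv2.warpPerspective applies M_ for a bird's-eye view and M_inv_ maps it back; keeping the original frame size is an assumption:

        bev = cv2.warpPerspective(img, self.M_, (self.w_orig_, self.h_orig_))
        img_back = cv2.warpPerspective(bev, self.M_inv_,
                                       (self.w_orig_, self.h_orig_))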
Example #3
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('image_file_paths',
                        nargs='*',
                        default=sys.stdin,
                        help='filepaths to the images for object detection.')
    parser.add_argument('-l',
                        '--logfile',
                        help='filepath for logging output, default is stdout.')
    parser.add_argument(
        '-o',
        '--outputsuffix',
        help=
        'output image filename is made up of the input filepath with this suffix inserted before the file extension.',
        default='_output')
    parser.add_argument(
        '-c',
        '--confidencethreshold',
        help=
        'number between 0 and 1 that represents the confidence level for declaring an object as detected. e.g. .5 requires greater than 50%% confidence.',
        default=0.5,
        type=float)
    args = parser.parse_args()

    if args.logfile is None:
        logging.basicConfig(format='%(message)s', level=logging.DEBUG)
    else:
        logging.basicConfig(filename=args.logfile,
                            format='%(message)s',
                            level=logging.DEBUG)

    net = cv2.dnn.readNetFromCaffe(prototxt, model)
    detector = ObjectDetector(net, classes, args.confidencethreshold)

    for f in args.image_file_paths:
        filepath = f.strip()
        detect_image(detector, filepath,
                     output_filepath(filepath, args.outputsuffix))
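output_filepath is not part of the excerpt; a sketch that matches the behaviour described by the --outputsuffix help text:

import os

def output_filepath(filepath, suffix):
    # Insert the suffix before the file extension, e.g. cat.jpg -> cat_output.jpg
    root, ext = os.path.splitext(filepath)
    return root + suffix + ext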
Example #4
                  for x in f.readlines()]
        id_map = [(x[0], x[1]) for x in id_map if x[0] in existingImages]
    minId, maxId = min([x[1] for x in id_map]), max([x[1] for x in id_map])

    idNum = None
    if len(args) > 4:
        idNum = int(args[4])

    rev_map = {}
    for x in id_map:
        if not x[1] in rev_map:
            rev_map[x[1]] = []
        rev_map[x[1]].append(x[0])

    print('Loading Object Detector')
    det = ObjectDetector(objdetmodel)
    det.SCORE_THRESHOLD = 0.7
    det.loadModel()
    # Warming up
    det.getBoundingBoxes(readImageFromFile(os.path.join(folder,
                                                        rev_map[0][0])))
    det.getBoundingBoxes(readImageFromFile(os.path.join(folder,
                                                        rev_map[0][0])))

    print('Loading Object Identifier')
    idn = SvmIdentifier(modelPath, det)
    idn.loadModel()

    if idNum is not None and not idn.isPresent(idNum):
        print('Provided Id {} is not present in trained model, '
              'will choose random Id'.format(idNum))
Example #5
from train_validate_svm import *
from extract_features_and_store_to_mongo import *
import numpy as np
from db_interface import DbInterface, DbRecord
from joblib import dump, load
from sklearn.decomposition import IncrementalPCA
import sys
import os
from object_detection import ObjectDetector
import time
import argparse
from test_svm_model import *

print('Loading object detector...')
det = ObjectDetector('ssd/saved_model')
det.loadModel()

#correct,count,times,cl_times = find_acc('jaguar-alexnet-fc7-linear-pca.model', 'jaguars/reid', 100, None, 3, None, det)
#print('Jaguars Test Accuracy: {:.4f}, out of {} images. Average Id Time: {:.3f}s, Average Classification Times: {:.3f}s'.format(correct/count, count, times/count, cl_times/count))

correct, count, times, cl_times = find_acc(
    'amur-alexnet-relu6-linear-pca.model', 'amur/plain_reid_train/train', 100,
    None, 3, None, det)
print(
    'Tigers Test Accuracy: {:.4f}, out of {} images. Average Id Time: {:.3f}s, Average Classification Times: {:.3f}s'
    .format(correct / count, count, times / count, cl_times / count))

#correct,count,times,cl_times = find_acc('elp-alexnet-fc7-linear-pca.model', 'ELPephants/images', 100, None, 3, None, det)
#print('Elephants Test Accuracy: {:.4f}, out of {} images. Average Id Time: {:.3f}s, Average Classification Times: {:.3f}s'.format(correct/count, count, times/count, cl_times/count))
Example #6
                required=True,
                help="path to the image to be classified")
args = vars(ap.parse_args())

# load the configuration file
conf = Conf(args["conf"])

# load the classifier, then initialize the Histogram of Oriented Gradients descriptor
# and the object detector
model = pickle.loads(open(conf["classifier_path"], "rb").read())
hog = HOG(orientations=conf["orientations"],
          pixelsPerCell=tuple(conf["pixels_per_cell"]),
          cellsPerBlock=tuple(conf["cells_per_block"]),
          normalize=conf["normalize"],
          block_norm="L1")
od = ObjectDetector(model, hog)

# load the image and convert it to grayscale
image = cv2.imread(args["image"])
image = imutils.resize(image, width=min(260, image.shape[1]))
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# detect objects in the image
(boxes, probs) = od.detect(gray,
                           conf["window_dim"],
                           winStep=conf["window_step"],
                           pyramidScale=conf["pyramid_scale"],
                           minProb=conf["min_probability"])

# loop over the bounding boxes and draw them
for (startX, startY, endX, endY) in boxes:
    # draw each detection; the original loop body is cut off here, and a
    # rectangle draw is the standard completion (an assumption)
    cv2.rectangle(image, (startX, startY), (endX, endY), (0, 0, 255), 2)
Example #7
    n = int(args[3])

folder = 'amur/detection_train/trainval'
images = ['{}/{}'.format(folder, x) for x in os.listdir(folder)]

imagelist = np.random.choice(images, n, replace=False)

print('Measuring performance over {} images...'.format(len(imagelist)))

config = args[1]
weights = args[2]

#model = YoloObjectDetector(config, weights, '../darknet/darknet/custom_data/detector.data')
#model.loadModel()

model = ObjectDetector(config)
model.loadModel()

print('Warming up...')
#model.getBoundingBoxes(images[0])
#model.getBoundingBoxes(images[1])
model.getBoundingBoxes(cv2.imread(images[0]))
model.getBoundingBoxes(cv2.imread(images[1]))
[cv2.imread(x) for x in imagelist]

print('Starting...')
times = []
for im in imagelist:
    #model.getBoundingBoxes(im)
    img = cv2.imread(im)
    st = time.time()
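The loop is truncated here; a sketch of how the timing plausibly concludes, using the times list initialised above (the exact reporting line is an assumption):

    model.getBoundingBoxes(img)
    times.append(time.time() - st)
print('Average detection time: {:.3f}s over {} images'.format(
    sum(times) / len(times), len(times)))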
Example #8
if len(args) == 4:
    n = min(int(args[3]), len(imageFiles))
else:
    n = len(imageFiles)

imageFiles = imageFiles[:n]

print('Running Detection on {} images'.format(n))

box_map = {}
with open(args[2]) as f:
    box_map = json.load(f)

print('Loading Object Detector...')
det = ObjectDetector('ssd_quantized_1L_steps/saved_model')
det.loadModel()
#det.SCORE_THRESHOLD = 0.25
#det.IOU_THRESHOLD = 0.01

print('Warming up model...')
det.getBoundingBoxes(cv2.imread(imageFiles[0]))
det.getBoundingBoxes(cv2.imread(imageFiles[0]))

batchSize = 10
if 'jaguar' in folder.lower():
    expected_classid = 1  # jaguars
elif 'elp' in folder.lower() or 'elephant' in folder.lower():
    expected_classid = 2  # elephants
elif 'amur' in folder.lower():
    expected_classid = 3  # Amur tigers
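The evaluation itself is cut off; a rough sketch, assuming getBoundingBoxes returns per-detection tuples whose second field is the class id (an assumed layout, not confirmed by the excerpt):

hits = 0
for fp in imageFiles:
    boxes = det.getBoundingBoxes(cv2.imread(fp))
    # count images with at least one detection of the expected class
    hits += any(b[1] == expected_classid for b in boxes)
print('Images with an expected-class detection: {}/{}'.format(hits, n))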
Example #9
    th = threading.Thread(target=client.publishImageWithDetections,
                          args=[image, detections])
    th.daemon = True
    th.start()


if __name__ == '__main__':
    args = sys.argv

    if len(args) < 3:
        print('Usage: cmd <Model path> <MQTT server IP> [display]')
        exit()

    modelpath = args[1]
    server = args[2]
    detector = ObjectDetector(modelpath, numthreads=4)
    detector.loadModel()

    display = False
    if len(args) > 3 and args[3] == 'display':
        display = True

    publisher = TrackPublisher(DEVICE_NAME, server)
    publisher.register()
    imagePublisher = ImagePublisher(DEVICE_NAME, server)

    print('Registered with MQTT server, publishing messages on channel: {}'.
          format(DEVICE_NAME))

    with picamera.PiCamera(resolution=(640, 480), framerate=30) as camera:
        #camera.start_recording('my_video.h264')
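The capture loop is cut off; a minimal sketch of how it could continue, pulling JPEG frames with picamera's capture_continuous, decoding them with OpenCV, and handing them to the detector (cv2, numpy and io imports are assumed; publishing would presumably reuse the threaded helper at the top of the excerpt):

        stream = io.BytesIO()
        for _ in camera.capture_continuous(stream, format='jpeg',
                                           use_video_port=True):
            stream.seek(0)
            frame = cv2.imdecode(np.frombuffer(stream.read(), np.uint8),
                                 cv2.IMREAD_COLOR)
            detections = detector.getBoundingBoxes(frame)
            # rewind and clear the stream for the next frame
            stream.seek(0)
            stream.truncate()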
Example #10
    def __init__(self):
        self.object_detector = ObjectDetector()

        # connect to collision map processing service
        rospy.loginfo('waiting for tabletop_collision_map_processing service')
        rospy.wait_for_service(
            '/tabletop_collision_map_processing/tabletop_collision_map_processing'
        )
        self.collision_map_processing_srv = rospy.ServiceProxy(
            '/tabletop_collision_map_processing/tabletop_collision_map_processing',
            TabletopCollisionMapProcessing)
        rospy.loginfo('connected to tabletop_collision_map_processing service')

        # connect to gripper action server
        rospy.loginfo('waiting for wubble_gripper_grasp_action')
        self.posture_controller = SimpleActionClient(
            '/wubble_gripper_grasp_action', GraspHandPostureExecutionAction)
        self.posture_controller.wait_for_server()
        rospy.loginfo('connected to wubble_gripper_grasp_action')

        rospy.loginfo('waiting for wubble_grasp_status service')
        rospy.wait_for_service('/wubble_grasp_status')
        self.get_grasp_status_srv = rospy.ServiceProxy('/wubble_grasp_status',
                                                       GraspStatus)
        rospy.loginfo('connected to wubble_grasp_status service')

        rospy.loginfo('waiting for classify service')
        rospy.wait_for_service('/classify')
        self.classification_srv = rospy.ServiceProxy('/classify', classify)
        rospy.loginfo('connected to classify service')

        # connect to gripper action server
        rospy.loginfo('waiting for wubble_gripper_action')
        self.gripper_controller = SimpleActionClient('/wubble_gripper_action',
                                                     WubbleGripperAction)
        self.gripper_controller.wait_for_server()
        rospy.loginfo('connected to wubble_gripper_action')

        # connect to wubble actions
        rospy.loginfo('waiting for drop_object')
        self.drop_object_client = SimpleActionClient('/drop_object',
                                                     DropObjectAction)
        self.drop_object_client.wait_for_server()
        rospy.loginfo('connected to drop_object')

        rospy.loginfo('waiting for grasp_object')
        self.grasp_object_client = SimpleActionClient('/grasp_object',
                                                      GraspObjectAction)
        self.grasp_object_client.wait_for_server()
        rospy.loginfo('connected to grasp_object')

        rospy.loginfo('waiting for lift_object')
        self.lift_object_client = SimpleActionClient('/lift_object',
                                                     LiftObjectAction)
        self.lift_object_client.wait_for_server()
        rospy.loginfo('connected to lift_object')

        rospy.loginfo('waiting for place_object')
        self.place_object_client = SimpleActionClient('/place_object',
                                                      PlaceObjectAction)
        self.place_object_client.wait_for_server()
        rospy.loginfo('connected to place_object')

        rospy.loginfo('waiting for push_object')
        self.push_object_client = SimpleActionClient('/push_object',
                                                     PushObjectAction)
        self.push_object_client.wait_for_server()
        rospy.loginfo('connected to push_object')

        rospy.loginfo('waiting for shake_roll_object')
        self.shake_roll_object_client = SimpleActionClient(
            '/shake_roll_object', ShakeRollObjectAction)
        self.shake_roll_object_client.wait_for_server()
        rospy.loginfo('connected to shake_roll_object')

        rospy.loginfo('waiting for shake_pitch_object')
        self.shake_pitch_object_client = SimpleActionClient(
            '/shake_pitch_object', ShakePitchObjectAction)
        self.shake_pitch_object_client.wait_for_server()
        rospy.loginfo('connected to shake_pitch_object')

        rospy.loginfo('waiting for ready_arm')
        self.ready_arm_client = SimpleActionClient('/ready_arm',
                                                   ReadyArmAction)
        self.ready_arm_client.wait_for_server()
        rospy.loginfo('connected to ready_arm')

        # advertise InfoMax service
        rospy.Service('get_category_distribution', InfoMax,
                      self.process_infomax_request)

        rospy.loginfo(
            'all services contacted, object_categorization is ready to go')

        self.ACTION_INFO = {
            InfomaxAction.GRASP: {
                'client': self.grasp_object_client,
                'goal': GraspObjectGoal(),
                'prereqs': [InfomaxAction.GRASP]
            },
            InfomaxAction.LIFT: {
                'client': self.lift_object_client,
                'goal': LiftObjectGoal(),
                'prereqs': [InfomaxAction.GRASP, InfomaxAction.LIFT]
            },
            InfomaxAction.SHAKE_ROLL: {
                'client': self.shake_roll_object_client,
                'goal': ShakeRollObjectGoal(),
                'prereqs': [InfomaxAction.GRASP, InfomaxAction.LIFT,
                            InfomaxAction.SHAKE_ROLL]
            },
            InfomaxAction.DROP: {
                'client': self.drop_object_client,
                'goal': DropObjectGoal(),
                'prereqs': [InfomaxAction.GRASP, InfomaxAction.LIFT,
                            InfomaxAction.DROP]
            },
            InfomaxAction.PLACE: {
                'client': self.place_object_client,
                'goal': PlaceObjectGoal(),
                'prereqs': [InfomaxAction.GRASP, InfomaxAction.LIFT,
                            InfomaxAction.PLACE]
            },
            InfomaxAction.PUSH: {
                'client': self.push_object_client,
                'goal': PushObjectGoal(),
                'prereqs': [InfomaxAction.GRASP, InfomaxAction.LIFT,
                            InfomaxAction.PLACE, InfomaxAction.PUSH]
            },
            InfomaxAction.SHAKE_PITCH: {
                'client': self.shake_pitch_object_client,
                'goal': ShakePitchObjectGoal(),
                'prereqs': [InfomaxAction.GRASP, InfomaxAction.LIFT,
                            InfomaxAction.SHAKE_PITCH]
            },
        }
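A sketch of how ACTION_INFO could drive execution, using the actionlib SimpleActionClient calls (send_goal, wait_for_result, get_result) on each entry; the dispatch method itself, execute_action here, is hypothetical and not part of the excerpt:

    def execute_action(self, action):
        # Look up the registered client/goal pair and run it to completion
        info = self.ACTION_INFO[action]
        info['client'].send_goal(info['goal'])
        info['client'].wait_for_result()
        return info['client'].get_result()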
Example #11
    def __init__(self):
        self.detector = ObjectDetector()
        # 'http://host.docker.internal:5000/api/' #
        self.base_url = os.environ["MEDIA_API_URL"]
        print("API Url: {}".format(self.base_url))