Example #1
    def __init__(self):
        self.node_name = "Object Detection"
        self.active = True

        # Detector Configuration
        rospack = rospkg.RosPack()
        self.configuration = rospy.get_param(
            '~object_detector')  # TODO: On-the-fly reconfiguration
        # TODO: Fix after a place for inference models is established
        self.object_detector = ObjectDetector(
            path.join(rospack.get_path('object_detection'),
                      self.configuration['inference_graph_path']),
            float(self.configuration['score_threshold']),
            bool(self.configuration['denormalize_boundingbox']))

        # Subscribers
        self.sub_image = rospy.Subscriber("~image_compressed",
                                          CompressedImage,
                                          self.on_image_received,
                                          queue_size=1)

        # Publishers
        self.pub_detection = rospy.Publisher("~detections",
                                             ObjectDetectionList,
                                             queue_size=1)
Example #2
    def __init__(self, config):
        self.config_ = config
        # Load Models
        if (config.model_name == 'fchardnet'):
            self.seg_model_ = FCHarDNetSemanticSegmentation(config.model_path)
        else:
            self.seg_model_ = PSPNetSematicSegmentation(
                config.model_config_file)

        self.object_detector_ = ObjectDetector(config.model_path_yolo)

        # Load perspective transforms
        mtxs = np.load(config.perspective_transform_path)
        self.M_ = mtxs['M']
        self.M_inv_ = mtxs['M_inv']

        # Test Detection Models
        print('Segmentation and Detection Models loaded, Testing the models')
        img = cv2.imread("/usr/src/app/dev_ws/src/vision/vision/dolly.png")
        self.h_orig_, self.w_orig_ = self.config_.original_height, self.config_.original_width
        _, _ = self.seg_model_.process_img_driveable(
            img, [self.config_.original_height, self.config_.original_width],
            self.config_.drivable_idx)
        _ = self.object_detector_.process_frame(img)
        self.im_hw_ = self.object_detector_.im_hw

        print('Imgs tested')
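The constructor reads everything from a config object. A hypothetical stand-in showing the attributes it accesses (every value below is a placeholder, not from the original project):

from types import SimpleNamespace

# Hypothetical config; each attribute below is read by __init__ above.
config = SimpleNamespace(
    model_name='fchardnet',                 # selects FCHarDNet vs. PSPNet
    model_path='models/fchardnet.pth',      # placeholder path
    model_config_file='models/pspnet.yaml', # only used for the PSPNet branch
    model_path_yolo='models/yolo.weights',
    perspective_transform_path='perspective.npz',  # must contain 'M' and 'M_inv'
    original_height=720,
    original_width=1280,
    drivable_idx=0,
)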
Example #3
class ObjectDetectionNode:
    def __init__(self):
        self.node_name = "Object Detection"
        self.active = True

        # Detector Configuration
        rospack = rospkg.RosPack()
        self.configuration = rospy.get_param(
            '~object_detector')  # TODO: On-the-fly reconfiguration
        # TODO: Fix after a place for inference models is established
        self.object_detector = ObjectDetector(
            path.join(rospack.get_path('object_detection'),
                      self.configuration['inference_graph_path']),
            float(self.configuration['score_threshold']),
            bool(self.configuration['denormalize_boundingbox']))

        # Subscribers
        self.sub_image = rospy.Subscriber("~image_compressed",
                                          CompressedImage,
                                          self.on_image_received,
                                          queue_size=1)

        # Publishers
        self.pub_detection = rospy.Publisher("~detections",
                                             ObjectDetectionList,
                                             queue_size=1)

        # timer for updating the params
        # self.timer = rospy.Timer(rospy.Duration.from_sec(1.0), self.updateParams)

    def on_image_received(self, compressed_image):
        detections = self.object_detector.detect(
            rgb_from_ros(compressed_image))

        if len(detections) > 0:
            detection_list_msg = ObjectDetectionList()
            for detection in detections:
                detection_list_msg.detections.append(
                    Detection(class_label=detection['class_label'],
                              class_id=detection['class_id'],
                              xmin=detection['xmin'],
                              xmax=detection['xmax'],
                              ymin=detection['ymin'],
                              ymax=detection['ymax'],
                              score=detection['score']))

            self.pub_detection.publish(detection_list_msg)

    def onShutdown(self):
        self.object_detector.finalize()
        rospy.loginfo("[ObjectDetectionNode] Shutdown.")

    def loginfo(self, s):
        rospy.loginfo('[%s] %s' % (self.node_name, s))
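A typical ROS 1 entry point for this node might look like the following sketch; the node name and shutdown hook wiring are assumptions, not shown in the original:

# Sketch of a ROS 1 entry point for ObjectDetectionNode (assumed, not from the source).
if __name__ == '__main__':
    rospy.init_node('object_detection_node', anonymous=False)
    node = ObjectDetectionNode()
    rospy.on_shutdown(node.onShutdown)
    rospy.spin()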
Example #4
class ImageAIJob:
    def __init__(self):
        self.detector = ObjectDetector()
        # e.g. 'http://host.docker.internal:5000/api/'
        self.base_url = os.environ["MEDIA_API_URL"]
        print("API Url: {}".format(self.base_url))

    def run(self):

        r = requests.get('{}ai/MediaWithoutImageAISource'.format(
            self.base_url))
        medias = r.json()

        for media in medias:

            id = media["id"]
            result = {}
            result["mediaId"] = id
            print("detect items for media: {}".format(id))

            filename = "./tmp/{}.jpg".format(id)

            try:
                imageR = requests.get('{}ai/image/{}'.format(
                    self.base_url, id),
                                      stream=True)

                imageR.raise_for_status()
                img = imageR.raw.read()

                with open(filename, 'wb') as f:
                    f.write(img)

                items = self.detector.detect(filename, media["dimension"])

                result["items"] = items
                result["success"] = True

                print("Found {} items in media: {}".format(
                    len(result["items"]), id))

                os.remove(filename)

            except Exception as e:
                result["success"] = False
                result["error"] = str(e)
                print(result["error"])

            r_save = requests.post('{}ai/save'.format(self.base_url),
                                   json=result)
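The job is presumably driven by a small entry point; a minimal sketch, assuming MEDIA_API_URL is set in the environment and that the ./tmp scratch directory must exist before images are written:

# Sketch of a driver for ImageAIJob (assumed, not from the source).
if __name__ == '__main__':
    os.makedirs('./tmp', exist_ok=True)  # run() writes temp images here
    ImageAIJob().run()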
Example #5
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('image_file_paths',
                        nargs='*',
                        default=sys.stdin,
                        help='filepaths to the images for object detection.')
    parser.add_argument('-l',
                        '--logfile',
                        help='filepath for logging output, default is stdout.')
    parser.add_argument(
        '-o',
        '--outputsuffix',
        help=
        'output image filename is made up of the input filepath with this suffix inserted before the file extension.',
        default='_output')
    parser.add_argument(
        '-c',
        '--confidencethreshold',
        help=
        'number between 0 and 1 that represents the confidence level for declaring an object as detected. e.g. .5 requires greater than 50%% confidence.',
        default=0.5,
        type=float)
    args = parser.parse_args()

    if args.logfile is None:
        logging.basicConfig(format='%(message)s', level=logging.DEBUG)
    else:
        logging.basicConfig(filename=args.logfile,
                            format='%(message)s',
                            level=logging.DEBUG)

    net = cv2.dnn.readNetFromCaffe(prototxt, model)
    detector = ObjectDetector(net, classes, args.confidencethreshold)

    for f in args.image_file_paths:
        filepath = f.strip()
        detect_image(detector, filepath,
                     output_filepath(filepath, args.outputsuffix))
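prototxt, model, and classes are module-level globals not shown in this snippet. For a Caffe MobileNet-SSD they would look roughly like the sketch below; the file names and the 21-entry class list are assumptions based on the commonly distributed model, not the original source:

# Assumed module-level globals used by main(); adjust to the actual model files.
prototxt = 'MobileNetSSD_deploy.prototxt'
model = 'MobileNetSSD_deploy.caffemodel'
classes = ['background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
           'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog',
           'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa',
           'train', 'tvmonitor']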
Example #6
images = [
    '{}/{}'.format(folder, x) for x in os.listdir(folder) if x.endswith('.jpg')
]

if len(args) == 3:
    maxImages = int(args[2])
    images = images[:maxImages]

# yolo_labels = loadYoloLabels('../darknet/darknet/data/coco.names')
# yolo_expected_classid = 20 # Elephant. Tiger is not available

# model = YoloObjectDetector('../darknet/darknet/cfg/yolov3.cfg', '../darknet/darknet/yolov3.weights')

# runModelMetrics(model, images, yolo_labels, yolo_expected_classid, 'YOLO')

print()
model = ObjectDetector('ssd/saved_model')
if 'amur' in folder:
    ssd_expected_classid = [388]  # Tiger
elif 'elp' in folder:
    ssd_expected_classid = [448]
else:
    ssd_expected_classid = [275, 451, 458]

ssd_labels = loadSsdLabels('ssd/label_mapping.csv')

runModelMetrics(model, images, ssd_labels, ssd_expected_classid,
                'MobileNet-SSD')

plt.show()
Example #7
 def __init__(self):
     self.object_detector = ObjectDetector()
     
     # connect to collision map processing service
     rospy.loginfo('waiting for tabletop_collision_map_processing service')
     rospy.wait_for_service('/tabletop_collision_map_processing/tabletop_collision_map_processing')
     self.collision_map_processing_srv = rospy.ServiceProxy('/tabletop_collision_map_processing/tabletop_collision_map_processing', TabletopCollisionMapProcessing)
     rospy.loginfo('connected to tabletop_collision_map_processing service')
     
     # connect to gripper action server
     rospy.loginfo('waiting for wubble_gripper_grasp_action')
     self.posture_controller = SimpleActionClient('/wubble_gripper_grasp_action', GraspHandPostureExecutionAction)
     self.posture_controller.wait_for_server()
     rospy.loginfo('connected to wubble_gripper_grasp_action')
     
     rospy.loginfo('waiting for wubble_grasp_status service')
     rospy.wait_for_service('/wubble_grasp_status')
     self.get_grasp_status_srv = rospy.ServiceProxy('/wubble_grasp_status', GraspStatus)
     rospy.loginfo('connected to wubble_grasp_status service')
     
     rospy.loginfo('waiting for classify service')
     rospy.wait_for_service('/classify')
     self.classification_srv = rospy.ServiceProxy('/classify', classify)
     rospy.loginfo('connected to classify service')
     
     # connect to gripper action server
     rospy.loginfo('waiting for wubble_gripper_action')
     self.gripper_controller = SimpleActionClient('/wubble_gripper_action', WubbleGripperAction)
     self.gripper_controller.wait_for_server()
     rospy.loginfo('connected to wubble_gripper_action')
     
     # connect to wubble actions
     rospy.loginfo('waiting for drop_object')
     self.drop_object_client = SimpleActionClient('/drop_object', DropObjectAction)
     self.drop_object_client.wait_for_server()
     rospy.loginfo('connected to drop_object')
     
     rospy.loginfo('waiting for grasp_object')
     self.grasp_object_client = SimpleActionClient('/grasp_object', GraspObjectAction)
     self.grasp_object_client.wait_for_server()
     rospy.loginfo('connected to grasp_object')
     
     rospy.loginfo('waiting for lift_object')
     self.lift_object_client = SimpleActionClient('/lift_object', LiftObjectAction)
     self.lift_object_client.wait_for_server()
     rospy.loginfo('connected to lift_object')
     
     rospy.loginfo('waiting for place_object')
     self.place_object_client = SimpleActionClient('/place_object', PlaceObjectAction)
     self.place_object_client.wait_for_server()
     rospy.loginfo('connected to place_object')
     
     rospy.loginfo('waiting for push_object')
     self.push_object_client = SimpleActionClient('/push_object', PushObjectAction)
     self.push_object_client.wait_for_server()
     rospy.loginfo('connected to push_object')
     
     rospy.loginfo('waiting for shake_roll_object')
     self.shake_roll_object_client = SimpleActionClient('/shake_roll_object', ShakeRollObjectAction)
     self.shake_roll_object_client.wait_for_server()
     rospy.loginfo('connected to shake_roll_object')
     
     rospy.loginfo('waiting for shake_pitch_object')
     self.shake_pitch_object_client = SimpleActionClient('/shake_pitch_object', ShakePitchObjectAction)
     self.shake_pitch_object_client.wait_for_server()
     rospy.loginfo('connected to shake_pitch_object')
     
     rospy.loginfo('waiting for ready_arm')
     self.ready_arm_client = SimpleActionClient('/ready_arm', ReadyArmAction)
     self.ready_arm_client.wait_for_server()
     rospy.loginfo('connected to ready_arm')
     
     # advertise InfoMax service
     rospy.Service('get_category_distribution', InfoMax, self.process_infomax_request)
     
     rospy.loginfo('all services contacted, object_categorization is ready to go')
     
     self.ACTION_INFO = {
         InfomaxAction.GRASP: {
             'client': self.grasp_object_client,
             'goal': GraspObjectGoal(),
             'prereqs': [InfomaxAction.GRASP]
         },
         
         InfomaxAction.LIFT: {
             'client': self.lift_object_client,
             'goal': LiftObjectGoal(),
             'prereqs': [InfomaxAction.GRASP, InfomaxAction.LIFT]
         },
         
         InfomaxAction.SHAKE_ROLL: {
             'client': self.shake_roll_object_client,
             'goal': ShakeRollObjectGoal(),
             'prereqs': [InfomaxAction.GRASP, InfomaxAction.LIFT, InfomaxAction.SHAKE_ROLL]
         },
         
         InfomaxAction.DROP: {
             'client': self.drop_object_client,
             'goal': DropObjectGoal(),
             'prereqs': [InfomaxAction.GRASP, InfomaxAction.LIFT, InfomaxAction.DROP]
         },
         
         InfomaxAction.PLACE: {
             'client': self.place_object_client,
             'goal': PlaceObjectGoal(),
             'prereqs': [InfomaxAction.GRASP, InfomaxAction.LIFT, InfomaxAction.PLACE]
         },
         
         InfomaxAction.PUSH: {
             'client': self.push_object_client,
             'goal': PushObjectGoal(),
             'prereqs': [InfomaxAction.GRASP, InfomaxAction.LIFT, InfomaxAction.PLACE, InfomaxAction.PUSH]
         },
         
         InfomaxAction.SHAKE_PITCH: {
             'client': self.shake_pitch_object_client,
             'goal': ShakePitchObjectGoal(),
             'prereqs': [InfomaxAction.GRASP, InfomaxAction.LIFT, InfomaxAction.SHAKE_PITCH]
         },
     }
Example #8
class ObjectCategorizer():
    def __init__(self):
        self.object_detector = ObjectDetector()
        
        # connect to collision map processing service
        rospy.loginfo('waiting for tabletop_collision_map_processing service')
        rospy.wait_for_service('/tabletop_collision_map_processing/tabletop_collision_map_processing')
        self.collision_map_processing_srv = rospy.ServiceProxy('/tabletop_collision_map_processing/tabletop_collision_map_processing', TabletopCollisionMapProcessing)
        rospy.loginfo('connected to tabletop_collision_map_processing service')
        
        # connect to gripper action server
        rospy.loginfo('waiting for wubble_gripper_grasp_action')
        self.posture_controller = SimpleActionClient('/wubble_gripper_grasp_action', GraspHandPostureExecutionAction)
        self.posture_controller.wait_for_server()
        rospy.loginfo('connected to wubble_gripper_grasp_action')
        
        rospy.loginfo('waiting for wubble_grasp_status service')
        rospy.wait_for_service('/wubble_grasp_status')
        self.get_grasp_status_srv = rospy.ServiceProxy('/wubble_grasp_status', GraspStatus)
        rospy.loginfo('connected to wubble_grasp_status service')
        
        rospy.loginfo('waiting for classify service')
        rospy.wait_for_service('/classify')
        self.classification_srv = rospy.ServiceProxy('/classify', classify)
        rospy.loginfo('connected to classify service')
        
        # connect to gripper action server
        rospy.loginfo('waiting for wubble_gripper_action')
        self.gripper_controller = SimpleActionClient('/wubble_gripper_action', WubbleGripperAction)
        self.gripper_controller.wait_for_server()
        rospy.loginfo('connected to wubble_gripper_action')
        
        # connect to wubble actions
        rospy.loginfo('waiting for drop_object')
        self.drop_object_client = SimpleActionClient('/drop_object', DropObjectAction)
        self.drop_object_client.wait_for_server()
        rospy.loginfo('connected to drop_object')
        
        rospy.loginfo('waiting for grasp_object')
        self.grasp_object_client = SimpleActionClient('/grasp_object', GraspObjectAction)
        self.grasp_object_client.wait_for_server()
        rospy.loginfo('connected to grasp_object')
        
        rospy.loginfo('waiting for lift_object')
        self.lift_object_client = SimpleActionClient('/lift_object', LiftObjectAction)
        self.lift_object_client.wait_for_server()
        rospy.loginfo('connected to lift_object')
        
        rospy.loginfo('waiting for place_object')
        self.place_object_client = SimpleActionClient('/place_object', PlaceObjectAction)
        self.place_object_client.wait_for_server()
        rospy.loginfo('connected to place_object')
        
        rospy.loginfo('waiting for push_object')
        self.push_object_client = SimpleActionClient('/push_object', PushObjectAction)
        self.push_object_client.wait_for_server()
        rospy.loginfo('connected to push_object')
        
        rospy.loginfo('waiting for shake_roll_object')
        self.shake_roll_object_client = SimpleActionClient('/shake_roll_object', ShakeRollObjectAction)
        self.shake_roll_object_client.wait_for_server()
        rospy.loginfo('connected to shake_roll_object')
        
        rospy.loginfo('waiting for shake_pitch_object')
        self.shake_pitch_object_client = SimpleActionClient('/shake_pitch_object', ShakePitchObjectAction)
        self.shake_pitch_object_client.wait_for_server()
        rospy.loginfo('connected to shake_pitch_object')
        
        rospy.loginfo('waiting for ready_arm')
        self.ready_arm_client = SimpleActionClient('/ready_arm', ReadyArmAction)
        self.ready_arm_client.wait_for_server()
        rospy.loginfo('connected to ready_arm')
        
        # advertise InfoMax service
        rospy.Service('get_category_distribution', InfoMax, self.process_infomax_request)
        
        rospy.loginfo('all services contacted, object_categorization is ready to go')
        
        self.ACTION_INFO = {
            InfomaxAction.GRASP: {
                'client': self.grasp_object_client,
                'goal': GraspObjectGoal(),
                'prereqs': [InfomaxAction.GRASP]
            },
            
            InfomaxAction.LIFT: {
                'client': self.lift_object_client,
                'goal': LiftObjectGoal(),
                'prereqs': [InfomaxAction.GRASP, InfomaxAction.LIFT]
            },
            
            InfomaxAction.SHAKE_ROLL: {
                'client': self.shake_roll_object_client,
                'goal': ShakeRollObjectGoal(),
                'prereqs': [InfomaxAction.GRASP, InfomaxAction.LIFT, InfomaxAction.SHAKE_ROLL]
            },
            
            InfomaxAction.DROP: {
                'client': self.drop_object_client,
                'goal': DropObjectGoal(),
                'prereqs': [InfomaxAction.GRASP, InfomaxAction.LIFT, InfomaxAction.DROP]
            },
            
            InfomaxAction.PLACE: {
                'client': self.place_object_client,
                'goal': PlaceObjectGoal(),
                'prereqs': [InfomaxAction.GRASP, InfomaxAction.LIFT, InfomaxAction.PLACE]
            },
            
            InfomaxAction.PUSH: {
                'client': self.push_object_client,
                'goal': PushObjectGoal(),
                'prereqs': [InfomaxAction.GRASP, InfomaxAction.LIFT, InfomaxAction.PLACE, InfomaxAction.PUSH]
            },
            
            InfomaxAction.SHAKE_PITCH: {
                'client': self.shake_pitch_object_client,
                'goal': ShakePitchObjectGoal(),
                'prereqs': [InfomaxAction.GRASP, InfomaxAction.LIFT, InfomaxAction.SHAKE_PITCH]
            },
        }


    def open_gripper(self):
        pg = GraspHandPostureExecutionGoal()
        pg.goal = GraspHandPostureExecutionGoal.RELEASE
        
        self.posture_controller.send_goal(pg)
        self.posture_controller.wait_for_result()


    def gentle_close_gripper(self):
        pg = GraspHandPostureExecutionGoal()
        pg.goal = GraspHandPostureExecutionGoal.GRASP
        
        self.posture_controller.send_goal(pg)
        self.posture_controller.wait_for_result()
        
        goal = WubbleGripperGoal()
        goal.command = WubbleGripperGoal.CLOSE_GRIPPER
        goal.torque_limit = 0.0
        goal.dynamic_torque_control = False
        
        self.gripper_controller.send_goal(goal)
        self.gripper_controller.wait_for_result()


    def segment_objects(self):
        res = self.object_detector.detect()
        
        if res is None:
            rospy.logerr('TabletopSegmentation did not find any clusters')
            return None
            
        segmentation_result = res['segmentation_result']
        self.info = res['cluster_information']
        
        tdr = TabletopDetectionResult()
        tdr.table = segmentation_result.table
        tdr.clusters = segmentation_result.clusters
        tdr.result = segmentation_result.result
        
        tcmpr = self.update_collision_map(tdr)
        
        return tcmpr


    def reset_collision_map(self):
        req = TabletopCollisionMapProcessingRequest()
        req.detection_result = TabletopDetectionResult()
        req.reset_collision_models = True
        req.reset_attached_models = True
        
        self.collision_map_processing_srv(req)
        rospy.loginfo('collision map reset')


    def update_collision_map(self, tabletop_detection_result):
        rospy.loginfo('collision_map update in progress')
        
        req = TabletopCollisionMapProcessingRequest()
        req.detection_result = tabletop_detection_result
        req.reset_collision_models = True
        req.reset_attached_models = True
        req.desired_frame = 'base_link'
        
        res = self.collision_map_processing_srv(req)
        
        rospy.loginfo('collision_map update is done')
        rospy.loginfo('there are %d graspable objects %s, collision support surface name is "%s"' %
                      (len(res.graspable_objects), res.collision_object_names, res.collision_support_surface_name))
                      
        return res


    def reset_robot(self, tabletop_collision_map_processing_result=None):
        rospy.loginfo('resetting robot')
        ordered_collision_operations = OrderedCollisionOperations()
        
        if tabletop_collision_map_processing_result:
            closest_index = self.info[0][0]
            collision_object_name = tabletop_collision_map_processing_result.collision_object_names[closest_index]
            collision_support_surface_name = tabletop_collision_map_processing_result.collision_support_surface_name
            
            collision_operation = CollisionOperation()
            collision_operation.object1 = collision_support_surface_name
            collision_operation.object2 = ARM_GROUP_NAME
            collision_operation.operation = CollisionOperation.DISABLE
            collision_operation.penetration_distance = 0.02
            ordered_collision_operations.collision_operations = [collision_operation]
            
            collision_operation = CollisionOperation()
            collision_operation.object1 = collision_support_surface_name
            collision_operation.object2 = GRIPPER_GROUP_NAME
            collision_operation.operation = CollisionOperation.DISABLE
            collision_operation.penetration_distance = 0.02
            ordered_collision_operations.collision_operations.append(collision_operation)
            
            collision_operation = CollisionOperation()
            collision_operation.object1 = collision_object_name
            collision_operation.object2 = GRIPPER_GROUP_NAME
            collision_operation.operation = CollisionOperation.DISABLE
            collision_operation.penetration_distance = 0.02
            ordered_collision_operations.collision_operations.append(collision_operation)
        else:
            self.reset_collision_map()
            
        current_state = find_current_arm_state()
        rospy.loginfo('arm is currently in %s state' % current_state)
        
        if current_state not in ['READY', 'TUCK']:
            # check if the gripper is empty
            grasp_status = self.get_grasp_status_srv()
            if grasp_status.is_hand_occupied:
                rospy.loginfo('arm is not in any of the following states %s, opening gripper' % str(['READY', 'TUCK']))
                self.open_gripper()
                
        if current_state != 'READY' and not self.ready_arm(ordered_collision_operations): return False
        self.gentle_close_gripper()
        return True


    def ready_arm(self, collision_operations=OrderedCollisionOperations()):
        """
        Moves the arm to the side out of the view of all sensors. In this
        position the arm is ready to perform a grasp action.
        """
        goal = ReadyArmGoal()
        goal.collision_operations = collision_operations
        state = self.ready_arm_client.send_goal_and_wait(goal)
        
        if state == GoalStatus.SUCCEEDED: return True
        else: return False


    def execute_action(self, action, tabletop_collision_map_processing_result):
        goal = self.ACTION_INFO[action]['goal']
        goal.category_id = self.category_id
        closest_index = self.info[0][0]
        goal.graspable_object = tabletop_collision_map_processing_result.graspable_objects[closest_index]
        goal.collision_object_name = tabletop_collision_map_processing_result.collision_object_names[closest_index]
        goal.collision_support_surface_name = tabletop_collision_map_processing_result.collision_support_surface_name
        state = self.ACTION_INFO[action]['client'].send_goal_and_wait(goal)
        
        if state == GoalStatus.SUCCEEDED:
            result = self.ACTION_INFO[action]['client'].get_result()
            return result.recorded_sound
        else:
            return None


    # receive an InfoMax service request containing object ID and desired action
    def process_infomax_request(self, req):
        self.object_names = req.objectNames
        self.action_names = req.actionNames
        self.num_categories = req.numCats
        self.category_id = req.catID
        
        if not self.reset_robot(): return None
        
        # find a graspable object on the floor
        tcmpr = self.segment_objects()
        if tcmpr is None: return None
        
        # initialize as uniform distribution
        beliefs = [1.0/self.num_categories] * self.num_categories
        actions = self.ACTION_INFO[req.actionID.val]['prereqs']
        
        for act in actions:
            sound = self.execute_action(act, tcmpr)
            
            if not sound:
                self.reset_robot(tcmpr)
                return None
            else:
                resp = self.classification_srv(sound)
                beliefs = resp.beliefs
                
        if not self.reset_robot(tcmpr): return None
        
        res = InfoMaxResponse()
        res.beliefs = beliefs
        res.location = self.category_id
        return res
Example #9
private_key = '{}/thing_private_key.pem.key'.format(certPath)
certificate = '{}/thing_cert.pem.crt'.format(certPath)

shadowClient = getShadowClient(clientId=clientId,
                               rootCa=rootCa,
                               private_key=private_key,
                               certificate=certificate)
if not shadowClient:
    print('Failed to connect to AWS IOT device')
    exit(1)

deviceShadow = shadowClient.createShadowHandlerWithName(device, False)

CAMERA_IP = '<CAMERA IP>:<PORT>'
print('Loading Model...')
det = ObjectDetector('ssd/saved_model')
det.loadModel()
print('Model Load completed...')
print('Warming up Model...')
det.getBoundingBoxes(readImage(ip=CAMERA_IP))
det.getBoundingBoxes(readImage(ip=CAMERA_IP))

classes = loadClasses('ssd/label_mapping.csv')

app = Flask(__name__, static_url_path='')

if __name__ == '__main__':
    image = readImage(ip=CAMERA_IP)
    numPersons = countPeople(image)
    print('Found {} people in the image!'.format(numPersons))
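countPeople, readImage, getShadowClient, and loadClasses come from elsewhere in this project. A plausible countPeople is sketched below; it is hypothetical and assumes getBoundingBoxes returns (boxes, scores, classids) and that classes is a dict mapping class id to a label string such as 'person' (neither is confirmed by the source):

# Hypothetical countPeople; return format of getBoundingBoxes is an assumption.
def countPeople(image):
    boxes, scores, classids = det.getBoundingBoxes(image)
    return sum(1 for cid in classids if classes.get(cid) == 'person')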
Example #10
from train_validate_svm import *
from extract_features_and_store_to_mongo import *
import numpy as np
from db_interface import DbInterface, DbRecord
from joblib import dump, load
from sklearn.decomposition import IncrementalPCA
import sys
import os
from object_detection import ObjectDetector
import time
import argparse
from test_svm_model import *

print('Loading object detector...')
det = ObjectDetector('ssd/saved_model')
det.loadModel()

#correct,count,times,cl_times = find_acc('jaguar-alexnet-fc7-linear-pca.model', 'jaguars/reid', 100, None, 3, None, det)
#print('Jaguars Test Accuracy: {:.4f}, out of {} images. Average Id Time: {:.3f}s, Average Classification Times: {:.3f}s'.format(correct/count, count, times/count, cl_times/count))

correct, count, times, cl_times = find_acc(
    'amur-alexnet-relu6-linear-pca.model', 'amur/plain_reid_train/train', 100,
    None, 3, None, det)
print(
    'Tigers Test Accuracy: {:.4f}, out of {} images. Average Id Time: {:.3f}s, Average Classification Times: {:.3f}s'
    .format(correct / count, count, times / count, cl_times / count))

#correct,count,times,cl_times = find_acc('elp-alexnet-fc7-linear-pca.model', 'ELPephants/images', 100, None, 3, None, det)
#print('Elephants Test Accuracy: {:.4f}, out of {} images. Average Id Time: {:.3f}s, Average Classification Times: {:.3f}s'.format(correct/count, count, times/count, cl_times/count))
Example #11
                required=True,
                help="path to the image to be classified")
args = vars(ap.parse_args())

# load the configuration file
conf = Conf(args["conf"])

# load the classifier, then initialize the Histogram of Oriented Gradients descriptor
# and the object detector
model = pickle.loads(open(conf["classifier_path"], "rb").read())
hog = HOG(orientations=conf["orientations"],
          pixelsPerCell=tuple(conf["pixels_per_cell"]),
          cellsPerBlock=tuple(conf["cells_per_block"]),
          normalize=conf["normalize"],
          block_norm="L1")
od = ObjectDetector(model, hog)

# load the image and convert it to grayscale
image = cv2.imread(args["image"])
image = imutils.resize(image, width=min(260, image.shape[1]))
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# detect objects in the image
(boxes, probs) = od.detect(gray,
                           conf["window_dim"],
                           winStep=conf["window_step"],
                           pyramidScale=conf["pyramid_scale"],
                           minProb=conf["min_probability"])

# loop over the bounding boxes and draw them
for (startX, startY, endX, endY) in boxes:
    cv2.rectangle(image, (startX, startY), (endX, endY), (0, 255, 0), 2)

# show the output image
cv2.imshow("Image", image)
cv2.waitKey(0)
Example #12
import sys
import os  # needed for os.listdir below
import cv2

from object_detection import ObjectDetector  # assumed, as in Example #10

args = sys.argv
if not len(args) == 2 and not len(args) == 4:
    print('Usage: {} <images folder> [lower] [upper]'.format(args[0]))
    exit(1)

folder = args[1]
imageFiles = [
    '{}/{}'.format(folder, x) for x in os.listdir(folder) if x.endswith('.jpg')
]
n = min(100, len(imageFiles))

print('Loading Model...')
det = ObjectDetector('ssd/saved_model')
det.loadModel()

print('Warming up model...')
# Run a dummy test once for warmup
img = cv2.imread(imageFiles[0])
det.getBoundingBoxes(img)
det.getBoundingBoxes(img)

batchTimes = []
lower = 10
upper = 11

if len(args) == 4:
    lower = int(args[2])
    upper = int(args[3])
Example #13
 def __init__(self):
     self.detector = ObjectDetector()
     # e.g. 'http://host.docker.internal:5000/api/'
     self.base_url = os.environ["MEDIA_API_URL"]
     print("API Url: {}".format(self.base_url))
Example #14
if len(args) == 4:
    n = min(int(args[3]), len(imageFiles))
else:
    n = len(imageFiles)

imageFiles = imageFiles[:n]

print('Running Detection on {} images'.format(n))

box_map = {}
with open(args[2]) as f:
    box_map = json.load(f)
    f.close()

print('Loading Object Detector...')
det = ObjectDetector('ssd_quantized_1L_steps/saved_model')
det.loadModel()
#det.SCORE_THRESHOLD = 0.25
#det.IOU_THRESHOLD = 0.01

print('Warming up model...')
det.getBoundingBoxes(cv2.imread(imageFiles[0]))
det.getBoundingBoxes(cv2.imread(imageFiles[0]))

batchSize = 10
if 'jaguar' in folder.lower():
    expected_classid = 1  # Only tigers
elif 'elp' in folder.lower() or 'elephant' in folder.lower():
    expected_classid = 2
elif 'amur' in folder.lower():
    expected_classid = 3
Example #15
class ObjectCategorizer():
    def __init__(self):
        self.object_detector = ObjectDetector()

        # connect to collision map processing service
        rospy.loginfo('waiting for tabletop_collision_map_processing service')
        rospy.wait_for_service(
            '/tabletop_collision_map_processing/tabletop_collision_map_processing'
        )
        self.collision_map_processing_srv = rospy.ServiceProxy(
            '/tabletop_collision_map_processing/tabletop_collision_map_processing',
            TabletopCollisionMapProcessing)
        rospy.loginfo('connected to tabletop_collision_map_processing service')

        # connect to gripper action server
        rospy.loginfo('waiting for wubble_gripper_grasp_action')
        self.posture_controller = SimpleActionClient(
            '/wubble_gripper_grasp_action', GraspHandPostureExecutionAction)
        self.posture_controller.wait_for_server()
        rospy.loginfo('connected to wubble_gripper_grasp_action')

        rospy.loginfo('waiting for wubble_grasp_status service')
        rospy.wait_for_service('/wubble_grasp_status')
        self.get_grasp_status_srv = rospy.ServiceProxy('/wubble_grasp_status',
                                                       GraspStatus)
        rospy.loginfo('connected to wubble_grasp_status service')

        rospy.loginfo('waiting for classify service')
        rospy.wait_for_service('/classify')
        self.classification_srv = rospy.ServiceProxy('/classify', classify)
        rospy.loginfo('connected to classify service')

        # connect to gripper action server
        rospy.loginfo('waiting for wubble_gripper_action')
        self.gripper_controller = SimpleActionClient('/wubble_gripper_action',
                                                     WubbleGripperAction)
        self.gripper_controller.wait_for_server()
        rospy.loginfo('connected to wubble_gripper_action')

        # connect to wubble actions
        rospy.loginfo('waiting for drop_object')
        self.drop_object_client = SimpleActionClient('/drop_object',
                                                     DropObjectAction)
        self.drop_object_client.wait_for_server()
        rospy.loginfo('connected to drop_object')

        rospy.loginfo('waiting for grasp_object')
        self.grasp_object_client = SimpleActionClient('/grasp_object',
                                                      GraspObjectAction)
        self.grasp_object_client.wait_for_server()
        rospy.loginfo('connected to grasp_object')

        rospy.loginfo('waiting for lift_object')
        self.lift_object_client = SimpleActionClient('/lift_object',
                                                     LiftObjectAction)
        self.lift_object_client.wait_for_server()
        rospy.loginfo('connected to lift_object')

        rospy.loginfo('waiting for place_object')
        self.place_object_client = SimpleActionClient('/place_object',
                                                      PlaceObjectAction)
        self.place_object_client.wait_for_server()
        rospy.loginfo('connected to place_object')

        rospy.loginfo('waiting for push_object')
        self.push_object_client = SimpleActionClient('/push_object',
                                                     PushObjectAction)
        self.push_object_client.wait_for_server()
        rospy.loginfo('connected to push_object')

        rospy.loginfo('waiting for shake_roll_object')
        self.shake_roll_object_client = SimpleActionClient(
            '/shake_roll_object', ShakeRollObjectAction)
        self.shake_roll_object_client.wait_for_server()
        rospy.loginfo('connected to shake_roll_object')

        rospy.loginfo('waiting for shake_pitch_object')
        self.shake_pitch_object_client = SimpleActionClient(
            '/shake_pitch_object', ShakePitchObjectAction)
        self.shake_pitch_object_client.wait_for_server()
        rospy.loginfo('connected to shake_pitch_object')

        rospy.loginfo('waiting for ready_arm')
        self.ready_arm_client = SimpleActionClient('/ready_arm',
                                                   ReadyArmAction)
        self.ready_arm_client.wait_for_server()
        rospy.loginfo('connected to ready_arm')

        # advertise InfoMax service
        rospy.Service('get_category_distribution', InfoMax,
                      self.process_infomax_request)

        rospy.loginfo(
            'all services contacted, object_categorization is ready to go')

        self.ACTION_INFO = {
            InfomaxAction.GRASP: {
                'client': self.grasp_object_client,
                'goal': GraspObjectGoal(),
                'prereqs': [InfomaxAction.GRASP]
            },
            InfomaxAction.LIFT: {
                'client': self.lift_object_client,
                'goal': LiftObjectGoal(),
                'prereqs': [InfomaxAction.GRASP, InfomaxAction.LIFT]
            },
            InfomaxAction.SHAKE_ROLL: {
                'client':
                self.shake_roll_object_client,
                'goal':
                ShakeRollObjectGoal(),
                'prereqs': [
                    InfomaxAction.GRASP, InfomaxAction.LIFT,
                    InfomaxAction.SHAKE_ROLL
                ]
            },
            InfomaxAction.DROP: {
                'client':
                self.drop_object_client,
                'goal':
                DropObjectGoal(),
                'prereqs':
                [InfomaxAction.GRASP, InfomaxAction.LIFT, InfomaxAction.DROP]
            },
            InfomaxAction.PLACE: {
                'client':
                self.place_object_client,
                'goal':
                PlaceObjectGoal(),
                'prereqs':
                [InfomaxAction.GRASP, InfomaxAction.LIFT, InfomaxAction.PLACE]
            },
            InfomaxAction.PUSH: {
                'client':
                self.push_object_client,
                'goal':
                PushObjectGoal(),
                'prereqs': [
                    InfomaxAction.GRASP, InfomaxAction.LIFT,
                    InfomaxAction.PLACE, InfomaxAction.PUSH
                ]
            },
            InfomaxAction.SHAKE_PITCH: {
                'client':
                self.shake_pitch_object_client,
                'goal':
                ShakePitchObjectGoal(),
                'prereqs': [
                    InfomaxAction.GRASP, InfomaxAction.LIFT,
                    InfomaxAction.SHAKE_PITCH
                ]
            },
        }

    def open_gripper(self):
        pg = GraspHandPostureExecutionGoal()
        pg.goal = GraspHandPostureExecutionGoal.RELEASE

        self.posture_controller.send_goal(pg)
        self.posture_controller.wait_for_result()

    def gentle_close_gripper(self):
        pg = GraspHandPostureExecutionGoal()
        pg.goal = GraspHandPostureExecutionGoal.GRASP

        self.posture_controller.send_goal(pg)
        self.posture_controller.wait_for_result()

        goal = WubbleGripperGoal()
        goal.command = WubbleGripperGoal.CLOSE_GRIPPER
        goal.torque_limit = 0.0
        goal.dynamic_torque_control = False

        self.gripper_controller.send_goal(goal)
        self.gripper_controller.wait_for_result()

    def segment_objects(self):
        res = self.object_detector.detect()

        if res is None:
            rospy.logerr('TabletopSegmentation did not find any clusters')
            return None

        segmentation_result = res['segmentation_result']
        self.info = res['cluster_information']

        tdr = TabletopDetectionResult()
        tdr.table = segmentation_result.table
        tdr.clusters = segmentation_result.clusters
        tdr.result = segmentation_result.result

        tcmpr = self.update_collision_map(tdr)

        return tcmpr

    def reset_collision_map(self):
        req = TabletopCollisionMapProcessingRequest()
        req.detection_result = TabletopDetectionResult()
        req.reset_collision_models = True
        req.reset_attached_models = True

        self.collision_map_processing_srv(req)
        rospy.loginfo('collision map reset')

    def update_collision_map(self, tabletop_detection_result):
        rospy.loginfo('collision_map update in progress')

        req = TabletopCollisionMapProcessingRequest()
        req.detection_result = tabletop_detection_result
        req.reset_collision_models = True
        req.reset_attached_models = True
        req.desired_frame = 'base_link'

        res = self.collision_map_processing_srv(req)

        rospy.loginfo('collision_map update is done')
        rospy.loginfo(
            'there are %d graspable objects %s, collision support surface name is "%s"'
            % (len(res.graspable_objects), res.collision_object_names,
               res.collision_support_surface_name))

        return res

    def reset_robot(self, tabletop_collision_map_processing_result=None):
        rospy.loginfo('resetting robot')
        ordered_collision_operations = OrderedCollisionOperations()

        if tabletop_collision_map_processing_result:
            closest_index = self.info[0][0]
            collision_object_name = tabletop_collision_map_processing_result.collision_object_names[
                closest_index]
            collision_support_surface_name = tabletop_collision_map_processing_result.collision_support_surface_name

            collision_operation = CollisionOperation()
            collision_operation.object1 = collision_support_surface_name
            collision_operation.object2 = ARM_GROUP_NAME
            collision_operation.operation = CollisionOperation.DISABLE
            collision_operation.penetration_distance = 0.02
            ordered_collision_operations.collision_operations = [
                collision_operation
            ]

            collision_operation = CollisionOperation()
            collision_operation.object1 = collision_support_surface_name
            collision_operation.object2 = GRIPPER_GROUP_NAME
            collision_operation.operation = CollisionOperation.DISABLE
            collision_operation.penetration_distance = 0.02
            ordered_collision_operations.collision_operations.append(
                collision_operation)

            collision_operation = CollisionOperation()
            collision_operation.object1 = collision_object_name
            collision_operation.object2 = GRIPPER_GROUP_NAME
            collision_operation.operation = CollisionOperation.DISABLE
            collision_operation.penetration_distance = 0.02
            ordered_collision_operations.collision_operations.append(
                collision_operation)
        else:
            self.reset_collision_map()

        current_state = find_current_arm_state()
        rospy.loginfo('arm is currently in %s state' % current_state)

        if current_state not in ['READY', 'TUCK']:
            # check if the gripper is empty
            grasp_status = self.get_grasp_status_srv()
            if grasp_status.is_hand_occupied:
                rospy.loginfo(
                    'arm is not in any of the following states %s, opening gripper'
                    % str(['READY', 'TUCK']))
                self.open_gripper()

        if current_state != 'READY' and not self.ready_arm(
                ordered_collision_operations):
            return False
        self.gentle_close_gripper()
        return True

    def ready_arm(self, collision_operations=OrderedCollisionOperations()):
        """
        Moves the arm to the side out of the view of all sensors. In this
        position the arm is ready to perform a grasp action.
        """
        goal = ReadyArmGoal()
        goal.collision_operations = collision_operations
        state = self.ready_arm_client.send_goal_and_wait(goal)

        if state == GoalStatus.SUCCEEDED: return True
        else: return False

    def execute_action(self, action, tabletop_collision_map_processing_result):
        goal = self.ACTION_INFO[action]['goal']
        goal.category_id = self.category_id
        closest_index = self.info[0][0]
        goal.graspable_object = tabletop_collision_map_processing_result.graspable_objects[
            closest_index]
        goal.collision_object_name = tabletop_collision_map_processing_result.collision_object_names[
            closest_index]
        goal.collision_support_surface_name = tabletop_collision_map_processing_result.collision_support_surface_name
        state = self.ACTION_INFO[action]['client'].send_goal_and_wait(goal)

        if state == GoalStatus.SUCCEEDED:
            result = self.ACTION_INFO[action]['client'].get_result()
            return result.recorded_sound
        else:
            return None

    # receive an InfoMax service request containing object ID and desired action
    def process_infomax_request(self, req):
        self.object_names = req.objectNames
        self.action_names = req.actionNames
        self.num_categories = req.numCats
        self.category_id = req.catID

        if not self.reset_robot(): return None

        # find a graspable object on the floor
        tcmpr = self.segment_objects()
        if tcmpr is None: return None

        # initialize as uniform distribution
        beliefs = [1.0 / self.num_categories] * self.num_categories
        actions = self.ACTION_INFO[req.actionID.val]['prereqs']

        for act in actions:
            sound = self.execute_action(act, tcmpr)

            if not sound:
                self.reset_robot(tcmpr)
                return None
            else:
                resp = self.classification_srv(sound)
                beliefs = resp.beliefs

        if not self.reset_robot(tcmpr): return None

        res = InfoMaxResponse()
        res.beliefs = beliefs
        res.location = self.category_id
        return res
Example #16
    n = int(args[3])

folder = 'amur/detection_train/trainval'
images = ['{}/{}'.format(folder, x) for x in os.listdir(folder)]

imagelist = np.random.choice(images, n, replace=False)

print('Measuring performance over {} images...'.format(len(imagelist)))

config = args[1]
weights = args[2]

#model = YoloObjectDetector(config, weights, '../darknet/darknet/custom_data/detector.data')
#model.loadModel()

model = ObjectDetector(config)
model.loadModel()

print('Warming up...')
#model.getBoundingBoxes(images[0])
#model.getBoundingBoxes(images[1])
model.getBoundingBoxes(cv2.imread(images[0]))
model.getBoundingBoxes(cv2.imread(images[1]))
[cv2.imread(x) for x in imagelist]

print('Starting...')
times = []
for im in imagelist:
    #model.getBoundingBoxes(im)
    img = cv2.imread(im)
    st = time.time()
Example #17
    def __init__(self):
        self.object_detector = ObjectDetector()

        # connect to collision map processing service
        rospy.loginfo('waiting for tabletop_collision_map_processing service')
        rospy.wait_for_service(
            '/tabletop_collision_map_processing/tabletop_collision_map_processing'
        )
        self.collision_map_processing_srv = rospy.ServiceProxy(
            '/tabletop_collision_map_processing/tabletop_collision_map_processing',
            TabletopCollisionMapProcessing)
        rospy.loginfo('connected to tabletop_collision_map_processing service')

        # connect to gripper action server
        rospy.loginfo('waiting for wubble_gripper_grasp_action')
        self.posture_controller = SimpleActionClient(
            '/wubble_gripper_grasp_action', GraspHandPostureExecutionAction)
        self.posture_controller.wait_for_server()
        rospy.loginfo('connected to wubble_gripper_grasp_action')

        rospy.loginfo('waiting for wubble_grasp_status service')
        rospy.wait_for_service('/wubble_grasp_status')
        self.get_grasp_status_srv = rospy.ServiceProxy('/wubble_grasp_status',
                                                       GraspStatus)
        rospy.loginfo('connected to wubble_grasp_status service')

        rospy.loginfo('waiting for classify service')
        rospy.wait_for_service('/classify')
        self.classification_srv = rospy.ServiceProxy('/classify', classify)
        rospy.loginfo('connected to classify service')

        # connect to gripper action server
        rospy.loginfo('waiting for wubble_gripper_action')
        self.gripper_controller = SimpleActionClient('/wubble_gripper_action',
                                                     WubbleGripperAction)
        self.gripper_controller.wait_for_server()
        rospy.loginfo('connected to wubble_gripper_action')

        # connect to wubble actions
        rospy.loginfo('waiting for drop_object')
        self.drop_object_client = SimpleActionClient('/drop_object',
                                                     DropObjectAction)
        self.drop_object_client.wait_for_server()
        rospy.loginfo('connected to drop_object')

        rospy.loginfo('waiting for grasp_object')
        self.grasp_object_client = SimpleActionClient('/grasp_object',
                                                      GraspObjectAction)
        self.grasp_object_client.wait_for_server()
        rospy.loginfo('connected to grasp_object')

        rospy.loginfo('waiting for lift_object')
        self.lift_object_client = SimpleActionClient('/lift_object',
                                                     LiftObjectAction)
        self.lift_object_client.wait_for_server()
        rospy.loginfo('connected to lift_object')

        rospy.loginfo('waiting for place_object')
        self.place_object_client = SimpleActionClient('/place_object',
                                                      PlaceObjectAction)
        self.place_object_client.wait_for_server()
        rospy.loginfo('connected to place_object')

        rospy.loginfo('waiting for push_object')
        self.push_object_client = SimpleActionClient('/push_object',
                                                     PushObjectAction)
        self.push_object_client.wait_for_server()
        rospy.loginfo('connected to push_object')

        rospy.loginfo('waiting for shake_roll_object')
        self.shake_roll_object_client = SimpleActionClient(
            '/shake_roll_object', ShakeRollObjectAction)
        self.shake_roll_object_client.wait_for_server()
        rospy.loginfo('connected to shake_roll_object')

        rospy.loginfo('waiting for shake_pitch_object')
        self.shake_pitch_object_client = SimpleActionClient(
            '/shake_pitch_object', ShakePitchObjectAction)
        self.shake_pitch_object_client.wait_for_server()
        rospy.loginfo('connected to shake_pitch_object')

        rospy.loginfo('waiting for ready_arm')
        self.ready_arm_client = SimpleActionClient('/ready_arm',
                                                   ReadyArmAction)
        self.ready_arm_client.wait_for_server()
        rospy.loginfo('connected to ready_arm')

        # advertise InfoMax service
        rospy.Service('get_category_distribution', InfoMax,
                      self.process_infomax_request)

        rospy.loginfo(
            'all services contacted, object_categorization is ready to go')

        self.ACTION_INFO = {
            InfomaxAction.GRASP: {
                'client': self.grasp_object_client,
                'goal': GraspObjectGoal(),
                'prereqs': [InfomaxAction.GRASP]
            },
            InfomaxAction.LIFT: {
                'client': self.lift_object_client,
                'goal': LiftObjectGoal(),
                'prereqs': [InfomaxAction.GRASP, InfomaxAction.LIFT]
            },
            InfomaxAction.SHAKE_ROLL: {
                'client':
                self.shake_roll_object_client,
                'goal':
                ShakeRollObjectGoal(),
                'prereqs': [
                    InfomaxAction.GRASP, InfomaxAction.LIFT,
                    InfomaxAction.SHAKE_ROLL
                ]
            },
            InfomaxAction.DROP: {
                'client':
                self.drop_object_client,
                'goal':
                DropObjectGoal(),
                'prereqs':
                [InfomaxAction.GRASP, InfomaxAction.LIFT, InfomaxAction.DROP]
            },
            InfomaxAction.PLACE: {
                'client':
                self.place_object_client,
                'goal':
                PlaceObjectGoal(),
                'prereqs':
                [InfomaxAction.GRASP, InfomaxAction.LIFT, InfomaxAction.PLACE]
            },
            InfomaxAction.PUSH: {
                'client':
                self.push_object_client,
                'goal':
                PushObjectGoal(),
                'prereqs': [
                    InfomaxAction.GRASP, InfomaxAction.LIFT,
                    InfomaxAction.PLACE, InfomaxAction.PUSH
                ]
            },
            InfomaxAction.SHAKE_PITCH: {
                'client':
                self.shake_pitch_object_client,
                'goal':
                ShakePitchObjectGoal(),
                'prereqs': [
                    InfomaxAction.GRASP, InfomaxAction.LIFT,
                    InfomaxAction.SHAKE_PITCH
                ]
            },
        }
Example #18
import matplotlib.pyplot as plt
from im_utils import *
import sys
import os

if not len(sys.argv) == 2:
    print('Usage: {} <Video File>'.format(sys.argv[0]))
    exit(1)

video = sys.argv[1]
if not os.path.exists(video):
    print('Error! File {} does not exist'.format(video))
    exit(1)

print('Loading Object Detector...')
detector = ObjectDetector('ssd/saved_model')
detector.loadModel()
#detector.getBoundingBoxes(cv2.imread('amur_small/002019.jpg'))
#detector.getBoundingBoxes(cv2.imread('amur_small/002019.jpg'))

print('Loading Identifiers...')
tigers_identifier = SvmIdentifier('amur-alexnet-relu6-linear-pca.model',
                                  detector)
jaguars_identifier = SvmIdentifier('jaguar-alexnet-fc7-linear-pca.model',
                                   detector)
elephants_identifier = SvmIdentifier('elp-alexnet-fc7-linear-pca.model',
                                     detector)

tigers_identifier.loadModel()
jaguars_identifier.loadModel()
elephants_identifier.loadModel()
Example #19
    th = threading.Thread(target=client.publishImageWithDetections,
                          args=[image, detections])
    th.daemon = True  # setDaemon() is deprecated; assign the attribute instead
    th.start()


if __name__ == '__main__':
    args = sys.argv

    if len(args) < 3:
        print('Usage: cmd <Model path> <MQTT server IP> [display]')
        exit()

    modelpath = args[1]
    server = args[2]
    detector = ObjectDetector(modelpath, numthreads=4)
    detector.loadModel()

    display = False
    if len(args) > 3 and args[3] == 'display':
        display = True

    publisher = TrackPublisher(DEVICE_NAME, server)
    publisher.register()
    imagePublisher = ImagePublisher(DEVICE_NAME, server)

    print('Registered with MQTT server, publishing messages on channel: {}'.
          format(DEVICE_NAME))

    with picamera.PiCamera(resolution=(640, 480), framerate=30) as camera:
        #camera.start_recording('my_video.h264')
Example #20
                  for x in f.readlines()]
        id_map = [(x[0], x[1]) for x in id_map if x[0] in existingImages]
    minId, maxId = min([x[1] for x in id_map]), max([x[1] for x in id_map])

    idNum = None
    if len(args) > 4:
        idNum = int(args[4])

    rev_map = {}
    for x in id_map:
        if not x[1] in rev_map:
            rev_map[x[1]] = []
        rev_map[x[1]].append(x[0])

    print('Loading Object Detector')
    det = ObjectDetector(objdetmodel)
    det.SCORE_THRESHOLD = 0.7
    det.loadModel()
    # Warming up
    det.getBoundingBoxes(readImageFromFile(os.path.join(folder,
                                                        rev_map[0][0])))
    det.getBoundingBoxes(readImageFromFile(os.path.join(folder,
                                                        rev_map[0][0])))

    print('Loading Object Identifier')
    idn = SvmIdentifier(modelPath, det)
    idn.loadModel()

    if idNum is not None and not idn.isPresent(idNum):
        print(
            'Provided Id {} is not present in trained model, will choose random Id'
Example #21
class PerceptionSystem(object):
    def __init__(self, config):
        self.config_ = config
        # Load Models
        if (config.model_name == 'fchardnet'):
            self.seg_model_ = FCHarDNetSemanticSegmentation(config.model_path)
        else:
            self.seg_model_ = PSPNetSematicSegmentation(
                config.model_config_file)

        self.object_detector_ = ObjectDetector(config.model_path_yolo)

        # Load perspective transforms
        mtxs = np.load(config.perspective_transform_path)
        self.M_ = mtxs['M']
        self.M_inv_ = mtxs['M_inv']

        # Test Detection Models
        print('Segmentation and Detection Models loaded, Testing the models')
        img = cv2.imread("/usr/src/app/dev_ws/src/vision/vision/dolly.png")
        self.h_orig_, self.w_orig_ = self.config_.original_height, self.config_.original_width
        _, _ = self.seg_model_.process_img_driveable(
            img, [self.config_.original_height, self.config_.original_width],
            self.config_.drivable_idx)
        _ = self.object_detector_.process_frame(img)
        self.im_hw_ = self.object_detector_.im_hw

        print('Imgs tested')

    def get_driveable(self, drivable_img):
        h, w, _ = drivable_img.shape
        # Warp driveable area
        warped = cv2.warpPerspective(drivable_img,
                                     self.M_, (480, 480),
                                     flags=cv2.INTER_LINEAR)

        # Calculate robot center
        original_center = np.array([[[w / 2, h]]], dtype=np.float32)
        warped_center = cv2.perspectiveTransform(original_center,
                                                 self.M_)[0][0]
        driveable_contour_mask = get_driveable_mask2(warped, warped_center,
                                                     self.config_)
        return driveable_contour_mask

    def add_detections_birdview(self, preds, driveable_mask):
        h, w, _ = self.object_detector_.im_hw
        h_rate = self.h_orig_ / h
        w_rate = self.w_orig_ / w
        for pred in preds:
            if (pred[4] > self.object_detector_.conf_thres
                ):  # If prediction has a bigger confidence than the threshold
                x = w_rate * (pred[0] + pred[2]) / 2.0  # Ground middle point
                y = h_rate * pred[3]
                if (pred[5] == 0):  #person
                    wr = 40
                    hr = 60
                    color = 253  #253
                else:
                    wr = 30
                    hr = 90
                    color = 255  #255
                pos_orig = np.array([[[x, y]]], dtype=np.float32)
                warped_birdview = cv2.perspectiveTransform(
                    pos_orig,
                    self.M_)[0][0]  # Transform middle ground point to birdview
                warped_birdview = np.uint16(warped_birdview)
                cv2.rectangle(
                    driveable_mask,
                    (warped_birdview[0] - int(wr / 2), warped_birdview[1]),
                    (warped_birdview[0] + int(wr / 2),
                     warped_birdview[1] - hr), color, -1)

    def process_frame(self, img):
        # Semantic Segmentation
        if (self.config_.model_name == 'fchardnet'):
            img_test = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        else:
            img_test = img.copy()

        ### TIME
        start = time.time()
        segmented_img, drivable_img = self.seg_model_.process_img_driveable(
            img_test,
            [self.config_.original_height, self.config_.original_width],
            self.config_.drivable_idx)
        end = time.time()
        print("segmentation: {} seconds".format(end - start))
        # Get bird eye view with driveable area limits

        start = time.time()
        drivable_edge_points_top = self.get_driveable(drivable_img)
        end = time.time()
        print("warp: {} seconds".format(end - start))

        # Object Detection
        preds = self.object_detector_.process_frame(img)
        # if(self.config_.debug):
        #     print('preds: {}'.format(preds))
        #     detections_img = self.object_detector_.draw_rectangles(img, preds)
        #     fig, ax = plt.subplots(figsize=(20, 10))
        #     ax.imshow(detections_img)
        #     plt.show()

        # Add Detections to birdview image
        driveable_edge_top_with_objects = drivable_edge_points_top.copy()
        self.add_detections_birdview(preds, driveable_edge_top_with_objects)

        return drivable_img, drivable_edge_points_top, preds, driveable_edge_top_with_objects, segmented_img
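
A minimal driver for process_frame, assuming a populated config object as sketched after Example #2; the input path reuses the test image from the constructor and is otherwise a placeholder:

# Sketch: run the perception pipeline on a single frame (assumed driver).
if __name__ == '__main__':
    ps = PerceptionSystem(config)  # `config` as sketched after Example #2
    frame = cv2.imread("/usr/src/app/dev_ws/src/vision/vision/dolly.png")
    (drivable_img, drivable_top, preds,
     drivable_top_with_objects, segmented) = ps.process_frame(frame)
    print('{} raw detections'.format(len(preds)))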