Example 1
0
    def __init__(self):
        """Parse runtime options and build the detection model."""

        # Parse options; force the debug level to at least 1 so the
        # detector produces its visual output.
        options = opts().init()
        options.debug = max(options.debug, 1)
        self.opt = options

        # Instantiate the CenterNet detector with the configured options.
        self.detector = CtdetDetector(self.opt)
Example 2
0
def prefetch_test(opt):
    """Evaluate the detector on the validation split with a prefetching loader.

    Detections are computed only when ``result.json`` is absent from
    ``opt.save_dir``; ``run_eval`` is invoked afterwards either way.
    """
    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
    opt = opts().update_dataset_info_and_set_heads(opt, CTDetDataset)
    print(opt)

    split = 'val'
    dataset = CTDetDataset(opt, split)
    detector = CtdetDetector(opt)

    result = {}
    result_path = os.path.join(opt.save_dir, 'result.json')
    if not os.path.exists(result_path):
        prefetcher = PrefetchDataset(opt, dataset, detector.pre_process)
        data_loader = torch.utils.data.DataLoader(prefetcher,
                                                  batch_size=1,
                                                  shuffle=False,
                                                  pin_memory=True)

        for ind, (img_id, preprocessed) in enumerate(data_loader):
            ret = detector.run(preprocessed)
            # Key results by the integer image id extracted from the tensor.
            key = img_id.numpy().astype(np.int32)[0]
            result[key] = ret['results']
            print(ind)

    dataset.run_eval(result, result_path)
Example 3
0
    def __init__(self):
        """Wire up the Kafka endpoints and instantiate the detector."""

        # Consumer that receives raw camera frames.
        consumer_conf = {
            'bootstrap.servers': 'localhost:9092',
            'group.id': 'cameras',
            'auto.offset.reset': 'earliest'
        }
        self.frames_consumer = Consumer(consumer_conf)
        self.frames_consumer.subscribe([frames_topic])

        # Producer that publishes inference payloads; message size limit is
        # raised to allow large pickled batches.
        producer_conf = {
            'bootstrap.servers': 'localhost:9092',
            'message.max.bytes': '10000000'
        }
        self.inference_producer = Producer(producer_conf)

        # Parse options; debug level is forced to at least 1.
        self.opt = opts().init()
        self.opt.debug = max(self.opt.debug, 1)

        # Instantiate the CenterNet detector.
        self.detector = CtdetDetector(self.opt)
Example 4
0
def demo(opt):
    """Run detection on the source selected by ``opt.demo``: a webcam
    stream, a video file, an image directory, or a single image.

    Args:
        opt: parsed options. ``opt.demo`` selects the input source and
            ``opt.gpus_str`` selects the visible CUDA devices.
    """
    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
    # Debug level >= 1 so the detector renders its visualizations.
    opt.debug = max(opt.debug, 1)
    detector = CtdetDetector(opt)

    if opt.demo == 'webcam' or \
            opt.demo[opt.demo.rfind('.') + 1:].lower() in video_ext:
        cam = cv2.VideoCapture(0 if opt.demo == 'webcam' else opt.demo)

        detector.pause = False
        while True:
            grabbed, img = cam.read()
            # Fix: stop cleanly when the stream ends or a frame fails to
            # decode, instead of crashing inside cv2.imshow(None).
            if not grabbed or img is None:
                break
            cv2.imshow('input', img)
            ret = detector.run(img)
            time_str = ''
            for stat in time_stats:
                time_str = time_str + '{} {:.3f}s |'.format(stat, ret[stat])
            if cv2.waitKey(1) == 27:
                return  # esc to quit
        # Release the capture device when the stream is exhausted.
        cam.release()
    else:
        if os.path.isdir(opt.demo):
            image_names = []
            ls = os.listdir(opt.demo)
            for file_name in sorted(ls):
                ext = file_name[file_name.rfind('.') + 1:].lower()
                if ext in image_ext:
                    image_names.append(os.path.join(opt.demo, file_name))
        else:
            image_names = [opt.demo]

        for image_name in image_names:
            # NOTE(review): inference goes through the module-level `maven`
            # helper rather than the local `detector`. If Maven.infer already
            # returns ret['results'], the ret['results'] / ret[stat] lookups
            # below need confirming against its actual return value.
            ret = maven.infer(image_name)
            annotate(image_name, ret['results'])
            time_str = ''
            for stat in time_stats:
                time_str = time_str + '{} {:.3f}s |'.format(stat, ret[stat])
Example 5
0
class Maven:
    """Thin wrapper exposing the CenterNet detector for inference."""

    def __init__(self):
        """Parse options, force debug >= 1, and build the detector."""
        options = opts().init()
        options.debug = max(options.debug, 1)
        self.opt = options
        self.detector = CtdetDetector(self.opt)

    def infer(self, data):
        """Run the detector on *data* and return only its 'results' field."""
        return self.detector.run(data)['results']
Example 6
0
        "nvvidconv flip-method=%d ! "
        "video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! "
        "videoconvert ! "
        "video/x-raw, format=(string)BGR ! appsink" % (
            capture_width,
            capture_height,
            framerate,
            flip_method,
            display_width,
            display_height,
        ))


if __name__ == '__main__':
    opt = opts().init()
    detector = CtdetDetector(opt)
    imgID = 0
    if opt.demo == 'Rpicam':
        # Open the Raspberry Pi camera through a GStreamer pipeline.
        print(gstreamer_pipeline(flip_method=0))
        cap = cv2.VideoCapture(gstreamer_pipeline(flip_method=0),
                               cv2.CAP_GSTREAMER)
        if cap.isOpened():
            # Grab frames and dump each one as a numbered JPEG.
            while True:
                ret_val, img = cap.read()
                # Fix: stop on a failed grab instead of passing a None
                # frame to cv2.imwrite (which would raise).
                if not ret_val or img is None:
                    break
                cv2.imwrite('{}.jpg'.format(imgID), img)
                imgID = imgID + 1

                keyCode = cv2.waitKey(1) & 0xFF
                if keyCode == 27:  # ESC quits
                    break
            # Fix: release the capture device once the loop ends.
            cap.release()
0
class Zeus:
    """Kafka-driven inference service: consumes pickled frame batches,
    runs the detector, and publishes confidence-filtered results."""

    def __init__(self):
        """Connect the Kafka endpoints and build the detector."""

        # Consumer that receives raw camera frames.
        self.frames_consumer = Consumer({
            'bootstrap.servers': 'localhost:9092',
            'group.id': 'cameras',
            'auto.offset.reset': 'earliest'
        })
        self.frames_consumer.subscribe([frames_topic])

        # Producer for inference payloads; raised message size limit for
        # large pickled batches.
        self.inference_producer = Producer({
            'bootstrap.servers': 'localhost:9092',
            'message.max.bytes': '10000000'
        })

        # Parse options; debug level forced to at least 1.
        self.opt = opts().init()
        self.opt.debug = max(self.opt.debug, 1)

        # Instantiate the CenterNet detector.
        self.detector = CtdetDetector(self.opt)

    def delivery_report(self, err, msg):
        """Per-message delivery callback, triggered by poll() or flush().
        Logs failures; successful deliveries are silent."""
        if err is not None:
            print('Message delivery failed: {}'.format(err))

    def infer(self):
        """Consume frame batches forever, run detection, publish results."""

        print('Ready for Inference!')
        while True:
            start = time.time()

            # Serve any pending producer delivery callbacks.
            self.inference_producer.poll(0)

            message = self.frames_consumer.poll()
            if message is None:
                time.sleep(0.01)
                continue
            if message.error():
                print("Consumer error: {}".format(message.error()))
                continue

            # Unpack the pickled batch descriptor.
            payload = pickle.loads(message.value())
            batch = payload['Batch']              # batch of images
            batch_len = payload['BatchLength']    # number of images
            height = payload['ImageHeight']
            width = payload['ImageWidth']
            channels = payload['Channels']        # color channels

            # Perform inference on the whole batch.
            results = self.detector.run(batch)['results']

            # Keep only detections above the confidence threshold, as
            # parallel [classes, confidences, bboxes] lists per image.
            results_scrubbed = []
            for image_key in results.keys():
                classes = []
                confidence = []
                bbox = []
                for cat in range(1, num_classes + 1):
                    for det in results[image_key][cat]:
                        conf = det[4]
                        if not conf > thresh_conf:
                            continue
                        classes.append(cat)
                        confidence.append(conf)
                        bbox.append([det[0], det[1], det[2], det[3]])
                results_scrubbed.append([classes, confidence, bbox])

            # Echo the batch metadata back alongside the results.
            outgoing = {
                'Batch': batch,
                'BatchLength': batch_len,
                'ImageHeight': height,
                'ImageWidth': width,
                'Channels': channels,
                'Results': results_scrubbed,
            }

            self.inference_producer.produce(inference_topic,
                                            pickle.dumps(outgoing),
                                            callback=self.delivery_report)
            self.inference_producer.flush()

            print(time.time() - start, end='\r')

        # NOTE(review): unreachable — the loop above has no break.
        self.frames_consumer.close()