Example #1
def convert_single_core(proc_id,
                        image_set,
                        categories,
                        source_folder,
                        segmentations_folder,
                        VOID=0):
    annotations = []
    for working_idx, image_info in enumerate(image_set):
        if working_idx % 100 == 0:
            print('Core: {}, {} from {} images converted'.format(
                proc_id, working_idx, len(image_set)))

        file_name = '{}.png'.format(image_info['file_name'].rsplit('.')[0])
        try:
            original_format = np.array(Image.open(
                os.path.join(source_folder, file_name)),
                                       dtype=np.uint32)
        except IOError:
            raise KeyError('no prediction png file for id: {}'.format(
                image_info['id']))

        pan = OFFSET * original_format[:, :, 0] + original_format[:, :, 1]
        pan_format = np.zeros(
            (original_format.shape[0], original_format.shape[1], 3),
            dtype=np.uint8)

        id_generator = IdGenerator(categories)

        l = np.unique(pan)
        segm_info = []
        for el in l:
            sem = el // OFFSET
            if sem == VOID:
                continue
            if sem not in categories:
                raise KeyError('Unknown semantic label {}'.format(sem))
            mask = pan == el
            segment_id, color = id_generator.get_id_and_color(sem)
            pan_format[mask] = color
            segm_info.append({"id": int(segment_id), "category_id": int(sem)})

        annotations.append({
            'image_id': image_info['id'],
            'file_name': file_name,
            "segments_info": segm_info
        })

        Image.fromarray(pan_format).save(
            os.path.join(segmentations_folder, file_name))
    print('Core: {}, all {} images processed'.format(proc_id, len(image_set)))
    return annotations
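
This worker comes from a two-channel panoptic converter: each prediction PNG stores the semantic label in channel 0 and the instance index in channel 1, and pan = OFFSET * R + G collapses the two into one integer label per pixel before segments are re-colored through IdGenerator. The driver below is a hedged sketch of how such a worker is typically fanned out over CPU cores; it is an assumption, not part of the original example, and the chunking/pool wiring is just one reasonable choice.

import multiprocessing

import numpy as np


def convert_multi_core(image_set, categories, source_folder, segmentations_folder):
    # Hypothetical driver: split the image list across CPU cores, run
    # convert_single_core on each chunk, and merge the per-core annotations.
    cpu_num = multiprocessing.cpu_count()
    chunks = np.array_split(image_set, cpu_num)
    with multiprocessing.Pool(processes=cpu_num) as pool:
        results = [pool.apply_async(convert_single_core,
                                    (proc_id, chunk, categories,
                                     source_folder, segmentations_folder))
                   for proc_id, chunk in enumerate(chunks)]
        annotations = []
        for res in results:
            annotations.extend(res.get())
    return annotations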
Example #2
def convert_detection_to_panoptic_coco_format_single_core(
        proc_id, coco_detection, img_ids, categories, segmentations_folder):
    id_generator = IdGenerator(categories)

    annotations_panoptic = []
    for working_idx, img_id in enumerate(img_ids):
        if working_idx % 100 == 0:
            print('Core: {}, {} from {} images processed'.format(
                proc_id, working_idx, len(img_ids)))
        img = coco_detection.loadImgs(int(img_id))[0]
        pan_format = np.zeros((img['height'], img['width'], 3), dtype=np.uint8)
        overlaps_map = np.zeros((img['height'], img['width']), dtype=np.uint32)

        anns_ids = coco_detection.getAnnIds(img_id)
        anns = coco_detection.loadAnns(anns_ids)

        panoptic_record = {}
        panoptic_record['image_id'] = img_id
        file_name = '{}.png'.format(img['file_name'].rsplit('.')[0])
        panoptic_record['file_name'] = file_name
        segments_info = []
        for ann in anns:
            if ann['category_id'] not in categories:
                raise Exception(
                    'Panoptic coco categories file does not contain '
                    'category with id: {}'.format(ann['category_id']))
            segment_id, color = id_generator.get_id_and_color(
                ann['category_id'])
            mask = coco_detection.annToMask(ann)
            overlaps_map += mask
            pan_format[mask == 1] = color
            ann.pop('segmentation')
            ann.pop('image_id')
            ann['id'] = segment_id
            segments_info.append(ann)

        if np.sum(overlaps_map > 1) != 0:
            raise Exception(
                "Segments for image {} overlap each other.".format(img_id))
        panoptic_record['segments_info'] = segments_info
        annotations_panoptic.append(panoptic_record)

        Image.fromarray(pan_format).save(
            os.path.join(segmentations_folder, file_name))

    print('Core: {}, all {} images processed'.format(proc_id, len(img_ids)))
    return annotations_panoptic
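
This converter rasterizes every detection-style instance annotation onto a blank canvas and accumulates an overlaps_map; because the panoptic format assigns each pixel to exactly one segment, any pixel covered by more than one mask aborts the conversion. A tiny self-contained illustration of that check, using made-up masks rather than COCO data:

import numpy as np

# Two hypothetical binary masks on a 4x4 image; row 1 is claimed by both.
mask_a = np.zeros((4, 4), dtype=np.uint32)
mask_b = np.zeros((4, 4), dtype=np.uint32)
mask_a[:2, :] = 1          # rows 0-1
mask_b[1:, :] = 1          # rows 1-3

overlaps_map = mask_a + mask_b
print(np.sum(overlaps_map > 1))   # 4 overlapping pixels -> the converter would raise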
Example #3
    def __init__(self):
        self._buffer = bytearray()
        self.state = self.IDLE

        self.idGenerator = IdGenerator()
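
Note that this IdGenerator is constructed without arguments and, as Example #4 shows, is only asked for MQTT packet identifiers via next(), so it is evidently a different class from the panoptic IdGenerator(categories) used in the other examples. A hypothetical minimal implementation that would satisfy the interface used here (the real one is not shown in the snippet):

class IdGenerator:
    """Hypothetical packet-id generator matching the usage in Example #4.

    MQTT packet identifiers are 16-bit and must be non-zero, so the
    counter cycles through 1..65535.
    """

    def __init__(self):
        self._last_id = 0

    def next(self):
        self._last_id = self._last_id % 65535 + 1
        return self._last_id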
Example #4
class MQTTProtocol(Protocol):
    worker = None

    IDLE        = 0
    CONNECTING  = 1
    CONNECTED   = 2

    def __init__(self):
        self._buffer = bytearray()
        self.state = self.IDLE

        self.idGenerator = IdGenerator()

    def connect(self, worker):
        print("INFO: Connecting Protocol")

        self.worker = worker
        self.state = self.CONNECTING

        msg = Connect(self.worker.clientId,
                      self.worker.version,
                      username=self.worker.username,
                      password=self.worker.appKey)

        self.transport.write(msg.pack())

    def joined(self):
        d = self.worker.joined()

    def dataReceived(self, data):
        print("************ Data Received ***************", data)
        self._buffer.extend(data)

        length = None

        while len(self._buffer):
            if length is None:
                # Start on a new packet

                # Haven't got enough data to start a new packet, wait for some more
                if len(self._buffer) < 2:
                    break

                lenLen = 1
                # Calculate the length of the length field
                while lenLen < len(self._buffer):
                    if not self._buffer[lenLen] & 0x80:
                        break
                    lenLen += 1

                # We still haven't got all of the remaining length field
                if lenLen < len(self._buffer) and self._buffer[lenLen] & 0x80:
                    return

                length = decodeLength(self._buffer[1:])

            if len(self._buffer) >= length + lenLen + 1:
                chunk = self._buffer[:length + lenLen + 1]
                self._processPacket(chunk)
                self._buffer = self._buffer[length + lenLen + 1:]
                length = None

            else:
                break

    def _processPacket(self, packet):
        """
        Generic MQTT packet decoder
        """
        packet_type = (packet[0] & 0xF0) >> 4
        packet_flags = (packet[0] & 0x0F)

        if packet_type == CONNECT:
            self._handleConnect(packet)
        elif packet_type == CONNACK:
            self._handleConnack(packet)
        elif packet_type == PUBLISH:
            self._handlePublish(packet)
        elif packet_type == PUBACK:
            self._handlePuback(packet)
        elif packet_type == PUBREC:
            self._handlePubrec(packet)
        elif packet_type == PUBREL:
            self._handlePubrel(packet)
        elif packet_type == PUBCOMP:
            self._handlePubcomp(packet)
        elif packet_type == SUBSCRIBE:
            self._handleSubscribe(packet)
        elif packet_type == SUBACK:
            self._handleSuback(packet)
        elif packet_type == UNSUBSCRIBE:
            self._handleUnsubscribe(packet)
        elif packet_type == UNSUBACK:
            self._handleUnsuback(packet)
        elif packet_type == PINGREQ:
            self._handlePingreq(packet)
        elif packet_type == PINGRESP:
            self._handlePingresp(packet)
        elif packet_type == DISCONNECT:
            self._handleDisconnect(packet)
        else:
            print("ERROR: Invalid Packet Type: %s -- Aborting Connection" %(packet_type))
            self.transport.abortConnection()

    def _handleConnect(self, packet):
        print("DEBUG: Received CONNECT")

    def _handleConnack(self, packet):
        print("DEBUG: Received CONNACK")
        res = Connack.unpack(packet)
        if res.resultCode == 0:
            self.state = self.CONNECTED
            # XXX TODO: implement keepAlive
            self.joined()
        else:
            self.state = self.IDLE
            print("ERROR: Connection Refused -- Aborting Connection")
            self.transport.abortConnection()

    def _handlePublish(self, packet):
        print("DEBUG: Received PUBLISH")
        res = Publish.unpack(packet)
        func = self.worker.getTopic(res.topic)
        if func:
            func(res.payload)

    def _handlePuback(self, packet):
        print("DEBUG: Received PUBACK")

    def _handlePubrec(self, packet):
        print("DEBUG: Received PUBREC")

    def _handlePubrel(self, packet):
        print("DEBUG: Received PUBREL")

    def _handlePubcomp(self, packet):
        print("DEBUG: Received PUBCOMP")

    def _handleSubscribe(self, packet):
        print("DEBUG: Received SUBSCRIBE")

    def _handleSuback(self, packet):
        print("DEBUG: Received SUBACK")
        res = Suback.unpack(packet)
        d = self.worker.getSubscribeRequest(res._id, remove=True)
        if d:
            d.callback(res.subscribed)

    def _handleUnsubscribe(self, packet):
        print("DEBUG: Received UNSUBSCRIBE")

    def _handleUnsuback(self, packet):
        print("DEBUG: Received UNSUBACK")

    def _handlePingreq(self, packet):
        print("DEBUG: Received PINGREQ")

    def _handlePingresp(self, packet):
        print("DEBUG: Received PINGRESP")

    def _handleDisconnect(self, packet):
        print("DEBUG: Received DISCONNECT")

    def subscribe(self, topic, function, qos=0):
        print("DEBUG: Subscribing to topic %s"%(topic))

        # XXX Check that the number of in-flight subscribe requests does not exceed the window
        # if len(self.factory.windowSubscribe[self.addr]) == self._window:
        #     raise MQTTWindowError("subscription requests exceeded limit", self._window)

        if not ( 0<= qos < 3):
            raise Exception("Invalid QOS")

        # XXX TODO: add a timeout check.
        _id = self.idGenerator.next()
        msg = Subscribe(_id=_id, topics=[(topic, qos)])
        d = Deferred()

        self.worker.addSubscribeRequest(msg, d)
        self.worker.addTopic(topic, function)
        self.transport.write(msg.pack())

        return d

    def publish(self, topic, message, qos=0, retain=False):

        if not ( 0<= qos < 3):
            raise Exception("Invalid QOS")

        _id = self.idGenerator.next()
        msg = Publish(_id=_id, topic=topic, payload=message, qos=qos, retain=retain, dup=False)

        if msg.qos == QOS_0:
            d = succeed(None)
        else:
            d = Deferred()
            # XXX TODO: add a timer to detect timeouts
            self.worker.addPublishRequest(msg, d)

        self.transport.write(msg.pack())
        return d
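
dataReceived reassembles MQTT control packets from the TCP byte stream: byte 0 holds the packet type and flags, and the following "remaining length" field uses one to four bytes, each contributing 7 bits of the value, with bit 0x80 marking a continuation. decodeLength itself is not shown above; the sketch below restates that standard MQTT encoding in a way consistent with how dataReceived slices the buffer, and is an assumption about the helper rather than its original source.

def decodeLength(buf):
    # Decode the MQTT "remaining length" field found at the start of buf.
    # Each byte carries 7 bits of the value; bit 0x80 means another byte
    # follows. The MQTT spec allows at most 4 length bytes.
    length = 0
    multiplier = 1
    for byte in buf[:4]:
        length += (byte & 0x7F) * multiplier
        if not byte & 0x80:        # continuation bit clear: field is complete
            break
        multiplier *= 128
    return length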
Example #5
def panoptic_converter(original_format_folder, out_folder, out_file):

    if not os.path.isdir(out_folder):
        print("Creating folder {} for panoptic segmentation PNGs".format(
            out_folder))
        os.mkdir(out_folder)

    categories = []
    for idx, el in enumerate(labels):
        if el.ignoreInEval:
            continue
        categories.append({
            'id': el.id,
            'name': el.name,
            'color': el.color,
            'supercategory': el.category,
            'isthing': 1 if el.hasInstances else 0
        })

    categories_dict = {cat['id']: cat for cat in categories}

    file_list = sorted(
        glob.glob(
            os.path.join(original_format_folder,
                         '*/*_gtFine_instanceIds.png')))

    images = []
    annotations = []
    for working_idx, f in enumerate(file_list):
        if working_idx % 10 == 0:
            print(working_idx, len(file_list))

        original_format = np.array(Image.open(f))

        file_name = f.split('/')[-1]
        image_id = file_name.rsplit('_', 2)[0]
        image_filename = '{}_leftImg8bit.png'.format(image_id)
        # image entry; the image id is its filename without the extension
        images.append({
            "id": image_id,
            "width": original_format.shape[1],
            "height": original_format.shape[0],
            "file_name": image_filename
        })

        pan_format = np.zeros(
            (original_format.shape[0], original_format.shape[1], 3),
            dtype=np.uint8)
        id_generator = IdGenerator(categories_dict)

        idx = 0
        l = np.unique(original_format)
        segm_info = []
        for el in l:
            if el < 1000:
                semantic_id = el
                is_crowd = 1
            else:
                semantic_id = el // 1000
                is_crowd = 0
            if semantic_id not in categories_dict:
                continue
            if categories_dict[semantic_id]['isthing'] == 0:
                is_crowd = 0
            mask = original_format == el
            segment_id, color = id_generator.get_id_and_color(semantic_id)
            pan_format[mask] = color

            area = np.sum(mask)  # segment area computation

            # bbox computation for a segment
            hor = np.sum(mask, axis=0)
            hor_idx = np.nonzero(hor)[0]
            x = hor_idx[0]
            width = hor_idx[-1] - x + 1
            vert = np.sum(mask, axis=1)
            vert_idx = np.nonzero(vert)[0]
            y = vert_idx[0]
            height = vert_idx[-1] - y + 1
            bbox = [x, y, width, height]

            segm_info.append({
                "id": int(segment_id),
                "category_id": int(semantic_id),
                "area": area,
                "bbox": bbox,
                "iscrowd": is_crowd
            })

        annotations.append({
            'image_id': image_id,
            'file_name': file_name,
            "segments_info": segm_info
        })

        Image.fromarray(pan_format).save(os.path.join(out_folder, file_name))

    d = {
        'images': images,
        'annotations': annotations,
        'categories': categories,
    }

    with open(out_file, 'w') as f:
        json.dump(d, f)
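
This converter targets Cityscapes ground truth: in *_gtFine_instanceIds.png files, pixels of classes without instances store the raw label id, while instance pixels store label_id * 1000 + instance_index, which is why values below 1000 are treated as crowd regions and el // 1000 recovers the semantic id otherwise. A hedged usage sketch follows; the paths are placeholders, and labels is expected to come from the Cityscapes label definitions imported by the surrounding script.

# Hypothetical invocation converting the Cityscapes validation split to
# COCO panoptic format; adjust the paths to your dataset layout.
panoptic_converter(
    original_format_folder='./gtFine/val',       # <city>/<name>_gtFine_instanceIds.png
    out_folder='./cityscapes_panoptic_val',      # panoptic PNGs are written here
    out_file='./cityscapes_panoptic_val.json')   # COCO-style panoptic JSON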
Example #6
def main(image_id):
    # whether the colors stored in the PNG are used or new colors are generated
    generate_new_colors = True

    json_file = 'panoptic_val2017.json'
    segmentations_folder = './panoptic_val2017/'
    pred_folder = './panoptic_pred2017/'
    img_folder = '/Users/wuyangxin/Desktop/cv/dataset/coco/val2017'
    panoptic_coco_categories = './panoptic_coco_categories.json'

    with open(json_file, 'r') as f:
        coco_d = json.load(f)

    # ann = np.random.choice(coco_d['annotations'])
    ann = None
    for a in coco_d['annotations']:
        if a['image_id'] == image_id:
            ann = a
            break

    with open(panoptic_coco_categories, 'r') as f:
        categories_list = json.load(f)
    categories = {category['id']: category for category in categories_list}

    # find the input image that corresponds to the annotation
    img = None
    pred_img = None
    for image_info in coco_d['images']:
        if image_info['id'] == ann['image_id']:
            try:
                img = np.array(
                    Image.open(
                        os.path.join(img_folder, image_info['file_name'])))
                pred_img = Image.open(
                    os.path.join(pred_folder,
                                 image_info['file_name'].replace('jpg',
                                                                 'png')))
            except:
                print("Undable to find correspoding input image.")
            break

    segmentation = np.array(Image.open(
        os.path.join(segmentations_folder, ann['file_name'])),
                            dtype=np.uint8)
    segmentation_id = rgb2id(segmentation)
    # find segments boundaries
    boundaries = find_boundaries(segmentation_id, mode='thick')

    if generate_new_colors:
        segmentation[:, :, :] = 0
        color_generator = IdGenerator(categories)
        for segment_info in ann['segments_info']:
            color = color_generator.get_color(segment_info['category_id'])
            mask = segmentation_id == segment_info['id']
            segmentation[mask] = color

    # depict boundaries
    # segmentation[boundaries] = [0, 0, 0]

    if img is None:
        plt.figure()
        plt.imshow(segmentation)
        plt.axis('off')
    else:
        plt.figure(figsize=(9, 5))
        plt.subplot(131)
        plt.imshow(img)
        plt.axis('off')
        plt.subplot(132)
        plt.imshow(segmentation)
        plt.axis('off')
        plt.subplot(133)
        plt.imshow(pred_img)
        plt.axis('off')
        plt.tight_layout()
    plt.show()
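
The visualization relies on rgb2id to turn the stored panoptic PNG back into per-pixel segment ids before each segment is repainted with a freshly generated color. panopticapi ships rgb2id/id2rgb helpers for this; the sketch below restates the underlying COCO panoptic convention (id = R + 256 * G + 256**2 * B) rather than reproducing the library source.

import numpy as np

def rgb2id(color):
    # Collapse an RGB panoptic PNG into per-pixel segment ids
    # (COCO panoptic convention: id = R + 256*G + 256**2 * B).
    color = np.asarray(color, dtype=np.uint32)
    return color[..., 0] + 256 * color[..., 1] + 256 * 256 * color[..., 2]

def id2rgb(id_map):
    # Inverse mapping: unpack segment ids into an RGB uint8 image.
    id_map = np.asarray(id_map, dtype=np.uint32).copy()
    rgb = np.zeros(id_map.shape + (3,), dtype=np.uint8)
    for channel in range(3):
        rgb[..., channel] = id_map % 256
        id_map //= 256
    return rgb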
Example #7
                    Image.open(
                        os.path.join(img_folder, image_info['file_name'])))
            except:
                print("Undable to find correspoding input image.")
            break

    segmentation = np.array(Image.open(
        os.path.join(segmentations_folder, ann['file_name'])),
                            dtype=np.uint8)
    segmentation_id = rgb2id(segmentation)
    # find segments boundaries
    boundaries = find_boundaries(segmentation_id, mode='thick')

    if generate_new_colors:
        segmentation[:, :, :] = 0
        color_generator = IdGenerator(categories)
        for segment_info in ann['segments_info']:
            color = color_generator.get_color(segment_info['category_id'])
            mask = segmentation_id == segment_info['id']
            segmentation[mask] = color

    # depict boundaries
    segmentation[boundaries] = [0, 0, 0]

    if img is None:
        plt.figure()
        plt.imshow(segmentation)
        plt.axis('off')
    else:
        plt.figure(figsize=(20, 10))
        plt.subplot(121)
Example #8
def combine_to_panoptic_single_core(proc_id, img_ids, img_id2img,
                                    inst_by_image, sem_by_image,
                                    segmentations_folder, overlap_thr,
                                    stuff_area_limit, categories):
    panoptic_json = []
    id_generator = IdGenerator(categories)

    for idx, img_id in enumerate(img_ids):
        img = img_id2img[img_id]

        if idx % 100 == 0:
            print('Core: {}, {} from {} images processed.'.format(
                proc_id, idx, len(img_ids)))

        pan_segm_id = np.zeros((img['height'], img['width']), dtype=np.uint32)
        used = None
        annotation = {}
        try:
            annotation['image_id'] = int(img_id)
        except Exception:
            annotation['image_id'] = img_id

        annotation['file_name'] = img['file_name'].replace('.jpg', '.png')

        segments_info = []
        for ann in inst_by_image[img_id]:
            area = COCOmask.area(ann['segmentation'])
            if area == 0:
                continue
            if used is None:
                intersect = 0
                used = copy.deepcopy(ann['segmentation'])
            else:
                intersect = COCOmask.area(
                    COCOmask.merge([used, ann['segmentation']],
                                   intersect=True))
            if intersect / area > overlap_thr:
                continue
            used = COCOmask.merge([used, ann['segmentation']], intersect=False)

            mask = COCOmask.decode(ann['segmentation']) == 1
            if intersect != 0:
                mask = np.logical_and(pan_segm_id == 0, mask)
            segment_id = id_generator.get_id(ann['category_id'])
            panoptic_ann = {}
            panoptic_ann['id'] = segment_id
            panoptic_ann['category_id'] = ann['category_id']
            pan_segm_id[mask] = segment_id
            segments_info.append(panoptic_ann)

        for ann in sem_by_image[img_id]:
            mask = COCOmask.decode(ann['segmentation']) == 1
            mask_left = np.logical_and(pan_segm_id == 0, mask)
            if mask_left.sum() < stuff_area_limit:
                continue
            segment_id = id_generator.get_id(ann['category_id'])
            panoptic_ann = {}
            panoptic_ann['id'] = segment_id
            panoptic_ann['category_id'] = ann['category_id']
            pan_segm_id[mask_left] = segment_id
            segments_info.append(panoptic_ann)

        annotation['segments_info'] = segments_info
        panoptic_json.append(annotation)

        Image.fromarray(id2rgb(pan_segm_id)).save(
            os.path.join(segmentations_folder, annotation['file_name']))

    return panoptic_json
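
combine_to_panoptic_single_core merges instance and semantic predictions into a single panoptic map: instance masks are laid down in order and skipped once their overlap with already-claimed pixels exceeds overlap_thr, then semantic (stuff) masks fill whatever is still empty, but only when the leftover region reaches stuff_area_limit pixels. A tiny numeric illustration of those two filters; the values below are made up and are not defaults from the original script.

# Hypothetical numbers showing when a segment is dropped.
overlap_thr = 0.5
stuff_area_limit = 64 * 64

area = 5000        # pixels in the current instance mask
intersect = 3000   # pixels already claimed by previously placed instances
print(intersect / area > overlap_thr)   # True -> this instance is skipped

mask_left = 3500   # stuff pixels still unassigned after all instances
print(mask_left < stuff_area_limit)     # True -> this stuff segment is skipped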