Example #1
    def get_activity(self, team_id,
                     filter_user_id=None, filter_project_id=None, filter_job_id=None, filter_actions=None,
                     progress_cb=None, start_date=None, end_date=None):

        from datetime import datetime, timedelta

        filters = []
        if filter_user_id is not None:
            filters.append({"field": ApiField.USER_ID, "operator": "=", "value": filter_user_id})
        if filter_project_id is not None:
            filters.append({"field": ApiField.PROJECT_ID, "operator": "=", "value": filter_project_id})
        if filter_job_id is not None:
            filters.append({"field": ApiField.JOB_ID, "operator": "=", "value": filter_job_id})
        if filter_actions is not None:
            if type(filter_actions) is not list:
                raise TypeError(
                    "filter_actions has type {!r}, but must be of type {!r}".format(type(filter_actions), list))
            filters.append({"field": ApiField.TYPE, "operator": "in", "value": filter_actions})

        def _add_dt_filter(filters, dt, op):
            dt_iso = None
            if dt is None:
                return
            if type(dt) is str:
                dt_iso = dt
            elif type(dt) is datetime:
                dt_iso = dt.isoformat()
            else:
                raise TypeError('dt must be a string in ISO 8601 format or a datetime, not {}'.format(type(dt)))
            filters.append({"field": ApiField.DATE, "operator": op, "value": dt_iso})

        _add_dt_filter(filters, start_date, ">=")
        _add_dt_filter(filters, end_date, "<=")

        method = 'teams.activity'
        data = {ApiField.TEAM_ID: team_id, ApiField.FILTER: filters}
        first_response = self._api.post(method, data)
        first_response = first_response.json()

        total = first_response['total']
        per_page = first_response['perPage']
        pages_count = first_response['pagesCount']
        results = first_response['entities']

        if progress_cb is not None:
            progress_cb(len(results), total)
        if pages_count > 1 or len(first_response['entities']) != total:
            for page_idx in range(2, pages_count + 1):
                temp_resp = self._api.post(method, {**data, 'page': page_idx, 'per_page': per_page})
                temp_items = temp_resp.json()['entities']
                results.extend(temp_items)
                if progress_cb is not None:
                    progress_cb(len(results), total)
            if len(results) != total:
                logger.warn(f"Method '{method}': new events were created during pagination, "
                            f"downloaded={len(results)}, total={total}")

        return results
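
A minimal usage sketch for the paginated fetch above, assuming an initialized client that exposes this method as api.team.get_activity (ids, credentials, and the date range below are placeholders):

import supervisely as sly
from datetime import datetime, timedelta

api = sly.Api(server_address="https://app.supervise.ly", token="<your token>")  # placeholder credentials

def print_progress(done, total):
    print(f"downloaded {done}/{total} activity events")

events = api.team.get_activity(
    team_id=123,                                    # placeholder team id
    filter_user_id=7,                               # placeholder user id
    start_date=datetime.now() - timedelta(days=7),  # last week only
    progress_cb=print_progress,
)
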
Example #2
    def add_pair(self, ann_gt, ann_pred):
        labels_gt = filter_labels_by_name(ann_gt.labels, self._gt_to_pred_class_mapping)
        all_labels_pred = list(filter_labels_by_name(ann_pred.labels, self._pred_to_gt_class_mapping))
        labels_pred = []
        for label in all_labels_pred:
            label_confidence = self._get_confidence_value(label)
            if label_confidence is None:
                logger.warn(f'Found a label with class {label.obj_class.name!r} that does not have a '
                            f'{self._confidence_tag_name!r} tag attached. Skipping this object for metric computation.')
            elif label_confidence >= self._confidence_threshold:
                labels_pred.append(label)
        match_result = match_labels_by_iou(labels_1=labels_gt, labels_2=labels_pred, img_size=ann_gt.img_size,
                                           iou_threshold=self._iou_threshold)
        for match in match_result.matches:
            gt_class = match.label_1.obj_class.name
            label_pred = match.label_2
            self._counters[gt_class][MATCHES].append(
                MatchWithConfidence(is_correct=(label_pred.obj_class.name == self._gt_to_pred_class_mapping[gt_class]),
                                    confidence=self._get_confidence_value(label_pred)))
        # Add unmatched predictions to the list as false positive matches.
        for unmatched_pred in match_result.unmatched_labels_2:
            gt_class = self._pred_to_gt_class_mapping[unmatched_pred.obj_class.name]
            self._counters[gt_class][MATCHES].append(
                MatchWithConfidence(is_correct=False, confidence=self._get_confidence_value(unmatched_pred)))

        for label_1 in labels_gt:
            self._counters[label_1.obj_class.name][TOTAL_GROUND_TRUTH] += 1
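
The bookkeeping above reduces to three rules: every IoU match is recorded under its ground-truth class as a MatchWithConfidence, every unmatched prediction is recorded as an incorrect match (a false positive), and every ground-truth label increments that class's denominator. A self-contained toy sketch of the same counter structure using plain dicts (all names below are illustrative, not the library's):

from collections import defaultdict

MATCHES = "matches"
TOTAL_GROUND_TRUTH = "total_ground_truth"

counters = defaultdict(lambda: {MATCHES: [], TOTAL_GROUND_TRUTH: 0})

def record_match(gt_class, is_correct, confidence):
    counters[gt_class][MATCHES].append({"is_correct": is_correct, "confidence": confidence})

record_match("car", True, 0.91)    # matched prediction with the expected class
record_match("car", False, 0.40)   # unmatched prediction counted as a false positive
counters["car"][TOTAL_GROUND_TRUTH] += 1
print(dict(counters))
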
Example #3
    def _upload_data_bulk(self,
                          func_item_to_byte_stream,
                          items_hashes,
                          retry_cnt=3,
                          progress_cb=None,
                          item_progress=None):
        hash_to_items = {i_hash: item for item, i_hash in items_hashes}

        unique_hashes = set(hash_to_items.keys())
        remote_hashes = set(self.check_existing_hashes(
            list(unique_hashes)))  # existing -- from server
        if progress_cb:
            progress_cb(len(remote_hashes))
        # pending_hashes = unique_hashes  # @TODO: only for debug!
        pending_hashes = unique_hashes - remote_hashes

        for retry_idx in range(retry_cnt):
            # single attempt to upload all data which is not uploaded yet
            for hashes in batched(list(pending_hashes)):
                pending_hashes_items = [(h, hash_to_items[h]) for h in hashes]
                hashes_rcv = self._upload_uniq_videos_single_req(
                    func_item_to_byte_stream, pending_hashes_items,
                    item_progress)
                pending_hashes -= set(hashes_rcv)
                if set(hashes_rcv) - set(hashes):
                    logger.warn('Hash inconsistency in videos bulk upload.',
                                extra={
                                    'sent': hashes,
                                    'received': hashes_rcv
                                })
                if progress_cb:
                    progress_cb(len(hashes_rcv))

            if not pending_hashes:
                return

            logger.warn('Unable to upload videos (data).',
                        extra={
                            'retry_idx': retry_idx,
                            'items': [(h, hash_to_items[h]) for h in pending_hashes]
                        })
            # now retry it for the case if it is a shadow server/connection error

        raise RuntimeError(
            "Unable to upload videos (data). "
            "Please check that the videos are in a supported format and are not corrupted."
        )
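
The retry loop above keeps shrinking a pending set of hashes until every item has been acknowledged or the retry budget runs out. A stripped-down sketch of the same pattern with a stubbed batch-upload function (all names are illustrative):

def upload_with_retries(items_by_hash, upload_batch, retry_cnt=3):
    pending = set(items_by_hash)
    for retry_idx in range(retry_cnt):
        # one attempt over everything that is still unacknowledged
        received = upload_batch([(h, items_by_hash[h]) for h in sorted(pending)])
        pending -= set(received)
        if not pending:
            return
    raise RuntimeError(f"still pending after {retry_cnt} retries: {sorted(pending)}")

calls = {"n": 0}

def flaky_upload(batch):
    # pretend the server dropped the last item of the first request
    calls["n"] += 1
    hashes = [h for h, _ in batch]
    return hashes[:-1] if calls["n"] == 1 else hashes

upload_with_retries({"h1": "a.mp4", "h2": "b.mp4"}, flaky_upload)
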
Example #4
def get_data_dir():
    key = "SLY_APP_DATA_DIR"
    dir = None

    try:
        dir = os.environ[key]
    except KeyError:
        raise KeyError(f"Environment variable {key} is not defined")

    if not dir_exists(dir):
        logger.warn(
            f"App data directory {dir} doesn't exist. It will be created automatically."
        )
        mkdir(dir)
    return dir
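
A quick usage sketch: the environment variable must be set before the call, otherwise a KeyError is raised (the path below is just an example):

import os

os.environ["SLY_APP_DATA_DIR"] = "/tmp/my_app_data"   # example path
data_dir = get_data_dir()                             # creates the directory if it is missing
print(data_dir)                                       # /tmp/my_app_data
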
Example #5
    def crop(self, rect):
        '''
        Crop the current Polygon with a given rectangle. If the polygon cannot be cropped, the exception is
        caught, a warning is logged, and an empty list is returned.
        :param rect: Rectangle class object
        :return: list of Polygon class objects
        '''
        from supervisely.geometry.point_location import PointLocation
        try:
            # points = [
            #     PointLocation(row=rect.top, col=rect.left),
            #     PointLocation(row=rect.top, col=rect.right + 1),
            #     PointLocation(row=rect.bottom + 1, col=rect.right + 1),
            #     PointLocation(row=rect.bottom + 1, col=rect.left)
            # ]
            points = [
                PointLocation(row=rect.top, col=rect.left),
                PointLocation(row=rect.top, col=rect.right),
                PointLocation(row=rect.bottom, col=rect.right),
                PointLocation(row=rect.bottom, col=rect.left)
            ]
            #points = rect.corners # old implementation with 1 pixel error (right bottom)
            # #@TODO: investigate here (critical issue)

            clipping_window_shpl = ShapelyPolygon(points_to_row_col_list(points))
            self_shpl = ShapelyPolygon(self.exterior_np, holes=self.interior_np)
            intersections_shpl = self_shpl.buffer(0).intersection(clipping_window_shpl)
            mapping_shpl = mapping(intersections_shpl)
        except Exception:
            logger.warn('Polygon cropping exception, shapely.', exc_info=True)
            # raise
            # if polygon is invalid, just print warning and skip it
            # @TODO: need more investigation here
            return []

        intersections = shapely_figure_to_coords_list(mapping_shpl)

        # Check for bad cropping cases (e.g. empty points list)
        out_polygons = []
        for intersection in intersections:
            if isinstance(intersection, list) and len(intersection) > 0 and len(intersection[0]) >= 3:
                exterior = row_col_list_to_points(intersection[0], do_round=True)
                interiors = []
                for interior_contour in intersection[1:]:
                    if len(interior_contour) > 2:
                        interiors.append(row_col_list_to_points(interior_contour, do_round=True))
                out_polygons.append(Polygon(exterior, interiors))
        return out_polygons
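
A minimal usage sketch, assuming the standard supervisely geometry import paths (coordinates are arbitrary):

from supervisely.geometry.point_location import PointLocation
from supervisely.geometry.polygon import Polygon
from supervisely.geometry.rectangle import Rectangle

poly = Polygon(
    exterior=[PointLocation(0, 0), PointLocation(0, 10), PointLocation(10, 10), PointLocation(10, 0)],
    interior=[],
)
cropped = poly.crop(Rectangle(top=2, left=2, bottom=8, right=8))
for piece in cropped:
    print(piece.exterior_np)   # clipped contour as an (N, 2) array of row/col coordinates
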
Example #6
    def _upload_data_bulk(self, func_item_to_byte_stream, items_hashes, retry_cnt=3, progress_cb=None):
        """
        Upload images (binary data) to server. Works with already existing or duplicating images.
        :param func_item_to_byte_stream: converter for "item" to byte stream
        :param items_hashes: iterable of pairs (item, hash) where "item" is a some descriptor (e.g. image file path)
         for image data, and "hash" is a hash for the image binary data
        :param retry_cnt: int, number of retries to send the whole set of items
        :param progress_cb: callback to account progress (in number of items)
        """
        hash_to_items = {i_hash: item for item, i_hash in items_hashes}

        unique_hashes = set(hash_to_items.keys())
        remote_hashes = set(self.check_existing_hashes(list(unique_hashes)))  # existing -- from server
        if progress_cb:
            progress_cb(len(remote_hashes))
        pending_hashes = unique_hashes - remote_hashes

        # @TODO: some correlation with sly.io.network_exceptions. Should we perform retries here?
        for retry_idx in range(retry_cnt):
            # single attempt to upload all data which is not uploaded yet

            for hashes in batched(list(pending_hashes)):
                pending_hashes_items = [(h, hash_to_items[h]) for h in hashes]
                hashes_rcv = self._upload_uniq_images_single_req(func_item_to_byte_stream, pending_hashes_items)
                pending_hashes -= set(hashes_rcv)
                if set(hashes_rcv) - set(hashes):
                    logger.warn('Hash inconsistency in images bulk upload.',
                                extra={'sent': hashes, 'received': hashes_rcv})
                if progress_cb:
                    progress_cb(len(hashes_rcv))

            if not pending_hashes:
                return

            logger.warn('Unable to upload images (data).', extra={
                'retry_idx': retry_idx,
                'items': [(h, hash_to_items[h]) for h in pending_hashes]
            })
            # now retry it for the case if it is a shadow server/connection error

        raise RuntimeError("Unable to upload images (data). "
                           "Please check that the images are in a supported format and are not corrupted.")
Example #7
    def __init__(self, exterior, interior,
                 sly_id=None, class_id=None, labeler_login=None, updated_at=None, created_at=None):
        '''
        :param exterior: list of PointLocation objects; the object contour is defined by these points
        :param interior: list of elements with the same structure as the "exterior" field. This is the list of polygons that define holes in the object.
        '''
        if len(exterior) < 3:
            exterior.extend([exterior[-1]] * (3 - len(exterior)))
            logger.warn('"{}" field must contain at least 3 points to create "Polygon" object.'.format(EXTERIOR))
            #raise ValueError('"{}" field must contain at least 3 points to create "Polygon" object.'.format(EXTERIOR))

        for element in interior:
            if len(element) < 3:
                logger.warn('"{}" interior field must contain at least 3 points to create "Polygon" object.'.format(element))
                element.extend([element[-1]] * (3 - len(element)))
        #if any(len(element) < 3 for element in interior):
        #    raise ValueError('"{}" element must contain at least 3 points.'.format(INTERIOR))

        super().__init__(exterior, interior, sly_id=sly_id, class_id=class_id, labeler_login=labeler_login,
                         updated_at=updated_at, created_at=created_at)
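
A short sketch of the degenerate-input handling described above: an exterior with fewer than 3 points is padded by repeating its last point, and a warning is logged instead of raising (coordinates are arbitrary, import paths assume the standard package layout):

from supervisely.geometry.point_location import PointLocation
from supervisely.geometry.polygon import Polygon

poly = Polygon(exterior=[PointLocation(0, 0), PointLocation(0, 5)], interior=[])
print(len(poly.exterior))   # 3 -- the last point was repeated once
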
Example #8
def clean_dir(dir_: str, ignore_errors=True):
    """
    Recursively delete a directory tree, but save root directory.
    Args:
        dir_: Target directory path.
    """
    # old implementation
    #shutil.rmtree(dir_, ignore_errors=True)
    #mkdir(dir_)

    for filename in os.listdir(dir_):
        file_path = os.path.join(dir_, filename)
        try:
            if os.path.isfile(file_path) or os.path.islink(file_path):
                os.unlink(file_path)
            elif os.path.isdir(file_path):
                shutil.rmtree(file_path)
        except Exception as e:
            logger.warn(f"Failed to delete {file_path}. Reason: {repr(e)}")
            if ignore_errors is False:
                raise e
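
A minimal usage sketch (the directory path is just an example):

import os

os.makedirs("/tmp/demo_dir/sub", exist_ok=True)
open("/tmp/demo_dir/file.txt", "w").close()

clean_dir("/tmp/demo_dir")           # removes file.txt and sub/, keeps /tmp/demo_dir itself
print(os.listdir("/tmp/demo_dir"))   # []
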
Example #9
    def _upload_uniq_images_single_req(self, func_item_to_byte_stream, hashes_items_to_upload):
        """
        Upload images (binary data) to server with single request.
        Expects unique images that aren't exist at server.
        :param func_item_to_byte_stream: converter for "item" to byte stream
        :param hashes_items_to_upload: list of pairs (hash, item)
        :return: list of hashes for successfully uploaded items
        """
        content_dict = {}
        for idx, (_, item) in enumerate(hashes_items_to_upload):
            content_dict["{}-file".format(idx)] = (str(idx), func_item_to_byte_stream(item), 'image/*')
        encoder = MultipartEncoder(fields=content_dict)
        resp = self._api.post('images.bulk.upload', encoder)

        resp_list = json.loads(resp.text)
        remote_hashes = [d['hash'] for d in resp_list if 'hash' in d]
        if len(remote_hashes) != len(hashes_items_to_upload):
            problem_items = [(hsh, item, resp['errors'])
                             for (hsh, item), resp in zip(hashes_items_to_upload, resp_list) if resp.get('errors')]
            logger.warn('Not all images were uploaded within request.', extra={
                'total_cnt': len(hashes_items_to_upload), 'ok_cnt': len(remote_hashes), 'items': problem_items})
        return remote_hashes
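
The request body is built with requests_toolbelt's MultipartEncoder, one field per item named "<idx>-file". A stripped-down sketch of the same field layout, posted with plain requests to a placeholder endpoint:

import io
import requests
from requests_toolbelt import MultipartEncoder

items = [b"<image bytes 1>", b"<image bytes 2>"]   # placeholder payloads
fields = {f"{idx}-file": (str(idx), io.BytesIO(data), "image/*")
          for idx, data in enumerate(items)}
encoder = MultipartEncoder(fields=fields)

resp = requests.post("https://example.com/images.bulk.upload",   # placeholder URL
                     data=encoder,
                     headers={"Content-Type": encoder.content_type})
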
Example #10
    def _upload_uniq_videos_single_req(self,
                                       func_item_to_byte_stream,
                                       hashes_items_to_upload,
                                       progress_cb=None):
        content_dict = {}
        for idx, (_, item) in enumerate(hashes_items_to_upload):
            content_dict["{}-file".format(idx)] = (
                str(idx), func_item_to_byte_stream(item), 'video/*')
        encoder = MultipartEncoder(fields=content_dict)

        if progress_cb is not None:

            def _callback(monitor, progress):
                progress(monitor.bytes_read)

            callback = partial(_callback, progress=progress_cb)
            monitor = MultipartEncoderMonitor(encoder, callback)
            resp = self._api.post('videos.bulk.upload', monitor)
        else:
            resp = self._api.post('videos.bulk.upload', encoder)

        resp_list = json.loads(resp.text)
        remote_hashes = [d['hash'] for d in resp_list if 'hash' in d]
        if len(remote_hashes) != len(hashes_items_to_upload):
            problem_items = [
                (hsh, item, resp['errors'])
                for (hsh, item), resp in zip(hashes_items_to_upload, resp_list)
                if resp.get('errors')
            ]
            logger.warn('Not all videos were uploaded within request.',
                        extra={
                            'total_cnt': len(hashes_items_to_upload),
                            'ok_cnt': len(remote_hashes),
                            'items': problem_items
                        })
        return remote_hashes
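
The only difference from the image variant is the optional MultipartEncoderMonitor, which calls the progress callback with the number of bytes read as the multipart body streams out. A hedged sketch of just that wiring (placeholder endpoint and payload):

import io
from functools import partial

import requests
from requests_toolbelt import MultipartEncoder, MultipartEncoderMonitor

encoder = MultipartEncoder(fields={"0-file": ("0", io.BytesIO(b"<video bytes>"), "video/*")})

def _callback(monitor, progress):
    progress(monitor.bytes_read)

monitor = MultipartEncoderMonitor(encoder, partial(_callback, progress=lambda n: print(f"{n} bytes sent")))
resp = requests.post("https://example.com/videos.bulk.upload",   # placeholder URL
                     data=monitor,
                     headers={"Content-Type": monitor.content_type})
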