Example #1
    def predict(self, image: np.ndarray) -> List[Object2D]:  # pylint:disable=too-many-locals
        """Predict the required bounding boxes on the given <image>."""
        # TODO: implement non-letterbox resizing inference
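        # NOTE: `args` below is assumed to be a module-level inference config
        # that carries the network input size.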
        image, ratio, d_w, d_h = letterbox_resize(image,
                                                  args.img_size[0],
                                                  args.img_size[1],
                                                  interp=1)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB).astype(np.float32)
        image = image / 255.0

        y_pred = self._session.run(self._y_pred,
                                   feed_dict={
                                       self._is_training: False,
                                       self._image: np.array([image])
                                   })
        pred_content = get_preds_gpu(self._session, self._gpu_nms_op,
                                     self._pred_boxes_flag,
                                     self._pred_scores_flag, [0], y_pred)

        predictions = []
        for pred in pred_content:
            _, x_min, y_min, x_max, y_max, score, label = pred
            x_min = int((x_min - d_w) / ratio)
            x_max = int((x_max - d_w) / ratio)
            y_min = int((y_min - d_h) / ratio)
            y_max = int((y_max - d_h) / ratio)

            predictions.append(
                Object2D(Bounds2D(x_min, y_min, x_max - x_min, y_max - y_min),
                         label, score))
        return predictions
Example #2
    def predict(self, image: np.ndarray) -> List[Object2D]:
        """Predict the required bounding boxes on the given <image>."""
        # TODO: resize / other manipulations
        boxes, classes, scores = filter_batch(
            self.__squeeze.model.predict([image]), self.__squeeze.config)

        print(boxes, classes, scores)
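        # NOTE: debug output above; post-processing into real detections is
        # still TODO, so a placeholder box is returned.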
        return [Object2D(Bounds2D(0, 0, 0, 0), class_index=0, score=0)]
Example #3
def _lisa(path: str) -> Dataset:  # pylint:disable=too-many-locals,too-many-branches
    images: Dataset.Images = []
    classes: Dataset.Classes = []
    annotations: Dataset.Annotations = {}

    day_train_path = os.path.join(path, "dayTrain")
    if not os.path.isdir(day_train_path):
        raise FileNotFoundError("Could not find `dayTrain` in LISA dataset.")

    for file_name in os.listdir(day_train_path):
        if not file_name.startswith("dayClip"):
            continue

        clip_path = os.path.join(day_train_path, file_name)
        frames_path = os.path.join(clip_path, "frames")
        annotations_path = os.path.join(clip_path, "frameAnnotationsBOX.csv")
        if not os.path.exists(frames_path):
            raise FileNotFoundError(f"Could not find frames folder {frames_path}.")
        if not os.path.exists(annotations_path):
            raise FileNotFoundError(f"Could not find annotations file {annotations_path}.")

        # Read annotations
        with open(annotations_path, "r") as annotations_file:
            reader = csv.reader(annotations_file, delimiter=";")
            for i, row in enumerate(reader):
                # Skip the first row; it is just the header
                if i == 0:
                    continue

                # row[0] holds the original frame path; keep only the file name
                image_name = row[0].split("/")[-1]
                image_path = os.path.join(frames_path, image_name)

                detection_class = row[1]

                # Calculate the position and dimensions of the bounding box
                x_min = int(row[2])  # x-coordinate of top left corner
                y_min = int(row[3])  # y-coordinate of top left corner
                x_max = int(row[4])  # x-coordinate of bottom right corner
                y_max = int(row[5])  # y-coordinate of bottom right corner

                # Get the class index if it has already been registered
                # otherwise register it and select the index
                try:
                    class_index = classes.index(detection_class)
                except ValueError:
                    class_index = len(classes)
                    classes.append(detection_class)

                # Package the detection
                images.append(image_path)
                if image_path not in annotations:
                    annotations[image_path] = []
                annotations[image_path].append(
                    Object2D(Bounds2D(x_min, y_min, x_max - x_min, y_max - y_min), class_index))

    return Dataset(LIGHT_DATASET_NAME, images, classes, annotations)
Example #4
def rotate_image(image, annots):
    def rotate_point(point, center, angle_rad):
        x, y = point
        x0, y0 = center
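        # Standard rotation about (x0, y0); the sign on the sin terms accounts
        # for the image y-axis pointing down.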
        x1 = x0 + (x - x0) * math.cos(angle_rad) + (y - y0) * math.sin(angle_rad)
        y1 = y0 - (x - x0) * math.sin(angle_rad) + (y - y0) * math.cos(angle_rad)
        return (x1, y1)

    if len(annots) == 0:
        # If no annotations, then don't do anything
        return image, annots

    # Rotate the image (random.randint is inclusive on both ends)
    angle = random.randint(-MAX_ROTATION_ANGLE_DEGREE, MAX_ROTATION_ANGLE_DEGREE)
    image_center = tuple(np.array(image.shape[1::-1]) / 2)
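    # image.shape[1::-1] is (width, height); cv2 expects the centre as (x, y)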
    rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)
    rotated_img = cv2.warpAffine(image,
                                 rot_mat,
                                 image.shape[1::-1],
                                 flags=cv2.INTER_LINEAR)

    # Rotate the annotation
    x0, y0 = image_center
    angle_rad = math.radians(angle)
    for annot in annots:
        x1, y1 = annot.bounds.left, annot.bounds.top
        x2, y2 = annot.bounds.right, annot.bounds.bottom

        # Rotate each corner of the bounding box
        left_top_x, left_top_y = rotate_point((x1, y1), (x0, y0), angle_rad)
        left_bot_x, left_bot_y = rotate_point((x1, y2), (x0, y0), angle_rad)
        right_top_x, right_top_y = rotate_point((x2, y1), (x0, y0), angle_rad)
        right_bot_x, right_bot_y = rotate_point((x2, y2), (x0, y0), angle_rad)

        # Create new bounding box
        annot_max_x = max(left_top_x, left_bot_x, right_top_x, right_bot_x)
        annot_max_y = max(left_top_y, left_bot_y, right_top_y, right_bot_y)
        annot_min_x = min(left_top_x, left_bot_x, right_top_x, right_bot_x)
        annot_min_y = min(left_top_y, left_bot_y, right_top_y, right_bot_y)

        width = annot_max_x - annot_min_x
        height = annot_max_y - annot_min_y

        annot.bounds = Bounds2D(math.ceil(annot_min_x), math.ceil(annot_min_y),
                                math.ceil(width), math.ceil(height))

    return rotated_img, annots
Example #5
def _Y4Signs(path: str) -> Dataset:

    if not os.path.isdir(path):
        raise FileNotFoundError("Could not find Y4Signs dataset on this path.")

    images: Dataset.Images = []
    classes: Dataset.Classes = []
    annotations: Dataset.Annotations = {}

    # list of classes [str] in set01/obj.names
    classes = open(os.path.join(path, "set01",
                                "obj.names")).read().splitlines()

    for set_ in os.listdir(path):
        if not os.path.isfile(os.path.join(path, set_, "train.txt")):
            raise FileNotFoundError("train.txt does not exist in " + set_)

        with open(os.path.join(path, set_, "train.txt")) as f:
            train_lines = f.read().split()

        # Each path in train.txt is relative to the set folder but carries an
        # extra leading "data/" component that must be stripped.
        for rel_image_path in train_lines:
            # absolute image path to the image.
            abs_image_path = os.path.join(path, set_,
                                          rel_image_path[len("data/"):])

            # append the image path to list of images
            images.append(abs_image_path)

            # annotations file for the corresponding image
            annotation_file_path = abs_image_path[:-4] + ".txt"
            with open(annotation_file_path) as annotation_file:
                annotation_lines = annotation_file.read().splitlines()

            temp_annotations = []

            # read the annotations file and loop through each line.
            for bounding_box in annotation_lines:
                # split each line with spaces to get the [class, x1, y1, x2, y2]
                annotation_arr = bounding_box.strip().split(" ")

                # check if legal
                if len(annotation_arr) != 5:
                    raise InvalidBoundingBoxError(annotation_file_path)

                # Construct and append an Object2D object
                temp_annotations.append(
                    Object2D(
                        Bounds2D(
                            float(annotation_arr[1]),
                            float(annotation_arr[2]),
                            float(annotation_arr[3]),
                            float(annotation_arr[4]),
                        ), int(annotation_arr[0])))

            # Record the (abs_image_path, temp_annotations) key-value pair.
            annotations[abs_image_path] = temp_annotations

    return Dataset(DATASET_NAME, images, classes, annotations)
Example #6
    def predict(self, image: np.ndarray) -> List[Object2D]:
        """Predict the required bounding boxes on the given <image>."""
        predictions: List[Tuple[int, int, int, int]] = self.__cascade.detectMultiScale(
            cv2.cvtColor(image, cv2.COLOR_RGB2GRAY),
            self.settings.scale_factor,
            self.settings.min_neighbours)
        return [
            Object2D(Bounds2D(*prediction), self.settings.class_index)
            for prediction in predictions
        ]
Example #7
def _jaad_dataset(path: str) -> Dataset:  # pylint:disable=too-many-locals, too-many-branches
    images: Dataset.Images = []
    classes: Dataset.Classes = ['pedestrian', 'ped', 'people']
    annotations: Dataset.Annotations = {}

    data_params = {
        'fstride': 1,
        'sample_type': 'all',
        # 'high_visibility' (high + low res, high vis), 'default' (high res, high vis only)
        'subset': 'all_videos',
        'data_split_type': 'default',
        'height_rng': [0, float('inf')],
        'squarify_ratio': 0
    }

    jaad = JAAD(data_path=path)
    jaad_anns = jaad.generate_database()

    # get all video ids
    train_ids, _ = jaad.get_data_ids('train', data_params)
    val_ids, _ = jaad.get_data_ids('val', data_params)
    test_ids, _ = jaad.get_data_ids('test', data_params)
    video_ids = train_ids + val_ids + test_ids

    for vid in video_ids:
        for pid in jaad_anns[vid]['ped_annotations']:
            ped_ann = jaad_anns[vid]['ped_annotations'][pid]
            imgs = [
                os.path.join(jaad.jaad_path, 'images', vid,
                             '{:05d}.png'.format(f))
                for f in ped_ann['frames']
            ]
            boxes = ped_ann['bbox']

            for box, img in zip(boxes, imgs):
                bounds = Bounds2D(box[0], box[1], box[2] - box[0],
                                  box[3] - box[1])
                if 'pedestrian' in ped_ann['old_id']:
                    class_index = classes.index('pedestrian')
                elif 'people' in ped_ann['old_id']:
                    class_index = classes.index('people')
                else:
                    class_index = classes.index('ped')

                if img not in annotations:
                    images.append(img)
                    annotations[img] = [Object2D(bounds, class_index)]
                else:
                    annotations[img].append(Object2D(bounds, class_index))

    return Dataset(DATASET_NAME, images, classes, annotations)
Example #8
def _lyftperception(
        path: str = '/home/lns/.lns-training/resources/data/LyftPerception/',
        datatype: str = 'train') -> Dataset:  # noqa
    images: Dataset.Images = []
    classes: Dataset.Classes = [
        'car', 'pedestrian', 'animal', 'other_vehicle', 'bus', 'motorcycle',
        'truck', 'emergency_vehicle', 'bicycle'
    ]
    annotations: Dataset.Annotations = {}

    lyft = LyftDataset(data_path=path,
                       json_path=os.path.join(path, 'data'),
                       verbose=True,
                       map_resolution=0.1)

    for sample in lyft.sample:
        cam_token = sample['data']['CAM_FRONT']

        # Returns the data path as well as all annotations related to that sample_data.
        # Note that the boxes are transformed into the current sensor's coordinate frame.
        data_path, boxes, camera_intrinsic = lyft.get_sample_data(cam_token)
        images.append(str(data_path))
        annotations[str(data_path)] = []

        for box in boxes:
            img_corners = view_points(box.corners(),
                                      camera_intrinsic,
                                      normalize=True)[:2, :]

            # Take an outer rect of the 3d projection, clamped to the
            # 1224x1024 image
            xmin = max(img_corners[0].min(), 0)
            xmax = min(img_corners[0].max(), 1224)
            ymin = max(img_corners[1].min(), 0)
            ymax = min(img_corners[1].max(), 1024)

            if xmax - xmin <= 0 or ymax - ymin <= 0:
                continue

            bounds = Bounds2D(xmin, ymin, xmax - xmin, ymax - ymin)
            # One of: car, pedestrian, animal, other_vehicle, bus, motorcycle,
            # truck, emergency_vehicle, bicycle
            label = box.name
            if label is not None:
                class_index = classes.index(label)
                annotations[str(data_path)].append(
                    Object2D(bounds, class_index))

    return Dataset(DATASET_NAME, images, classes, annotations)
Example #9
def _mocked(path: str) -> Dataset:
    images: Dataset.Images = []
    classes: Dataset.Classes = []
    annotations: Dataset.Annotations = {}

    for i, class_name in enumerate(os.listdir(path)):
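        # Each sub-folder is a class; every image inside it gets a single
        # dummy zero-area box of that class.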
        classes.append(class_name)
        class_folder = os.path.join(path, class_name)
        for file in os.listdir(class_folder):
            image_path = os.path.join(class_folder, file)
            images.append(image_path)
            annotations[image_path] = [Object2D(Bounds2D(0, 0, 0, 0), i)]

    return Dataset(DATASET_NAME, images, classes, annotations)
Example #10
def _lisa_signs(path: str) -> Dataset:  # pylint: disable=too-many-locals
    images: Dataset.Images = []
    classes: Dataset.Classes = []
    annotations: Dataset.Annotations = {}

    annotations_path = os.path.join(path, "allAnnotations.csv")
    if not os.path.exists(annotations_path):
        raise FileNotFoundError("Could not find annotations file {annotations_path} in LISA Signs dataset.")

    # Read annotations
    with open(annotations_path, "r") as annotations_file:
        reader = csv.reader(annotations_file, delimiter=";")
        for i, row in enumerate(reader):
            # Skip the first row; it is just the header
            if i == 0:
                continue

            image_name = row[0]
            image_path = os.path.join(path, image_name)

            detection_class = row[1]

            # Calculate the position and dimensions of the bounding box
            x_min = int(row[2])  # x-coordinate of top left corner
            y_min = int(row[3])  # y-coordinate of top left corner
            x_max = int(row[4])  # x-coordinate of bottom right corner
            y_max = int(row[5])  # y-coordinate of bottom right corner

            # Get the class index if it has already been registered
            # otherwise register it and select the index
            try:
                class_index = classes.index(detection_class)
            except ValueError:
                class_index = len(classes)
                classes.append(detection_class)

            # Package the detection
            images.append(image_path)
            if image_path not in annotations:
                annotations[image_path] = []
            annotations[image_path].append(Object2D(Bounds2D(x_min, y_min, x_max - x_min, y_max - y_min), class_index))

    return Dataset(SIGN_DATASET_NAME, images, classes, annotations)
Example #11
def _bosch(path: str) -> Dataset:  # pylint:disable=too-many-locals
    images: Dataset.Images = []
    classes: Dataset.Classes = []
    annotations: Dataset.Annotations = {}

    annotations_path = os.path.join(path, "train.yaml")
    if not os.path.isfile(annotations_path):
        raise FileNotFoundError(
            f"Could not find annotations file {annotations_path}.")
    with open(annotations_path, "r") as file:
        raw_annotations = yaml.safe_load(file)

    for annotation in raw_annotations:
        detections = annotation["boxes"]
        image_path = os.path.abspath(os.path.join(path, annotation["path"]))

        for detection in detections:
            label = detection["label"]
            x_min = round(detection["x_min"])
            x_max = round(detection["x_max"])
            y_min = round(detection["y_min"])
            y_max = round(detection["y_max"])

            # Get the class index if it has already been registered otherwise register it and select the index
            try:
                class_index = classes.index(label)
            except ValueError:
                class_index = len(classes)
                classes.append(label)

            # Package the detection
            if image_path not in annotations:
                annotations[image_path] = []
                images.append(image_path)
            annotations[image_path].append(
                Object2D(Bounds2D(x_min, y_min, x_max - x_min, y_max - y_min),
                         class_index))

    return Dataset(DATASET_NAME, images, classes, annotations)
Example #12
def _scale_od_ped(path: str) -> Dataset:  # noqa
    images: Dataset.Images = []
    classes: Dataset.Classes = ["Pedestrian"]
    annotations: Dataset.Annotations = {}

    images_path = os.path.join(path, "training", "image_2")
    labels_path = os.path.join(path, "training", "label_pedonly")

    for image_name in os.listdir(images_path):
        image_path = os.path.join(images_path, image_name)
        base_name = image_name.split(".")[0]

        label_path = os.path.join(labels_path, base_name + ".txt")
        if os.path.exists(label_path):
            images.append(image_path)
            image_annotations = []
            with open(label_path, "r") as file:
                for line in file:
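                    # Columns 4-7 of each (presumably KITTI-style) label row
                    # are assumed to map directly onto Bounds2D's arguments.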
                    bounds = Bounds2D(*[float(s) for s in line.split()[4:8]])
                    image_annotations.append(Object2D(bounds, 0))
            annotations[image_path] = image_annotations

    return Dataset(DATASET_NAME, images, classes, annotations)
Example #13
def _nuscenes(path: str) -> Dataset:  # noqa
    images: Dataset.Images = []
    classes: Dataset.Classes = DETECTION_NAMES
    annotations: Dataset.Annotations = {}

    nusc = NuScenes(version="v1.0-trainval", dataroot=path)

    for sample in nusc.sample:
        for cam in CAMERAS:
            cam_token = sample['data'][cam]

            # Returns the data path as well as all annotations related to that sample_data.
            # Note that the boxes are transformed into the current sensor's coordinate frame.
            data_path, boxes, camera_intrinsic = nusc.get_sample_data(
                cam_token, box_vis_level=BoxVisibility.ALL)
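            # BoxVisibility.ALL keeps only boxes whose corners all lie inside
            # the camera image.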
            images.append(data_path)
            annotations[data_path] = []

            for box in boxes:
                img_corners = view_points(box.corners(),
                                          camera_intrinsic,
                                          normalize=True)[:2, :]
                # Take an outer rect of the 3d projection
                xmin = img_corners[0].min()
                xmax = img_corners[0].max()
                ymin = img_corners[1].min()
                ymax = img_corners[1].max()

                bounds = Bounds2D(xmin, ymin, xmax - xmin, ymax - ymin)
                label = category_to_detection_name(box.name)
                if label is not None:
                    class_index = classes.index(label)
                    annotations[data_path].append(
                        Object2D(bounds, class_index))

    return Dataset(DATASET_NAME, images, classes, annotations)
Example #14
def resize_image(img, annots):
    # NOTE: despite its name, this augmentation randomly crops the image.
    if len(annots) == 0:
        # If no annotations, then don't do anything
        return img, annots

    # Find the max and min extents of the annotations so the crop never cuts them out
    annot_max_x = annot_max_y = 0
    annot_min_x = annot_min_y = float('inf')
    for annot in annots:
        x1, y1 = annot.bounds.left, annot.bounds.top
        x2, y2 = annot.bounds.right, annot.bounds.bottom
        annot_max_x = max(annot_max_x, x2)
        annot_max_y = max(annot_max_y, y2)
        annot_min_x = min(annot_min_x, x1)
        annot_min_y = min(annot_min_y, y1)

    # Clamp the annotation extremes to the image bounds
    annot_min_x = max(0, annot_min_x)
    annot_min_y = max(0, annot_min_y)
    annot_max_x = min(annot_max_x, IMG_WIDTH - 1)
    annot_max_y = min(annot_max_y, IMG_HEIGHT - 1)

    # Choose a random cropping window that still contains every annotation
    new_x1 = random.randint(0, annot_min_x)
    new_x2 = random.randint(annot_max_x, IMG_WIDTH)
    new_y1 = random.randint(0, annot_min_y)
    new_y2 = random.randint(annot_max_y, IMG_HEIGHT)

    img = img[new_y1:new_y2, new_x1:new_x2]

    # Shift each annotation into the cropped image's coordinate frame
    for annot in annots:
        annot.bounds = Bounds2D(annot.bounds.left - new_x1,
                                annot.bounds.top - new_y1, annot.bounds.width,
                                annot.bounds.height)

    return img, annots
Example #15
    def getDataset(self, path: str) -> Dataset:

        # The same dataset is used for both splits; a suffix on the given
        # path selects train or test.
        is_train = True  # default is train.
        is_full = False
        if "test" in path:
            path = path[:len(path) - len("_test")]
            is_train = False
        elif "_train" in path:
            if "sam" in path:
                path = path[:len(path) - len("_train_sam")]
            elif "manav" in path:
                path = path[:len(path) - len("_train_manav")]
            elif "helen" in path:
                path = path[:len(path) - len("_train_helen")]
            elif "matthieu" in path:
                path = path[:len(path) - len("_train_matthieu")]
            else:
                path = path[:len(path) - len("_train")]
        else:
            is_full = True

        if is_train:
            print(f"Path: {path} for training")
        else:
            print(f"Path: {path} for testing")

        if not os.path.isdir(path):
            raise FileNotFoundError(
                "Could not find Y4Signs dataset on path: " + path)

        images: Dataset.Images = []
        classes: Dataset.Classes = []
        annotations: Dataset.Annotations = {}

        # list of classes [str] in set01/obj.names
        try:
            classes = open(os.path.join(path, "set01",
                                        "obj.names")).read().splitlines()
        except FileNotFoundError:
            print("Could not find obj.names in set01")

        list_of_sets = os.listdir(path)
        total_annotations = 0

        class_limit = Counter()
        class_stats = Counter()
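        # class_limit counts every annotation seen per class across the whole
        # dataset; class_stats counts only those kept for this split.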

        for set_ in list_of_sets:
            with open(os.path.join(path, set_, "train.txt")) as f:
                train_lines = f.read().split()

            # Each path in train.txt is relative to the set folder but carries
            # an extra leading "data/" component that must be stripped.
            for rel_image_path in train_lines:
                # absolute image path to the image.
                abs_image_path = os.path.join(path, set_, rel_image_path[5:])
                # annotations file for corresponding image
                annotation_file_path = abs_image_path[:-4] + ".txt"

                try:
                    with open(annotation_file_path) as annotation_file:
                        annotation_lines = annotation_file.read().splitlines()
                except FileNotFoundError:
                    print(f"annotation file {annotation_file_path} missing... skipping")
                    continue  # continue inner loop

                temp_annotations = []

                classnums = Counter()
                flag = True
                # loop through each annotation line.
                for bounding_box in annotation_lines:
                    # split each line with spaces to get the [class, x1, y1, x2, y2]
                    annotation_arr = bounding_box.strip().split(" ")
                    # check if legal annotation
                    if not len(annotation_arr) == 5:
                        raise InvalidBoundingBoxError(annotation_file_path)

                    label_class = int(annotation_arr[0])
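                    # Coordinates are normalised YOLO-style
                    # (x_centre, y_centre, width, height); scale to pixels.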
                    xcent = int(float(annotation_arr[1]) * self.img_width)
                    ycent = int(float(annotation_arr[2]) * self.img_height)
                    width = int(float(annotation_arr[3]) * self.img_width)
                    height = int(float(annotation_arr[4]) * self.img_height)

                    xmin = int(xcent - 0.5 * width)
                    ymin = int(ycent - 0.5 * height)
                    # Construct and append an Object2D object
                    temp_annotations.append(
                        Object2D(Bounds2D(
                            xmin,
                            ymin,
                            width,
                            height,
                        ), label_class))

                    class_limit[label_class] += 1
                    classnums[label_class] += 1

                    flag = flag and (class_limit[label_class] >=
                                     self.per_class_limit)

                # flag == False means this image and its annotations should go
                # to the test batch.
                if (flag and is_train) or (not flag and not is_train) or is_full:
                    for key in classnums:
                        class_stats[key] += classnums[key]

                    total_annotations += len(temp_annotations)
                    images.append(abs_image_path)

                    # add(abs_image_path, temp_annotation: list) key value pair.
                    annotations[abs_image_path] = temp_annotations

        print("Annotation stats: ")
        for key in class_stats:
            print(str(classes[key]) + ": " + str(class_stats[key]))

        if is_train:
            print("Dataset type: training")
        else:
            print("Dataset type: test")

        print("Found " + str(len(classes)) + " classes")
        print("Found " + str(len(images)) + " images")

        print("\nTotal annotations: " + str(total_annotations))

        return Dataset(self.dataset_name, images, classes, annotations)
Example #16
def _scale_common(name: str,
                  path: str,
                  project: str,
                  batch: Optional[Union[str, List[str]]] = None) -> Dataset:  # noqa
    images: Dataset.Images = []
    classes: Dataset.Classes = []
    annotations: Dataset.Annotations = {}

    scale_data_path = os.path.join(path, 'images')

    available_batches = requests.get(
        "https://api.scale.com/v1/batches?project={}".format(project),
        headers=HEADERS,
        auth=(SCALE_API_KEY, '')).json()

    batch_names = [
        b['name'] for b in available_batches['docs']
        if b['status'] == 'completed'
    ]
    if isinstance(batch, str) and batch not in batch_names + ['']:
        raise ValueError("Batch name {} does not exist".format(batch))

    if batch and isinstance(batch, list):
        for bat in batch:
            if bat not in batch_names:
                raise ValueError("Batch name {} does not exist".format(bat))

    client = scaleapi.ScaleClient(SCALE_API_KEY)

    if batch is None:
        batches_to_retrieve = batch_names
    elif isinstance(batch, str):
        batches_to_retrieve = [batch]
    else:
        batches_to_retrieve = batch
    print(batches_to_retrieve)
    regex = "([\w-]){33}|([\w-]){19}"  # noqa

    for batch_name in batches_to_retrieve:
        print('On Batch', batch_name)
        proper_batch_name = batch_name if batch_name else 'default'
        batch_path = os.path.join(scale_data_path, proper_batch_name)

        count = 0
        offset = 0
        has_next_page = True
        needs_download = False

        if not os.path.exists(scale_data_path):
            os.makedirs(scale_data_path)

        if not os.path.exists(batch_path):
            os.makedirs(batch_path)
            needs_download = True

        while has_next_page:
            tasklist = client.tasks(status="completed",
                                    project=project,
                                    batch=batch_name,
                                    offset=offset)
            offset += 100
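            # Tasks are fetched in pages; the offset step assumes Scale's page
            # size of 100 (matching the len(tasklist) < 100 check below).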

            for obj in tasklist:
                task_id = obj.param_dict['task_id']
                task = client.fetch_task(task_id)
                bbox_list = task.param_dict['response']['annotations']
                img_url = task.param_dict['params']['attachment']

                try:
                    if 'drive.google.com' in img_url:
                        # Resolve the Drive file ID to its original file name
                        match = re.search(regex, img_url)
                        task_id = match[0]  # noqa
                        api_initialize()
                        file_name = print_file_metadata(SERVICE,
                                                        task_id)  # noqa
                        local_path = os.path.join(batch_path, file_name)
                    else:
                        local_path = os.path.join(batch_path,
                                                  img_url.rsplit('/', 1)[-1])

                    if needs_download or not os.path.isfile(local_path):
                        print('Batch Path', batch_path)
                        print('Local Path', local_path)
                        # Download the image
                        request_ = urllib.request.Request(
                            img_url, None, headers)
                        response = urllib.request.urlopen(request_)
                        with open(local_path, 'wb') as local_file:
                            local_file.write(response.read())

                except HTTPError as error:
                    print(
                        "Image {} failed to download due to HTTPError {}: {}".
                        format(img_url, error.code, error.reason))
                    continue
                except TypeError as error:
                    print(
                        "Image {} failed to download due to improper header: {}"
                        .format(img_url, str(error)))
                    continue

                annotations[local_path] = []
                for bbox in bbox_list:
                    # Get the label of the detected object
                    detection_class = bbox['label']

                    # Calculate the position and dimensions of the bounding box
                    x_min = int(bbox['left'])  # x-coordinate of top left corner
                    y_min = int(bbox['top'])  # y-coordinate of top left corner
                    width = int(bbox['width'])  # width of the bounding box
                    height = int(bbox['height'])  # height of the bounding box

                    # Get the class index if it has already been registered
                    # otherwise register it and select the index
                    try:
                        class_index = classes.index(detection_class)
                    except ValueError:
                        class_index = len(classes)
                        classes.append(detection_class)

                    # Package the detection
                    annotations[local_path].append(
                        Object2D(Bounds2D(x_min, y_min, width, height),
                                 class_index))

                images.append(local_path)
                print("Processed {}\r".format(local_path), end="")
                count += 1

                if len(tasklist) < 100 or count > MAX_TO_PROCESS:
                    has_next_page = False

    return Dataset(name, images, classes, annotations)