Example #1
def plot_img_with_bbox3(img, bbox1: bbox.BBox, bbox2: bbox.BBox,
                        bbox3: bbox.BBox) -> None:
    # First bounding box: red outline
    x, y, w, h = bbox1.as_xywh()
    fig, ax = plt.subplots(1)
    img = mpimg.imread(img)  # `img` is a file path; load it as an image array
    ax.imshow(img)
    rect = patches.Rectangle((x, y),
                             w,
                             h,
                             linewidth=1,
                             edgecolor='r',
                             facecolor='none')
    ax.add_patch(rect)
    # Second bounding box: green outline
    x, y, w, h = bbox2.as_xywh()
    rect = patches.Rectangle((x, y),
                             w,
                             h,
                             linewidth=1,
                             edgecolor='g',
                             facecolor='none')
    ax.add_patch(rect)
    # Intersection of the two boxes: filled blue rectangle
    x, y, w, h = bbox3.as_xywh()
    rect = patches.Rectangle((x, y),
                             w,
                             h,
                             linewidth=1,
                             edgecolor='b',
                             facecolor='blue')
    ax.add_patch(rect)
    plt.show()
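
A minimal usage sketch for the helper above. It assumes the project's bbox.BBox is constructed as BBox(x, y, width, height) and exposes the BBox.intersection helper seen in the later examples; the image file name is made up:

box_a = bbox.BBox(50, 40, 200, 150)            # drawn with a red outline
box_b = bbox.BBox(120, 90, 220, 160)           # drawn with a green outline
box_i = bbox.BBox.intersection(box_a, box_b)   # overlap, drawn filled in blue

plot_img_with_bbox3('sample.jpg', box_a, box_b, box_i)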
Example #2
    def get_annotation_from_ufpr_txt_files(path: str) -> 'Annotations':
        with open(path, 'r') as f:
            txt = [line.strip() for line in f.readlines()]
            vehicle_fields = txt[1].split(' ')  # vehicle bounding-box line
            plate_fields = txt[7].split(' ')    # license-plate bounding-box line
            picture_bboxes = [
                od.Annotation(
                    BBox(int(vehicle_fields[1]), int(vehicle_fields[2]),
                         int(vehicle_fields[3]), int(vehicle_fields[4])),
                    1.0, txt[2].split(' ')[1]),
                od.Annotation(
                    BBox(int(plate_fields[1]), int(plate_fields[2]),
                         int(plate_fields[3]), int(plate_fields[4])),
                    1.0, 'license_plate')
            ]

        return picture_bboxes
Example #3
    def get_labels(self) -> 'Annotations':
        df = pandas.read_csv(self.path.joinpath('dataset.csv'), delimiter=',')
        annotations = []
        for index_annotations in range(len(df)):
            # Image size is needed to convert relative bbox values to pixels
            with Image.open(str(self.imgs_path[index_annotations]),
                            'r') as img:
                w, h = img.size
            annotations.append([
                od.Annotation(
                    BBox.build_from_center_and_size(
                        np.array([
                            int(df['x_center'][index_annotations] * w),
                            int(df['y_center'][index_annotations] * h)
                        ]),
                        np.array([
                            int(df['width'][index_annotations] * w),
                            int(df['height'][index_annotations] * h)
                        ])), 1.0, 'license_plate')
            ])
        del df

        annos = dataset_merger.merging.DatasetMerger.create_dict_from_annotations_detected(
            self, annotations)
        with open(self.path.joinpath('detections.json')) as det:
            detections_vehicles = json.load(det)

        # Append the vehicle detections from detections.json to each image's annotations
        for img_ann in annos['content']:
            ann = self.find_anos(detections_vehicles, img_ann['file_name'])
            for a in ann:
                img_ann['annotations'].append(a)

        return annos
Example #4
    def start(self, uris: Iterable, const_confidence: float) -> Annotations:
        """
        return type class Detection[] -> bbboxes + confidences + labels
        """
        annotations = []
        inputs = [self.utils.prepare_input(uri) for uri in uris]
        tensor = self.utils.prepare_tensor(inputs, self.precision == 'fp16')
        with torch.no_grad():
            detections_batch = self.ssd_model(tensor)

        results_per_input = self.utils.decode_results(detections_batch)
        best_results_per_input = [
            self.utils.pick_best(results, const_confidence)
            for results in results_per_input
        ]
        for image_idx in range(len(best_results_per_input)):
            bboxes, classes, confidences = best_results_per_input[image_idx]
            bboxes_on_picture = []
            for idx in range(len(bboxes)):
                left, bot, right, top = bboxes[idx]
                # Boxes are normalized to [0, 1]; scale to the 300x300 SSD input
                x, y, w, h = [
                    val * 300 for val in [left, bot, right - left, top - bot]
                ]
                bboxes_on_picture.append(
                    Annotation(BBox(int(x), int(y), int(w), int(h)),
                               confidences[idx] * 100,
                               self.classes_to_labels[classes[idx] - 1]))
            annotations.append(bboxes_on_picture)
        return annotations
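
The loop above maps the SSD decoder's normalized corner coordinates onto the 300x300 network input and converts them to x, y, width, height. A standalone sketch of the same arithmetic, with made-up box values:

# Hypothetical normalized box (left, bottom, right, top) from the decoder
left, bot, right, top = 0.25, 0.25, 0.75, 0.50

# Scale to the 300x300 SSD input and convert corners to x, y, width, height
x, y, w, h = [val * 300 for val in [left, bot, right - left, top - bot]]
print(x, y, w, h)  # 75.0 75.0 150.0 75.0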
Example #5
    def build_dict(self) -> dict:
        annotations_path = self.path.joinpath('annotations')
        data = {'content': []}

        for img in self.imgs_path:
            data['content'].append({
                'file_name': f'{img.name}',
                'annotations': []
            })
            img_name = img.name.removesuffix('.jpg')
            txt_path = annotations_path.joinpath(f"{img_name}.txt")

            # Image size is needed to turn relative coordinates into pixels
            with Image.open(str(img), 'r') as im:
                w, h = im.size

            with open(txt_path, "r") as file:
                lines = file.readlines()
                for line in lines:
                    atrs = line.split()
                    # Relative (center, size) values -> absolute pixel values
                    width = int(float(atrs[3]) * w)
                    height = int(float(atrs[4]) * h)
                    t_x = int((float(atrs[0]) * w) - (width / 2))
                    t_y = int((float(atrs[1]) * h) - (height / 2))
                    # Clamp negative top-left coordinates to the image border
                    if t_x < 0:
                        t_x = 0
                    if t_y < 0:
                        t_y = 0

                    data['content'][-1]['annotations'].append(
                        od.Annotation(BBox(t_x, t_y, width, height), 1.0,
                                      "license_plate").build_dictionary())
        return data
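
The inner loop turns relative center/size values from the annotation files into absolute top-left pixel boxes. A standalone sketch of that conversion, with a made-up image size and line values:

w, h = 1920, 1080                              # hypothetical image size
x_c, y_c, rel_w, rel_h = 0.5, 0.5, 0.1, 0.05   # hypothetical relative values

width = int(rel_w * w)                   # 192
height = int(rel_h * h)                  # 54
t_x = max(int(x_c * w - width / 2), 0)   # 864, clamped at the image border
t_y = max(int(y_c * h - height / 2), 0)  # 513, clamped at the image border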
Example #6
    def get_labels(self) -> 'Annotations':
        """
        Reading .csv file
        delimiter = ';'
        example:
        relative_img_path;bbox_x1;bbox_y1;bbox_x2;bbox_y2;class;test
        car_ims/000001.jpg;112;7;853;717;1;0
        """
        df = pandas.read_csv(self.path.joinpath('cars_annos.txt'),
                             delimiter=';')

        annotations = []
        for img in range(len(df)):
            # The CSV stores corner coordinates; BBox expects x, y, width, height
            bboxes_on_picture = [
                od.Annotation(
                    BBox(int(df['bbox_x1'][img]), int(df['bbox_y1'][img]),
                         int(df['bbox_x2'][img]) - int(df['bbox_x1'][img]),
                         int(df['bbox_y2'][img]) - int(df['bbox_y1'][img])),
                    1.0, 'car')
            ]
            annotations.append(bboxes_on_picture)
        del df


        return annotations
Example #7
    def detect_vehicles_in_dataset(path,
                                   img_size=416,
                                   conf_thres=0.5,
                                   nms_thres=0.4) -> 'Annotations':
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        act_dir = os.path.dirname(__file__)

        model = Darknet(os.path.join(act_dir, r"config\yolov3.cfg"),
                        img_size=img_size).to(device)
        model.load_darknet_weights(
            os.path.join(act_dir, r"weights\yolov3.weights"))

        model.eval()  # Set in evaluation mode

        dataloader = DataLoader(
            ImageFolder(path,
                        transform=transforms.Compose(
                            [DEFAULT_TRANSFORMS, Resize(img_size)])),
            batch_size=1,
            shuffle=False,
            num_workers=0,  # possibly worth changing
        )

        classes = load_classes(os.path.join(
            act_dir, r"data\coco.names"))  # Extracts class labels from file

        Tensor = (torch.cuda.FloatTensor
                  if torch.cuda.is_available() else torch.FloatTensor)

        imgs = []  # Stores image paths
        img_detections = []  # Stores detections for each image index

        print("\nPerforming object detection:")
        prev_time = time.time()
        for batch_i, (img_paths, input_imgs) in enumerate(dataloader):
            # Configure input
            input_imgs = Variable(input_imgs.type(Tensor))

            # Get detections
            with torch.no_grad():
                detections = model(input_imgs)
                detections = non_max_suppression(detections, conf_thres,
                                                 nms_thres)

            # Log progress
            current_time = time.time()
            inference_time = datetime.timedelta(seconds=current_time -
                                                prev_time)
            prev_time = current_time
            print("\t+ Batch %d, Inference Time: %s" %
                  (batch_i, inference_time))

            # Save image and detections
            imgs.extend(img_paths)
            img_detections.extend(detections)

        confirm_labels = ['car', 'truck', 'motorcycle', 'bus']
        annotations = []

        for img_i, (path, detections) in enumerate(zip(imgs, img_detections)):
            print("(%d) Image: '%s'" % (img_i, path))

            # Load the original image; its size is needed to rescale the boxes
            img = np.array(Image.open(path))

            if detections is not None:
                # Rescale boxes to the original image size
                detections = rescale_boxes(detections, img_size, img.shape[:2])
                bboxes_on_picture = []
                for x1, y1, x2, y2, conf, cls_pred in detections:

                    box_w = x2 - x1
                    box_h = y2 - y1

                    if classes[int(cls_pred)] in confirm_labels:
                        bboxes_on_picture.append(
                            object_detector.Annotation(
                                BBox(int(x1), int(y1), int(box_w), int(box_h)),
                                conf.item(), classes[int(cls_pred)]))

                annotations.append(bboxes_on_picture)

        return annotations

Example #8
    def compare_two_annotations(annotation1: dict,
                                annotation2: dict,
                                confidence_treshold=0.8,
                                difference_in_confidence=0.05,
                                bbox_perc_intersection=0.6):
        bbox_1 = BBox(annotation1['bbox']['x'], annotation1['bbox']['y'],
                      annotation1['bbox']['width'],
                      annotation1['bbox']['height'])
        bbox_2 = BBox(annotation2['bbox']['x'], annotation2['bbox']['y'],
                      annotation2['bbox']['width'],
                      annotation2['bbox']['height'])
        bbox_i = BBox.intersection(bbox_1, bbox_2)

        if bbox_i is not None:
            bbox_1_c = bbox_1.capacity()
            bbox_2_c = bbox_2.capacity()
            bbox_i_c = bbox_i.capacity()

            # Maybe I will change this one
            if (bbox_i_c / bbox_1_c > bbox_perc_intersection
                    or bbox_i_c / bbox_2_c > bbox_perc_intersection):
                del bbox_1_c, bbox_2_c, bbox_i_c

                if (annotation1['confidence'] > confidence_treshold
                        and annotation2['confidence'] > confidence_treshold):
                    if (abs(annotation1['confidence'] -
                            annotation2['confidence'])
                            <= difference_in_confidence):
                        bbox_i.rescale(1.1, 1.1)
                        return 3, object_detector.Annotation(
                            bbox_i,
                            min(annotation1['confidence'],
                                annotation2['confidence']),
                            annotation1['label'])
                else:
                    if annotation1['confidence'] > annotation2['confidence']:
                        bbox_1.rescale(1.1, 1.1)
                        return 1, object_detector.Annotation(
                            bbox_1, annotation1['confidence'],
                            annotation1['label'])
                    else:
                        bbox_2.rescale(1.1, 1.1)
                        return 2, object_detector.Annotation(
                            bbox_2, annotation2['confidence'],
                            annotation2['label'])

        return -1, None
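
A minimal usage sketch for compare_two_annotations, assuming the annotation dicts follow the structure used throughout these examples ('bbox' with x/y/width/height plus 'confidence' and 'label'); the values are made up:

ann_a = {'bbox': {'x': 100, 'y': 100, 'width': 200, 'height': 150},
         'confidence': 0.92, 'label': 'car'}
ann_b = {'bbox': {'x': 110, 'y': 105, 'width': 210, 'height': 160},
         'confidence': 0.90, 'label': 'car'}

# Both boxes overlap heavily and both confidences clear the default 0.8
# threshold, so the call should return code 3 with the merged annotation.
code, merged = DatasetMerger.compare_two_annotations(ann_a, ann_b)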
Example #9
    def compare_annotations_in_img(
            img_detection: dict,
            confidence_treshold=0.8,
            difference_in_confidence=0.05,
            bbox_perc_intersection=0.6) -> 'Annotations':
        """
        Work: Separate vehicles and lpns
        Note:
        vehicles
        lpns
        """
        vehicles = []
        lpns = []

        for annotation in img_detection['annotations']:
            # License plates go to lpns; everything else counts as a vehicle
            if annotation['label'] == 'license_plate':
                lpns.append(annotation)
            else:
                vehicles.append(annotation)

        # If the image contains no vehicles, it is automatically added to the manual-annotation list
        #if len(vehicles) == 0:
        #    return None # None for manual annotations or I return new annotations
        """
        Work: Compare vehicles
        Note:
        result successful Annotations
        """
        # index, which were added
        added = []
        successful_annotations_vehicles = []

        if len(vehicles) == 1:
            if vehicles[0]['confidence'] > confidence_treshold:
                bbox_ = BBox(vehicles[0]['bbox']['x'],
                             vehicles[0]['bbox']['y'],
                             vehicles[0]['bbox']['width'],
                             vehicles[0]['bbox']['height'])
                successful_annotations_vehicles.append(
                    object_detector.Annotation(bbox_,
                                               vehicles[0]['confidence'],
                                               vehicles[0]['label']))
            else:
                return None
        else:
            for x, y in itertools.combinations(range(len(vehicles)), 2):
                if x not in added:
                    if y not in added:
                        index, result = DatasetMerger.compare_two_annotations(
                            vehicles[x], vehicles[y], confidence_treshold,
                            difference_in_confidence, bbox_perc_intersection
                        )  # A None result sends the image to manual annotation

                        if index == -1:
                            continue
                        if result is None:
                            return None

                        successful_annotations_vehicles.append(result)
                        if index == 3:
                            added.append(x)
                            added.append(y)
                        elif index == 1:
                            added.append(x)
                        else:
                            added.append(y)

            del added

        if len(successful_annotations_vehicles) == 0:
            return None
        """
        Work: Compare LPNs
        Note: 
        """
        added = []
        successful_annotations_lpns = []

        if len(lpns) == 1:
            if lpns[0]['confidence'] > confidence_treshold:
                bbox_ = BBox(lpns[0]['bbox']['x'], lpns[0]['bbox']['y'],
                             lpns[0]['bbox']['width'],
                             lpns[0]['bbox']['height'])
                successful_annotations_lpns.append(
                    object_detector.Annotation(bbox_, lpns[0]['confidence'],
                                               lpns[0]['label']))
        else:
            for x, y in itertools.combinations(range(len(lpns)), 2):
                if x not in added:
                    if y not in added:
                        index, result = DatasetMerger.compare_two_annotations(
                            lpns[x], lpns[y], confidence_treshold,
                            difference_in_confidence, bbox_perc_intersection
                        )  # A None result sends the image to manual annotation

                        if index == -1:
                            continue
                        if result is None:
                            continue

                        successful_annotations_lpns.append(result)
                        if index == 3:
                            added.append(x)
                            added.append(y)
                        elif index == 1:
                            added.append(x)
                        else:
                            added.append(y)

            del added
        """
        Work: Detect LPNs on vehicles.
        Note: LPN should be in the detected vehicle.
              LPN can't be without vehicle 
        append to successful_annotations_vehicles for return
        """
        #

        for lpn_detection in successful_annotations_lpns:
            bbox_lpn = lpn_detection.bbox

            for vehicle_detection in successful_annotations_vehicles:
                bbox_vehicle = vehicle_detection.bbox
                bbox_i = BBox.intersection(bbox_lpn, bbox_vehicle)
                if bbox_i is not None:
                    # 0.9 decides when one bounding box counts as lying inside another
                    if bbox_i.capacity() / bbox_lpn.capacity() > 0.9:
                        successful_annotations_vehicles.append(lpn_detection)
                        break

        return successful_annotations_vehicles
Example #10
    def get_labels(self) -> 'Annotations':
        annotations_path = self.path.joinpath('annotations')
        annotations = []
        for img_xml in annotations_path.iterdir():
            picture_bboxes = []
            data = bdict.from_xml(str(img_xml))
            # Multiple <object> entries are parsed as a list; a single <object>
            # comes back as a dict (whose key count happens to be 6)
            if len(data['annotation']['object']) != 6:
                for obj in data['annotation']['object']:
                    picture_bboxes.append(
                        od.Annotation(
                            BBox(
                                int(obj['bndbox']['xmin']),
                                int(obj['bndbox']['ymin']),
                                int(obj['bndbox']['xmax']) -
                                int(obj['bndbox']['xmin']),
                                int(obj['bndbox']['ymax']) -
                                int(obj['bndbox']['ymin'])), 1,
                            'license_plate'))
            else:
                try:
                    picture_bboxes.append(
                        od.Annotation(
                            BBox(
                                int(data['annotation']['object']['bndbox']
                                    ['xmin']),
                                int(data['annotation']['object']['bndbox']
                                    ['ymin']),
                                int(data['annotation']['object']['bndbox']
                                    ['xmax']) -
                                int(data['annotation']['object']['bndbox']
                                    ['xmin']),
                                int(data['annotation']['object']['bndbox']
                                    ['ymax']) -
                                int(data['annotation']['object']['bndbox']
                                    ['ymin'])), 1, 'license_plate'))
                except (TypeError, KeyError):
                    # Fallback: the entry was actually a list of objects
                    for obj in data['annotation']['object']:
                        picture_bboxes.append(
                            od.Annotation(
                                BBox(
                                    int(obj['bndbox']['xmin']),
                                    int(obj['bndbox']['ymin']),
                                    int(obj['bndbox']['xmax']) -
                                    int(obj['bndbox']['xmin']),
                                    int(obj['bndbox']['ymax']) -
                                    int(obj['bndbox']['ymin'])), 1,
                                'license_plate'))

            annotations.append(picture_bboxes)

        annos = dataset_merger.merging.DatasetMerger.create_dict_from_annotations_detected(
            self, annotations)
        with open(
                self.path.joinpath('Our_detections').joinpath(
                    'detections.json')) as file:
            detections = json.load(file)
        for index in range(len(detections['content'])):
            # Add the annotations found for this file to its detections

            for x in CarLicensePlates.find_anos(
                    annos, detections['content'][index]['file_name']):
                detections['content'][index]['annotations'].append(x)

        dataset_merger.merging.DatasetMerger.file_write('detections.json',
                                                        detections,
                                                        indent=4)

        # Our annotations have been merged into the detections above

        return detections