Example #1
import uuid

import numpy as np
from labelme import utils  # provides shape_to_mask


def shapes_to_label(img_shape, shape, label_name_to_value):
    # Note: unlike labelme's stock helper, this variant takes a single shape dict
    # rather than a list of shapes.
    cls = np.zeros(img_shape[:2], dtype=np.int32)
    ins = np.zeros_like(cls)
    instances = []

    points = shape['points']
    label = shape['label']
    group_id = shape.get('group_id')
    if group_id is None:
        group_id = uuid.uuid1()
    shape_type = shape.get('shape_type', None)

    cls_name = label
    instance = (cls_name, group_id)

    if instance not in instances:
        instances.append(instance)
    ins_id = instances.index(instance) + 1
    cls_id = label_name_to_value[cls_name]

    mask = utils.shape_to_mask(img_shape[:2], points, shape_type)
    cls[mask] = cls_id
    ins[mask] = ins_id

    return cls, ins
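A minimal usage sketch for this single-shape variant; the shape dict and label map below are illustrative only and simply follow the labelme JSON layout ('points', 'label', 'group_id', 'shape_type'):

# Illustrative inputs; a real shape dict would come from a labelme JSON file.
shape = {
    'points': [[10, 10], [100, 10], [100, 80], [10, 80]],
    'label': 'car',
    'group_id': None,
    'shape_type': 'polygon',
}
label_name_to_value = {'_background_': 0, 'car': 1}
cls_mask, ins_mask = shapes_to_label((120, 160), shape, label_name_to_value)
# cls_mask holds the class id (1) inside the polygon, ins_mask the instance id (1).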
Example #2
File: postpro.py  Project: abrosua/piv-viz
    def get_flo(
        self,
        key,
        fill_with: Optional[float] = None,
        use_stereo: bool = False,
    ) -> Tuple[Optional[np.array], Optional[np.array]]:
        """
        Acquiring the masked flow vector and its respective mask array.
        params:
            key: The label key of the flow to obtain (e.g., 'flow', 'v1', 'v2')
            fill_with: Filling value to the masked vector.
        """
        if key in self.label.keys():
            flow_label = self.label[key]

            # Flow init.
            out_flow = utils.read_flow(self.flopath, use_stereo=use_stereo)
            mask = np.full(out_flow.shape[:2], False)
            mask_flow = np.full(
                out_flow.shape,
                fill_with) if fill_with is not None else out_flow

            # Filling the masked flow array
            for flow_point in flow_label['points']:
                mask += shape_to_mask(self.img_shape,
                                      flow_point,
                                      shape_type=flow_label['shape_type'])

            mask_flow[mask] = out_flow[mask]
            return mask_flow, mask
        else:
            if self.verbose:
                print(f"The '{key}' label was not found in '{self.label_path}'")
            return None, None
Example #3
    def __getitem__(self, idx):
        coco = self.coco
        img_id = self.ids[idx]
        ann_ids = coco.getAnnIds(imgIds=img_id)
        ann_target = coco.loadAnns(ann_ids)

        path = coco.loadImgs(img_id)[0]['file_name']

        # Image
        im = F.to_tensor(Image.open(os.path.join(self.root, path)).convert('RGB'))
        _, height, width = im.shape

        # Generate mask and bbox from segmentation of coco
        bboxes = []
        masks = []
        labels = []
        for cat in ann_target:
            # convert polygon into mask and then into bbox
            for p in cat['segmentation']:
                points = [(p[i], p[i+1]) for i in range(0, len(p), 2)]
                # Smooth the mask using PIL.ImageFilter.SMOOTH
                smooth_mask = Image.fromarray(shape_to_mask(im.shape[1:], points, 'polygon')).filter(ImageFilter.SMOOTH)
                masks.append(np.asarray(smooth_mask))

                # left top (x, y), width and height
                x, y, w, h = toBbox(encode(np.asfortranarray(smooth_mask).astype(np.uint8)))
                # convert (x, y, w, h) to corner format (x1, y1, x2, y2)
                x1 = x
                y1 = y
                x2 = x + w
                y2 = y + h
                bboxes.append([x1, y1, x2, y2])

                labels.append(cat['category_id'])

        # convert everything into a torch.Tensor
        bboxes = torch.as_tensor(bboxes, dtype=torch.float32)
        masks = torch.as_tensor(masks, dtype=torch.uint8)
        labels = torch.as_tensor(labels, dtype=torch.int64)

        image_id = torch.tensor([idx])
        area = (bboxes[:, 3] - bboxes[:, 1]) * (bboxes[:, 2] - bboxes[:, 0])
        # suppose all instances are not crowd
        iscrowd = torch.zeros((len(bboxes), ), dtype=torch.int64)

        # no batch idx
        target = {}
        target["boxes"] = bboxes
        target["labels"] = labels + 1  # add background class
        target["masks"] = masks
        target["image_id"] = image_id
        target["area"] = area
        target["iscrowd"] = iscrowd

        return im, target
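A possible way to consume this __getitem__ in a training loop. The class name CocoSegDataset, its constructor arguments, and the paths are assumptions (only __getitem__ is shown above); the collate_fn is the usual detection-style one that keeps per-image targets unstacked:

from torch.utils.data import DataLoader

def collate_fn(batch):
    # targets vary in size per image, so keep each sample as-is instead of stacking
    return tuple(zip(*batch))

dataset = CocoSegDataset(root='data/train2017',
                         ann_file='data/annotations/instances_train2017.json')
loader = DataLoader(dataset, batch_size=2, shuffle=True, collate_fn=collate_fn)
images, targets = next(iter(loader))  # tuple of image tensors, tuple of target dicts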
Example #4
def shapes_to_label(img_shape, shapes, label_name_to_value, type='class'):
    assert type in ['class', 'instance']

    cls = np.zeros(img_shape[:2], dtype=np.int32)
    if type == 'instance':
        ins = np.zeros(img_shape[:2], dtype=np.int32)
        instance_names = ['_background_']
    for shape in shapes:
        points = shape['points']
        label = shape['label']
        if label not in label_name_to_value:
            continue
        shape_type = shape.get('shape_type', None)
        if type == 'class':
            cls_name = label
        else:
            # instance labels look like 'car-1'; the prefix is the class name
            cls_name = label.split('-')[0]
            if label not in instance_names:
                instance_names.append(label)
            ins_id = instance_names.index(label)
        cls_id = label_name_to_value[cls_name]
        mask = utils.shape_to_mask(img_shape[:2], points, shape_type)
        cls[mask] = cls_id
        if type == 'instance':
            ins[mask] = ins_id
    if type == 'instance':
        return cls, ins
    return cls
Example #5
def shapes_to_label(img_shape, shapes, label_name_to_value):
    """
    Override of shapes_to_label for the region-split case: each shape is a
    dict with a 'name' and a 'bndbox' of [xmin, ymin, xmax, ymax].
    :param img_shape: (height, width, ...) of the target image
    :param shapes: list of shape dicts
    :param label_name_to_value: mapping from label name to integer class id
    :return: int32 class mask of shape img_shape[:2]
    """
    cls = np.zeros(img_shape[:2], dtype=np.int32)
    for shape in shapes:
        xmin = shape['bndbox'][0]
        ymin = shape['bndbox'][1]
        xmax = shape['bndbox'][2]
        ymax = shape['bndbox'][3]
        points = [[xmin, ymin], [xmax, ymin], [xmax, ymax], [xmin, ymax]]
        label = shape['name']
        shape_type = shape.get('shape_type', None)
        cls_name = label
        cls_id = label_name_to_value[cls_name]
        mask = shape_to_mask(img_shape[:2], points, shape_type)
        cls[mask] = cls_id
    return cls
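A short sketch of how the bounding-box override above might be called; the shape dicts and label map are illustrative only:

shapes = [
    {'name': 'person', 'bndbox': [20, 30, 80, 120]},
    {'name': 'dog', 'bndbox': [90, 40, 150, 110]},
]
label_name_to_value = {'_background_': 0, 'person': 1, 'dog': 2}
# shape_type defaults to None, so each box is rasterized as a 4-point polygon
cls_mask = shapes_to_label((200, 200), shapes, label_name_to_value)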
Example #6
def annotation2np(annotation):
    """
        Args:
            annotation: the parsed JSON created with labelme
        Return:
            output_mask: a numpy mask whose values run from 0 to the number of classes
    """
    height = annotation['imageHeight']
    width = annotation['imageWidth']
    output_mask = np.zeros((height, width), dtype=int)
    bool_masks = dict()
    for label in CLASSES.keys():
        bool_masks[label] = np.zeros((height, width), dtype=bool)
    for shape in annotation['shapes']:
        mask = utils.shape_to_mask((height, width), shape['points'], shape_type=None, line_width=1, point_size=1)
        if shape['label'] in CLASSES:
            bool_masks[shape['label']] += mask
    for label in CLASSES.keys():
        # later classes overwrite earlier ones wherever their masks overlap
        output_mask[bool_masks[label]] = 0
        output_mask += np.where(bool_masks[label], CLASSES[label], 0)
    plt.imshow(output_mask)
    plt.show()
    
    return output_mask
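A minimal usage sketch; CLASSES is assumed to be a module-level dict mapping label names to positive class ids (e.g. {'cat': 1, 'dog': 2}), and the JSON path is a placeholder:

import json

with open('sample.json') as f:  # a labelme annotation file
    annotation = json.load(f)
mask = annotation2np(annotation)  # integer mask, 0 for background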
Example #7
    def saveDataset(self, samples_per_image, split):
        image_id = 0
        num_samples = 0

        self.checkAborted()

        data = self.getEmptyData()

        class_name_to_id = {}
        labels = self.intermediate.getLabels()
        for i, label in enumerate(labels):
            class_id = i
            class_name = label.strip()
            class_name_to_id[class_name] = class_id
            data['categories'].append(dict(
                supercategory=None,
                id=class_id,
                name=class_name,
            ))

        output_folder = self.output_folder
        output_file = self.getOutputFileName(split)
        out_ann_file = os.path.join(output_folder, output_file)
        out_ann_dir = os.path.dirname(out_ann_file)
        if not os.path.exists(out_ann_dir):
            os.makedirs(out_ann_dir)
        image_folder = os.path.join(output_folder, split)
        if not os.path.exists(image_folder):
            os.makedirs(image_folder)

        input_folder = self.input_folder_or_file

        self.checkAborted()

        failed_images = []
        for image in samples_per_image:
            try:
                samples = samples_per_image[image]
                num_samples = num_samples + len(samples)
                base = replace_special_chars(os.path.splitext(image)[0])
                out_img_file = os.path.join(image_folder, base + '.jpg')
                img_file = os.path.normpath(os.path.join(input_folder, image))
                pil_image = PIL.Image.open(img_file)  # keep `image` as the file name for error reporting
                img = np.asarray(pil_image)
                if not os.path.exists(out_img_file):
                    save_image_as_jpeg(pil_image, out_img_file)

                self.checkAborted()

                data['images'].append(dict(
                    license=0,
                    url=None,
                    file_name=os.path.basename(out_img_file),
                    height=img.shape[0],
                    width=img.shape[1],
                    date_captured=None,
                    id=image_id,
                ))

                masks = {}                                     # for area
                segmentations = collections.defaultdict(list)  # for segmentation
                for sample in samples:
                    label = sample.label
                    shape_type = sample.shape_type
                    points = sample.points
                    
                    mask = shape_to_mask(
                        img.shape[:2], points, shape_type
                    )

                    points = np.asarray(points).flatten().tolist()

                    self.checkAborted()

                    if label in masks:
                        masks[label].append(mask)
                        segmentations[label].append(points)
                    else:
                        masks[label] = [mask]
                        segmentations[label] = [points]

                for label, mask in masks.items():
                    for i in range(len(mask)):
                        m = mask[i]
                        cls_name = label.split('-')[0]
                        if cls_name not in class_name_to_id:
                            continue
                        cls_id = class_name_to_id[cls_name]

                        m = np.asfortranarray(m.astype(np.uint8))
                        m = pycocotools.mask.encode(m)
                        area = float(pycocotools.mask.area(m))
                        bbox = pycocotools.mask.toBbox(m).flatten().tolist()

                        data['annotations'].append(dict(
                            id=len(data['annotations']),
                            image_id=image_id,
                            category_id=cls_id,
                            segmentation=segmentations[label][i],
                            area=area,
                            bbox=bbox,
                            iscrowd=0,
                        ))

                        self.thread.update.emit(_('Writing samples ...'), -1, -1)
                        self.checkAborted()

                image_id = image_id + 1

            except Exception as e:
                failed_images.append(image)
                logger.error(traceback.format_exc())

        if len(failed_images) > 0:
            msg = _('The following images could not be exported:') + '\n' + ', '.join(failed_images)
            self.thread.message.emit(_('Warning'), msg, MessageType.Warning)
            if len(data['annotations']) == 0:
                self.throwUserException(_('Dataset contains no images for export'))

        self.checkAborted()

        with open(out_ann_file, 'w') as f:
            json.dump(data, f)

        return num_samples
Example #8
File: postpro.py  Project: abrosua/piv-viz
def get_max_flow(flodir: str,
                 labelpath: Optional[str] = None,
                 start_at: int = 0,
                 end_at: int = -1,
                 filename: Optional[str] = None,
                 aggregate: Tuple[str, ...] = ('max',),
                 calib: float = 1.0,
                 fps: int = 1,
                 post_factor: Optional[float] = None,
                 verbose: int = 0) -> Tuple[float, np.array]:
    """
    Get maximum flow magnitude within the flow direction.
    params:
        flodir: Flow directory.
        labelpath: Label file input to mask the flow, optional.
        start_at: Starting index.
        end_at: Ending index.
    Returns the maximum flow magnitude.
    """
    # Init.
    assert os.path.isdir(flodir)
    name_list = utils.split_abspath(flodir)[0].split(os.sep)
    floname, netname = str(name_list[-2]), str(name_list[-3])
    velo_factor = fps * calib
    if post_factor is None:
        post_factor = calib

    if filename:
        savedir = os.path.dirname(filename)
        if not os.path.isdir(savedir):
            os.makedirs(savedir)

    if labelpath is not None:
        assert os.path.isfile(labelpath)
        mask_label = Label(labelpath, flodir, verbose=verbose).label["video"]
    else:
        mask_label = None

    flopaths_raw = sorted(glob(os.path.join(flodir, "*.flo")))
    end_at = len(flopaths_raw) if end_at < 0 else end_at
    flopaths = flopaths_raw[start_at:end_at]

    # Iterate over the flopaths
    max_flo, vort_flo, shear_flo, normal_flo = 0.0, 0.0, 0.0, 0.0
    data_flos = {
        "flow": [[0.0] * (len(aggregate) + 1)],
        "vort": [[0.0] * (len(aggregate) * 2 + 1)],
        "shear": [[0.0] * (len(aggregate) * 2 + 1)],
        "normal": [[0.0] * (len(aggregate) * 2 + 1)],
    }

    for flopath in tqdm(flopaths, desc=f"Max flow at {floname}", unit="frame"):
        flow = utils.read_flow(flopath)
        flow = flow * velo_factor  # Calibrate flow into real velocity
        vort, shear, normal = utils.calc_vorticity(flow, calib=post_factor)

        # Time frame indexing
        idx = int(os.path.splitext(flopath)[0].split("_")[-2])
        time_id = (idx + 1) / fps

        if mask_label is not None:
            mask = shape_to_mask(flow.shape[:2],
                                 mask_label["points"][0],
                                 shape_type=mask_label['shape_type'])
            flow, vort, shear, normal = flow[mask], vort[mask], shear[
                mask], normal[mask]

        mag_flo = np.linalg.norm(flow, axis=-1)
        max_flo = np.max(mag_flo) if np.max(mag_flo) > max_flo else max_flo
        vort_flo = np.max(
            np.abs(vort)) if np.max(np.abs(vort)) > vort_flo else vort_flo
        shear_flo = np.max(
            np.abs(shear)) if np.max(np.abs(shear)) > shear_flo else shear_flo
        normal_flo = np.max(np.abs(normal)) if np.max(
            np.abs(normal)) > normal_flo else normal_flo

        agg_flo, agg_vor, agg_shear, agg_normal = [time_id], [time_id], [time_id], [time_id]
        for agg in aggregate:
            agg_module = getattr(np, agg)
            agg_flo.append(agg_module(mag_flo))
            agg_vor.extend([
                agg_module(vort[vort > 0]),
                agg_module(np.abs(vort[vort < 0]))
            ])
            agg_shear.extend([
                agg_module(shear[shear > 0]),
                agg_module(np.abs(shear[shear < 0]))
            ])
            agg_normal.extend([
                agg_module(normal[normal > 0]),
                agg_module(np.abs(normal[normal < 0]))
            ])

        data_flos["flow"].append(agg_flo)
        data_flos["vort"].append(agg_vor)
        data_flos["shear"].append(agg_shear)
        data_flos["normal"].append(agg_normal)

    col_name, col_name_de = ['time'], ['time']
    for k in aggregate:
        col_name.append(k)
        col_name_de.extend([f"{k}_p", f"{k}_n"])

    data_flos_df = {}
    for k, v in data_flos.items():
        data_flos_tmp = pd.DataFrame(np.array(v), columns=col_name) if k == "flow" else \
            pd.DataFrame(np.array(v), columns=col_name_de)
        data_flos_df[k] = data_flos_tmp

        if filename:
            filename_tmp, ext = os.path.splitext(filename)
            filepath = filename_tmp + f"-{k}{ext}"
            data_flos_tmp.to_csv(filepath, index_label="frame")

    if verbose:
        tqdm.write(
            f"Maximum flow at {floname} (from frame {start_at} to {end_at}) is {max_flo:.2f}"
        )
        tqdm.write(
            f"Maximum vorticity at {floname} (from frame {start_at} to {end_at}) is {vort_flo:.2f}"
        )
        tqdm.write(
            f"Maximum shear stress at {floname} (from frame {start_at} to {end_at}) is {shear_flo:.2f}"
        )
        tqdm.write(
            f"Maximum normal stress at {floname} (from frame {start_at} to {end_at}) is {normal_flo:.2f}"
        )

    return max_flo, data_flos_df
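A hypothetical call with placeholder paths and calibration values; aggregate entries must be numpy reduction names because they are resolved with getattr(np, agg), and the optional label file is expected to contain a 'video' region:

max_mag, tables = get_max_flow('results/flownet/video-1/flow',
                               labelpath='labels/video-1.json',
                               aggregate=('max', 'mean'),
                               fps=30, calib=0.01,
                               filename='summary/video-1.csv',
                               verbose=1)
print(max_mag)                # largest calibrated flow magnitude over all frames
print(tables['vort'].head())  # per-frame vorticity aggregates as a DataFrame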
Example #9
File: postpro.py  Project: abrosua/piv-viz
def region_velo(labelpath: str,
                netname: str,
                flodir: str,
                key: str,
                fps: int = 1,
                start_at: int = 0,
                end_at: int = -1,
                num_flows: int = -1,
                avg_step: int = 1,
                show: bool = False,
                filename: Optional[str] = None,
                calibration_factor: float = 1.0,
                verbose: int = 0) -> np.array:
    """
    Get regional velocity data (either v1 or v2).
    params:
        labelpath: The base label file to use.
        netname: Network name result to use.
        flodir: Main directory of the flow.
        key: Which regional flow to choose (v1 or v2).
        fps: Image frame frequency (Frame Per Second).
        start_at: Flow index to start.
        end_at: Last flow index (if -1, use the last flow in the flowdir).
        num_flows: Number of flows to choose (if -1, use all the available flows in the flowdir).
        avg_step: Number of steps to average the flow value (if 1, calculate instantaneous velocity instead).
        calibration_factor: To calibrate pixel to (estimated) real displacement.
        verbose: The verbosity level.
    returns:
        numpy array of the regional velocity summary at each time frame, in terms of average 2d velocity and magnitude.
        The flow regional velocity is in mm/second.
    """
    # Init.
    assert os.path.isfile(labelpath)
    assert os.path.isdir(flodir)
    assert avg_step > 0

    # Flow metadata
    flopaths = sorted(glob(os.path.join(flodir, "*.flo")))
    nflow = len(flopaths)
    end_at = nflow if end_at < 0 else end_at
    num_flows = nflow - start_at if num_flows < 0 else num_flows

    step = int(np.floor(end_at / num_flows))
    idx = list(range(start_at, end_at, step))
    key_title = "instantaneous" if avg_step == 1 else "mean"

    # Getting the v1/v2 label
    label = Label(labelpath, netname, verbose=verbose)
    flow_label = label.label[key]

    # Iterate over the flows
    velo_record = [[0.0, [0.0, 0.0], 0.0]]

    for id in tqdm(idx, desc=f"Flow at {key}", unit="frame"):
        out_flow, _ = utils.read_flow_collection(flodir,
                                                 start_at=id,
                                                 num_images=avg_step)
        out_flow = out_flow * fps * calibration_factor  # Calibrating into mm/second

        out_mag = np.linalg.norm(out_flow, axis=-1)
        avg_flow, avg_mag = np.mean(out_flow, axis=0), np.mean(out_mag, axis=0)
        mask = np.full(avg_flow.shape[:2], False)

        # Filling the masked flow array
        for flow_point in flow_label['points']:
            mask += shape_to_mask(avg_flow.shape[:2],
                                  flow_point,
                                  shape_type=flow_label['shape_type'])

        velo_record.append([(id + 1) / fps,
                            np.mean(avg_flow[mask], axis=0),
                            np.mean(avg_mag[mask], axis=0)])
    velo_record = np.array(velo_record)

    plt.plot(velo_record[:, 0], velo_record[:, -1])
    plt.title(f"{key} {key_title} velocity at each time frame")
    plt.ylim(bottom=0)
    plt.xlim(left=0)
    plt.xlabel("Time stamp [frame]")
    plt.ylabel(f"{key} velocity [pix]")

    if show:
        plt.show()
    if filename:
        plt.savefig(filename, dpi=300, bbox_inches='tight')
    plt.clf()

    return velo_record
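A similarly hypothetical call for the regional velocity helper; the label file is assumed to define a 'v1' region and all paths are placeholders:

velo = region_velo('labels/video-1.json', 'flownet', 'results/flownet/video-1/flow',
                   key='v1', fps=30, avg_step=5,
                   calibration_factor=0.01,
                   filename='plots/video-1-v1.png', verbose=1)
# Each row holds [time in seconds, mean 2D velocity vector, mean magnitude]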