Example #1
 def __init__(self, config, subset, default_data_dir_suffix, data_keys_to_use, num_classes=2):
   super().__init__(config, subset, data_keys_to_use, num_classes)
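   # dataset root; the "dataset_path" config key overrides the /fastwork default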
   self._data_dir = config.dir("dataset_path", "/fastwork/" + username() + "/mywork/data/" + default_data_dir_suffix)
   self._video_idx = None
   self._object_idx = None
   self._curr_video_data = None
   self._keys_to_sample = []
   # TODO: maybe later share the cache between the subsets
   self._video_data_cache = {}
   self._video_tags = self._read_video_tags()
Example #2
 def __init__(self, config, subset):
     davis_sequence = config.string("model", '')
     # data_list_path = LIST_PATH_ROOT + davis_sequence + '/'
     self.data_dir = config.string('data_dir', DEFAULT_PATH)
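      # COCO train2014 annotations, used below to build the filename -> annotations lookup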
     annotation_file = "/fastwork/" + username(
     ) + "/mywork/data/coco/annotations/instances_train2014.json"
     self.build_filename_to_coco_anns_dict(annotation_file)
     super().__init__(config,
                      subset,
                      NAME,
                      self.data_dir,
                      "",
                      100,
                      cat_ids_to_use=None)
Example #3
#!/usr/bin/env python3
import glob
from PIL import Image
import numpy as np
import tqdm

from refinement_net.datasets.util.Util import username
import datasets.cityscapes

PATH = "/fastwork/" + username() + "/mywork/data/cityscapes/"
CITYSCAPES_CODE_PATH = datasets.cityscapes.__file__.replace("__init__.py", "")

for subset, outfile in [("train", "training.txt"), ("val", "validation.txt")]:
    with open(CITYSCAPES_CODE_PATH + outfile, "w") as f:
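        # gather all fine-annotation instance-id maps for this subset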
        instances = glob.glob(PATH + "gtFine/" + subset + "/*/*_instanceIds.png")
        for inst in tqdm.tqdm(instances):
            x = np.array(Image.open(inst))
            ids = np.unique(x)
            im = inst.replace("/gtFine/",
                              "/leftImg8bit/").replace("_gtFine_instanceIds",
                                                       "_leftImg8bit")
            print(im.replace(PATH, ""),
                  inst.replace(PATH, ""),
                  file=f,
                  sep=" ",
                  end="")
            for id_ in ids:
                n = (x == id_).sum()
                print(" ", id_, ":", n, file=f, sep="", end="")
            print(file=f)
Example #4
import glob

import tensorflow as tf

from refinement_net.datasets.Loader import register_dataset
from refinement_net.datasets.Dataset import FileListDataset
from refinement_net.datasets.util.Util import username

NUM_CLASSES = 2
VOID_LABEL = 255  # for translation augmentation
DAVIS_DEFAULT_PATH = "/fastwork/" + username() + "/mywork/data/DAVIS/"
DAVIS2017_DEFAULT_PATH = "/fastwork/" + username() + "/mywork/data/DAVIS2017/"
DAVIS_FLOW_DEFAULT_PATH = "/fastwork/" + username() + "/mywork/data/DAVIS_data/"
DAVIS_LUCID_DEFAULT_PATH = "/fastwork/" + username() + "/mywork/data/DAVIS_data/lucid/"
DAVIS2017_LUCID_DEFAULT_PATH = "/fastwork/" + username() + "/mywork/data/DAVIS2017_data/lucid/"
DAVIS_IMAGE_SIZE = (480, 854)
DAVIS2017_IMAGE_SIZE = (480, None)


def read_image_and_annotation_list(fn, data_dir):
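    # each line lists an image path and an annotation path (plus optional extra fields), relative to data_dir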
    imgs = []
    ans = []
    with open(fn) as f:
        for l in f:
            sp = l.split()
            an = data_dir + sp[1]
            im = data_dir + sp[0]
            imgs.append(im)
            ans.append(an)
    return imgs, ans
Example #5
#!/usr/bin/env python3
import glob
from PIL import Image
import numpy as np
import tqdm

from refinement_net.datasets.util.Util import username
import datasets.KITTI

PATH = "/home/" + username() + "/data/KITTI_instance/"
KITTI_CODE_PATH = datasets.KITTI.__file__.replace("__init__.py", "")

with open(KITTI_CODE_PATH + "training.txt", "w") as f:
    instances = glob.glob(PATH + "instance/*.png")
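    # instance-id maps live under instance/, the matching RGB images under image_2/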
    for inst in tqdm.tqdm(instances):
        x = np.array(Image.open(inst))
        ids = np.unique(x)
        im = inst.replace("/instance/", "/image_2/")
        print(im.replace(PATH, ""),
              inst.replace(PATH, ""),
              file=f,
              sep=" ",
              end="")
        for id_ in ids:
            n = (x == id_).sum()
            print(" ", id_, ":", n, file=f, sep="", end="")
        print(file=f)
Example #6
import json
import pycocotools.coco as coco
from pycocotools.cocoeval import COCOeval
from refinement_net.datasets.util.Util import username

coco_data_folder = "/fastwork/" + username() + "/mywork/data/coco/"
minival_gt_file = coco_data_folder + "annotations/instances_val2014.json"
minival_det_file = "/home/krause/vision/savitar2/forwarded/frcnn_test/frcnn_test-1-detections.json"


def evaluate_coco():
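    # run standard COCO bbox evaluation and print the overall AP (cocoEval.stats[0])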
    c = coco.COCO(minival_gt_file)
    cocoDt = c.loadRes(minival_det_file)
    cocoEval = COCOeval(c, cocoDt, 'bbox')
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()
    print(cocoEval.stats[0])


def adjust_detections():
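    # remap the detector's contiguous category indices onto COCO's sparse category ids (1..90)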
    c = coco.COCO(minival_gt_file)
    keys = list(c.cats.keys())
    with open(minival_det_file) as f:
        detections_list = json.load(f)
    for det in detections_list:
        det['category_id'] = keys[det['category_id']]
    with open("/home/krause/vision/savitar2/forwarded/temp_edited.json", 'w') as f:
        json.dump(detections_list, f)

Example #7
# NOTE: the snippet is truncated above; the signature below is an assumed reconstruction
def segmentation_to_mask(polys, height, width):
    """
    Convert polygons to binary masks.

    Args:
        polys: a list of nx2 float arrays

    Returns:
        a binary matrix of (height, width)
    """
    import pycocotools.mask as cocomask
    # COCO represents polygons as flat [x0, y0, x1, y1, ...] coordinate lists
    polys = [p.flatten().tolist() for p in polys]
    rles = cocomask.frPyObjects(polys, height, width)
    rle = cocomask.merge(rles)
    return cocomask.decode(rle)


DEFAULT_PATH = "/fastwork/" + username() + "/mywork/data/coco/"
# To clarify, COCO uses category IDs from 1 to 90 which we take verbatim (no +1/-1!!!)
# The classes_to_cat array contains the mapping
NUM_CLASSES = 81
N_MAX_DETECTIONS = 100
NAME = "COCO_detection"


@register_dataset(NAME)
class CocoDetectionDataset(FileListDataset):
    def __init__(self, config, subset):
        super().__init__(config, NAME, subset, DEFAULT_PATH, NUM_CLASSES)

        self.add_masks = config.bool("add_masks", True)
        self.exclude_crowd_images = config.bool("exclude_crowd_images", False)
        self.exclude_crowd_annotations = config.bool(
            "exclude_crowd_annotations", False)  # key/default assumed; the source snippet is truncated here
Example #8
#!/usr/bin/env python3

import glob
import os

from refinement_net.datasets.util.Util import username

PATH = "/fastwork/" + username() + "/mywork/data/mapillary/"
files = glob.glob(PATH + "training/*/-*.png") + glob.glob(PATH + "training/*/-*.jpg") + \
        glob.glob(PATH + "validation/*/-*.png") + glob.glob(PATH + "validation/*/-*.jpg")
with open(PATH + "renamed.txt", "w") as f_out:
    for f in files:
        idx_begin = f.rfind("/-")
        idx_end = idx_begin + 1
        while f[idx_end] == "-":
            idx_end += 1
        f_new = f[:idx_begin + 1] + f[idx_end:]
        print(f, "->", f_new)
        os.rename(f, f_new)
        print(f, f_new, file=f_out)
Example #9
import glob

import tensorflow as tf

from refinement_net.datasets.Loader import register_dataset
from refinement_net.datasets.Dataset import FileListDataset
from refinement_net.datasets.util.Util import username

NUM_CLASSES = 2
VOID_LABEL = 255  # for translation augmentation
DAVIS_DEFAULT_IMAGES_PATH = "/work2/" + username() + "/data/DAVIS/train-val/val/"
DAVIS_DEFAULT_GT_PATH = "/work2/" + username() + "/data/DAVIS/train-val/val-gt/"
DAVIS_DEFAULT_PROPOSALS_PATH = "/home/" + username() + "/vision/maskrcnn_tensorpack/train_log/thesis1/notrain-val/"
DAVIS_IMAGE_SIZE = (480, 854)

@register_dataset("davisjono")
class DAVISjonoDataset(FileListDataset):
  def __init__(self, config, subset, name="davisjono"):
    self.image_dir = config.string("DAVIS_images_dir", DAVIS_DEFAULT_IMAGES_PATH)
    self.gt_dir = config.string("DAVIS_gt_dir", DAVIS_DEFAULT_GT_PATH)
    self.proposals_dir = config.string("DAVIS_proposals_dir", DAVIS_DEFAULT_PROPOSALS_PATH)

    super().__init__(config, name, subset, DAVIS_DEFAULT_IMAGES_PATH, 2)

  def read_inputfile_lists(self):
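    # pair every JPEG frame with the ground-truth PNG of the same sequence and frame name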
    img_filenames = glob.glob(self.image_dir + "*/*.jpg")
    seq_tags = [f.split("/")[-2] for f in img_filenames]
    framenum = [f.split('/')[-1].split('.jpg')[0] for f in img_filenames]
    label_filenames = [self.gt_dir + s + '/' + f + '.png' for s, f in zip(seq_tags, framenum)]
    return img_filenames, label_filenames
Example #10
#!/usr/bin/env python3
import glob
from PIL import Image
import numpy as np

from refinement_net.datasets.util.Util import username
import datasets.Mapillary

PATH = "/fastwork/" + username() + "/mywork/data/mapillary_quarter/"
MAPILLARY_CODE_PATH = datasets.Mapillary.__file__.replace("__init__.py", "")

for subset in ["training", "validation"]:
    with open(MAPILLARY_CODE_PATH + subset, "w") as f:
        instances = glob.glob(PATH + subset + "/instances/*.png")
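        # instance maps are PNGs under instances/; the matching RGB images are JPEGs under images/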
        for inst in instances:
            x = np.array(Image.open(inst))
            ids = np.unique(x)
            im = inst.replace("/instances/",
                              "/images/").replace(".png", ".jpg")
            print(im.replace(PATH, ""),
                  inst.replace(PATH, ""),
                  file=f,
                  sep=" ",
                  end="")
            for id_ in ids:
                n = (x == id_).sum()
                print(" ", id_, ":", n, file=f, sep="", end="")
            print(file=f)