Example #1
def download_dataset(path: str, otb50: bool = False):

    from vot.utilities.net import download_uncompress, join_url, NetworkException

    dataset = _SEQUENCES

    if otb50:
        dataset = {k: v for k, v in dataset.items() if k in _OTB50_SUBSET}

    with Progress("Downloading", len(dataset)) as progress:
        for name, metadata in dataset.items():
            name = metadata.get("base", name)
            if not os.path.isdir(os.path.join(path, name)):
                try:
                    download_uncompress(join_url(_BASE_URL, "%s.zip" % name),
                                        path)
                except NetworkException as ex:
                    raise DatasetException(
                        "Unable to download sequence data") from ex
                except IOError as ex:
                    raise DatasetException(
                        "Unable to extract sequence data, is the target directory writable and do you have enough space?"
                    ) from ex

            progress.relative(1)
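
A minimal usage sketch for the function above. The "datasets/otb" path is
illustrative, and download_dataset is assumed to be importable from the module
that defines _SEQUENCES and _BASE_URL:

# Hypothetical invocation: fetch only the OTB-50 subset into a local directory.
try:
    download_dataset("datasets/otb", otb50=True)
except DatasetException as error:
    print("Dataset preparation failed:", error)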
Example #2
    def _read(self):

        channels = {}
        tags = {}
        values = {}
        groundtruth = []

        channels["color"] = load_channel(os.path.join(self._base, "%08d.jpg"))
        self._metadata["channel.default"] = "color"
        self._metadata["width"], self._metadata["height"] = six.next(
            six.itervalues(channels)).size

        groundtruth_file = os.path.join(
            self._base, self.metadata("groundtruth", "groundtruth.txt"))

        with open(groundtruth_file, 'r') as filehandle:
            for region in filehandle.readlines():
                groundtruth.append(parse(region))

        self._metadata["length"] = len(groundtruth)

        tagfiles = glob.glob(os.path.join(self._base, '*.label'))

        for tagfile in tagfiles:
            with open(tagfile, 'r') as filehandle:
                tagname = os.path.splitext(os.path.basename(tagfile))[0]
                tag = [line.strip() == "1" for line in filehandle.readlines()]
                # Pad missing entries so the tag list matches the groundtruth length.
                while len(tag) < len(groundtruth):
                    tag.append(False)
                tags[tagname] = tag

        valuefiles = glob.glob(os.path.join(self._base, '*.value'))

        for valuefile in valuefiles:
            with open(valuefile, 'r') as filehandle:
                valuename = os.path.splitext(os.path.basename(valuefile))[0]
                value = [
                    float(line.strip()) for line in filehandle.readlines()
                ]
                while len(value) < len(groundtruth):
                    value.append(0.0)
                values[valuename] = value

        for name, channel in channels.items():
            if channel.length != len(groundtruth):
                raise DatasetException("Length mismatch for channel %s" % name)

        for name, tag in tags.items():
            if len(tag) != len(groundtruth):
                # Pad the tag list and write it back; reassigning the loop
                # variable alone would leave the dictionary entry unchanged.
                tag_tmp = len(groundtruth) * [False]
                tag_tmp[:len(tag)] = tag
                tags[name] = tag_tmp

        for name, value in values.items():
            if len(value) != len(groundtruth):
                raise DatasetException("Length mismatch for value %s" % name)

        return channels, groundtruth, tags, values
Example #3
    def _read(self):

        channels = {}
        groundtruth = []

        metadata = _SEQUENCES[self.name]

        channels["color"] = PatternFileListChannel(
            os.path.join(self._base, "img", "%04d.jpg"),
            start=metadata.get("start", 1),
            end=metadata.get("end", None))

        self._metadata["channel.default"] = "color"
        self._metadata["width"], self._metadata["height"] = six.next(
            six.itervalues(channels)).size

        groundtruth_file = os.path.join(self._base, "groundtruth_rect.txt")

        with open(groundtruth_file, 'r') as filehandle:
            for region in filehandle.readlines():
                groundtruth.append(parse(region))

        self._metadata["length"] = len(groundtruth)

        if not channels["color"].length == len(groundtruth):
            raise DatasetException(
                "Length mismatch between groundtruth and images")

        return channels, groundtruth, {}, {}
Example #4
    def __init__(self, path, splits=False):
        super().__init__(path)

        if not splits and not TrackingNetDataset.check(path):
            raise DatasetException(
                "Unsupported dataset format, expected TrackingNet")

        sequences = []
        if not splits:
            for file in glob.glob(os.path.join(path, "anno", "*.txt")):
                sequences.append(file)
        else:
            # Special mode to load all training splits
            for split in ["TRAIN_%d" % i for i in range(0, 12)]:
                for file in glob.glob(
                        os.path.join(path, split, "anno", "*.txt")):
                    sequences.append(file)

        self._sequences = OrderedDict()

        with Progress("Loading dataset", len(sequences)) as progress:
            for sequence in sequences:
                name = os.path.splitext(os.path.basename(sequence))[0]
                self._sequences[name] = TrackingNetSequence(
                    sequence, dataset=self)
                progress.relative(1)
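
A hedged construction sketch for the loader above; the path is a placeholder
for a local TrackingNet copy laid out with anno/*.txt files (or TRAIN_0 ...
TRAIN_11 split directories when splits=True):

# Hypothetical usage: load all twelve training splits at once.
dataset = TrackingNetDataset("datasets/trackingnet", splits=True)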
Example #5
def convert_frame(frame: Frame, channels: list) -> dict:
    tlist = dict()

    for channel in channels:
        image = frame.filename(channel)
        if image is None:
            raise DatasetException("Frame does not have information for channel: {}".format(channel))

        tlist[channel] = FileImage.create(image)

    return tlist
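
A minimal call sketch for convert_frame; the sequence object and the
sequence.frame(0) accessor are illustrative stand-ins for however a Frame is
obtained, and the channel names assume the sequence actually provides them:

# Hypothetical usage: wrap the color and depth files of the first frame.
images = convert_frame(sequence.frame(0), ["color", "depth"])
color_image = images["color"]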
Example #6
    def __init__(self, path):
        super().__init__(path)

        if not os.path.isfile(os.path.join(path, "list.txt")):
            raise DatasetException("Dataset not available locally")

        with open(os.path.join(path, "list.txt"), 'r') as fd:
            names = fd.readlines()
        self._sequences = {
            name.strip(): VOTSequence(os.path.join(path, name.strip()),
                                      dataset=self)
            for name in Progress(
                names, desc="Loading dataset", unit="sequences")
        }
Example #7
    def __init__(self, path):
        super().__init__(path)

        if not os.path.isfile(os.path.join(path, "list.txt")):
            raise DatasetException("Dataset not available locally")

        with open(os.path.join(path, "list.txt"), 'r') as fd:
            names = fd.readlines()

        self._sequences = OrderedDict()

        with Progress("Loading dataset", len(names)) as progress:

            for name in names:
                self._sequences[name.strip()] = VOTSequence(
                    os.path.join(path, name.strip()), dataset=self)
                progress.relative(1)
Example #8
    def __init__(self, path, sequence_list="list.txt"):
        super().__init__(path)

        if not os.path.isabs(sequence_list):
            sequence_list = os.path.join(path, sequence_list)

        if not os.path.isfile(sequence_list):
            raise DatasetException("Sequence list does not exist")

        with open(sequence_list, 'r') as handle:
            names = handle.readlines()

        self._sequences = OrderedDict()

        with Progress("Loading dataset", len(names)) as progress:

            for name in names:
                self._sequences[name.strip()] = GOT10kSequence(
                    os.path.join(path, name.strip()), dataset=self)
                progress.relative(1)
Example #9
    def download(self, url, path="."):
        from vot.utilities.net import download_uncompress, download_json, get_base_url, join_url, NetworkException

        if os.path.splitext(url)[1] == '.zip':
            logger.info(
                'Downloading sequence bundle from "%s". This may take a while ...',
                url)

            try:
                download_uncompress(url, path)
            except NetworkException as e:
                raise DatasetException(
                    "Unable to download dataset bundle. Please try to download the bundle manually from {} and uncompress it to {}."
                    .format(url, path)) from e
            except IOError as e:
                raise DatasetException(
                    "Unable to extract dataset bundle, is the target directory writable and do you have enough space?"
                ) from e

        else:

            meta = download_json(url)

            logger.info('Downloading sequence dataset "%s" with %s sequences.',
                        meta["name"], len(meta["sequences"]))

            base_url = get_base_url(url) + "/"

            with Progress("Donwloading", len(meta["sequences"])) as progress:
                for sequence in meta["sequences"]:
                    sequence_directory = os.path.join(path, sequence["name"])
                    os.makedirs(sequence_directory, exist_ok=True)

                    data = {
                        'name': sequence["name"],
                        'fps': sequence["fps"],
                        'format': 'default'
                    }

                    annotations_url = join_url(base_url,
                                               sequence["annotations"]["url"])

                    try:
                        download_uncompress(annotations_url,
                                            sequence_directory)
                    except NetworkException as e:
                        raise DatasetException(
                            "Unable to download annotations bundle") from e
                    except IOError as e:
                        raise DatasetException(
                            "Unable to extract annotations bundle, is the target directory writable and do you have enough space?"
                        ) from e

                    for cname, channel in sequence["channels"].items():
                        channel_directory = os.path.join(
                            sequence_directory, cname)
                        os.makedirs(channel_directory, exist_ok=True)

                        channel_url = join_url(base_url, channel["url"])

                        try:
                            download_uncompress(channel_url, channel_directory)
                        except NetworkException as e:
                            raise DatasetException(
                                "Unable to download channel bundle") from e
                        except IOError as e:
                            raise DatasetException(
                                "Unable to extract channel bundle, is the target directory writable and do you have enough space?"
                            ) from e

                        if "pattern" in channel:
                            data["channels." +
                                 cname] = cname + os.path.sep + channel[
                                     "pattern"]
                        else:
                            data["channels." + cname] = cname + os.path.sep

                    write_properties(
                        os.path.join(sequence_directory, 'sequence'), data)

                    progress.relative(1)

            with open(os.path.join(path, "list.txt"), "w") as fp:
                for sequence in meta["sequences"]:
                    fp.write('{}\n'.format(sequence["name"]))
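
A hedged call sketch for the method above; "downloader" stands for whatever
object exposes this download method, and the URL is a placeholder rather than
a real bundle location:

# Hypothetical usage: a URL ending in ".zip" takes the bundle branch; any
# other URL is treated as a JSON dataset description.
downloader.download("https://example.com/dataset/description.json", "datasets/vot")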
Example #10
    def _read(self):

        channels = {}
        tags = {}
        values = {}
        groundtruth = []

        channels["color"] = load_channel(os.path.join(self._base, "%08d.jpg"))
        self._metadata["channel.default"] = "color"
        self._metadata["width"], self._metadata["height"] = six.next(
            six.itervalues(channels)).size

        groundtruth_file = os.path.join(
            self._base, self.metadata("groundtruth", "groundtruth.txt"))
        groundtruth = read_trajectory(groundtruth_file)

        if len(groundtruth) == 1 and channels["color"].length > 1:
            # This is a testing split where only the first frame is annotated,
            # so pad the groundtruth with unknowns. Only the unsupervised
            # experiment will work in this case, but that is acceptable.
            groundtruth.extend([Special(Special.UNKNOWN)] *
                               (channels["color"].length - 1))

        self._metadata["length"] = len(groundtruth)

        tagfiles = glob.glob(os.path.join(self._base, '*.label'))

        for tagfile in tagfiles:
            with open(tagfile, 'r') as filehandle:
                tagname = os.path.splitext(os.path.basename(tagfile))[0]
                tag = [line.strip() == "1" for line in filehandle.readlines()]
                while len(tag) < len(groundtruth):
                    tag.append(False)
                tags[tagname] = tag

        valuefiles = glob.glob(os.path.join(self._base, '*.value'))

        for valuefile in valuefiles:
            with open(valuefile, 'r') as filehandle:
                valuename = os.path.splitext(os.path.basename(valuefile))[0]
                value = [
                    float(line.strip()) for line in filehandle.readlines()
                ]
                while len(value) < len(groundtruth):
                    value.append(0.0)
                values[valuename] = value

        for name, channel in channels.items():
            if channel.length != len(groundtruth):
                raise DatasetException("Length mismatch for channel %s" % name)

        for name, tag in tags.items():
            if len(tag) != len(groundtruth):
                # Pad the tag list and write it back; reassigning the loop
                # variable alone would leave the dictionary entry unchanged.
                tag_tmp = len(groundtruth) * [False]
                tag_tmp[:len(tag)] = tag
                tags[name] = tag_tmp

        for name, value in values.items():
            if len(value) != len(groundtruth):
                raise DatasetException("Length mismatch for value %s" % name)

        return channels, groundtruth, tags, values
Example #11
    def _read(self):

        channels = {}
        tags = {}
        values = {}
        groundtruth = []

        for c in ["color", "depth", "ir"]:
            channel_path = self.metadata("channels.%s" % c, None)
            if channel_path is not None:
                channels[c] = load_channel(
                    os.path.join(self._base, localize_path(channel_path)))

        # Load default channel if no explicit channel data available
        if len(channels) == 0:
            channels["color"] = load_channel(
                os.path.join(self._base, "color", "%08d.jpg"))
        else:
            self._metadata["channel.default"] = next(iter(channels.keys()))

        self._metadata["width"], self._metadata["height"] = six.next(
            six.itervalues(channels)).size

        groundtruth_file = os.path.join(
            self._base, self.metadata("groundtruth", "groundtruth.txt"))
        groundtruth = read_trajectory(groundtruth_file)

        self._metadata["length"] = len(groundtruth)

        tagfiles = glob.glob(os.path.join(self._base, '*.tag')) + glob.glob(
            os.path.join(self._base, '*.label'))

        for tagfile in tagfiles:
            with open(tagfile, 'r') as filehandle:
                tagname = os.path.splitext(os.path.basename(tagfile))[0]
                tag = [line.strip() == "1" for line in filehandle.readlines()]
                while len(tag) < len(groundtruth):
                    tag.append(False)
                tags[tagname] = tag

        valuefiles = glob.glob(os.path.join(self._base, '*.value'))

        for valuefile in valuefiles:
            with open(valuefile, 'r') as filehandle:
                valuename = os.path.splitext(os.path.basename(valuefile))[0]
                value = [
                    float(line.strip()) for line in filehandle.readlines()
                ]
                while len(value) < len(groundtruth):
                    value.append(0.0)
                values[valuename] = value

        for name, channel in channels.items():
            if channel.length != len(groundtruth):
                raise DatasetException(
                    "Length mismatch for channel %s (%d != %d)" %
                    (name, channel.length, len(groundtruth)))

        for name, tag in tags.items():
            if len(tag) != len(groundtruth):
                # Pad the tag list and write it back; reassigning the loop
                # variable alone would leave the dictionary entry unchanged.
                tag_tmp = len(groundtruth) * [False]
                tag_tmp[:len(tag)] = tag
                tags[name] = tag_tmp

        for name, value in values.items():
            if len(value) != len(groundtruth):
                raise DatasetException("Length mismatch for value %s" % name)

        return channels, groundtruth, tags, values
Example #12
    def _scan(self, base):

        metadata_file = os.path.join(base, 'sequence')
        data = read_properties(metadata_file)
        for c in ["color", "depth", "ir"]:
            if "channels.%s" % c in data:
                self._channels[c] = load_channel(
                    os.path.join(self._base,
                                 localize_path(data["channels.%s" % c])))

        # Load default channel if no explicit channel data available
        if len(self._channels) == 0:
            self._channels["color"] = load_channel(
                os.path.join(self._base, "color", "%08d.jpg"))
        else:
            self._metadata["channel.default"] = next(
                iter(self._channels.keys()))

        self._metadata["width"], self._metadata["height"] = six.next(
            six.itervalues(self._channels)).size

        groundtruth_file = os.path.join(
            self._base, data.get("groundtruth", "groundtruth.txt"))

        with open(groundtruth_file, 'r') as groundtruth:
            for region in groundtruth.readlines():
                self._groundtruth.append(parse(region))

        self._metadata["length"] = len(self._groundtruth)

        tagfiles = glob.glob(os.path.join(self._base, '*.tag')) + glob.glob(
            os.path.join(self._base, '*.label'))

        for tagfile in tagfiles:
            with open(tagfile, 'r') as filehandle:
                tagname = os.path.splitext(os.path.basename(tagfile))[0]
                tag = [line.strip() == "1" for line in filehandle.readlines()]
                while len(tag) < len(self._groundtruth):
                    tag.append(False)
                self._tags[tagname] = tag

        valuefiles = glob.glob(os.path.join(self._base, '*.value'))

        for valuefile in valuefiles:
            with open(valuefile, 'r') as filehandle:
                valuename = os.path.splitext(os.path.basename(valuefile))[0]
                value = [
                    float(line.strip()) for line in filehandle.readlines()
                ]
                while len(value) < len(self._groundtruth):
                    value.append(0.0)
                self._values[valuename] = value

        for name, channel in self._channels.items():
            if channel.length != len(self._groundtruth):
                raise DatasetException("Length mismatch for channel %s" % name)

        for name, tag in self._tags.items():
            if len(tag) != len(self._groundtruth):
                # Pad the tag list and write it back; reassigning the loop
                # variable alone would leave the dictionary entry unchanged.
                tag_tmp = len(self._groundtruth) * [False]
                tag_tmp[:len(tag)] = tag
                self._tags[name] = tag_tmp

        for name, value in self._values.items():
            if len(value) != len(self._groundtruth):
                raise DatasetException("Length mismatch for value %s" % name)