Example #1
    def _download(self, root_dir):
        if not osp.isdir(root_dir):
            os.makedirs(root_dir)
        elif len(os.listdir(root_dir)) > 100:
            # a well-populated folder suggests the archive was already extracted
            ops.sys_print('Files already downloaded.')
            return

        url = 'http://www.dabi.temple.edu/~hbling/data/TColor-128/Temple-color-128.zip'
        zip_file = osp.join(root_dir, 'Temple-color-128.zip')
        ops.sys_print('Downloading to %s...' % zip_file)
        ops.download(url, zip_file)
        ops.sys_print('\nExtracting to %s...' % root_dir)
        ops.extract(zip_file, root_dir)

        return root_dir
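
All of the examples on this page delegate console output, downloading, and archive extraction to an external ops module that is not shown here. The sketch below is one plausible stand-in built only on the Python standard library; the bodies are assumptions for illustration, not the toolkit's actual implementation.

import sys
import zipfile
import urllib.request


def sys_print(msg):
    # print and flush immediately so progress messages appear in order
    sys.stdout.write(str(msg) + '\n')
    sys.stdout.flush()


def download(url, filename):
    # fetch url into filename; a real helper would likely add progress reporting and retries
    urllib.request.urlretrieve(url, filename)
    return filename


def extract(zip_file, dst_dir):
    # unpack a .zip archive into dst_dir
    with zipfile.ZipFile(zip_file) as z:
        z.extractall(dst_dir)
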
Example #2
    def _download(self, root_dir, version):
        assert version in self.__version_dict
        seq_names = self.__version_dict[version]

        if not osp.isdir(root_dir):
            os.makedirs(root_dir)
        elif all([osp.isdir(osp.join(root_dir, s)) for s in seq_names]):
            # skip the download when every sequence folder already exists
            ops.sys_print('Files already downloaded.')
            return

        url_fmt = 'http://cvlab.hanyang.ac.kr/tracker_benchmark/seq/%s.zip'
        for seq_name in seq_names:
            seq_dir = osp.join(root_dir, seq_name)
            if osp.isdir(seq_dir):
                continue
            url = url_fmt % seq_name
            zip_file = osp.join(root_dir, seq_name + '.zip')
            ops.sys_print('Downloading to %s...' % zip_file)
            ops.download(url, zip_file)
            ops.sys_print('\nExtracting to %s...' % root_dir)
            ops.extract(zip_file, root_dir)

        return root_dir
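
For context, a _download method like the one above normally lives inside a dataset class and runs from its constructor before the sequences are indexed. The skeleton below is purely illustrative: the class name, the abbreviated version table, and the attribute names are assumptions, not the real toolkit API.

import os.path as osp


class TrackingDatasetStub:
    # hypothetical, abbreviated version table; real lists contain dozens of sequences
    __version_dict = {
        2013: ['Basketball', 'Bolt'],
        2015: ['Basketball', 'Bolt', 'Car4'],
    }

    def __init__(self, root_dir, version=2015, download=True):
        assert version in self.__version_dict
        self.root_dir = root_dir
        self.version = version
        if download:
            self._download(root_dir, version)
        # after downloading, a real class would index frame files and annotations
        self.seq_names = self.__version_dict[version]
        self.seq_dirs = [osp.join(root_dir, n) for n in self.seq_names]

    def _download(self, root_dir, version):
        # placeholder: the body shown in Example #2 would go here
        raise NotImplementedError
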
Example #3
    def _download(self, root_dir, version):
        assert version in self.__valid_versions

        if not osp.isdir(root_dir):
            os.makedirs(root_dir)
        elif osp.isfile(osp.join(root_dir, 'list.txt')):
            with open(osp.join(root_dir, 'list.txt')) as f:
                seq_names = f.read().strip().split('\n')
            if all([osp.isdir(osp.join(root_dir, s)) for s in seq_names]):
                ops.sys_print('Files already downloaded.')
                return

        url = 'http://data.votchallenge.net/'
        if version in range(2013, 2015 + 1):
            # main challenge (2013~2015)
            homepage = url + 'vot{}/dataset/'.format(version)
        elif version in range(2016, 2019 + 1):
            # main challenge (2016~2019)
            homepage = url + 'vot{}/main/'.format(version)
        elif version.startswith('LT'):
            # long-term tracking challenge
            year = int(version[2:])
            homepage = url + 'vot{}/longterm/'.format(year)
        elif version.startswith('RGBD'):
            # RGBD tracking challenge
            year = int(version[4:])
            homepage = url + 'vot{}/rgbd/'.format(year)
        elif version.startswith('RGBT'):
            # RGBT tracking challenge
            year = int(version[4:])
            # rebase url so the relative sequence links below resolve under rgbtir/
            url = url + 'vot{}/rgbtir/'.format(year)
            homepage = url + 'meta/'

        # download description file
        bundle_url = homepage + 'description.json'
        bundle_file = osp.join(root_dir, 'description.json')
        if not osp.isfile(bundle_file):
            ops.sys_print('Downloading description file...')
            ops.download(bundle_url, bundle_file)

        # read description file
        ops.sys_print('\nParsing description file...')
        with open(bundle_file) as f:
            bundle = json.load(f)

        # md5 generator
        def md5(filename):
            hash_md5 = hashlib.md5()
            with open(filename, 'rb') as f:
                for chunk in iter(lambda: f.read(4096), b""):
                    hash_md5.update(chunk)
            return hash_md5.hexdigest()

        # download all sequences
        seq_names = []
        for seq in bundle['sequences']:
            seq_name = seq['name']
            seq_names.append(seq_name)

            # download channel (color/depth/ir) files
            channels = seq['channels'].keys()
            seq_files = []
            for cn in channels:
                seq_url = seq['channels'][cn]['url']
                if not seq_url.startswith(('http://', 'https://')):
                    seq_url = url + seq_url[seq_url.find('sequence'):]
                seq_file = osp.join(root_dir, '{}_{}.zip'.format(seq_name, cn))
                if not osp.isfile(seq_file) or \
                    md5(seq_file) != seq['channels'][cn]['checksum']:
                    ops.sys_print('\nDownloading %s...' % seq_name)
                    ops.download(seq_url, seq_file)
                seq_files.append(seq_file)

            # download annotations
            anno_url = homepage + '%s.zip' % seq_name
            anno_file = osp.join(root_dir, seq_name + '_anno.zip')
            if not osp.isfile(anno_file) or \
                md5(anno_file) != seq['annotations']['checksum']:
                ops.download(anno_url, anno_file)

            # unzip compressed files
            seq_dir = osp.join(root_dir, seq_name)
            if not osp.isdir(seq_dir) or len(os.listdir(seq_dir)) < 10:
                ops.sys_print('\nExtracting %s...' % seq_name)
                if not osp.isdir(seq_dir):
                    os.makedirs(seq_dir)
                for seq_file in seq_files:
                    ops.extract(seq_file, seq_dir)
                ops.extract(anno_file, seq_dir)

        # save list.txt
        list_file = osp.join(root_dir, 'list.txt')
        with open(list_file, 'w') as f:
            f.write('\n'.join(seq_names))

        return root_dir
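
Example #3 pairs every download with an MD5 comparison so that truncated or stale archives are re-fetched rather than silently reused. The standalone helper below restates that pattern outside the class; the function name, retry count, and the use of urllib are assumptions made for illustration.

import os
import os.path as osp
import hashlib
import urllib.request


def download_verified(url, filename, checksum, max_retries=3):
    # download url to filename, retrying until its MD5 digest matches checksum

    def md5(path):
        hash_md5 = hashlib.md5()
        with open(path, 'rb') as f:
            for chunk in iter(lambda: f.read(4096), b''):
                hash_md5.update(chunk)
        return hash_md5.hexdigest()

    for _ in range(max_retries):
        if not osp.isfile(filename):
            urllib.request.urlretrieve(url, filename)
        if md5(filename) == checksum:
            return filename
        os.remove(filename)  # discard the corrupt copy and try again
    raise IOError('checksum mismatch for %s after %d attempts' % (filename, max_retries))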