Example #1
def download_coco(path, overwrite=False):
    _DOWNLOAD_URLS = [
        ('http://images.cocodataset.org/zips/train2017.zip',
         '10ad623668ab00c62c096f0ed636d6aff41faca5'),
        ('http://images.cocodataset.org/zips/val2017.zip',
         '4950dc9d00dbe1c933ee0170f5797584351d2a41'),
        ('http://images.cocodataset.org/annotations/annotations_trainval2017.zip',
         '8551ee4bb5860311e79dace7e79cb91e432e78b3'),
        #('http://images.cocodataset.org/annotations/stuff_annotations_trainval2017.zip',
        # '46cdcf715b6b4f67e980b529534e79c2edffe084'),
        #('http://images.cocodataset.org/zips/test2017.zip',
        # '99813c02442f3c112d491ea6f30cecf421d0e6b3'),
        ('https://hangzh.s3.amazonaws.com/encoding/data/coco/train_ids.pth',
         '12cd266f97c8d9ea86e15a11f11bcb5faba700b6'),
        ('https://hangzh.s3.amazonaws.com/encoding/data/coco/val_ids.pth',
         '4ce037ac33cbf3712fd93280a1c5e92dae3136bb'),
    ]
    mkdir(path)
    for url, checksum in _DOWNLOAD_URLS:
        filename = download(url,
                            path=path,
                            overwrite=overwrite,
                            sha1_hash=checksum)
        # extract
        if os.path.splitext(filename)[1] == '.zip':
            with zipfile.ZipFile(filename) as zf:
                zf.extractall(path=path)
        else:
            shutil.move(
                filename,
                os.path.join(path,
                             'annotations/' + os.path.basename(filename)))
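All of these snippets rely on a few small helpers (mkdir, download, check_sha1) that come from the surrounding project (GluonCV / PyTorch-Encoding style utilities) and are not shown here. The following is only a minimal, standard-library sketch of what they are assumed to do, not the project's actual implementation:

import hashlib
import os
import urllib.request


def mkdir(path):
    """Create a directory (and any parents) if it does not already exist."""
    os.makedirs(path, exist_ok=True)


def check_sha1(filename, sha1_hash):
    """Return True if the SHA-1 digest of filename matches sha1_hash."""
    sha1 = hashlib.sha1()
    with open(filename, 'rb') as f:
        for chunk in iter(lambda: f.read(1 << 20), b''):
            sha1.update(chunk)
    return sha1.hexdigest() == sha1_hash


def download(url, path, overwrite=False, sha1_hash=None):
    """Download url into path (a directory or a target file name) and return the local file name."""
    if os.path.isdir(path):
        fname = os.path.join(path, url.split('/')[-1])
    else:
        fname = path
    if overwrite or not os.path.exists(fname) or \
            (sha1_hash and not check_sha1(fname, sha1_hash)):
        print('Downloading ' + url)
        urllib.request.urlretrieve(url, fname)
        if sha1_hash and not check_sha1(fname, sha1_hash):
            raise UserWarning('Checksum mismatch for downloaded file: ' + fname)
    return fname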
Example #2
def download_ade(path, overwrite=False):
    _AUG_DOWNLOAD_URLS = [
        ('http://host.robots.ox.ac.uk/pascal/VOC/voc2010/VOCtrainval_03-May-2010.tar',
         'bf9985e9f2b064752bf6bd654d89f017c76c395a'),
        ('https://codalabuser.blob.core.windows.net/public/trainval_merged.json',
         '169325d9f7e9047537fedca7b04de4dddf10b881'),
        # You can skip these if the network is slow; the dataset will generate them automatically.
        ('https://hangzh.s3.amazonaws.com/encoding/data/pcontext/train.pth',
         '4bfb49e8c1cefe352df876c9b5434e655c9c1d07'),
        ('https://hangzh.s3.amazonaws.com/encoding/data/pcontext/val.pth',
         'ebedc94247ec616c57b9a2df15091784826a7b0c'),
    ]
    download_dir = os.path.join(path, 'downloads')
    mkdir(download_dir)
    for url, checksum in _AUG_DOWNLOAD_URLS:
        filename = download(url,
                            path=download_dir,
                            overwrite=overwrite,
                            sha1_hash=checksum)
        # extract
        if os.path.splitext(filename)[1] == '.tar':
            with tarfile.open(filename) as tar:
                tar.extractall(path=path)
        else:
            shutil.move(
                filename,
                os.path.join(path, 'VOCdevkit/VOC2010/' +
                             os.path.basename(filename)))
Example #3
def download_aug(path, overwrite=False):
    _AUG_DOWNLOAD_URLS = [(
        'http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/semantic_contours/benchmark.tgz',
        '7129e0a480c2d6afb02b517bb18ac54283bfaa35')]
    download_dir = os.path.join(path, 'downloads')
    mkdir(download_dir)
    for url, checksum in _AUG_DOWNLOAD_URLS:
        filename = download(url,
                            path=download_dir,
                            overwrite=overwrite,
                            sha1_hash=checksum)
        # extract
        with tarfile.open(filename) as tar:
            tar.extractall(path=path)
            shutil.move(os.path.join(path, 'benchmark_RELEASE'),
                        os.path.join(path, 'VOCaug'))
            filenames = ['VOCaug/dataset/train.txt', 'VOCaug/dataset/val.txt']
            # generate trainval.txt
            with open(os.path.join(path, 'VOCaug/dataset/trainval.txt'),
                      'w') as outfile:
                for fname in filenames:
                    fname = os.path.join(path, fname)
                    with open(fname) as infile:
                        for line in infile:
                            outfile.write(line)
def build_rec_process(img_dir, train=False, num_thread=1):
    rec_dir = os.path.abspath(os.path.join(img_dir, '../rec'))
    mkdir(rec_dir)
    prefix = 'train' if train else 'val'
    print('Building ImageRecord file for ' + prefix + ' ...')
    to_path = rec_dir

    # download lst file and im2rec script
    script_path = os.path.join(rec_dir, 'im2rec.py')
    script_url = 'https://raw.githubusercontent.com/apache/incubator-encoding/master/tools/im2rec.py'
    download(script_url, script_path)

    lst_path = os.path.join(rec_dir, prefix + '.lst')
    lst_url = 'http://data.encoding.io/models/imagenet/resnet/' + prefix + '.lst'
    download(lst_url, lst_path)

    # execution
    import sys
    cmd = [
        sys.executable, script_path, rec_dir, img_dir, '--recursive',
        '--pass-through', '--pack-label', '--num-thread',
        str(num_thread)
    ]
    subprocess.call(cmd)
    os.remove(script_path)
    os.remove(lst_path)
    print('ImageRecord file for ' + prefix + ' has been built!')
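build_rec_process above shells out to MXNet's im2rec.py tool to pack the extracted images into a .rec file in a rec/ directory next to the image folder. A hypothetical usage for a standard ImageNet layout (the paths below are only an illustration, not prescribed by the script) could be:

import os

# Hypothetical layout: the images are assumed to be already extracted under these directories.
train_dir = os.path.expanduser('~/.encoding/data/imagenet/train')
val_dir = os.path.expanduser('~/.encoding/data/imagenet/val')

# Expected to produce rec/train.rec and rec/val.rec next to the image folders.
build_rec_process(train_dir, train=True, num_thread=8)
build_rec_process(val_dir, train=False, num_thread=8)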
Example #5
def download_minc(path, overwrite=False):
    _AUG_DOWNLOAD_URLS = [
        ('http://opensurfaces.cs.cornell.edu/static/minc/minc-2500.tar.gz', 'bcccbb3b1ab396ef540f024a5ba23eff54f7fe31')]
    download_dir = os.path.join(path, 'downloads')
    mkdir(download_dir)
    for url, checksum in _AUG_DOWNLOAD_URLS:
        filename = download(url, path=download_dir, overwrite=overwrite, sha1_hash=checksum)
        # extract
        with tarfile.open(filename) as tar:
            tar.extractall(path=path)
def download_voc(path, overwrite=False):
    _DOWNLOAD_URLS = [
        ('http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar',
         '4e443f8a2eca6b1dac8a6c57641b67dd40621a49')]
    download_dir = os.path.join(path, 'downloads')
    mkdir(download_dir)
    for url, checksum in _DOWNLOAD_URLS:
        filename = download(url, path=download_dir, overwrite=overwrite, sha1_hash=checksum)
        # extract
        with tarfile.open(filename) as tar:
            tar.extractall(path=path)
def download_ade(path, overwrite=False):
    _AUG_DOWNLOAD_URLS = [
        ('http://data.csail.mit.edu/places/ADEchallenge/ADEChallengeData2016.zip', '219e1696abb36c8ba3a3afe7fb2f4b4606a897c7'),
        ('http://data.csail.mit.edu/places/ADEchallenge/release_test.zip', 'e05747892219d10e9243933371a497e905a4860c'),]
    download_dir = os.path.join(path, 'downloads')
    mkdir(download_dir)
    for url, checksum in _AUG_DOWNLOAD_URLS:
        filename = download(url, path=download_dir, overwrite=overwrite, sha1_hash=checksum)
        # extract
        with zipfile.ZipFile(filename,"r") as zip_ref:
            zip_ref.extractall(path=path)
Example #8
def extract_val(tar_fname, target_dir, with_rec=False, num_thread=1):
    mkdir(target_dir)
    print('Extracting ' + tar_fname)
    with tarfile.open(tar_fname) as tar:
        tar.extractall(target_dir)
    # build rec file before images are moved into subfolders
    if with_rec:
        build_rec_process(target_dir, False, num_thread)
    # move images to proper subfolders
    subprocess.call([
        "wget -qO- https://raw.githubusercontent.com/soumith/imagenetloader.torch/master/valprep.sh | bash"
    ],
                    cwd=target_dir,
                    shell=True)
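valprep.sh is the widely used helper script that sorts the flat ImageNet validation JPEGs into one sub-folder per class, which is why extract_val simply untars the archive and then pipes the script to bash. A hypothetical call, assuming the standard ILSVRC2012_img_val.tar archive name and an illustrative target path, might be:

import os

# Both the tar file location and the target directory are assumptions for illustration.
val_tar = os.path.expanduser('~/.encoding/data/downloads/ILSVRC2012_img_val.tar')
val_dir = os.path.expanduser('~/.encoding/data/imagenet/val')
extract_val(val_tar, val_dir, with_rec=False, num_thread=1)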
Example #9
def download_city(path, overwrite=False):
    _CITY_DOWNLOAD_URLS = [('gtFine_trainvaltest.zip',
                            '99f532cb1af174f5fcc4c5bc8feea8c66246ddbc'),
                           ('leftImg8bit_trainvaltest.zip',
                            '2c0b77ce9933cc635adda307fbba5566f5d9d404')]
    download_dir = os.path.join(path, 'downloads')
    mkdir(download_dir)
    # NOTE: Cityscapes requires registration, so the zip files are expected to have
    # been downloaded manually into the current working directory beforehand.
    for filename, checksum in _CITY_DOWNLOAD_URLS:
        if not check_sha1(filename, checksum):
            raise UserWarning('File {} is downloaded but the content hash does not match. ' \
                              'The repo may be outdated or download may be incomplete. ' \
                              'If the "repo_url" is overridden, consider switching to ' \
                              'the default repo.'.format(filename))
        # extract
        with zipfile.ZipFile(filename, "r") as zip_ref:
            zip_ref.extractall(path=path)
        print("Extracted", filename)
def main():
    os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning'
    logging.basicConfig(level=logging.DEBUG)

    args = get_args()
    mkdir(args.output_folder)

    config_files = get_config_files(args.config_file_folder)
    print(f"len(config_files): {len(config_files)}")

    ngpus = torch.cuda.device_count()
    gpu_manager = GPUManager(ngpus)
    tasks = ([args, gpu_manager, config_file]
             for config_file in config_files)

    p = MyPool(processes=ngpus)
    p.map(train_network_map, tasks)
Example #11
def extract_train(tar_fname, target_dir, with_rec=False, num_thread=1):
    mkdir(target_dir)
    with tarfile.open(tar_fname) as tar:
        print("Extracting " + tar_fname + "...")
        # extract each class one-by-one
        pbar = tqdm(total=len(tar.getnames()))
        for class_tar in tar:
            pbar.set_description('Extract ' + class_tar.name)
            tar.extract(class_tar, target_dir)
            class_fname = os.path.join(target_dir, class_tar.name)
            class_dir = os.path.splitext(class_fname)[0]
            os.mkdir(class_dir)
            with tarfile.open(class_fname) as f:
                f.extractall(class_dir)
            os.remove(class_fname)
            pbar.update(1)
        pbar.close()
def download_coco(path, overwrite=False):
    _DOWNLOAD_URLS = [
        ('http://images.cocodataset.org/zips/train2017.zip',
         '10ad623668ab00c62c096f0ed636d6aff41faca5'),
        ('http://images.cocodataset.org/annotations/annotations_trainval2017.zip',
         '8551ee4bb5860311e79dace7e79cb91e432e78b3'),
        ('http://images.cocodataset.org/zips/val2017.zip',
         '4950dc9d00dbe1c933ee0170f5797584351d2a41')
    ]
    mkdir(path)
    for url, checksum in _DOWNLOAD_URLS:
        filename = download(url,
                            path=path,
                            overwrite=overwrite,
                            sha1_hash=checksum)
        # extract
        with zipfile.ZipFile(filename) as zf:
            zf.extractall(path=path)
Example #13
def download_coco(path, overwrite=False):
    _DOWNLOAD_URLS = [
        ('http://images.cocodataset.org/zips/train2017.zip',
         '10ad623668ab00c62c096f0ed636d6aff41faca5'),
        ('http://images.cocodataset.org/annotations/annotations_trainval2017.zip',
         '8551ee4bb5860311e79dace7e79cb91e432e78b3'),
        ('http://images.cocodataset.org/zips/val2017.zip',
         '4950dc9d00dbe1c933ee0170f5797584351d2a41'),
        #('http://images.cocodataset.org/annotations/stuff_annotations_trainval2017.zip',
        # '46cdcf715b6b4f67e980b529534e79c2edffe084'),
        #('http://images.cocodataset.org/zips/test2017.zip',
        # '99813c02442f3c112d491ea6f30cecf421d0e6b3'),
    ]
    mkdir(path)
    for url, checksum in _DOWNLOAD_URLS:
        filename = download(url, path=path, overwrite=overwrite, sha1_hash=checksum)
        # extract
        with zipfile.ZipFile(filename) as zf:
            zf.extractall(path=path)
Example #14
def download_coco(path, overwrite=False):
    download_dir = os.path.join(path, 'downloads')
    # _DOWNLOAD_URLS = [
    #     ('http://images.cocodataset.org/zips/train2017.zip',
    #      '10ad623668ab00c62c096f0ed636d6aff41faca5'),
    #     ('http://images.cocodataset.org/annotations/annotations_trainval2017.zip',
    #      '8551ee4bb5860311e79dace7e79cb91e432e78b3'),
    #     ('http://images.cocodataset.org/zips/val2017.zip',
    #      '4950dc9d00dbe1c933ee0170f5797584351d2a41'),
    #     ('http://images.cocodataset.org/annotations/stuff_annotations_trainval2017.zip',
    #      'e7aa0f7515c07e23873a9f71d9095b06bcea3e12'),
    #     ('http://images.cocodataset.org/zips/test2017.zip',
    #      '99813c02442f3c112d491ea6f30cecf421d0e6b3'),
    # ]
    # mkdir(download_dir)
    # for url, checksum in _DOWNLOAD_URLS:
    #     filename = download(url, path=download_dir, overwrite=overwrite, sha1_hash=checksum)
    #     # extract
    #     with zipfile.ZipFile(filename) as zf:
    #         zf.extractall(path=path)
    _DOWNLOAD_URLS = [
        'http://images.cocodataset.org/zips/train2017.zip',
        'http://images.cocodataset.org/annotations/annotations_trainval2017.zip',
        'http://images.cocodataset.org/zips/val2017.zip',
        'http://images.cocodataset.org/annotations/stuff_annotations_trainval2017.zip',
        'http://images.cocodataset.org/zips/test2017.zip',
    ]
    mkdir(download_dir)
    for url in _DOWNLOAD_URLS:
        filename = download(url,
                            path=download_dir,
                            overwrite=overwrite,
                            sha1_hash=None)
        # extract
        with zipfile.ZipFile(filename) as zf:
            zf.extractall(path=path)
def download_ade(path, overwrite=False):
    _AUG_DOWNLOAD_URLS = [
        ('http://host.robots.ox.ac.uk/pascal/VOC/voc2010/VOCtrainval_03-May-2010.tar',
         'bf9985e9f2b064752bf6bd654d89f017c76c395a'),
        ('https://codalabuser.blob.core.windows.net/public/trainval_merged.json',
         '169325d9f7e9047537fedca7b04de4dddf10b881')
    ]
    download_dir = os.path.join(path, 'downloads')
    mkdir(download_dir)
    for url, checksum in _AUG_DOWNLOAD_URLS:
        filename = download(url,
                            path=download_dir,
                            overwrite=overwrite,
                            sha1_hash=checksum)
        # extract
        if os.path.splitext(filename)[1] == '.tar':
            with tarfile.open(filename) as tar:
                tar.extractall(path=path)
        else:
            shutil.move(
                filename,
                os.path.join(path, 'VOCdevkit/VOC2010/' +
                             os.path.basename(filename)))
Example #16
                filename,
                os.path.join(path, 'VOCdevkit/VOC2010/' +
                             os.path.basename(filename)))


def install_pcontext_api():
    repo_url = "https://github.com/zhanghang1989/detail-api"
    os.system("git clone " + repo_url)
    os.system("cd detail-api/PythonAPI/ && python setup.py install")
    shutil.rmtree('detail-api')
    try:
        import detail
    except Exception:
            print(
                "Installing PASCAL Context API failed; please install it manually from %s"
                % (repo_url))


if __name__ == '__main__':
    args = parse_args()
    mkdir(
        os.path.expanduser('/ifs/home/caiyideng_data/lcz/FastFCN-master/data'))
    if args.download_dir is not None:
        if os.path.isdir(_TARGET_DIR):
            os.remove(_TARGET_DIR)
        # make symlink
        os.symlink(args.download_dir, _TARGET_DIR)
    else:
        download_ade(_TARGET_DIR, overwrite=False)
    install_pcontext_api()
Example #17
        ('gtFine_trainvaltest.zip', '99f532cb1af174f5fcc4c5bc8feea8c66246ddbc'),
        ('leftImg8bit_trainvaltest.zip',
         '2c0b77ce9933cc635adda307fbba5566f5d9d404')
    ]
    download_dir = os.path.join(path, 'downloads')
    mkdir(download_dir)
    for filename, checksum in _CITY_DOWNLOAD_URLS:
        if not check_sha1(filename, checksum):
            raise UserWarning('File {} is downloaded but the content hash does not match. ' \
                              'The repo may be outdated or download may be incomplete. ' \
                              'If the "repo_url" is overridden, consider switching to ' \
                              'the default repo.'.format(filename))
        # extract
        with zipfile.ZipFile(filename, "r") as zip_ref:
            zip_ref.extractall(path=path)
        print("Extracted", filename)


if __name__ == '__main__':
    args = parse_args()
    mkdir(os.path.expanduser('~/.encoding/data'))
    mkdir(os.path.expanduser('~/.encoding/data/cityscapes'))
    if args.download_dir is not None:
        if os.path.isdir(_TARGET_DIR):
            os.remove(_TARGET_DIR)
        # make symlink
        os.symlink(args.download_dir, _TARGET_DIR)
    else:
        download_city(_TARGET_DIR, overwrite=False)
Example #18
            shutil.move(
                filename,
                os.path.join(path, 'VOCdevkit/VOC2010/' +
                             os.path.basename(filename)))


def install_pcontext_api():
    repo_url = "https://github.com/zhanghang1989/detail-api"
    os.system("git clone " + repo_url)
    os.system("cd detail-api/PythonAPI/ && python setup.py install")
    shutil.rmtree('detail-api')
    try:
        import detail
    except Exception:
            print(
                "Installing PASCAL Context API failed; please install it manually from %s"
                % (repo_url))


if __name__ == '__main__':
    args = parse_args()
    mkdir(os.path.expanduser('~/.dataset/data'))
    if args.download_dir is not None:
        if os.path.isdir(_TARGET_DIR):
            os.remove(_TARGET_DIR)
        # make symlink
        os.symlink(args.download_dir, _TARGET_DIR)
    else:
        download_ade(_TARGET_DIR, overwrite=False)
    install_pcontext_api()
Example #19

def download_ade(path, overwrite=False):
    _AUG_DOWNLOAD_URLS = [
        ('http://data.csail.mit.edu/places/ADEchallenge/ADEChallengeData2016.zip',
         '219e1696abb36c8ba3a3afe7fb2f4b4606a897c7'),
        ('http://data.csail.mit.edu/places/ADEchallenge/release_test.zip',
         'e05747892219d10e9243933371a497e905a4860c'),
    ]
    download_dir = os.path.join(path, 'downloads')
    mkdir(download_dir)
    for url, checksum in _AUG_DOWNLOAD_URLS:
        filename = download(url,
                            path=download_dir,
                            overwrite=overwrite,
                            sha1_hash=checksum)
        # extract
        with zipfile.ZipFile(filename, "r") as zip_ref:
            zip_ref.extractall(path=path)


if __name__ == '__main__':
    args = parse_args()
    mkdir(os.path.expanduser('~/.attention/data'))
    if args.download_dir is not None:
        if os.path.isdir(_TARGET_DIR):
            os.remove(_TARGET_DIR)
        # make symlink
        os.symlink(args.download_dir, _TARGET_DIR)
    else:
        download_ade(_TARGET_DIR, overwrite=False)
Example #20
        else:
            shutil.move(
                filename,
                os.path.join(path,
                             'annotations/' + os.path.basename(filename)))


def install_coco_api():
    repo_url = "https://github.com/cocodataset/cocoapi"
    os.system("git clone " + repo_url)
    os.system("cd cocoapi/PythonAPI/ && python setup.py install")
    shutil.rmtree('cocoapi')
    try:
        import pycocotools
    except Exception:
        print("Installing COCO API failed, please install it manually %s" %
              (repo_url))


if __name__ == '__main__':
    args = parse_args()
    mkdir(os.path.expanduser('~/.encoding/data'))
    if args.download_dir is not None:
        if os.path.isdir(_TARGET_DIR):
            os.remove(_TARGET_DIR)
        # make symlink
        os.symlink(args.download_dir, _TARGET_DIR)
    else:
        download_coco(_TARGET_DIR, overwrite=False)
    install_coco_api()
Example #21
        ('gtFine_trainvaltest.zip', '99f532cb1af174f5fcc4c5bc8feea8c66246ddbc'),
        ('leftImg8bit_trainvaltest.zip',
         '2c0b77ce9933cc635adda307fbba5566f5d9d404')
    ]
    download_dir = os.path.join(path, 'downloads')
    mkdir(download_dir)
    for filename, checksum in _CITY_DOWNLOAD_URLS:
        if not check_sha1(filename, checksum):
            raise UserWarning('File {} is downloaded but the content hash does not match. ' \
                              'The repo may be outdated or download may be incomplete. ' \
                              'If the "repo_url" is overridden, consider switching to ' \
                              'the default repo.'.format(filename))
        # extract
        with zipfile.ZipFile(filename, "r") as zip_ref:
            zip_ref.extractall(path=path)
        print("Extracted", filename)


if __name__ == '__main__':
    args = parse_args()
    mkdir(os.path.expanduser('~/.attention/data'))
    mkdir(os.path.expanduser('~/.attention/data/cityscapes'))
    if args.download_dir is not None:
        if os.path.isdir(_TARGET_DIR):
            os.remove(_TARGET_DIR)
        # make symlink
        os.symlink(args.download_dir, _TARGET_DIR)
    else:
        download_city(_TARGET_DIR, overwrite=False)