예제 #1
0
    def DownloadAndUnpack(self, archive_dir_path, unpack_dir_path,
                          metadata_dict):
        """Fetch the configured VIPER archives and extract them.

        For each archive dictionary named in ``archive_attr_names`` (looked
        up on ``self``), download any zip that is not yet present under
        ``archive_dir_path`` from the S3 mirror, then unzip every archive
        found on disk into ``unpack_dir_path``.
        """
        archive_attr_names = (
            'archives_imgjpg_train',
            'archives_flow_train',
            'archives_imgjpg_test',
        )

        for attr_name in archive_attr_names:
            for archive_id, gdrive_id in getattr(self, attr_name).items():
                # attr_name[9:] strips the leading 'archives_' prefix.
                zip_name = attr_name[9:] + '_' + archive_id + '.zip'
                archive_path = os.path.join(archive_dir_path, zip_name)
                if not os.path.exists(archive_path):
                    # Archives are mirrored on S3 under their old GDrive ids.
                    url = 'https://viper-dataset.s3.amazonaws.com/' + gdrive_id
                    download_file_with_resume(url,
                                              archive_path,
                                              try_resume_bytes=-1,
                                              total_sz=None)
                if os.path.exists(archive_path):
                    print(archive_path)
                    UnzipFile(archive_path, unpack_dir_path, overwrite=False)
예제 #2
0
def get_release_scans(release_file):
    """Download a release manifest and return the scan ids it lists.

    Parameters
    ----------
    release_file : str
        URL of the release manifest (one scan id per line).

    Returns
    -------
    list[str]
        Scan ids with trailing newlines stripped; very short lines
        (len <= 3, e.g. blanks) are skipped as noise.
    """
    # mkstemp instead of the deprecated, race-prone tempfile.mktemp: the
    # file is created atomically, so the name cannot be hijacked between
    # generation and use.
    fd, dstname = tempfile.mkstemp(suffix='_rvc_scannet')
    os.close(fd)  # download_file_with_resume (re)opens the path itself
    try:
        download_file_with_resume(release_file, dstname)
        with open(dstname, 'r', newline='\n') as release_scan_fo:
            scans = [
                s.rstrip('\n') for s in release_scan_fo if len(s) > 3
            ]
    finally:
        # The original leaked the temp file; always remove it.
        try:
            os.remove(dstname)
        except OSError:
            pass
    return scans
예제 #3
0
def download_file(url, out_file):
    """Download ``url`` to ``out_file``, retrying transient failures.

    Some servers reset the HTTP connection mid-transfer (typically at
    around ~828MB), so the resumable download is retried a few times
    with a short back-off.

    Raises
    ------
    Exception
        The last download error, if every retry attempt fails.
    """
    conn_reset_retry = 3
    for attempt in range(conn_reset_retry):
        try:
            download_file_with_resume(url, out_file, try_resume_bytes=0,
                                      total_sz=None, params={})
        except Exception:
            # Was a bare `except:`, which also swallowed KeyboardInterrupt/
            # SystemExit and silently hid a final failure from the caller.
            if attempt == conn_reset_retry - 1:
                raise  # out of retries: surface the failure
            time.sleep(1.2)  # brief back-off to prevent connection overload
            continue
        break
예제 #4
0
    def DownloadAndUnpack(self, archive_dir_path, unpack_dir_path, metadata_dict):
        """Download the VIPER archives from the S3 mirror and unpack them.

        For each archive dictionary named below (looked up on ``self``),
        download any zip not yet present in ``archive_dir_path``, then
        unzip every archive found on disk into ``unpack_dir_path``.
        """
        archives_to_download = ['archives_imgjpg_train', 'archives_inst_train',
                                'archives_cls_train', 'archives_imgjpg_val',
                                'archives_inst_val', 'archives_cls_val',
                                'archives_imgjpg_test']

        for archive in archives_to_download:
            archive_dict = getattr(self, archive)
            for archive_id, gdrive_id in archive_dict.items():
                # archive[9:] strips the leading 'archives_' prefix.
                archive_path = os.path.join(
                    archive_dir_path, archive[9:] + '_' + archive_id + '.zip')
                if not os.path.exists(archive_path):
                    url = 'https://viper-dataset.s3.amazonaws.com/' + gdrive_id
                    download_file_with_resume(url, archive_path,
                                              try_resume_bytes=-1,
                                              total_sz=None)
                    # original viper website (GDrive):
                    # download_file_from_google_drive(gdrive_id, archive_path,
                    #     try_resume_bytes=-1, total_sz=None)
                # BUG FIX: the unzip was nested inside the download branch, so
                # an archive already present on disk (e.g. rerun after an
                # interrupted unpack) was never extracted. Unzip any archive
                # that exists, freshly downloaded or not.
                if os.path.exists(archive_path):
                    UnzipFile(archive_path, unpack_dir_path)
예제 #5
0
def DownloadFile(url, dest_dir_path):
    """Download *url* into *dest_dir_path* and return the local file path.

    The destination file name is the last path segment of the URL.
    """
    target_path = os.path.join(dest_dir_path, url.split('/')[-1])
    download_file_with_resume(url, target_path, try_resume_bytes=0)
    return target_path