Example #1
def get_chunk(part_id_to_get, start, end):
    # Relies on names from the enclosing scope: src_filename, project,
    # kwargs, FILE_REQUEST_TIMEOUT, dxdatabase, and do_debug.
    do_debug("dxdatabase_functions.py get_chunk - start {}, end {}, part id {}"
             .format(start, end, part_id_to_get))
    url, headers = dxdatabase.get_download_url(src_filename=src_filename,
                                               project=project,
                                               **kwargs)
    # No sub-ranges for database file downloads
    sub_range = False
    data_url = dxpy._dxhttp_read_range(url, headers, start, end,
                                       FILE_REQUEST_TIMEOUT, sub_range)
    do_debug("dxdatabase_functions.py get_chunk - data_url = {}".format(data_url))
    # 'data_url' is the S3 URL, so read again, just like in DNAxFileSystem
    data = dxpy._dxhttp_read_range(data_url, headers, start, end,
                                   FILE_REQUEST_TIMEOUT, sub_range)
    return part_id_to_get, data
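Both reads above go through dxpy._dxhttp_read_range: the first request to the download URL yields the S3 URL, and the second request to that URL returns the actual bytes. As a rough mental model, the helper can be thought of as an HTTP GET that adds a Range header only when sub_range is True; the sketch below is an illustrative stand-in, not dxpy's actual implementation (the name read_range and the urllib-based body are assumptions):

import urllib.request

def read_range(url, headers, start, end, timeout, sub_range):
    # Illustrative stand-in for dxpy._dxhttp_read_range (assumed behavior):
    # GET the URL, asking for only bytes start..end when sub_range is True.
    req = urllib.request.Request(url, headers=dict(headers))
    if sub_range:
        req.add_header("Range", "bytes={}-{}".format(start, end))
    with urllib.request.urlopen(req, timeout=timeout) as resp:
        return resp.read()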
Example #2
def get_chunk(part_id_to_get, start, end):
    # Relies on names from the enclosing scope: dxfile, project, kwargs,
    # parts, and FILE_REQUEST_TIMEOUT.
    url, headers = dxfile.get_download_url(project=project, **kwargs)
    # If we're fetching the whole object in one shot, avoid setting the Range
    # header to take advantage of gzip transfer compression.
    sub_range = False
    if len(parts) > 1 or (start > 0) or (end - start + 1 < parts[part_id_to_get]["size"]):
        sub_range = True
    data = dxpy._dxhttp_read_range(url, headers, start, end, FILE_REQUEST_TIMEOUT, sub_range)
    return part_id_to_get, data
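The interesting part of this variant is the condition that decides whether to request a sub-range: a Range header is needed if the file has more than one part, the read does not start at byte 0, or the requested span is shorter than the part itself; otherwise the whole object is fetched in one shot so the server can apply gzip transfer compression. A small sketch isolating that predicate (the function name needs_range_header and the sample parts mapping are hypothetical, for illustration only):

def needs_range_header(parts, part_id, start, end):
    # Mirrors the condition in get_chunk above: skip the Range header
    # only when the read covers an entire single-part file.
    return len(parts) > 1 or start > 0 or (end - start + 1) < parts[part_id]["size"]

# Whole single-part file in one shot -> no Range header (gzip-friendly).
parts = {"1": {"size": 1024}}
assert needs_range_header(parts, "1", 0, 1023) is False
# Partial read of the same part -> Range header required.
assert needs_range_header(parts, "1", 0, 511) is True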