def stream():
    """Stream a ZIP archive of every file in the item, chunk by chunk.

    Each file is added under its path relative to the item, then the ZIP
    central-directory footer is yielded last.
    """
    # Renamed locals: `zip` and `file` previously shadowed Python builtins.
    archive = ziputil.ZipGenerator(item['name'])
    for path, fobj in self.model('item').fileList(item, subpath=False):
        for data in archive.addFile(fobj, path):
            yield data
    yield archive.footer()
def stream():
    """Stream a ZIP archive of every file in the folder, chunk by chunk.

    Files are listed with the requesting user's permissions and added under
    their folder-relative paths; the ZIP footer is yielded last.
    """
    # Renamed locals: `zip` and `file` previously shadowed Python builtins.
    archive = ziputil.ZipGenerator(folder['name'])
    for path, fobj in self.model('folder').fileList(
            folder, user=user, subpath=False):
        for data in archive.addFile(fobj, path):
            yield data
    yield archive.footer()
def stream():
    """Stream a dataset export ZIP: metadata, then optional media and detections."""
    archive = ziputil.ZipGenerator(dsFolder['name'])
    # The metadata file is always part of the export.
    for chunk in archive.addFile(makeMetajson, 'meta.json'):
        yield chunk
    if includeMedia:
        # Media items whose lower-cased name matches the media regex.
        media_items = Folder().childItems(
            mediaFolder,
            filters={"lowerName": {"$regex": mediaRegex}},
        )
        for item in media_items:
            for (path, file) in Item().fileList(item):
                for chunk in archive.addFile(file, path):
                    yield chunk
                break  # Media items should only have 1 valid file
    if includeDetections:
        # TODO Add back in dump to json
        # add CSV detections
        for chunk in archive.addFile(gen, "output_tracks.csv"):
            yield chunk
    yield archive.footer()
def stream():
    """Stream a ZIP archive of the resource's files, chunk by chunk.

    Uses ``subpath=True`` so entries are nested under the resource's own
    name inside the archive; the ZIP footer is yielded last.
    """
    # Renamed locals: `zip` and `file` previously shadowed Python builtins.
    archive = ziputil.ZipGenerator()
    for path, fobj in model.fileList(
            doc=resource, user=user, subpath=True):
        for data in archive.addFile(fobj, path):
            yield data
    yield archive.footer()
def stream():
    """Stream a ZIP archive of the collection's files, chunk by chunk.

    Files are listed with the current user's permissions and restricted by
    ``mimeFilter``; the ZIP footer is yielded last.
    """
    # Renamed locals: `zip` and `file` previously shadowed Python builtins.
    archive = ziputil.ZipGenerator(collection['name'])
    for path, fobj in self._model.fileList(
            collection, user=self.getCurrentUser(), subpath=False,
            mimeFilter=mimeFilter):
        for data in archive.addFile(fobj, path):
            yield data
    yield archive.footer()
def stream():
    """Stream a Tale export ZIP: folder files plus image/recipe metadata.

    Yields chunks of the archive; the ZIP footer comes last.
    """
    # Renamed local: `zip` previously shadowed the Python builtin.
    archive = ziputil.ZipGenerator(zip_name)
    # Add files from the Tale folder
    for path, f in self.model('folder').fileList(folder, user=user,
                                                 subpath=False):
        for data in archive.addFile(f, path):
            yield data
    # Temporary: Add Image metadata
    for data in archive.addFile(lambda: image.__str__(), 'image.txt'):
        yield data
    # Temporary: Add Recipe metadata
    for data in archive.addFile(lambda: recipe.__str__(), 'recipe.txt'):
        yield data
    # Temporary: Add a zip of the recipe archive
    # TODO: Grab proper filename from header
    # e.g. 'Content-Disposition': 'attachment; filename= \
    # jupyter-base-b45f9a575602e6038b4da6333f2c3e679ee01c58.tar.gz'
    for data in archive.addFile(req.iter_content, 'archive.tar.gz'):
        yield data
    yield archive.footer()
def _imagesZipGenerator(self, downloadFileName, images, include):
    """Generate chunks of a streaming ZIP archive for the given images.

    :param downloadFileName: base name for the ZIP archive.
    :param images: iterable of image documents to include.
    :param include: selects archive contents; the code branches on the
        values 'all', 'images', and 'metadata'.
    :yields: byte chunks of the ZIP stream, footer last.
    """
    # Cache dataset documents so each dataset is loaded only once.
    datasetCache = {}
    zipGenerator = ziputil.ZipGenerator(downloadFileName)
    for image in images:
        datasetId = image['meta']['datasetId']
        if datasetId not in datasetCache:
            datasetCache[datasetId] = Dataset().load(datasetId,
                                                     force=True, exc=True)
        dataset = datasetCache[datasetId]
        if include in {'all', 'images'}:
            # Stream the original image file under <dataset name>/<file name>.
            imageFile = Image().originalFile(image)
            imageFileGenerator = File().download(imageFile, headers=False)
            for data in zipGenerator.addFile(
                    imageFileGenerator,
                    path=os.path.join(dataset['name'], imageFile['name'])):
                yield data
        if include in {'all', 'metadata'}:
            # NOTE(review): this closure captures `image` late, but addFile
            # consumes it within the same loop iteration, so the capture is
            # safe here.
            def metadataGenerator():
                # TODO: Consider replacing this with Image().filter
                yield json.dumps({
                    '_id': str(image['_id']),
                    'name': image['name'],
                    'meta': {
                        'acquisition': image['meta']['acquisition'],
                        'clinical': image['meta']['clinical']
                    }
                })
            for data in zipGenerator.addFile(
                    metadataGenerator,
                    path=os.path.join(dataset['name'],
                                      '%s.json' % image['name'])):
                yield data
    # One LICENSE.txt / ATTRIBUTION.txt pair per dataset seen above.
    for dataset in six.viewvalues(datasetCache):
        licenseText = mail_utils.renderTemplate(
            'license_%s.mako' % dataset['license'])
        attributionText = mail_utils.renderTemplate(
            'attribution_%s.mako' % dataset['license'], {
                'work': dataset['name'],
                'author': dataset['attribution']
            })
        # NOTE(review): the lambdas are consumed by addFile within this
        # iteration, so the late-binding capture of the text locals is safe.
        for data in zipGenerator.addFile(
                lambda: [licenseText],
                path=os.path.join(dataset['name'], 'LICENSE.txt')):
            yield data
        for data in zipGenerator.addFile(
                lambda: [attributionText],
                path=os.path.join(dataset['name'], 'ATTRIBUTION.txt')):
            yield data
    yield zipGenerator.footer()
def stream():
    """Stream a ZIP archive of the item's files (flat, by file name).

    Downloads each child file without HTTP headers and adds it under its
    own name; the ZIP footer is yielded last.
    """
    # Renamed locals: `zip` and `file` previously shadowed Python builtins.
    archive = ziputil.ZipGenerator(item['name'])
    for fobj in self.model('item').childFiles(item=item, limit=0):
        download = self.model('file').download(fobj, headers=False)
        for data in archive.addFile(download, fobj['name']):
            yield data
    yield archive.footer()
def stream():
    """Stream the folder's files plus a generated tracks CSV as one ZIP."""
    archive = ziputil.ZipGenerator(folder['name'])
    for (path, file) in Folder().fileList(folder, user=user, subpath=False):
        for chunk in archive.addFile(file, path):
            yield chunk
    # Append the generated detections CSV after the folder contents.
    for chunk in archive.addFile(gen, "output_tracks.csv"):
        yield chunk
    yield archive.footer()
def stream():
    """Stream the folder's KPF annotations (geometry, types, activities) as a ZIP.

    Yields archive chunks; the ZIP footer comes last.
    """
    # Renamed local: `zip` previously shadowed the Python builtin. The three
    # copy-pasted addFile stanzas are collapsed into one data-driven loop;
    # entry order and file names are unchanged.
    archive = ziputil.ZipGenerator(folder['name'])
    kpf_parts = (
        (DetectionResource.generateKPFContent, '.geom.kpf'),
        (TypesResource.generateKPFContent, '.types.kpf'),
        (ActivitiesResource.generateKPFContent, '.activities.kpf'),
    )
    for generate, suffix in kpf_parts:
        for data in archive.addFile(generate(folder), folder['name'] + suffix):
            yield data
    yield archive.footer()
def stream():
    """Stream a ZIP archive of every requested resource's files.

    ``resources`` maps a model kind to a list of document ids; each document
    is loaded with READ access for ``user`` and its files are added with
    ``subpath=True`` so entries nest under the resource name.
    """
    # Renamed locals: `zip`, `id`, and `file` previously shadowed builtins.
    archive = ziputil.ZipGenerator()
    for kind in resources:
        model = ModelImporter.model(kind)
        for resource_id in resources[kind]:
            doc = model.load(id=resource_id, user=user, level=AccessType.READ)
            for path, fobj in model.fileList(
                    doc=doc, user=user, includeMetadata=includeMetadata,
                    subpath=True):
                for data in archive.addFile(fobj, path):
                    yield data
    yield archive.footer()
def stream():
    """Recursively walk ``path`` and stream each regular file into a ZIP."""
    def iter_files(directory):
        # Depth-first traversal yielding only regular files.
        for entry in directory.iterdir():
            if entry.is_file():
                yield entry
            elif entry.is_dir():
                yield from iter_files(entry)

    archive = ziputil.ZipGenerator(rootPath="")
    root = path.as_posix()
    for entry in iter_files(path):
        relative = os.path.relpath(entry.as_posix(), root)
        # addFile consumes the callable within this iteration, so capturing
        # the loop variable is safe.
        for chunk in archive.addFile(lambda: file_stream(entry), relative):
            yield chunk
    yield archive.footer()
def __init__(self, tale, user, algs=None, expand_folders=False):
    """Prepare a Tale export.

    :param tale: the Tale document to export.
    :param user: the user performing the export (used for access checks).
    :param algs: list of checksum algorithm names to track; defaults to
        ``["md5", "sha256"]`` when None.
    :param expand_folders: forwarded to ``Manifest``.
    """
    # Bug fix: a caller-supplied ``algs`` used to be silently ignored —
    # ``self.algs`` was only assigned when ``algs`` was None, so any other
    # value raised AttributeError at the state-initialization loop below.
    self.algs = algs if algs is not None else ["md5", "sha256"]
    self.tale = tale
    self.user = user
    self.image = Image().load(
        tale['imageId'],
        user=user,
        fields=['config', 'description', 'icon', 'iframe', 'name', 'tags'],
        level=AccessType.READ,
    )
    # The image id is dropped from the exported copy.
    self.image.pop('_id')
    self.workspace = Folder().load(tale['workspaceId'], user=user,
                                   level=AccessType.READ)
    self.manifest = Manifest(tale, user, expand_folders).manifest
    self.zip_generator = ziputil.ZipGenerator(str(tale['_id']))
    self.tale_license = WholeTaleLicense().license_from_spdx(
        tale.get('licenseSPDX', WholeTaleLicense.default_spdx()))
    # One running-checksum accumulator list per algorithm.
    self.state = {alg: [] for alg in self.algs}
def stream():
    """Stream a ZIP archive of the simulation's assets, chunk by chunk.

    The ZIP footer is yielded last.
    """
    # Renamed locals: `zip` and `file` previously shadowed Python builtins.
    archive = ziputil.ZipGenerator()
    for path, fobj in list_simulation_assets(user, simulation):
        for data in archive.addFile(fobj, path):
            yield data
    yield archive.footer()
def _imagesZipGenerator(self, downloadFileName, images, include):  # noqa C901
    """Generate chunks of a streaming ZIP archive for the given images.

    :param downloadFileName: base name for the ZIP archive.
    :param images: iterable of image documents to include.
    :param include: selects archive contents; the code branches on the
        values 'all', 'images', and 'metadata'.
    :yields: byte chunks of the ZIP stream, footer last.
    """
    # Cache dataset documents so each dataset is loaded only once.
    datasetCache = {}
    zipGenerator = ziputil.ZipGenerator(downloadFileName)
    if include in {'all', 'metadata'}:
        # Fixed CSV column order: _id, name, then the sorted clinical and
        # acquisition metadata fields below.
        metadataFieldnames = ['_id', 'name']
        for k in sorted([
            'age_approx', 'anatom_site_general', 'benign_malignant',
            'clin_size_long_diam_mm', 'diagnosis', 'diagnosis_confirm_type',
            'family_hx_mm', 'lesion_id', 'mel_class', 'mel_mitotic_index',
            'mel_thick_mm', 'mel_type', 'mel_ulcer', 'melanocytic',
            'nevus_type', 'patient_id', 'personal_hx_mm', 'sex',
        ]):
            metadataFieldnames.append(f'meta.clinical.{k}')
        for k in sorted([
            'acquisition_day', 'blurry', 'color_tint', 'dermoscopic_type',
            'hairy', 'image_type', 'marker_pen', 'pixelsX', 'pixelsY',
        ]):
            metadataFieldnames.append(f'meta.acquisition.{k}')
        # The CSV is accumulated in memory while images stream, then added
        # to the archive after the image loop.
        csvStream = io.StringIO()
        csvWriter = csv.DictWriter(csvStream, metadataFieldnames)
        csvWriter.writeheader()
    for image in images:
        datasetId = image['meta']['datasetId']
        if datasetId not in datasetCache:
            datasetCache[datasetId] = Dataset().load(datasetId,
                                                     force=True, exc=True)
        dataset = datasetCache[datasetId]
        if include in {'all', 'images'}:
            # Stream the original image file under <dataset name>/<file name>.
            imageFile = Image().originalFile(image)
            imageFileGenerator = File().download(imageFile, headers=False)
            for data in zipGenerator.addFile(
                    imageFileGenerator,
                    path=os.path.join(dataset['name'], imageFile['name'])):
                yield data
        if include in {'all', 'metadata'}:
            # Flatten nested metadata into the dotted CSV column names.
            flattenedMetadata = {
                '_id': str(image['_id']),
                'name': image['name']
            }
            for metaType in ['acquisition', 'clinical']:
                for k, v in image['meta'][metaType].items():
                    flattenedMetadata[f'meta.{metaType}.{k}'] = v
            csvWriter.writerow(flattenedMetadata)
    if include in {'all', 'metadata'}:
        csvStream.seek(0)
        csvStreamChunks = iter(csvStream.readline, '')
        for data in zipGenerator.addFile(
                # Girder expects a callable function instead of an actual generator
                lambda: csvStreamChunks, path='metadata.csv'):
            yield data
    # One LICENSE.txt / ATTRIBUTION.txt pair per dataset seen above.
    for dataset in datasetCache.values():
        licenseText = mail_utils.renderTemplate(
            f'license_{dataset["license"]}.mako')
        attributionText = mail_utils.renderTemplate(
            f'attribution_{dataset["license"]}.mako', {
                'work': dataset['name'],
                'author': dataset['attribution']
            })
        # NOTE(review): the lambdas are consumed by addFile within this
        # iteration, so the late-binding capture of the text locals is safe.
        for data in zipGenerator.addFile(
                lambda: [licenseText],
                path=os.path.join(dataset['name'], 'LICENSE.txt')):
            yield data
        for data in zipGenerator.addFile(
                lambda: [attributionText],
                path=os.path.join(dataset['name'], 'ATTRIBUTION.txt')):
            yield data
    yield zipGenerator.footer()