def saveAsBinarySeries(self, outputDirPath, overwrite=False):
    """Writes out Series-formatted data.

    Subclasses are *not* expected to override this method.

    Parameters
    ----------
    outputDirPath : string path or URI to directory to be created
        Output files will be written underneath outputDirPath. This directory must not yet exist
        (unless overwrite is True), and must be no more than one level beneath an existing directory.
        It will be created as a result of this call.

    overwrite : bool
        If true, outputDirPath and all its contents will be deleted and recreated as part of this call.
    """
    from thunder.rdds.fileio.writers import getParallelWriterForPath
    from thunder.rdds.fileio.seriesloader import writeSeriesConfig

    if not overwrite:
        from thunder.utils.common import raiseErrorIfPathExists
        raiseErrorIfPathExists(outputDirPath)
        # the path has been checked once; force overwrite so downstream code skips its own checks
        overwrite = True

    writerClass = getParallelWriterForPath(outputDirPath)
    writer = writerClass(outputDirPath, overwrite=overwrite)

    binaryRecords = self.toBinarySeries()
    binaryRecords.foreach(writer.writerFcn)
    writeSeriesConfig(outputDirPath, len(self.dims), self.nimages, keyType='int16',
                      valueType=self.dtype, overwrite=overwrite)
def saveAsBinarySeries(self, outputDirPath, overwrite=False):
    """Writes out Series-formatted data.

    Subclasses are *not* expected to override this method.

    Parameters
    ----------
    outputDirPath : string path or URI to directory to be created
        Output files will be written underneath outputDirPath. This directory must not yet exist
        (unless overwrite is True), and must be no more than one level beneath an existing directory.
        It will be created as a result of this call.

    overwrite : bool
        If true, outputDirPath and all its contents will be deleted and recreated as part of this call.
    """
    from thunder.rdds.fileio.writers import getParallelWriterForPath
    from thunder.rdds.fileio.seriesloader import writeSeriesConfig
    from thunder.utils.aws import AWSCredentials

    if not overwrite:
        self._checkOverwrite(outputDirPath)
        # path already validated; set overwrite so downstream writers skip their own checks
        overwrite = True

    # pick up any AWS credentials configured on the Spark context, for s3 output paths
    credentials = AWSCredentials.fromContext(self.rdd.ctx)
    writer = getParallelWriterForPath(outputDirPath)(outputDirPath, overwrite=overwrite,
                                                     awsCredentialsOverride=credentials)

    self.toBinarySeries().foreach(writer.writerFcn)
    writeSeriesConfig(outputDirPath, len(self.dims), self.nimages, keyType='int16',
                      valueType=self.dtype, overwrite=overwrite,
                      awsCredentialsOverride=credentials)
def saveAsBinarySeries(self, outputdirname, blockSize="150M", splitsPerDim=None, groupingDim=None,
                       overwrite=False):
    """Writes Image into files on a local filesystem, suitable for loading by SeriesLoader.fromBinary()

    The mount point specified by outputdirname must be visible to all workers; thus this method is
    primarily useful either when Spark is being run locally or in the presence of an NFS mount or
    similar shared filesystem.

    Parameters
    ----------
    outputdirname : string path or URI to directory to be created
        Output files will be written underneath outputdirname. This directory must not yet exist
        (unless overwrite is True), and must be no more than one level beneath an existing directory.
        It will be created as a result of this call.

    blockSize : positive int or string
        Requests a particular size for individual output files; see toSeries()

    splitsPerDim : n-tuple of positive int
        Specifies that output files are to be generated by splitting the i-th dimension of the image
        into splitsPerDim[i] roughly equally-sized partitions; see toSeries()

    groupingDim : nonnegative int, 0 <= groupingDim <= len(self.dims)
        Specifies that intermediate blocks are to be generated by splitting the image into "planes"
        of dimensionality len(self.dims) - 1, along the dimension given by self.dims[groupingDim];
        see toSeries()

    overwrite : bool
        If true, outputdirname and all its contents will be deleted and recreated as part of this call.
    """
    from thunder.rdds.fileio.writers import getParallelWriterForPath
    from thunder.rdds.fileio.seriesloader import writeSeriesConfig

    writer = getParallelWriterForPath(outputdirname)(outputdirname, overwrite=overwrite)

    blocks = self._scatterToBlocks(blockSize=blockSize, blocksPerDim=splitsPerDim,
                                   groupingDim=groupingDim)
    records = blocks.toBinarySeries(seriesDim=0)

    def withBinSuffix(kv):
        # the writer expects filenames, so tag each series label with a .bin extension
        label, vals = kv
        return label + '.bin', vals

    records.map(withBinSuffix).foreach(writer.writerFcn)
    writeSeriesConfig(outputdirname, len(self.dims), self.nimages, dims=self.dims.count,
                      keytype='int16', valuetype=self.dtype, overwrite=overwrite)
def saveAsBinarySeries(self, outputdirname, blockSize="150M", splitsPerDim=None, groupingDim=None,
                       overwrite=False):
    """Writes Image into files on a local filesystem, suitable for loading by SeriesLoader.fromBinary()

    The mount point specified by outputdirname must be visible to all workers; thus this method is
    primarily useful either when Spark is being run locally or in the presence of an NFS mount or
    similar shared filesystem.

    Parameters
    ----------
    outputdirname : string path or URI to directory to be created
        Output files will be written underneath outputdirname. This directory must not yet exist
        (unless overwrite is True), and must be no more than one level beneath an existing directory.
        It will be created as a result of this call.

    blockSize : positive int or string
        Requests a particular size for individual output files; see toSeries()

    splitsPerDim : n-tuple of positive int
        Specifies that output files are to be generated by splitting the i-th dimension of the image
        into splitsPerDim[i] roughly equally-sized partitions; see toSeries()

    groupingDim : nonnegative int, 0 <= groupingDim <= len(self.dims)
        Specifies that intermediate blocks are to be generated by splitting the image into "planes"
        of dimensionality len(self.dims) - 1, along the dimension given by self.dims[groupingDim];
        see toSeries()

    overwrite : bool
        If true, outputdirname and all its contents will be deleted and recreated as part of this call.
    """
    from thunder.rdds.fileio.writers import getParallelWriterForPath
    from thunder.rdds.fileio.seriesloader import writeSeriesConfig

    writerFactory = getParallelWriterForPath(outputdirname)
    writer = writerFactory(outputdirname, overwrite=overwrite)

    # regroup the image into blocks, then flatten each block into a binary series record
    scattered = self._scatterToBlocks(blockSize=blockSize, blocksPerDim=splitsPerDim,
                                      groupingDim=groupingDim)
    seriesRecords = scattered.toBinarySeries(seriesDim=0)

    # append a .bin extension to every record label before handing it to the writer
    seriesRecords.map(lambda kv: (kv[0] + '.bin', kv[1])).foreach(writer.writerFcn)

    writeSeriesConfig(outputdirname, len(self.dims), self.nimages, dims=self.dims.count,
                      keytype='int16', valuetype=self.dtype, overwrite=overwrite)
def saveAsBinarySeries(self, outputDirPath, overwrite=False):
    """
    Writes out Series-formatted data.

    This method (Series.saveAsBinarySeries) writes out binary series files using the current
    partitioning of this Series object. (That is, if mySeries.rdd.getNumPartitions() == 5, then 5
    files will be written out, one per partition.) The records will not be resorted; the file names
    for each partition will be taken from the key of the first Series record in that partition. If
    the Series object is already sorted and no records have been removed by filtering, then the
    resulting output should be equivalent to what one would get from calling
    myImages.saveAsBinarySeries().

    If all one wishes to do is to save out Images data in a binary series format, then
    tsc.convertImagesToSeries() will likely be more efficient than
    tsc.loadImages().toSeries().saveAsBinarySeries().

    Parameters
    ----------
    outputDirPath : string path or URI to directory to be created
        Output files will be written underneath outputDirPath. This directory must not yet exist
        (unless overwrite is True), and must be no more than one level beneath an existing directory.
        It will be created as a result of this call.

    overwrite : bool
        If true, outputDirPath and all its contents will be deleted and recreated as part of this call.
    """
    import cStringIO as StringIO
    import struct
    from thunder.rdds.imgblocks.blocks import SimpleBlocks
    from thunder.rdds.fileio.writers import getParallelWriterForPath
    from thunder.rdds.fileio.seriesloader import writeSeriesConfig
    from thunder.utils.common import AWSCredentials

    if not overwrite:
        self._checkOverwrite(outputDirPath)
        # path already validated; force overwrite so downstream code skips its own checks
        overwrite = True

    def packPartition(records):
        """Collects all Series records in a partition into a single binary series record."""
        packer = None
        leadKey = None
        out = StringIO.StringIO()
        for key, vals in records:
            if packer is None:
                # every key in a partition has the same arity, so one packer suffices
                packer = struct.Struct('h' * len(key))
                leadKey = key
            out.write(packer.pack(*key))
            out.write(vals.tostring())
        payload = out.getvalue()
        out.close()
        # an empty partition never sets leadKey; emit nothing for it
        if leadKey is None:
            return iter([])
        label = SimpleBlocks.getBinarySeriesNameForKey(leadKey) + ".bin"
        return iter([(label, payload)])

    awsCredentials = AWSCredentials.fromContext(self.rdd.ctx)
    writer = getParallelWriterForPath(outputDirPath)(outputDirPath, overwrite=overwrite,
                                                     awsCredentialsOverride=awsCredentials)

    self.rdd.mapPartitions(packPartition).foreach(writer.writerFcn)

    # TODO: all we really need here are the number of keys and number of values, which could in
    # principle be cached in _nkeys and _nvals attributes, removing the need for this .first() call
    # in most cases.
    firstKey, firstVal = self.first()
    writeSeriesConfig(outputDirPath, len(firstKey), len(firstVal), keyType='int16',
                      valueType=self.dtype, overwrite=overwrite,
                      awsCredentialsOverride=awsCredentials)
def saveAsBinarySeries(self, outputDirPath, overwrite=False):
    """
    Writes out Series-formatted data.

    This method (Series.saveAsBinarySeries) writes out binary series files using the current
    partitioning of this Series object. (That is, if mySeries.rdd.getNumPartitions() == 5, then 5
    files will be written out, one per partition.) The records will not be resorted; the file names
    for each partition will be taken from the key of the first Series record in that partition. If
    the Series object is already sorted and no records have been removed by filtering, then the
    resulting output should be equivalent to what one would get from calling
    myImages.saveAsBinarySeries().

    If all one wishes to do is to save out Images data in a binary series format, then
    tsc.convertImagesToSeries() will likely be more efficient than
    tsc.loadImages().toSeries().saveAsBinarySeries().

    Parameters
    ----------
    outputDirPath : string path or URI to directory to be created
        Output files will be written underneath outputDirPath. This directory must not yet exist
        (unless overwrite is True), and must be no more than one level beneath an existing directory.
        It will be created as a result of this call.

    overwrite : bool
        If true, outputDirPath and all its contents will be deleted and recreated as part of this call.
    """
    import cStringIO as StringIO
    import struct
    from thunder.rdds.imgblocks.blocks import SimpleBlocks
    from thunder.rdds.fileio.writers import getParallelWriterForPath
    from thunder.rdds.fileio.seriesloader import writeSeriesConfig
    from thunder.utils.common import AWSCredentials

    if not overwrite:
        self._checkOverwrite(outputDirPath)
        overwrite = True  # existence already checked; skip redundant downstream checks

    def toSingleBinaryRecord(recordIter):
        """Collects all Series records in a partition into a single binary series record."""
        buf = StringIO.StringIO()
        keyStruct, initialKey = None, None
        for recKey, recVals in recordIter:
            if keyStruct is None:
                # one 'h' (int16) per key coordinate; partition keys all share the same length
                keyStruct = struct.Struct('h' * len(recKey))
                initialKey = recKey
            buf.write(keyStruct.pack(*recKey))
            buf.write(recVals.tostring())
        packed = buf.getvalue()
        buf.close()
        if initialKey is None:
            # the partition held no records at all; produce no output file for it
            return iter([])
        else:
            # name the output file after the first key seen in this partition
            name = SimpleBlocks.getBinarySeriesNameForKey(initialKey) + ".bin"
            return iter([(name, packed)])

    credentials = AWSCredentials.fromContext(self.rdd.ctx)
    writerClass = getParallelWriterForPath(outputDirPath)
    writer = writerClass(outputDirPath, overwrite=overwrite, awsCredentialsOverride=credentials)

    packedRdd = self.rdd.mapPartitions(toSingleBinaryRecord)
    packedRdd.foreach(writer.writerFcn)

    # TODO: all we really need here are the number of keys and number of values, which could in
    # principle be cached in _nkeys and _nvals attributes, removing the need for this .first() call
    # in most cases.
    firstKey, firstVal = self.first()
    writeSeriesConfig(outputDirPath, len(firstKey), len(firstVal), keyType='int16',
                      valueType=self.dtype, overwrite=overwrite,
                      awsCredentialsOverride=credentials)