def test_fromMultipleArrays(self):
    ary = arange(8, dtype=dtype('int16')).reshape((2, 4))
    ary2 = arange(8, 16, dtype=dtype('int16')).reshape((2, 4))

    series = SeriesLoader(self.sc).fromArrays([ary, ary2])

    seriesvals = series.collect()
    seriesary = series.pack()

    # check ordering of keys
    assert_equals((0, 0), seriesvals[0][0])  # first key
    assert_equals((1, 0), seriesvals[1][0])  # second key
    assert_equals((3, 0), seriesvals[3][0])
    assert_equals((0, 1), seriesvals[4][0])
    assert_equals((3, 1), seriesvals[7][0])

    # check dimensions tuple is reversed from numpy shape
    assert_equals(ary.shape[::-1], series.dims.count)

    # check that values are in original order, with subsequent points concatenated in values
    collectedvals = array([kv[1] for kv in seriesvals], dtype=dtype('int16'))
    assert_true(array_equal(ary.ravel(), collectedvals[:, 0]))
    assert_true(array_equal(ary2.ravel(), collectedvals[:, 1]))

    # check that packing returns concatenation of input arrays, with time as first dimension
    assert_true(array_equal(ary.T, seriesary[0]))
    assert_true(array_equal(ary2.T, seriesary[1]))
def test_fromArrays(self):
    ary = arange(8, dtype=dtype('int16')).reshape((2, 4))

    series = SeriesLoader(self.sc).fromArrays(ary)

    seriesvals = series.collect()
    seriesary = series.pack()

    # check ordering of keys
    assert_equals((0, 0), seriesvals[0][0])  # first key
    assert_equals((1, 0), seriesvals[1][0])  # second key
    assert_equals((2, 0), seriesvals[2][0])
    assert_equals((3, 0), seriesvals[3][0])
    assert_equals((0, 1), seriesvals[4][0])
    assert_equals((1, 1), seriesvals[5][0])
    assert_equals((2, 1), seriesvals[6][0])
    assert_equals((3, 1), seriesvals[7][0])

    # check dimensions tuple is reversed from numpy shape
    assert_equals(ary.shape[::-1], series.dims.count)

    # check that values are in original order
    collectedvals = array([kv[1] for kv in seriesvals], dtype=dtype('int16')).ravel()
    assert_true(array_equal(ary.ravel(), collectedvals))

    # check that packing returns transpose of original array
    assert_true(array_equal(ary.T, seriesary))
def loadSeriesFromArray(self, values, index=None, npartitions=None):
    """
    Load Series data from a local array

    Parameters
    ----------
    values : list or ndarray
        A list of 1d numpy arrays, or a single 2d numpy array

    index : array-like, optional, default = None
        Index to set for Series object, if None will use linear indices.

    npartitions : positive int, optional, default = None
        Number of partitions for RDD, if unspecified will use default parallelism.
    """
    from numpy import ndarray, asarray

    from thunder.rdds.fileio.seriesloader import SeriesLoader
    loader = SeriesLoader(self._sc)

    if not npartitions:
        npartitions = self._sc.defaultParallelism

    if isinstance(values, list):
        values = asarray(values)

    if isinstance(values, ndarray) and values.ndim > 1:
        values = list(values)

    data = loader.fromArrays(values, npartitions=npartitions)

    # test against None explicitly, since the truth value of an array-like index is ambiguous
    if index is not None:
        data.index = index

    return data
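# A minimal usage sketch of loadSeriesFromArray (not part of the method above):
# assumes a ThunderContext named `tsc` already exists; the arrays and index
# values are hypothetical.
from numpy import arange

records = [arange(4), arange(4, 8), arange(8, 12)]   # three 1d records of length 4
series = tsc.loadSeriesFromArray(records, index=[10, 20, 30, 40])
print series.first()   # one (key, 1d-array) record; keys default to linear indices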
def test_fromArrays(self):
    ary = arange(8, dtype=dtypeFunc('int16')).reshape((2, 4))

    series = SeriesLoader(self.sc).fromArraysAsImages(ary)

    seriesVals = series.collect()
    seriesAry = series.pack()

    # check ordering of keys
    assert_equals((0, 0), seriesVals[0][0])  # first key
    assert_equals((1, 0), seriesVals[1][0])  # second key
    assert_equals((2, 0), seriesVals[2][0])
    assert_equals((3, 0), seriesVals[3][0])
    assert_equals((0, 1), seriesVals[4][0])
    assert_equals((1, 1), seriesVals[5][0])
    assert_equals((2, 1), seriesVals[6][0])
    assert_equals((3, 1), seriesVals[7][0])

    # check dimensions tuple is reversed from numpy shape
    assert_equals(ary.shape[::-1], series.dims.count)

    # check that values are in original order
    collectedVals = array([kv[1] for kv in seriesVals], dtype=dtypeFunc('int16')).ravel()
    assert_true(array_equal(ary.ravel(), collectedVals))

    # check that packing returns transpose of original array
    assert_true(array_equal(ary.T, seriesAry))
def _run_roundtrip_tst(self, testCount, arrays, blockSize):
    print "Running TestSeriesBinaryWriteFromStack roundtrip test #%d" % testCount
    insubdir = os.path.join(self.outputdir, 'input%d' % testCount)
    os.mkdir(insubdir)

    outsubdir = os.path.join(self.outputdir, 'output%d' % testCount)
    #os.mkdir(outsubdir)

    for aryCount, array in enumerate(arrays):
        # numpy's tofile writes in C order, with the last axis changing fastest on disk...
        array.tofile(os.path.join(insubdir, "img%02d.stack" % aryCount))

    # ... while the stack format expects the first entry of dims to change fastest,
    # so pass the reversed numpy shape as dims
    dims = list(arrays[0].shape)
    dims.reverse()

    underTest = SeriesLoader(self.sc)

    underTest.saveFromStack(insubdir, outsubdir, dims, blockSize=blockSize, datatype=str(arrays[0].dtype))

    roundtripped = underTest.fromBinary(outsubdir).collect()

    for serieskeys, seriesvalues in roundtripped:
        for seriesidx, seriesval in enumerate(seriesvalues):
            #print "seriesidx: %d; serieskeys: %s; seriesval: %g" % (seriesidx, serieskeys, seriesval)
            # flip series keys back into numpy index order (first-fastest vs last-fastest)
            arykeys = list(serieskeys)
            arykeys.reverse()
            msg = "Failure on test #%d, time point %d, indices %s" % (testCount, seriesidx, str(tuple(arykeys)))
            try:
                assert_almost_equal(arrays[seriesidx][tuple(arykeys)], seriesval, places=4)
            except AssertionError, e:
                raise AssertionError(msg, e)
def test_fromMultipleArrays(self):
    ary = arange(8, dtype=dtypeFunc('int16')).reshape((2, 4))
    ary2 = arange(8, 16, dtype=dtypeFunc('int16')).reshape((2, 4))

    series = SeriesLoader(self.sc).fromArraysAsImages([ary, ary2])

    seriesVals = series.collect()
    seriesAry = series.pack()

    # check ordering of keys
    assert_equals((0, 0), seriesVals[0][0])  # first key
    assert_equals((1, 0), seriesVals[1][0])  # second key
    assert_equals((3, 0), seriesVals[3][0])
    assert_equals((0, 1), seriesVals[4][0])
    assert_equals((3, 1), seriesVals[7][0])

    # check dimensions tuple is reversed from numpy shape
    assert_equals(ary.shape[::-1], series.dims.count)

    # check that values are in original order, with subsequent points concatenated in values
    collectedVals = array([kv[1] for kv in seriesVals], dtype=dtypeFunc('int16'))
    assert_true(array_equal(ary.ravel(), collectedVals[:, 0]))
    assert_true(array_equal(ary2.ravel(), collectedVals[:, 1]))

    # check that packing returns concatenation of input arrays, with time as first dimension
    assert_true(array_equal(ary.T, seriesAry[0]))
    assert_true(array_equal(ary2.T, seriesAry[1]))
def loadSeries(self, datapath, nkeys=None, nvalues=None, inputformat='binary', minPartitions=None,
               conffile='conf.json', keytype=None, valuetype=None):
    """
    Loads a Series object from data stored as text or binary files.

    Supports single files or multiple files stored on a local file system, a networked file system
    (mounted and available on all cluster nodes), Amazon S3, or HDFS.

    Parameters
    ----------
    datapath: string
        Path to data files or directory, specified as either a local filesystem path or in a URI-like
        format, including scheme. A datapath argument may include a single '*' wildcard character in
        the filename. Examples of valid datapaths include "a/local/relative/directory/*.stack",
        "s3n:///my-s3-bucket/data/mydatafile.tif", "/mnt/my/absolute/data/directory/", or
        "file:///mnt/another/data/directory/".

    nkeys: int, optional (but required if `inputformat` is 'text')
        Dimensionality of data keys. (For instance, (x,y,z) keyed data for 3-dimensional image
        timeseries data.) For text data, number of keys must be specified in this parameter; for binary
        data, number of keys must be specified either in this parameter or in a configuration file
        named by the 'conffile' argument if this parameter is not set.

    nvalues: int, optional (but required if `inputformat` is 'text')
        Number of values expected to be read. For binary data, nvalues must be specified either in this
        parameter or in a configuration file named by the 'conffile' argument if this parameter is
        not set.

    inputformat: {'text', 'binary'}. optional, default 'binary'
        Format of data to be read.

    minPartitions: int, optional
        Explicitly specify minimum number of Spark partitions to be generated from this data. Used only
        for text data. Default is to use minParallelism attribute of Spark context object.

    conffile: string, optional, default 'conf.json'
        Path to JSON file with configuration options including 'nkeys', 'nvalues', 'keytype', and
        'valuetype'. If a file is not found at the given path, then the base directory given in
        'datapath' will also be checked. Parameters `nkeys` or `nvalues` that are specified as explicit
        arguments to this method will take priority over those found in conffile if both are present.

    Returns
    -------
    data: thunder.rdds.Series
        A newly-created Series object, wrapping an RDD of series data. This RDD will have as keys an
        n-tuple of int, with n given by `nkeys` or the configuration passed in `conffile`. RDD values
        will be a numpy array of length `nvalues` (or as specified in the passed configuration file).
    """
    checkparams(inputformat, ['text', 'binary'])

    from thunder.rdds.fileio.seriesloader import SeriesLoader
    loader = SeriesLoader(self._sc, minPartitions=minPartitions)

    if inputformat.lower() == 'text':
        data = loader.fromText(datapath, nkeys=nkeys)
    else:
        # 'binary' (checkparams guarantees inputformat is 'text' or 'binary')
        data = loader.fromBinary(datapath, conffilename=conffile, nkeys=nkeys, nvalues=nvalues,
                                 keytype=keytype, valuetype=valuetype)

    return data
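# A minimal usage sketch of this loadSeries variant (not part of the method
# above): assumes a ThunderContext `tsc`; paths are hypothetical, and binary
# data is assumed to have a conf.json describing nkeys/nvalues alongside it.
data = tsc.loadSeries("/mnt/data/series-dir/")                               # binary, via conf.json
data = tsc.loadSeries("/mnt/data/series.txt", inputformat='text', nkeys=3)   # text requires nkeys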
def _run_roundtrip_exception_tst(self, nimages, aryShape, dtypeSpec, sizeSpec):
    testArrays = TestSeriesBinaryWriteFromStack.generateTestImages(nimages, aryShape, dtypeSpec)
    loader = SeriesLoader(self.sc)
    series = loader.fromArrays(testArrays)
    assert_raises(ValueError, series.toBlocks, sizeSpec)
def _run_fromFishTif(self, blocksize="150M"): imagepath = TestSeriesLoader._findSourceTreeDir("utils/data/fish/tif-stack") series = SeriesLoader(self.sc).fromMultipageTif(imagepath, blockSize=blocksize) series_ary = series.pack() series_ary_xpose = series.pack(transpose=True) assert_equals((76, 87, 2), series.dims.count) assert_equals((20, 76, 87, 2), series_ary.shape) assert_equals((20, 2, 87, 76), series_ary_xpose.shape)
def _run_tst_fromBinary(self, useConfJson=False):
    # run this as a single big test so as to avoid repeated setUp and tearDown of the spark context
    # data will be a sequence of test data
    # all keys and all values in a test data item must be of the same length
    # keys get converted to ints regardless of raw input format
    DATA = [
        SeriesBinaryTestData.fromArrays([[1, 2, 3]], [[11, 12, 13]], 'int16', 'int16'),
        SeriesBinaryTestData.fromArrays([[1, 2, 3], [5, 6, 7]], [[11], [12]], 'int16', 'int16'),
        SeriesBinaryTestData.fromArrays([[1, 2, 3]], [[11, 12, 13]], 'int16', 'int32'),
        SeriesBinaryTestData.fromArrays([[1, 2, 3]], [[11, 12, 13]], 'int32', 'int16'),
        SeriesBinaryTestData.fromArrays([[1, 2, 3]], [[11.0, 12.0, 13.0]], 'int16', 'float32'),
        SeriesBinaryTestData.fromArrays([[1, 2, 3]], [[11.0, 12.0, 13.0]], 'float32', 'float32'),
        SeriesBinaryTestData.fromArrays([[2, 3, 4]], [[11.0, 12.0, 13.0]], 'float32', 'float32'),
    ]

    for itemidx, item in enumerate(DATA):
        outSubdir = os.path.join(self.outputdir, 'input%d' % itemidx)
        os.mkdir(outSubdir)

        fname = os.path.join(outSubdir, 'inputfile%d.bin' % itemidx)
        with open(fname, 'wb') as f:
            item.writeToFile(f)

        loader = SeriesLoader(self.sc)
        if not useConfJson:
            series = loader.fromBinary(outSubdir, nkeys=item.nkeys, nvalues=item.nvals,
                                       keyType=str(item.keyDtype), valueType=str(item.valDtype))
        else:
            # write configuration file
            conf = {'input': outSubdir,
                    'nkeys': item.nkeys, 'nvalues': item.nvals,
                    'valuetype': str(item.valDtype), 'keytype': str(item.keyDtype)}
            with open(os.path.join(outSubdir, "conf.json"), 'wb') as f:
                json.dump(conf, f, indent=2)
            series = loader.fromBinary(outSubdir)

        seriesData = series.rdd.collect()

        expectedData = item.data
        assert_equals(len(expectedData), len(seriesData),
                      "Differing numbers of k/v pairs in item %d; expected %d, got %d" %
                      (itemidx, len(expectedData), len(seriesData)))

        for expected, actual in zip(expectedData, seriesData):
            expectedKeys = tuple(expected[0])
            expectedType = smallestFloatType(item.valDtype)
            expectedVals = array(expected[1], dtype=expectedType)
            assert_equals(expectedKeys, actual[0],
                          "Key mismatch in item %d; expected %s, got %s" %
                          (itemidx, str(expectedKeys), str(actual[0])))
            assert_true(allclose(expectedVals, actual[1]),
                        "Value mismatch in item %d; expected %s, got %s" %
                        (itemidx, str(expectedVals), str(actual[1])))
            assert_equals(expectedType, str(actual[1].dtype),
                          "Value type mismatch in item %d; expected %s, got %s" %
                          (itemidx, expectedType, str(actual[1].dtype)))
def _run_fromFishTif(self, blocksize): imagepath = TestSeriesLoader._findSourceTreeDir("utils/data/fish/images") series = SeriesLoader(self.sc).fromTif(imagepath, blockSize=blocksize) assert_equals('float16', series._dtype) seriesAry = series.pack() seriesAry_xpose = series.pack(transpose=True) assert_equals('float16', str(seriesAry.dtype)) assert_equals((76, 87, 2), series.dims.count) assert_equals((20, 76, 87, 2), seriesAry.shape) assert_equals((20, 2, 87, 76), seriesAry_xpose.shape)
def _run_fromFishTif(self, blocksize):
    imagepath = TestSeriesLoader._findSourceTreeDir("utils/data/fish/tif-stack")
    series = SeriesLoader(self.sc).fromTif(imagepath, blockSize=blocksize)
    assert_equals('float16', series._dtype)

    seriesAry = series.pack()
    seriesAry_xpose = series.pack(transpose=True)

    assert_equals('float16', str(seriesAry.dtype))
    assert_equals((76, 87, 2), series.dims.count)
    assert_equals((20, 76, 87, 2), seriesAry.shape)
    assert_equals((20, 2, 87, 76), seriesAry_xpose.shape)
def test_loadStacksAsSeries(self):
    rangeary = arange(64*128, dtype=dtype('int16'))
    rangeary.shape = (64, 128)
    filepath = os.path.join(self.outputdir, "rangeary.stack")
    rangeary.tofile(filepath)

    series = SeriesLoader(self.sc).fromStack(filepath, dims=(128, 64))
    series_ary = series.pack()

    assert_equals((128, 64), series.dims.count)
    assert_equals((128, 64), series_ary.shape)
    assert_true(array_equal(rangeary.T, series_ary))
def test_castToFloat(self):
    from numpy import arange
    shape = (3, 2, 2)
    size = 3*2*2
    ary = arange(size, dtype=dtypeFunc('uint8')).reshape(shape)
    ary2 = ary + size
    from thunder.rdds.fileio.seriesloader import SeriesLoader
    series = SeriesLoader(self.sc).fromArrays([ary, ary2])

    castSeries = series.astype("smallfloat")

    assert_equals('float16', str(castSeries.dtype))
    assert_equals('float16', str(castSeries.first()[1].dtype))
def test_maxProject(self):
    from thunder.rdds.fileio.seriesloader import SeriesLoader
    ary = arange(8, dtype=dtypeFunc('int16')).reshape((2, 4))
    series = SeriesLoader(self.sc).fromArraysAsImages(ary)

    project0Series = series.maxProject(axis=0)
    project0 = project0Series.pack()

    project1Series = series.maxProject(axis=1)
    project1 = project1Series.pack(sorting=True)

    assert_true(array_equal(amax(ary.T, 0), project0))
    assert_true(array_equal(amax(ary.T, 1), project1))
def test_castToFloat(self):
    from numpy import arange
    shape = (3, 2, 2)
    size = 3 * 2 * 2
    ary = arange(size, dtype=dtypeFunc('uint8')).reshape(shape)
    ary2 = ary + size
    from thunder.rdds.fileio.seriesloader import SeriesLoader
    series = SeriesLoader(self.sc).fromArraysAsImages([ary, ary2])

    castSeries = series.astype("smallfloat")

    assert_equals('float16', str(castSeries.dtype))
    assert_equals('float16', str(castSeries.first()[1].dtype))
def _run_roundtrip_tst(self, nimages, aryShape, dtypeSpec, sizeSpec):
    testArrays = TestSeriesBinaryWriteFromStack.generateTestImages(nimages, aryShape, dtypeSpec)
    loader = SeriesLoader(self.sc)
    series = loader.fromArrays(testArrays)

    blocks = series.toBlocks(sizeSpec)
    roundtrippedSeries = blocks.toSeries(newDType=series.dtype)

    packedSeries = series.pack()
    packedRoundtrippedSeries = roundtrippedSeries.pack()

    assert_true(array_equal(packedSeries, packedRoundtrippedSeries))
def test_maxProject(self):
    from thunder.rdds.fileio.seriesloader import SeriesLoader
    ary = arange(8, dtype=dtypeFunc('int16')).reshape((2, 4))
    series = SeriesLoader(self.sc).fromArrays(ary)

    project0Series = series.maxProject(axis=0)
    project0 = project0Series.pack()

    project1Series = series.maxProject(axis=1)
    project1 = project1Series.pack(sorting=True)

    assert_true(array_equal(amax(ary.T, 0), project0))
    assert_true(array_equal(amax(ary.T, 1), project1))
def _run_roundtrip_tst(self, testCount, arrays, blockSize):
    print "Running TestSeriesBinaryWriteFromStack roundtrip test #%d" % testCount
    insubdir = os.path.join(self.outputdir, 'input%d' % testCount)
    os.mkdir(insubdir)

    outsubdir = os.path.join(self.outputdir, 'output%d' % testCount)
    #os.mkdir(outsubdir)

    for aryCount, array in enumerate(arrays):
        # numpy's tofile writes in C order, with the last axis changing fastest on disk...
        array.tofile(os.path.join(insubdir, "img%02d.stack" % aryCount))

    # ... while the stack format expects the first entry of dims to change fastest,
    # so pass the reversed numpy shape as dims
    dims = list(arrays[0].shape)
    dims.reverse()

    underTest = SeriesLoader(self.sc)

    underTest.saveFromStack(insubdir, outsubdir, dims, blockSize=blockSize, datatype=str(arrays[0].dtype))
    series = underTest.fromStack(insubdir, dims, datatype=str(arrays[0].dtype))

    roundtripped_series = underTest.fromBinary(outsubdir)
    roundtripped = roundtripped_series.collect()
    direct = series.collect()

    expecteddtype = str(smallest_float_type(arrays[0].dtype))
    assert_equals(expecteddtype, roundtripped_series.dtype)
    assert_equals(expecteddtype, series.dtype)
    assert_equals(expecteddtype, str(roundtripped[0][1].dtype))
    assert_equals(expecteddtype, str(direct[0][1].dtype))

    with open(os.path.join(outsubdir, "conf.json"), 'r') as fp:
        # check that the binary series file data type *matches* the input stack data type
        # (not yet converted to float), at least according to conf.json
        conf = json.load(fp)
        assert_equals(str(arrays[0].dtype), conf["valuetype"])

    for ((serieskeys, seriesvalues), (directkeys, directvalues)) in zip(roundtripped, direct):
        assert_equals(directkeys, serieskeys)
        assert_equals(directvalues, seriesvalues)

        for seriesidx, seriesval in enumerate(seriesvalues):
            #print "seriesidx: %d; serieskeys: %s; seriesval: %g" % (seriesidx, serieskeys, seriesval)
            # flip series keys back into numpy index order (first-fastest vs last-fastest)
            arykeys = list(serieskeys)
            arykeys.reverse()
            msg = "Failure on test #%d, time point %d, indices %s" % (testCount, seriesidx, str(tuple(arykeys)))
            try:
                assert_almost_equal(arrays[seriesidx][tuple(arykeys)], seriesval, places=4)
            except AssertionError, e:
                raise AssertionError(msg, e)
def _run_tst_roundtripConvertToSeries(self, images, strategy): outdir = os.path.join(self.outputdir, "fish-series-dir") partitionedimages = images.toBlocks(strategy) series = partitionedimages.toSeries() series_ary = series.pack() partitionedimages.saveAsBinarySeries(outdir) converted_series = SeriesLoader(self.sc).fromBinary(outdir) converted_series_ary = converted_series.pack() assert_equals(images.dims.count, series.dims.count) expected_shape = tuple([images.nimages] + list(images.dims.count)) assert_equals(expected_shape, series_ary.shape) assert_true(array_equal(series_ary, converted_series_ary))
def _run_tst_roundtripConvertToSeries(self, images, strategy):
    outdir = os.path.join(self.outputdir, "fish-series-dir")

    partitionedimages = images.toBlocks(strategy)
    series = partitionedimages.toSeries()
    series_ary = series.pack()

    partitionedimages.saveAsBinarySeries(outdir)
    converted_series = SeriesLoader(self.sc).fromBinary(outdir)
    converted_series_ary = converted_series.pack()

    assert_equals(images.dims.count, series.dims.count)
    expected_shape = tuple([images.nrecords] + list(images.dims.count))
    assert_equals(expected_shape, series_ary.shape)
    assert_true(array_equal(series_ary, converted_series_ary))
def test_roundtripConvertToSeries(self): imagepath = TestImagesUsingOutputDir._findSourceTreeDir("utils/data/fish/tif-stack") outdir = os.path.join(self.outputdir, "fish-series-dir") images = ImagesLoader(self.sc).fromMultipageTif(imagepath) series = images.toSeries(blockSize=76*20) series_ary = series.pack() images.saveAsBinarySeries(outdir, blockSize=76*20) converted_series = SeriesLoader(self.sc).fromBinary(outdir) converted_series_ary = converted_series.pack() assert_equals((76, 87, 2), series.dims.count) assert_equals((20, 76, 87, 2), series_ary.shape) assert_true(array_equal(series_ary, converted_series_ary))
def loadSeriesLocal(self, dataFilePath, inputFormat='npy', minPartitions=None, keyFilePath=None, varName=None):
    """
    Load a Series object from a local file (either npy or MAT format).

    File should contain a 1d or 2d matrix, where each row of the input matrix is a record.

    Keys can be provided in a separate file (with variable name 'keys', for MAT files).
    If not provided, linear indices will be used for keys.

    Parameters
    ----------
    dataFilePath : str
        File to import

    varName : str, optional, default = None
        Variable name to load (for MAT files only)

    keyFilePath : str, optional, default = None
        File containing the keys for each record as another 1d or 2d array

    minPartitions : int, optional, default = 1
        Number of partitions for RDD
    """
    checkParams(inputFormat, ['mat', 'npy'])

    from thunder.rdds.fileio.seriesloader import SeriesLoader
    loader = SeriesLoader(self._sc, minPartitions=minPartitions)

    if inputFormat.lower() == 'mat':
        if varName is None:
            raise Exception('Must provide variable name for loading MAT files')
        data = loader.fromMatLocal(dataFilePath, varName, keyFilePath)
    else:
        data = loader.fromNpyLocal(dataFilePath, keyFilePath)

    return data
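# A minimal usage sketch of loadSeriesLocal (not part of the method above):
# assumes a ThunderContext `tsc`; file and variable names are hypothetical.
data = tsc.loadSeriesLocal("records.npy", inputFormat='npy', keyFilePath="keys.npy")
data = tsc.loadSeriesLocal("records.mat", inputFormat='mat', varName='ts')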
def _run_roundtrip_tst(self, testIdx, nimages, aryShape, dtypeSpec, npartitions):
    testArrays = TestSeriesBinaryWriteFromStack.generateTestImages(nimages, aryShape, dtypeSpec)
    loader = SeriesLoader(self.sc)
    series = loader.fromArrays(testArrays)

    saveDirPath = os.path.join(self.outputdir, 'save%d' % testIdx)
    series.repartition(npartitions)  # note: this does an elementwise shuffle! won't be in sorted order
    series.saveAsBinarySeries(saveDirPath)

    nnonemptyPartitions = 0
    for partitionList in series.rdd.glom().collect():
        if partitionList:
            nnonemptyPartitions += 1
    del partitionList
    nsaveFiles = len(glob.glob(saveDirPath + os.sep + "*.bin"))

    roundtrippedSeries = loader.fromBinary(saveDirPath)

    with open(os.path.join(saveDirPath, "conf.json"), 'r') as fp:
        conf = json.load(fp)

    # sorting is required here b/c of the randomization induced by the repartition.
    # orig and roundtripped will in general be different from each other, since roundtripped
    # will have (0, 0, 0) index as first element (since it will be the lexicographically first
    # file) while orig has only a 1 in npartitions chance of starting with (0, 0, 0) after repartition.
    expectedPackedAry = series.pack(sorting=True)
    actualPackedAry = roundtrippedSeries.pack(sorting=True)

    assert_true(array_equal(expectedPackedAry, actualPackedAry))
    assert_equals(nnonemptyPartitions, nsaveFiles)
    assert_equals(len(aryShape), conf["nkeys"])
    assert_equals(nimages, conf["nvalues"])
    assert_equals("int16", conf["keytype"])
    assert_equals(str(series.dtype), conf["valuetype"])

    # check that we have converted ourselves to an appropriate float after reloading
    assert_equals(str(smallestFloatType(series.dtype)), str(roundtrippedSeries.dtype))
def test_fromMultipageTif(self): testresourcesdir = TestSeriesLoader._findTestResourcesDir() imagepath = os.path.join(testresourcesdir, "multilayer_tif", "dotdotdot_lzw.tif") testimg_pil = Image.open(imagepath) testimg_arys = list() testimg_arys.append(pil_to_array(testimg_pil)) testimg_pil.seek(1) testimg_arys.append(pil_to_array(testimg_pil)) testimg_pil.seek(2) testimg_arys.append(pil_to_array(testimg_pil)) series = SeriesLoader(self.sc).fromMultipageTif(imagepath) series_ary = series.pack() assert_equals((70, 75, 3), series.dims.count) assert_equals((70, 75, 3), series_ary.shape) assert_true(array_equal(testimg_arys[0], series_ary[:, :, 0])) assert_true(array_equal(testimg_arys[1], series_ary[:, :, 1])) assert_true(array_equal(testimg_arys[2], series_ary[:, :, 2]))
def test_fromMultipageTif(self):
    testresourcesdir = TestSeriesLoader._findTestResourcesDir()
    imagepath = os.path.join(testresourcesdir, "multilayer_tif", "dotdotdot_lzw.tif")

    testimg_pil = Image.open(imagepath)
    testimg_arys = list()
    testimg_arys.append(pil_to_array(testimg_pil))
    testimg_pil.seek(1)
    testimg_arys.append(pil_to_array(testimg_pil))
    testimg_pil.seek(2)
    testimg_arys.append(pil_to_array(testimg_pil))

    series = SeriesLoader(self.sc).fromMultipageTif(imagepath)
    assert_equals('float16', series._dtype)
    series_ary = series.pack()

    assert_equals((70, 75, 3), series.dims.count)
    assert_equals((70, 75, 3), series_ary.shape)
    assert_true(array_equal(testimg_arys[0], series_ary[:, :, 0]))
    assert_true(array_equal(testimg_arys[1], series_ary[:, :, 1]))
    assert_true(array_equal(testimg_arys[2], series_ary[:, :, 2]))
def loadSeriesLocal(self, datafile, inputformat='npy', minPartitions=None, keyfile=None, varname=None):
    """
    Load a Series object from a local file (either npy or MAT format).

    File should contain a 1d or 2d matrix, where each row of the input matrix is a record.

    Keys can be provided in a separate file (with variable name 'keys', for MAT files).
    If not provided, linear indices will be used for keys.

    Parameters
    ----------
    datafile : str
        File to import

    varname : str, optional, default = None
        Variable name to load (for MAT files only)

    keyfile : str, optional, default = None
        File containing the keys for each record as another 1d or 2d array

    minPartitions : int, optional, default = 1
        Number of partitions for RDD
    """
    checkparams(inputformat, ['mat', 'npy'])

    from thunder.rdds.fileio.seriesloader import SeriesLoader
    loader = SeriesLoader(self._sc, minPartitions=minPartitions)

    if inputformat.lower() == 'mat':
        if varname is None:
            raise Exception('Must provide variable name for loading MAT files')
        data = loader.fromMatLocal(datafile, varname, keyfile)
    else:
        data = loader.fromNpyLocal(datafile, keyfile)

    return data
def loadImagesAsSeries(self, dataPath, dims=None, inputFormat='stack', ext=None, dtype='int16', blockSize="150M", blockSizeUnits="pixels", startIdx=None, stopIdx=None, shuffle=True, recursive=False): """ Load Images data as Series data. Parameters ---------- dataPath: string Path to data files or directory, specified as either a local filesystem path or in a URI-like format, including scheme. A dataPath argument may include a single '*' wildcard character in the filename. Examples of valid dataPaths include 'a/local/relative/directory/*.stack", "s3n:///my-s3-bucket/data/mydatafile.tif", "/mnt/my/absolute/data/directory/", or "file:///mnt/another/data/directory/". dims: tuple of positive int, optional (but required if inputFormat is 'stack') Dimensions of input image data, for instance (1024, 1024, 48). Binary stack data will be interpreted as coming from a multidimensional array of the specified dimensions. The first dimension of the passed dims tuple should be the one that is changing most rapidly on disk. So for instance given dims of (x, y, z), the coordinates of the data in a binary stack file should be ordered as [(x0, y0, z0), (x1, y0, z0), ..., (xN, y0, z0), (x0, y1, z0), (x1, y1, z0), ..., (xN, yM, z0), (x0, y0, z1), ..., (xN, yM, zP)]. This is the opposite convention from that used by numpy, which by default has the fastest-changing dimension listed last (column-major convention). Thus, if loading a numpy array `ary`, where `ary.shape == (z, y, x)`, written to disk by `ary.tofile("myarray.stack")`, the corresponding dims parameter should be (x, y, z). If inputFormat is 'tif', the dims parameter (if any) will be ignored; data dimensions will instead be read out from the tif file headers. inputFormat: {'stack', 'tif'}. optional, default 'stack' Expected format of the input data. 'stack' indicates flat files of raw binary data, while 'tif' indicates greyscale / luminance TIF images. Each page of a multipage tif file will be interpreted as a separate z-plane. For both stacks and tif stacks, separate files are interpreted as distinct time points, with ordering given by lexicographic sorting of file names. ext: string, optional, default None Extension required on data files to be loaded. By default will be "stack" if inputFormat=="stack", "tif" for inputFormat=='tif'. dtype: string or numpy dtype. optional, default 'int16' Data type of the image files to be loaded, specified as a numpy "dtype" string. If inputFormat is 'tif', the dtype parameter (if any) will be ignored; data type will instead be read out from the tif headers. blockSize: string formatted as e.g. "64M", "512k", "2G", or positive int. optional, default "150M" Requested size of individual output files in bytes (or kilobytes, megabytes, gigabytes). If shuffle=True, blockSize can also be a tuple of int specifying either the number of pixels or of splits per dimension to apply to the loaded images, or an instance of BlockingStrategy. Whether a tuple of int is interpreted as pixels or as splits depends on the value of the blockSizeUnits parameter. blockSize also indirectly controls the number of Spark partitions to be used, with one partition used per block created. blockSizeUnits: string, either "pixels" or "splits" (or unique prefix of each, such as "s"), default "pixels" Specifies units to be used in interpreting a tuple passed as blockSizeSpec when shuffle=True. If a string or a BlockingStrategy instance is passed as blockSizeSpec, or if shuffle=False, this parameter has no effect. 
startIdx: nonnegative int, optional startIdx and stopIdx are convenience parameters to allow only a subset of input files to be read in. These parameters give the starting index (inclusive) and final index (exclusive) of the data files to be used after lexicographically sorting all input data files matching the dataPath argument. For example, startIdx=None (the default) and stopIdx=10 will cause only the first 10 data files in dataPath to be read in; startIdx=2 and stopIdx=3 will cause only the third file (zero-based index of 2) to be read in. startIdx and stopIdx use the python slice indexing convention (zero-based indexing with an exclusive final position). stopIdx: nonnegative int, optional See startIdx. shuffle: boolean, optional, default True Controls whether the conversion from Images to Series formats will make use of a Spark shuffle-based method. recursive: boolean, default False If true, will recursively descend directories rooted at dataPath, loading all files in the tree that have an appropriate extension. Recursive loading is currently only implemented for local filesystems (not s3), and only with shuffle=True. Returns ------- data: thunder.rdds.Series A newly-created Series object, wrapping an RDD of timeseries data generated from the images in dataPath. This RDD will have as keys an n-tuple of int, with n given by the dimensionality of the original images. The keys will be the zero-based spatial index of the timeseries data in the RDD value. The value will be a numpy array of length equal to the number of image files loaded. Each loaded image file will contribute one point to this value array, with ordering as implied by the lexicographic ordering of image file names. """ checkParams(inputFormat, ['stack', 'tif', 'tif-stack']) if inputFormat.lower() == 'stack' and not dims: raise ValueError( "Dimensions ('dims' parameter) must be specified if loading from binary image stack" + " ('stack' value for 'inputFormat' parameter)") if not ext: ext = DEFAULT_EXTENSIONS.get(inputFormat.lower(), None) if shuffle: from thunder.rdds.fileio.imagesloader import ImagesLoader loader = ImagesLoader(self._sc) if inputFormat.lower() == 'stack': images = loader.fromStack(dataPath, dims, dtype=dtype, ext=ext, startIdx=startIdx, stopIdx=stopIdx, recursive=recursive) else: # tif / tif stack images = loader.fromTif(dataPath, ext=ext, startIdx=startIdx, stopIdx=stopIdx, recursive=recursive) return images.toBlocks(blockSize, units=blockSizeUnits).toSeries() else: from thunder.rdds.fileio.seriesloader import SeriesLoader loader = SeriesLoader(self._sc) if inputFormat.lower() == 'stack': return loader.fromStack(dataPath, dims, ext=ext, dtype=dtype, blockSize=blockSize, startIdx=startIdx, stopIdx=stopIdx, recursive=recursive) else: # tif / tif stack return loader.fromTif(dataPath, ext=ext, blockSize=blockSize, startIdx=startIdx, stopIdx=stopIdx, recursive=recursive)
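# A small standalone numpy sketch of the dims convention described in the
# docstring above (no Spark needed; the output filename is illustrative).
from numpy import arange

z, y, x = 2, 3, 4
ary = arange(z * y * x, dtype='int16').reshape((z, y, x))

# tofile writes in C order, so the last numpy axis (x) changes fastest on disk;
# the stack loader expects the first entry of dims to change fastest, so the
# correct dims argument is the reversed numpy shape.
ary.tofile("myarray.stack")
dims = ary.shape[::-1]    # (4, 3, 2), i.e. (x, y, z)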
def convertImagesToSeries(self, datapath, outputdirpath, dims=None, inputformat='stack', dtype='int16',
                          blocksize="150M", startidx=None, stopidx=None, shuffle=False, overwrite=False):
    """
    Write out Images data as Series data, saved in a flat binary format.

    The resulting Series data files may subsequently be read in using the loadSeries() method. The
    Series data object that results will be equivalent to that which would be generated by
    loadImagesAsSeries(). It is expected that loading Series data directly from the series flat binary
    format, using loadSeries(), will be faster than converting image data to a Series object through
    loadImagesAsSeries().

    Parameters
    ----------
    datapath: string
        Path to data files or directory, specified as either a local filesystem path or in a URI-like
        format, including scheme. A datapath argument may include a single '*' wildcard character in
        the filename. Examples of valid datapaths include "a/local/relative/directory/*.stack",
        "s3n:///my-s3-bucket/data/mydatafile.tif", "/mnt/my/absolute/data/directory/", or
        "file:///mnt/another/data/directory/".

    outputdirpath: string
        Path to a directory into which to write Series file output. An outputdir argument may be either
        a path on the local file system or a URI-like format, as in datapath. Examples of valid
        outputdirpaths include "a/relative/directory/", "s3n:///my-s3-bucket/data/myoutput/", or
        "file:///mnt/a/new/directory/". If the directory specified by outputdirpath already exists and
        the 'overwrite' parameter is False, this method will throw a ValueError. If the directory
        exists and 'overwrite' is True, the existing directory and all its contents will be deleted and
        overwritten.

    dims: tuple of positive int, optional (but required if inputformat is 'stack')
        Dimensions of input image data, for instance (1024, 1024, 48). Binary stack data will be
        interpreted as coming from a multidimensional array of the specified dimensions.
        The first dimension of the passed dims tuple should be the one that is changing most rapidly
        on disk. So for instance given dims of (x, y, z), the coordinates of the data in a binary stack
        file should be ordered as [(x0, y0, z0), (x1, y0, z0), ..., (xN, y0, z0), (x0, y1, z0),
        (x1, y1, z0), ..., (xN, yM, z0), (x0, y0, z1), ..., (xN, yM, zP)]. This is the opposite
        convention from that used by numpy, which by default has the fastest-changing dimension listed
        last (the row-major, C-order convention). Thus, if loading a numpy array `ary`, where
        `ary.shape == (z, y, x)`, written to disk by `ary.tofile("myarray.stack")`, the corresponding
        dims parameter should be (x, y, z).
        If inputformat is 'tif-stack', the dims parameter (if any) will be ignored; data dimensions
        will instead be read out from the tif file headers.

    inputformat: {'stack', 'tif-stack'}. optional, default 'stack'
        Expected format of the input data. 'stack' indicates flat files of raw binary data, while
        'tif-stack' indicates a sequence of multipage tif files, with each page of the tif
        corresponding to a separate z-plane. For both stacks and tif stacks, separate files are
        interpreted as distinct time points, with ordering given by lexicographic sorting of file
        names.
        This method assumes that stack data consists of signed 16-bit integers in native byte order.
        The lower-level API method SeriesLoader.saveFromStack() allows alternative data types to be
        read in.

    dtype: string or numpy dtype. optional, default 'int16'
        Data type of the image files to be loaded, specified as a numpy "dtype" string. If inputformat
        is 'tif-stack', the dtype parameter (if any) will be ignored; data type will instead be read
        out from the tif headers.

    blocksize: string formatted as e.g. "64M", "512k", "2G", or positive int. optional, default "150M"
        Requested size of individual output files in bytes (or kilobytes, megabytes, gigabytes). This
        parameter also indirectly controls the number of Spark partitions to be used, with one
        partition used per block created.

    startidx: nonnegative int, optional
        startidx and stopidx are convenience parameters to allow only a subset of input files to be
        read in. These parameters give the starting index (inclusive) and final index (exclusive) of
        the data files to be used after lexicographically sorting all input data files matching the
        datapath argument. For example, startidx=None (the default) and stopidx=10 will cause only the
        first 10 data files in datapath to be read in; startidx=2 and stopidx=3 will cause only the
        third file (zero-based index of 2) to be read in. startidx and stopidx use the python slice
        indexing convention (zero-based indexing with an exclusive final position).

    stopidx: nonnegative int, optional
        See startidx.

    shuffle: boolean, optional, default False
        Controls whether the conversion from Images to Series formats will make use of a Spark
        shuffle-based method. The default at present is not to use a shuffle. The shuffle-based method
        may lead to higher performance in some cases, but the default method appears to be more stable
        with larger data set sizes. This default may change in future releases.

    overwrite: boolean, optional, default False
        If true, the directory specified by outputdirpath will first be deleted, along with all its
        contents, if it already exists. (Use with caution.) If false, a ValueError will be thrown if
        outputdirpath is found to already exist.
    """
    checkparams(inputformat, ['stack', 'tif-stack'])

    if inputformat.lower() == 'stack' and not dims:
        raise ValueError("Dimensions ('dims' parameter) must be specified if loading from binary image stack" +
                         " ('stack' value for 'inputformat' parameter)")

    if shuffle:
        from thunder.rdds.fileio.imagesloader import ImagesLoader
        loader = ImagesLoader(self._sc)
        if inputformat.lower() == 'stack':
            loader.fromStack(datapath, dims, dtype=dtype, startidx=startidx, stopidx=stopidx)\
                .saveAsBinarySeries(outputdirpath, blockSize=blocksize, overwrite=overwrite)
        else:
            loader.fromMultipageTif(datapath, startidx=startidx, stopidx=stopidx)\
                .saveAsBinarySeries(outputdirpath, blockSize=blocksize, overwrite=overwrite)
    else:
        from thunder.rdds.fileio.seriesloader import SeriesLoader
        loader = SeriesLoader(self._sc)
        if inputformat.lower() == 'stack':
            loader.saveFromStack(datapath, outputdirpath, dims, datatype=dtype, blockSize=blocksize,
                                 overwrite=overwrite, startidx=startidx, stopidx=stopidx)
        else:
            loader.saveFromMultipageTif(datapath, outputdirpath, blockSize=blocksize,
                                        startidx=startidx, stopidx=stopidx, overwrite=overwrite)
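# A minimal usage sketch of this lowercase-parameter version (not part of the
# method above): assumes a ThunderContext `tsc`; paths and dims are hypothetical.
tsc.convertImagesToSeries("/mnt/data/stacks/", "/mnt/data/series-out/",
                          dims=(1024, 1024, 48), inputformat='stack', overwrite=True)
data = tsc.loadSeries("/mnt/data/series-out/")   # read the converted output back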
def loadSeries(self, dataPath, nkeys=None, nvalues=None, inputFormat='binary', minPartitions=None,
               confFilename='conf.json', keyType=None, valueType=None, keyPath=None, varName=None):
    """
    Loads a Series object from data stored as binary, text, npy, or mat.

    For binary and text, supports single files or multiple files stored on a local file system,
    a networked file system (mounted and available on all cluster nodes), Amazon S3, or HDFS.
    For local formats (npy and mat) only local file systems are currently supported.

    Parameters
    ----------
    dataPath: string
        Path to data files or directory, as either a local filesystem path or a URI.
        May include a single '*' wildcard in the filename. Examples of valid dataPaths include
        "local/directory/*.stack", "s3n:///my-s3-bucket/data/", or "file:///mnt/another/directory/".

    nkeys: int, optional (required if `inputFormat` is 'text'), default = None
        Number of keys per record (e.g. 3 for (x, y, z) coordinate keys). Must be specified for
        text data; can be specified here or in a configuration file for binary data.

    nvalues: int, optional (required if `inputFormat` is 'text')
        Number of values per record. Must be specified here or in a configuration file for binary
        data.

    inputFormat: {'text', 'binary', 'npy', 'mat'}. optional, default = 'binary'
        Format of data to be read.

    minPartitions: int, optional, default = SparkContext.minParallelism
        Minimum number of Spark partitions to use, only for text.

    confFilename: string, optional, default 'conf.json'
        Path to JSON file with configuration options including 'nkeys', 'nvalues', 'keyType', and
        'valueType'. If a file is not found at the given path, then the base directory in 'dataPath'
        will be checked. Parameters will override the conf file.

    keyType: string or numpy dtype, optional, default = None
        Numerical type of keys, will override conf file.

    valueType: string or numpy dtype, optional, default = None
        Numerical type of values, will override conf file.

    keyPath: string, optional, default = None
        Path to file with keys when loading from npy or mat.

    varName : str, optional, default = None
        Variable name to load (for MAT files only)

    Returns
    -------
    data: thunder.rdds.Series
        A Series object, wrapping an RDD, with (n-tuples of ints) : (numpy array) pairs
    """
    checkParams(inputFormat, ['text', 'binary', 'npy', 'mat'])

    from thunder.rdds.fileio.seriesloader import SeriesLoader
    loader = SeriesLoader(self._sc, minPartitions=minPartitions)

    if inputFormat.lower() == 'binary':
        data = loader.fromBinary(dataPath, confFilename=confFilename, nkeys=nkeys, nvalues=nvalues,
                                 keyType=keyType, valueType=valueType)
    elif inputFormat.lower() == 'text':
        if nkeys is None:
            raise Exception('Must provide number of keys per record for loading from text')
        data = loader.fromText(dataPath, nkeys=nkeys)
    elif inputFormat.lower() == 'npy':
        data = loader.fromNpyLocal(dataPath, keyPath)
    else:
        if varName is None:
            raise Exception('Must provide variable name for loading MAT files')
        data = loader.fromMatLocal(dataPath, varName, keyPath)

    return data
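# A minimal sketch of the npy and mat branches of loadSeries (not part of the
# method above): assumes a ThunderContext `tsc`; file and variable names are
# hypothetical.
data = tsc.loadSeries("records.npy", inputFormat='npy', keyPath="keys.npy")
data = tsc.loadSeries("records.mat", inputFormat='mat', varName='ts')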
def convertImagesToSeries(self, dataPath, outputDirPath, dims=None, inputFormat='stack', ext=None,
                          dtype='int16', blockSize="150M", blockSizeUnits="pixels", startIdx=None,
                          stopIdx=None, shuffle=True, overwrite=False, recursive=False, nplanes=None,
                          npartitions=None, renumber=False, confFilename='conf.json'):
    """
    Write out Images data as Series data, saved in a flat binary format.

    The resulting files may subsequently be read in using ThunderContext.loadSeries().
    Loading Series data directly will likely be faster than converting image data
    to a Series object through loadImagesAsSeries().

    Parameters
    ----------
    dataPath: string
        Path to data files or directory, as either a local filesystem path or a URI.
        May include a single '*' wildcard in the filename. Examples of valid dataPaths include
        "local/directory/*.stack", "s3n:///my-s3-bucket/data/", or "file:///mnt/another/directory/".

    outputDirPath: string
        Path to directory to write Series file output. May be either a path on the local file system
        or a URI-like format, such as "local/directory", "s3n:///my-s3-bucket/data/", or
        "file:///mnt/another/directory/". If the directory exists and 'overwrite' is True, the
        existing directory and all its contents will be deleted and overwritten.

    dims: tuple of positive int, optional (required if inputFormat is 'stack')
        Image dimensions. Binary stack data will be interpreted as a multidimensional array with the
        given dimensions, and should be stored in column-major order (Fortran or Matlab convention),
        where the first dimension changes most rapidly. For 'png' or 'tif' data dimensions will be
        read from the image file headers.

    inputFormat: str, optional, default = 'stack'
        Expected format of the input data: 'stack', 'png', or 'tif'. 'stack' indicates flat binary
        stacks. 'png' or 'tif' indicate image formats. Pages of a multipage tif file will extend along
        the third dimension. Separate files are interpreted as distinct records, with ordering given by
        lexicographic sorting of file names.

    ext: string, optional, default = None
        File extension, default will be "bin" if inputFormat=="stack", "tif" for inputFormat=='tif',
        and 'png' for inputFormat=="png".

    dtype: string or numpy dtype. optional, default 'int16'
        Data type of the image files to be loaded, specified as a numpy "dtype" string. Ignored for
        'tif' or 'png' (data will be inferred from image formats).

    blockSize: string or positive int, optional, default "150M"
        Requested size of blocks (e.g "64M", "512k", "2G"). If shuffle=True, can also be a tuple of int
        specifying the number of pixels or splits per dimension. Indirectly controls the number of
        Spark partitions, with one partition per block.

    blockSizeUnits: string, either "pixels" or "splits", default "pixels"
        Units for interpreting a tuple passed as blockSize when shuffle=True.

    startIdx: nonnegative int, optional, default = None
        Convenience parameters to read only a subset of input files. Uses python slice conventions
        (zero-based indexing with exclusive final position). These parameters give the starting and
        final index after lexicographic sorting.

    stopIdx: nonnegative int, optional, default = None
        See startIdx.

    shuffle: boolean, optional, default = True
        Controls whether the conversion from Images to Series formats will make use of a Spark
        shuffle-based method.

    overwrite: boolean, optional, default False
        If true, the directory specified by outputDirPath will be deleted (recursively) if it already
        exists. (Use with caution.)

    recursive: boolean, optional, default = False
        If true, will recursively descend directories rooted at dataPath, loading all files in the
        tree with an appropriate extension.

    nplanes: positive integer, optional, default = None
        Subdivide individual image files. Every `nplanes` from each file will be considered a new
        record. With nplanes=None (the default), a single file will be considered as representing a
        single record. If the number of records per file is not the same across all files, then
        `renumber` should be set to True to ensure consistent keys.

    npartitions: positive int, optional, default = None
        Specify number of partitions for the RDD, if unspecified will use 1 partition per image.

    renumber: boolean, optional, default = False
        Recalculate keys for records after images are loaded. Only necessary if different files
        contain different numbers of records (e.g. due to specifying nplanes). See Images.renumber().

    confFilename : string, optional, default = 'conf.json'
        Name of conf file if using to specify parameters for binary stack data
    """
    checkParams(inputFormat, ['stack', 'tif', 'tif-stack'])

    if not overwrite:
        raiseErrorIfPathExists(outputDirPath, awsCredentialsOverride=self._credentials)
        overwrite = True  # prevent additional downstream checks for this path

    if not ext:
        ext = DEFAULT_EXTENSIONS.get(inputFormat.lower(), None)

    if shuffle:
        from thunder.rdds.fileio.imagesloader import ImagesLoader
        loader = ImagesLoader(self._sc)
        if inputFormat.lower() == 'stack':
            images = loader.fromStack(dataPath, dims, ext=ext, dtype=dtype, startIdx=startIdx,
                                      stopIdx=stopIdx, recursive=recursive, nplanes=nplanes,
                                      npartitions=npartitions, confFilename=confFilename)
        else:
            # 'tif' or 'tif-stack'
            images = loader.fromTif(dataPath, ext=ext, startIdx=startIdx, stopIdx=stopIdx,
                                    recursive=recursive, nplanes=nplanes, npartitions=npartitions)
        if renumber:
            images = images.renumber()
        images.toBlocks(blockSize, units=blockSizeUnits).saveAsBinarySeries(outputDirPath, overwrite=overwrite)
    else:
        from thunder.rdds.fileio.seriesloader import SeriesLoader
        if nplanes is not None:
            raise NotImplementedError("nplanes is not supported with shuffle=False")
        if npartitions is not None:
            raise NotImplementedError("npartitions is not supported with shuffle=False")
        loader = SeriesLoader(self._sc)
        if inputFormat.lower() == 'stack':
            loader.saveFromStack(dataPath, outputDirPath, dims, ext=ext, dtype=dtype,
                                 blockSize=blockSize, overwrite=overwrite, startIdx=startIdx,
                                 stopIdx=stopIdx, recursive=recursive)
        else:
            # 'tif' or 'tif-stack'
            loader.saveFromTif(dataPath, outputDirPath, ext=ext, blockSize=blockSize,
                               startIdx=startIdx, stopIdx=stopIdx, overwrite=overwrite,
                               recursive=recursive)
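# A minimal usage sketch of this version of convertImagesToSeries (not part of
# the method above): assumes a ThunderContext `tsc`; the bucket and prefixes
# are hypothetical.
tsc.convertImagesToSeries("s3n:///my-s3-bucket/data/*.tif", "s3n:///my-s3-bucket/series/",
                          inputFormat='tif', overwrite=True)
data = tsc.loadSeries("s3n:///my-s3-bucket/series/")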
def loadSeries(self, dataPath, nkeys=None, nvalues=None, inputFormat='binary', minPartitions=None,
               confFilename='conf.json', keyType=None, valueType=None):
    """
    Loads a Series object from data stored as text or binary files.

    Supports single files or multiple files stored on a local file system, a networked file system
    (mounted and available on all cluster nodes), Amazon S3, or HDFS.

    Parameters
    ----------
    dataPath: string
        Path to data files or directory, specified as either a local filesystem path or in a URI-like
        format, including scheme. A dataPath argument may include a single '*' wildcard character in
        the filename. Examples of valid dataPaths include "a/local/relative/directory/*.stack",
        "s3n:///my-s3-bucket/data/mydatafile.tif", "/mnt/my/absolute/data/directory/", or
        "file:///mnt/another/data/directory/".

    nkeys: int, optional (but required if `inputFormat` is 'text')
        Dimensionality of data keys. (For instance, (x,y,z) keyed data for 3-dimensional image
        timeseries data.) For text data, number of keys must be specified in this parameter; for binary
        data, number of keys must be specified either in this parameter or in a configuration file
        named by the 'confFilename' argument if this parameter is not set.

    nvalues: int, optional (but required if `inputFormat` is 'text')
        Number of values expected to be read. For binary data, nvalues must be specified either in this
        parameter or in a configuration file named by the 'confFilename' argument if this parameter is
        not set.

    inputFormat: {'text', 'binary'}. optional, default 'binary'
        Format of data to be read.

    minPartitions: int, optional
        Explicitly specify minimum number of Spark partitions to be generated from this data. Used only
        for text data. Default is to use minParallelism attribute of Spark context object.

    confFilename: string, optional, default 'conf.json'
        Path to JSON file with configuration options including 'nkeys', 'nvalues', 'keytype', and
        'valuetype'. If a file is not found at the given path, then the base directory given in
        'dataPath' will also be checked. Parameters `nkeys` or `nvalues` that are specified as explicit
        arguments to this method will take priority over those found in the conf file if both are
        present.

    Returns
    -------
    data: thunder.rdds.Series
        A newly-created Series object, wrapping an RDD of series data. This RDD will have as keys an
        n-tuple of int, with n given by `nkeys` or the configuration passed in `confFilename`. RDD
        values will be a numpy array of length `nvalues` (or as specified in the passed configuration
        file).
    """
    checkParams(inputFormat, ['text', 'binary'])

    from thunder.rdds.fileio.seriesloader import SeriesLoader
    loader = SeriesLoader(self._sc, minPartitions=minPartitions)

    if inputFormat.lower() == 'text':
        data = loader.fromText(dataPath, nkeys=nkeys)
    else:
        # 'binary' (checkParams guarantees inputFormat is 'text' or 'binary')
        data = loader.fromBinary(dataPath, confFilename=confFilename, nkeys=nkeys, nvalues=nvalues,
                                 keyType=keyType, valueType=valueType)

    return data
def loadImagesAsSeries(self, dataPath, dims=None, inputFormat='stack', ext=None, dtype='int16',
                       blockSize="150M", blockSizeUnits="pixels", startIdx=None, stopIdx=None,
                       shuffle=True, recursive=False, nplanes=None, npartitions=None,
                       renumber=False, confFilename='conf.json'):
    """
    Load Images data as Series data.

    Parameters
    ----------
    dataPath: string
        Path to data files or directory, as either a local filesystem path or a URI. May include a
        single '*' wildcard in the filename. Examples of valid dataPaths include
        'local/directory/*.stack', 's3n:///my-s3-bucket/data/', or 'file:///mnt/another/directory/'.

    dims: tuple of positive int, optional (required if inputFormat is 'stack')
        Image dimensions. Binary stack data will be interpreted as a multidimensional array with the
        given dimensions, and should be stored in column-major order (Fortran or Matlab convention),
        where the first dimension changes most rapidly. For 'tif' data, dimensions will be read from
        the image file headers.

    inputFormat: {'stack', 'tif', 'tif-stack'}, optional, default = 'stack'
        Expected format of the input data. 'stack' indicates flat binary stacks; 'tif' and
        'tif-stack' indicate TIF images. Each page of a multipage tif file will extend along the
        third dimension. Separate files are interpreted as distinct records, with ordering given by
        lexicographic sorting of file names.

    ext: string, optional, default = None
        File extension; by default this will be "bin" if inputFormat=="stack" and "tif" if
        inputFormat=="tif".

    dtype: string or numpy dtype, optional, default = 'int16'
        Data type of the image files to be loaded, specified as a numpy "dtype" string. Ignored for
        'tif' input (data type will be inferred from the image headers).

    blockSize: string or positive int, optional, default = "150M"
        Requested size of blocks (e.g. "64M", "512k", "2G"). If shuffle=True, can also be a tuple of
        int specifying the number of pixels or splits per dimension. Indirectly controls the number
        of Spark partitions, with one partition per block.

    blockSizeUnits: string, either "pixels" or "splits", default = "pixels"
        Units for interpreting a tuple passed as blockSize when shuffle=True.

    startIdx: nonnegative int, optional, default = None
        startIdx and stopIdx are convenience parameters to read only a subset of input files. They
        use python slice conventions (zero-based indexing with exclusive final position), giving the
        starting and final index after lexicographic sorting.

    stopIdx: nonnegative int, optional, default = None
        See startIdx.

    shuffle: boolean, optional, default = True
        Controls whether the conversion from Images to Series formats will make use of a Spark
        shuffle-based method.

    recursive: boolean, optional, default = False
        If true, will recursively descend directories rooted at dataPath, loading all files in the
        tree with an appropriate extension.

    nplanes: positive integer, optional, default = None
        Subdivide individual image files: every `nplanes` planes from each file will be considered a
        new record. With nplanes=None (the default), a single file will be considered as representing
        a single record. If the number of records per file is not the same across all files, then
        `renumber` should be set to True to ensure consistent keys.

    npartitions: positive int, optional, default = None
        Number of partitions for the RDD; if unspecified, one partition will be used per image.

    renumber: boolean, optional, default = False
        Recalculate keys for records after images are loaded. Only necessary if different files
        contain different numbers of records (e.g. due to specifying nplanes). See Images.renumber().

    confFilename : string, optional, default = 'conf.json'
        Name of conf file, if used to specify parameters for binary stack data.

    Returns
    -------
    data: thunder.rdds.Series
        A Series object, wrapping an RDD of ((n-tuple of int), (numpy array)) pairs. Keys will be
        n-tuples of int, with n given by the dimensionality of the images, and correspond to indices
        into the image arrays. Values will have length equal to the number of image files, with each
        image contributing one point to the value array, ordered by the lexicographic ordering of
        image file names.
    """
    checkParams(inputFormat, ['stack', 'tif', 'tif-stack'])

    if not ext:
        ext = DEFAULT_EXTENSIONS.get(inputFormat.lower(), None)

    if shuffle:
        from thunder.rdds.fileio.imagesloader import ImagesLoader
        loader = ImagesLoader(self._sc)
        if inputFormat.lower() == 'stack':
            images = loader.fromStack(dataPath, dims, dtype=dtype, ext=ext, startIdx=startIdx,
                                      stopIdx=stopIdx, recursive=recursive, nplanes=nplanes,
                                      npartitions=npartitions, confFilename=confFilename)
        else:
            # 'tif' or 'tif-stack'
            images = loader.fromTif(dataPath, ext=ext, startIdx=startIdx, stopIdx=stopIdx,
                                    recursive=recursive, nplanes=nplanes, npartitions=npartitions)
        if renumber:
            images = images.renumber()
        return images.toBlocks(blockSize, units=blockSizeUnits).toSeries()
    else:
        from thunder.rdds.fileio.seriesloader import SeriesLoader
        if nplanes is not None:
            raise NotImplementedError("nplanes is not supported with shuffle=False")
        if npartitions is not None:
            raise NotImplementedError("npartitions is not supported with shuffle=False")
        if renumber:
            raise NotImplementedError("renumber is not supported with shuffle=False")

        loader = SeriesLoader(self._sc)
        if inputFormat.lower() == 'stack':
            return loader.fromStack(dataPath, dims, ext=ext, dtype=dtype, blockSize=blockSize,
                                    startIdx=startIdx, stopIdx=stopIdx, recursive=recursive)
        else:
            # 'tif' or 'tif-stack'
            return loader.fromTif(dataPath, ext=ext, blockSize=blockSize, startIdx=startIdx,
                                  stopIdx=stopIdx, recursive=recursive)
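A hedged usage sketch (hypothetical paths and dimensions; `tsc` again assumed to be a ThunderContext). Stack input needs explicit dims, given in reverse of the numpy shape convention, while tif input reads dimensions from the file headers:

# hypothetical paths; tsc is assumed to be a ThunderContext
stackSeries = tsc.loadImagesAsSeries("file:///mnt/data/stacks/", dims=(128, 64, 12),
                                     inputFormat='stack', dtype='int16', blockSize="64M")
# each tif file is split into records of 4 pages apiece; renumber keeps keys
# consistent if files contain different numbers of pages
tifSeries = tsc.loadImagesAsSeries("s3n:///my-s3-bucket/data/", inputFormat='tif',
                                   nplanes=4, renumber=True)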
def convertImagesToSeries(self, dataPath, outputDirPath, dims=None, inputFormat='stack', ext=None,
                          dtype='int16', blockSize="150M", blockSizeUnits="pixels", startIdx=None,
                          stopIdx=None, shuffle=True, overwrite=False, recursive=False,
                          nplanes=None, npartitions=None, renumber=False):
    """
    Write out Images data as Series data, saved in a flat binary format.

    The resulting Series data files may subsequently be read in using the loadSeries() method. The
    Series data object that results will be equivalent to that which would be generated by
    loadImagesAsSeries(). It is expected that loading Series data directly from the series flat
    binary format, using loadSeries(), will be faster than converting image data to a Series object
    through loadImagesAsSeries().

    Parameters
    ----------
    dataPath: string
        Path to data files or directory, specified as either a local filesystem path or in a URI-like
        format, including scheme. A dataPath argument may include a single '*' wildcard character in
        the filename. Examples of valid dataPaths include 'a/local/relative/directory/*.stack',
        's3n:///my-s3-bucket/data/mydatafile.tif', '/mnt/my/absolute/data/directory/', or
        'file:///mnt/another/data/directory/'.

    outputDirPath: string
        Path to a directory into which to write Series file output. An outputDirPath argument may be
        either a path on the local file system or in a URI-like format, as in dataPath. Examples of
        valid outputDirPaths include 'a/relative/directory/', 's3n:///my-s3-bucket/data/myoutput/',
        or 'file:///mnt/a/new/directory/'. If the directory specified by outputDirPath already exists
        and the 'overwrite' parameter is False, this method will throw a ValueError. If the directory
        exists and 'overwrite' is True, the existing directory and all its contents will be deleted
        and overwritten.

    dims: tuple of positive int, optional (but required if inputFormat is 'stack')
        Dimensions of input image data, for instance (1024, 1024, 48). Binary stack data will be
        interpreted as coming from a multidimensional array of the specified dimensions. The first
        dimension of the passed dims tuple should be the one that is changing most rapidly on disk.
        So for instance given dims of (x, y, z), the coordinates of the data in a binary stack file
        should be ordered as [(x0, y0, z0), (x1, y0, z0), ..., (xN, y0, z0), (x0, y1, z0),
        (x1, y1, z0), ..., (xN, yM, z0), (x0, y0, z1), ..., (xN, yM, zP)]. This is the opposite
        convention from that used by numpy, which by default has the fastest-changing dimension
        listed last (row-major, or C, convention). Thus, if loading a numpy array `ary`, where
        `ary.shape == (z, y, x)`, written to disk by `ary.tofile("myarray.stack")`, the corresponding
        dims parameter should be (x, y, z). If inputFormat is 'tif', the dims parameter (if any) will
        be ignored; data dimensions will instead be read out from the tif file headers.

    inputFormat: {'stack', 'tif'}, optional, default 'stack'
        Expected format of the input data. 'stack' indicates flat files of raw binary data, while
        'tif' indicates greyscale / luminance TIF images. Each page of a multipage tif file will be
        interpreted as a separate z-plane. For both stacks and tif stacks, separate files are
        interpreted as distinct time points, with ordering given by lexicographic sorting of file
        names.

    ext: string, optional, default None
        Extension required on data files to be loaded. By default this will be "stack" if
        inputFormat=="stack" and "tif" if inputFormat=="tif".

    dtype: string or numpy dtype, optional, default 'int16'
        Data type of the image files to be loaded, specified as a numpy "dtype" string. If
        inputFormat is 'tif', the dtype parameter (if any) will be ignored; data type will instead be
        read out from the tif headers.

    blockSize: string formatted as e.g. "64M", "512k", "2G", or positive int, tuple of positive int,
        or instance of BlockingStrategy. optional, default "150M"
        Requested size of individual output files in bytes (or kilobytes, megabytes, gigabytes).
        blockSize can also be an instance of BlockingStrategy, or a tuple of int specifying either
        the number of pixels or of splits per dimension to apply to the loaded images. Whether a
        tuple of int is interpreted as pixels or as splits depends on the value of the blockSizeUnits
        parameter. This parameter also indirectly controls the number of Spark partitions to be used,
        with one partition used per block created.

    blockSizeUnits: string, either "pixels" or "splits" (or a unique prefix of each, such as "s"),
        default "pixels"
        Specifies units to be used in interpreting a tuple passed as blockSize when shuffle=True. If
        a string or a BlockingStrategy instance is passed as blockSize, or if shuffle=False, this
        parameter has no effect.

    startIdx: nonnegative int, optional
        startIdx and stopIdx are convenience parameters to allow only a subset of input files to be
        read in. These parameters give the starting index (inclusive) and final index (exclusive) of
        the data files to be used after lexicographically sorting all input data files matching the
        dataPath argument. For example, startIdx=None (the default) and stopIdx=10 will cause only
        the first 10 data files in dataPath to be read in; startIdx=2 and stopIdx=3 will cause only
        the third file (zero-based index of 2) to be read in. startIdx and stopIdx use the python
        slice indexing convention (zero-based indexing with an exclusive final position).

    stopIdx: nonnegative int, optional
        See startIdx.

    shuffle: boolean, optional, default True
        Controls whether the conversion from Images to Series formats will make use of a Spark
        shuffle-based method.

    overwrite: boolean, optional, default False
        If true, the directory specified by outputDirPath will first be deleted, along with all its
        contents, if it already exists. (Use with caution.) If false, a ValueError will be thrown if
        outputDirPath is found to already exist.

    recursive: boolean, optional, default False
        If true, will recursively descend directories rooted at dataPath, loading all files in the
        tree that have an appropriate extension. Recursive loading is currently only implemented for
        local filesystems (not s3), and only with shuffle=True.

    nplanes: positive integer, optional, default None
        If passed, will cause a single image file to be subdivided into multiple records. Every
        `nplanes` z-planes (or multipage tif pages) in the file will be taken as a new record, with
        the first `nplanes` planes of the first file being record 0, the second `nplanes` planes
        being record 1, and so on, until the first file is exhausted and record ordering continues
        with the first `nplanes` planes of the second file. With nplanes=None (the default), a single
        file will be considered as representing a single record. Keys are calculated assuming that
        all input files contain the same number of records; if the number of records per file is not
        the same across all files, then `renumber` should be set to True to ensure consistent keys.
        nplanes is only supported for shuffle=True (the default).

    npartitions: positive int, optional
        If specified, request a certain number of partitions for the underlying Spark RDD. Default is
        1 partition per image file. Only applies when shuffle=True.

    renumber: boolean, optional, default False
        If renumber evaluates to True, then the keys for each record will be explicitly recalculated
        after all images are loaded. This should only be necessary at load time when different files
        contain different numbers of records. renumber is only supported for shuffle=True (the
        default). See Images.renumber().
    """
    checkParams(inputFormat, ['stack', 'tif', 'tif-stack'])

    if inputFormat.lower() == 'stack' and not dims:
        raise ValueError("Dimensions ('dims' parameter) must be specified if loading from binary "
                         "image stack ('stack' value for 'inputFormat' parameter)")

    if not overwrite:
        raiseErrorIfPathExists(outputDirPath, awsCredentialsOverride=self._credentials)
        overwrite = True  # prevent additional downstream checks for this path

    if not ext:
        ext = DEFAULT_EXTENSIONS.get(inputFormat.lower(), None)

    if shuffle:
        from thunder.rdds.fileio.imagesloader import ImagesLoader
        loader = ImagesLoader(self._sc)
        if inputFormat.lower() == 'stack':
            images = loader.fromStack(dataPath, dims, ext=ext, dtype=dtype, startIdx=startIdx,
                                      stopIdx=stopIdx, recursive=recursive, nplanes=nplanes,
                                      npartitions=npartitions)
        else:
            # 'tif' or 'tif-stack'
            images = loader.fromTif(dataPath, ext=ext, startIdx=startIdx, stopIdx=stopIdx,
                                    recursive=recursive, nplanes=nplanes, npartitions=npartitions)
        if renumber:
            images = images.renumber()
        images.toBlocks(blockSize, units=blockSizeUnits).saveAsBinarySeries(outputDirPath,
                                                                            overwrite=overwrite)
    else:
        from thunder.rdds.fileio.seriesloader import SeriesLoader
        if nplanes is not None:
            raise NotImplementedError("nplanes is not supported with shuffle=False")
        if npartitions is not None:
            raise NotImplementedError("npartitions is not supported with shuffle=False")
        if renumber:
            raise NotImplementedError("renumber is not supported with shuffle=False")

        loader = SeriesLoader(self._sc)
        if inputFormat.lower() == 'stack':
            loader.saveFromStack(dataPath, outputDirPath, dims, ext=ext, dtype=dtype,
                                 blockSize=blockSize, overwrite=overwrite, startIdx=startIdx,
                                 stopIdx=stopIdx, recursive=recursive)
        else:
            # 'tif' or 'tif-stack'
            loader.saveFromTif(dataPath, outputDirPath, ext=ext, blockSize=blockSize,
                               startIdx=startIdx, stopIdx=stopIdx, overwrite=overwrite,
                               recursive=recursive)
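To make the dims convention concrete, a sketch (hypothetical paths; `tsc` assumed to be a ThunderContext) of the write-then-read roundtrip the docstring describes: a numpy array of shape (z, y, x) written via tofile() corresponds to a dims argument of (x, y, z), and the converted output can be read back with loadSeries():

import numpy as np

ary = np.arange(12 * 64 * 128, dtype='int16').reshape((12, 64, 128))  # (z, y, x)
ary.tofile("/mnt/data/stacks/img00.stack")

# dims is the reverse of the numpy shape: (x, y, z)
tsc.convertImagesToSeries("file:///mnt/data/stacks/", "file:///mnt/data/series/",
                          dims=(128, 64, 12), inputFormat='stack', dtype='int16',
                          overwrite=True)
series = tsc.loadSeries("file:///mnt/data/series/")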
def _run_roundtrip_exception_tst(self, nimages, aryShape, dtypeSpec, sizeSpec):
    testArrays = TestSeriesBinaryWriteFromStack.generateTestImages(nimages, aryShape, dtypeSpec)
    loader = SeriesLoader(self.sc)
    series = loader.fromArrays(testArrays)
    assert_raises(ValueError, series.toBlocks, sizeSpec)
def _run_roundtrip_tst(self, testCount, arrays, blockSize):
    print "Running TestSeriesBinaryWriteFromStack roundtrip test #%d" % testCount
    insubdir = os.path.join(self.outputdir, 'input%d' % testCount)
    os.mkdir(insubdir)

    outsubdir = os.path.join(self.outputdir, 'output%d' % testCount)
    # outsubdir is not pre-created here; saveFromStack below is expected to create it

    for aryCount, array in enumerate(arrays):
        # array.tofile always writes in column-major order...
        array.tofile(os.path.join(insubdir, "img%02d.stack" % aryCount))

    # ... but we will read and interpret these as though they are in row-major order
    dims = list(arrays[0].shape)
    dims.reverse()

    underTest = SeriesLoader(self.sc)
    underTest.saveFromStack(insubdir, outsubdir, dims, blockSize=blockSize,
                            datatype=str(arrays[0].dtype))
    series = underTest.fromStack(insubdir, dims, datatype=str(arrays[0].dtype))

    roundtripped_series = underTest.fromBinary(outsubdir)
    roundtripped = roundtripped_series.collect()
    direct = series.collect()

    expecteddtype = str(smallest_float_type(arrays[0].dtype))
    assert_equals(expecteddtype, roundtripped_series.dtype)
    assert_equals(expecteddtype, series.dtype)
    assert_equals(expecteddtype, str(roundtripped[0][1].dtype))
    assert_equals(expecteddtype, str(direct[0][1].dtype))

    with open(os.path.join(outsubdir, "conf.json"), 'r') as fp:
        # check that binary series file data type *matches* input stack data type (not yet
        # converted to float), at least according to conf.json
        conf = json.load(fp)
        assert_equals(str(arrays[0].dtype), conf["valuetype"])

    for ((serieskeys, seriesvalues), (directkeys, directvalues)) in zip(roundtripped, direct):
        assert_equals(directkeys, serieskeys)
        assert_equals(directvalues, seriesvalues)
        for seriesidx, seriesval in enumerate(seriesvalues):
            # flip indices again for row- vs column-major insanity
            arykeys = list(serieskeys)
            arykeys.reverse()
            msg = "Failure on test #%d, time point %d, indices %s" % (
                testCount, seriesidx, str(tuple(arykeys)))
            try:
                assert_almost_equal(arrays[seriesidx][tuple(arykeys)], seriesval, places=4)
            except AssertionError as e:
                raise AssertionError(msg, e)
def generateTestSeries(self):
    from thunder.rdds.fileio.seriesloader import SeriesLoader
    ary1 = arange(8, dtype=dtypeFunc('uint8')).reshape((2, 4))
    ary2 = arange(8, 16, dtype=dtypeFunc('uint8')).reshape((2, 4))
    return SeriesLoader(self.sc).fromArraysAsImages([ary1, ary2])
def _run_tst_fromBinary(self, useConfJson=False):
    # run this as a single big test so as to avoid repeated setUp and tearDown of the spark context
    # data will be a sequence of test data
    # all keys and all values in a test data item must be of the same length
    # keys get converted to ints regardless of raw input format
    DATA = [
        SeriesBinaryTestData.fromArrays([[1, 2, 3]], [[11, 12, 13]], 'int16', 'int16'),
        SeriesBinaryTestData.fromArrays([[1, 2, 3], [5, 6, 7]], [[11], [12]], 'int16', 'int16'),
        SeriesBinaryTestData.fromArrays([[1, 2, 3]], [[11, 12, 13]], 'int16', 'int32'),
        SeriesBinaryTestData.fromArrays([[1, 2, 3]], [[11, 12, 13]], 'int32', 'int16'),
        SeriesBinaryTestData.fromArrays([[1, 2, 3]], [[11.0, 12.0, 13.0]], 'int16', 'float32'),
        SeriesBinaryTestData.fromArrays([[1, 2, 3]], [[11.0, 12.0, 13.0]], 'float32', 'float32'),
        SeriesBinaryTestData.fromArrays([[2, 3, 4]], [[11.0, 12.0, 13.0]], 'float32', 'float32'),
    ]

    for itemidx, item in enumerate(DATA):
        outSubdir = os.path.join(self.outputdir, 'input%d' % itemidx)
        os.mkdir(outSubdir)

        fname = os.path.join(outSubdir, 'inputfile%d.bin' % itemidx)
        with open(fname, 'wb') as f:
            item.writeToFile(f)

        loader = SeriesLoader(self.sc)
        if not useConfJson:
            series = loader.fromBinary(outSubdir, nkeys=item.nkeys, nvalues=item.nvals,
                                       keyType=str(item.keyDtype), valueType=str(item.valDtype))
        else:
            # write configuration file
            conf = {'input': outSubdir,
                    'nkeys': item.nkeys, 'nvalues': item.nvals,
                    'valuetype': str(item.valDtype), 'keytype': str(item.keyDtype)}
            with open(os.path.join(outSubdir, "conf.json"), 'wb') as f:
                json.dump(conf, f, indent=2)
            series = loader.fromBinary(outSubdir)

        seriesData = series.rdd.collect()

        expectedData = item.data
        assert_equals(len(expectedData), len(seriesData),
                      "Differing numbers of k/v pairs in item %d; expected %d, got %d" %
                      (itemidx, len(expectedData), len(seriesData)))

        for expected, actual in zip(expectedData, seriesData):
            expectedKeys = tuple(expected[0])
            expectedType = smallestFloatType(item.valDtype)
            expectedVals = array(expected[1], dtype=expectedType)
            assert_equals(expectedKeys, actual[0],
                          "Key mismatch in item %d; expected %s, got %s" %
                          (itemidx, str(expectedKeys), str(actual[0])))
            assert_true(allclose(expectedVals, actual[1]),
                        "Value mismatch in item %d; expected %s, got %s" %
                        (itemidx, str(expectedVals), str(actual[1])))
            assert_equals(expectedType, str(actual[1].dtype),
                          "Value type mismatch in item %d; expected %s, got %s" %
                          (itemidx, expectedType, str(actual[1].dtype)))
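For reference, the conf dict written in the useConfJson branch above serializes to JSON along these lines (values taken from the first DATA item; the 'input' path is hypothetical):

{
  "input": "/tmp/output/input0",
  "nkeys": 3,
  "nvalues": 3,
  "keytype": "int16",
  "valuetype": "int16"
}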