def test_buffer_export(self):
    """Export an Fcs to an open binary file object, then read the raw
    bytes back and verify the HEADER, TEXT, and DATA segments in place.
    """
    with TmpDir() as tmp:
        out_path = os.path.join(tmp, "export.fcs")
        text_start = 256

        # Build the expected TEXT segment from the same fixture data.
        expected_text = TextSegment(
            self.data.shape[0],
            self.long_channels,
            self.channels,
            self.data.max(axis=0),
            text_start=text_start,
        )
        expected_header = HeaderSegment(
            text_start,
            expected_text.text_end,
            expected_text.data_start,
            expected_text.data_end,
        )

        sample = Fcs(self.data, self.long_channels, self.channels)
        with open(out_path, "wb") as sink:
            sample.export(sink)

        with open(out_path, "rb") as stream:
            # HEADER: first 58 bytes of the file.
            assert stream.read(58).decode("UTF-8") == expected_header.to_string()

            # TEXT: located via the offsets the header advertises.
            stream.seek(text_start)
            text_len = expected_header.text_end - expected_text.text_start + 1
            raw_text = stream.read(text_len).decode("UTF-8").strip()
            assert raw_text == expected_text.to_string()

            # DATA: parse the binary payload and compare to the source array.
            stream.seek(expected_text.data_start)
            data_len = expected_text.data_end - expected_text.data_start + 1
            parsed = DataSegment.from_string(
                stream.read(data_len),
                expected_text.datatype,
                len(expected_text.pnn),
                expected_text.tot,
                expected_text.endian,
            )
            assert np.array_equal(parsed.values, self.data)
def test_data(self):
    """Round-trip the fixture array through the `to_fcs` DataFrame
    accessor and check the values survive intact.
    """
    with TmpDir() as tmp:
        path = os.path.join(tmp, self.name)
        frame = DataFrame(self.data, columns=self.channels)
        frame.to_fcs(path)
        reloaded = Fcs.from_file(path)
        assert np.array_equal(reloaded.values, self.data)
def test_count(self):
    """Write the fixture frame to disk and verify the parsed event
    count matches the number of rows written.
    """
    with TmpDir() as tmp:
        path = os.path.join(tmp, self.name)
        frame = DataFrame(self.data, columns=self.channels)
        frame.to_fcs(path)
        reloaded = Fcs.from_file(path)
        assert reloaded.count == len(self.data)
def test_read_text_segment(self):
    """Write a file with a two-level (short, long) channel index and
    confirm `read_text_segment` recovers both name lists ($PnN / $PnS).
    """
    with TmpDir() as tmp:
        path = os.path.join(tmp, self.name)
        channel_index = pd.MultiIndex.from_tuples(
            list(zip(self.channels, self.long_channels)),
            names=["short", "long"],
        )
        frame = DataFrame(self.data, columns=channel_index)
        frame.to_fcs(path)

        segment = Fcs.read_text_segment(path)
        assert segment.pnn == self.channels
        assert segment.pns == self.long_channels
def test_read_text_segment(self):
    """Upload an FCS file to S3, then read its TEXT segment back
    through an S3-backed file object instead of a local path.

    NOTE(review): this method shares its name with another
    test_read_text_segment earlier in the file — confirm they live in
    different test classes, otherwise the later definition shadows the
    earlier one and only one test is collected.
    """
    with TmpDir() as tmp:
        local_path = os.path.join(tmp, "test.fcs")
        channel_index = pd.MultiIndex.from_tuples(
            list(zip(self.channels, self.long_channels)),
            names=["short", "long"],
        )
        frame = DataFrame(self.data, columns=channel_index)
        frame.to_fcs(local_path)

        # Push the file to the test bucket, then re-open it via S3.
        self.s3.upload_file(local_path, self.bucket_name, "test.fcs")
        open_remote = create_open_func(S3ReadBuffer, bucket=self.bucket_name)
        with open_remote("test.fcs") as remote:
            segment = Fcs.read_text_segment(remote)

        assert segment.pnn == self.channels
        assert segment.pns == self.long_channels
def test_write_text_segment(self):
    """Export a file, overwrite its TEXT segment with new channel
    names, and verify the replacement is what `read_text_segment`
    returns afterwards.

    NOTE(review): the replacement names are passed as the third
    TextSegment positional argument but asserted via `pns` — confirm
    that argument order against TextSegment's signature.
    """
    with TmpDir() as tmp:
        path = os.path.join(tmp, "export.fcs")
        sample = Fcs(self.data, self.long_channels, self.channels)
        sample.export(path)

        replacement = TextSegment(
            self.data.shape[0],
            self.long_channels,
            ["w", "x", "y", "z"],
            self.data.max(axis=0),
            text_start=256,
        )
        Fcs.write_text_segment(path, replacement)

        reread = Fcs.read_text_segment(path)
        assert reread.pns == ["w", "x", "y", "z"]
def test_from_fcs(self):
    """Upload an FCS file to S3 and parse it fully through an
    S3-backed file object: channel names, event count, and values.
    """
    with TmpDir() as tmp:
        local_path = os.path.join(tmp, "test.fcs")
        channel_index = pd.MultiIndex.from_tuples(
            list(zip(self.channels, self.long_channels)),
            names=["short", "long"],
        )
        frame = DataFrame(self.data, columns=channel_index)
        frame.to_fcs(local_path)

        self.s3.upload_file(local_path, self.bucket_name, "test.fcs")
        open_remote = create_open_func(S3ReadBuffer, bucket=self.bucket_name)
        with open_remote("test.fcs") as remote:
            parsed = Fcs.from_file(remote)

        assert parsed.short_channels == self.channels
        assert parsed.long_channels == self.long_channels
        assert parsed.count == len(self.data)
        assert np.array_equal(parsed.values, self.data)