def test_list_fitting(self):
    """Fit a DP mixture model to a list of datasets and to an FCMcollection.

    Both entry points should return one result per dataset, and each
    result's fitted component means should land near the generating means.

    Note: the original used ``FCMCollection`` here while the sibling tests
    use ``FCMcollection``; renamed for consistency with the rest of the file.
    """

    def check_results(results):
        # For each generating mean, the closest fitted mean (elementwise
        # minimum absolute difference over components) must be within unit
        # squared distance.
        for r in results:
            for key in gen_mean:
                diff = np.min(np.abs(r.mus - gen_mean[key]), 0)
                assert np.vdot(diff, diff) < 1

    true1, data1 = self.generate_data()
    true2, data2 = self.generate_data()
    model = DPMixtureModel(3, 2000, 100, 1, type='BEM')

    # Fitting a plain list of arrays yields one result per array.
    rs = model.fit([data1, data2])
    assert len(rs) == 2
    check_results(rs)

    # Fitting an FCMcollection should behave identically.
    fcm1 = FCMdata('test_fcm1', data1, ['fsc', 'ssc'], [0, 1])
    fcm2 = FCMdata('test_fcm2', data2, ['fsc', 'ssc'], [0, 1])
    c = FCMcollection('fcms', [fcm1, fcm2])
    rs = model.fit(c)
    assert len(rs) == 2
    check_results(rs)
def testCheckNames(self):
    """check_names flags collection members whose channel names disagree."""
    pnts = array([[1, 1, 1], [5, 5, 5]])

    # Two members share channel names; the third differs in its last channel.
    fcm1 = FCMdata('test_fcm1', pnts, ['fsc', 'ssc', 'cd3'], [0, 1])
    fcm2 = FCMdata('test_fcm2', pnts, ['fsc', 'ssc', 'cd3'], [0, 1])
    fcm3 = FCMdata('test_fcm3', pnts, ['fsc', 'ssc', 'cd4'], [0, 1])

    inner = FCMcollection('fcms1', [fcm1, fcm2])
    outer = FCMcollection('fcms2', [fcm1, fcm2, fcm3, inner])

    # A homogeneous collection reports True for every channel.
    check1 = inner.check_names()
    assert check1[inner.name] == [True, True, True]

    # A nested collection's report embeds the inner report and marks the
    # mismatched channel False.
    check2 = outer.check_names()
    assert check2[outer.name] == [check1, True, True, False]
def testBoundaryEvents(self):
    """boundary_events reports, per channel, the fraction of events lying
    on that channel's min/max boundary.

    fsc: 0,3,0 -> all three events touch a boundary value (fraction 1).
    ssc: 1,4,2 -> two of three events are boundary values (fraction 2/3).
    cd3: 2,5,5 -> all three events touch a boundary value (fraction 1).
    """
    pnts = array([[0, 1, 2], [3, 4, 5], [0, 2, 5]])
    fcm = FCMdata('test_fcm', pnts, ['fsc', 'ssc', 'cd3'], [0, 1])
    eps = 1e-10
    result = fcm.boundary_events()
    # Compare with abs() so a too-LOW fraction also fails; the original
    # one-sided form (result - expected < eps) passed for any value below
    # the expected fraction.
    assert abs(result['fsc'] - 1) < eps
    assert abs(result['ssc'] - 2.0 / 3.0) < eps
    assert abs(result['cd3'] - 1) < eps
def get_FCMdata(self, auto_comp=False, **kwargs):
    """Return the next FCM data set stored in a FCS file.

    Parses the header, TEXT, ANALYSIS, and DATA segments at the current
    file offset, builds an FCMdata object, optionally compensates it, and
    applies the reader's configured transform ('logicle' or 'log') to the
    FL channels.  Advances ``self.cur_offset`` when the TEXT segment
    declares a 'nextdata' offset, so repeated calls walk a multi-dataset
    FCS file.

    auto_comp : bool
        When True, compensate using the file's own spillover matrix if no
        explicit sidx/spill was supplied to the reader.
    **kwargs
        Logicle-transform overrides: r, T, m, w, a, scale_max, scale_min,
        rquant.  Only consulted when ``self.transform == 'logicle'``.
    """
    # parse headers
    header = self.parse_header(self.cur_offset)
    # parse text
    text = self.parse_text(self.cur_offset, header['text_start'],
                           header['text_stop'])
    # parse analysis segment; TEXT keywords take precedence over the
    # header's offsets (EAFP: older files may omit the TEXT keys)
    try:
        astart = text['beginanalysis']
    except KeyError:
        astart = header['analysis_start']
    try:
        astop = text['endanalysis']
    except KeyError:
        astop = header['analysis_end']
    analysis = self.parse_analysis(self.cur_offset, astart, astop)
    # parse data — again prefer TEXT-declared offsets over the header
    try:
        dstart = int(text['begindata'])
    except KeyError:
        dstart = header['data_start']
    try:
        dstop = int(text['enddata'])
    except KeyError:
        dstop = header['data_end']
    # account for LMD reporting the wrong values for the size of the data
    # segment
    lmd = self.fix_lmd(self.cur_offset, header['text_start'],
                       header['text_stop'])
    dstop = dstop + lmd
    data = self.parse_data(self.cur_offset, dstart, dstop, text)
    # build fcmdata object: collect channel names, scatter channels, and
    # the FL-channel indexes that need transforming
    channels = []
    scchannels = []
    scchannel_indexes = []
    to_transform = []
    base_chan_name = []
    for i in range(1, int(text['par']) + 1):
        base_chan_name.append(text['p%dn' % i])
        # prefer the stain name ($PnS) when present and non-blank,
        # otherwise fall back to the short name ($PnN)
        try:
            if text['p%ds' % i] not in ['', ' ']:
                name = text['p%ds' % i]
            else:
                name = text['p%dn' % i]
        except KeyError:
            name = text['p%dn' % i]
        channels.append(name)
        # if not name.lower().startswith('fl'):
        if not is_fl_channel(name):
            scchannels.append(name)
            if name != 'Time':
                scchannel_indexes.append(i - 1)
        else:  # we're a FL channel
            # only transform FL channels whose range ($PnR) is the full
            # 18-bit scale — presumably raw, untransformed data; verify
            try:
                if text['p%dr' % i] == '262144':
                    to_transform.append(i - 1)
            except KeyError:
                pass
    # derive the dataset name from the file name; in-memory file objects
    # have no .name attribute
    try:
        unused_path, name = os.path.split(self._fh.name)
    except AttributeError:
        name = 'InMemoryFile'
    name, unused_ext = os.path.splitext(name)
    tmpfcm = FCMdata(
        name, data,
        zip(base_chan_name, channels),
        scchannels,
        Annotation({
            'text': text,
            'header': header,
            'analysis': analysis,
        }))
    if self.sidx is not None and self.spill is not None:
        # if we're passed a spillover we assume we compensate
        auto_comp = True
    if auto_comp:
        if self.sidx is None and self.spill is None:
            # no explicit spillover given: use the one embedded in the file,
            # if any
            if tmpfcm.get_spill():
                spill, sidx = get_spill(tmpfcm.get_spill())
                tmpfcm.compensate(sidx=sidx, spill=spill)
        else:
            tmpfcm.compensate(sidx=self.sidx, spill=self.spill)
    if self.transform == 'logicle':
        # resolve the logicle 'r' parameter: a scalar is used as-is; an
        # array-like of reals zeroes it per-channel; absent means None
        try:
            if isinstance(kwargs['r'], Number):
                self.r = kwargs['r']
            elif numpy.all(numpy.isreal(kwargs['r'])):
                self.r = numpy.zeros(data.shape[1])
        except KeyError:
            self.r = None
        # remaining logicle parameters fall back to conventional defaults
        if 'T' in kwargs.keys():
            T = kwargs['T']
        else:
            T = 262144
        if 'm' in kwargs.keys():
            m = kwargs['m']
        else:
            m = 4.5
        if 'scale_max' in kwargs.keys():
            scale_max = kwargs['scale_max']
        else:
            scale_max = 1e5
        if 'scale_min' in kwargs.keys():
            scale_min = kwargs['scale_min']
        else:
            scale_min = 0
        if 'rquant' in kwargs.keys():
            rquant = kwargs['rquant']
        else:
            rquant = None
        if 'w' in kwargs.keys():
            w = kwargs['w']
        else:
            w = 0.5
        if 'a' in kwargs.keys():
            a = kwargs['a']
        else:
            a = 0
        if to_transform:
            tmpfcm.logicle(to_transform, T=T, m=m, r=self.r, w=w, a=a,
                           scale_max=scale_max, scale_min=scale_min,
                           rquant=rquant)
    elif self.transform == 'log':
        if to_transform:
            tmpfcm.log(to_transform)
    # mirror the reader's r/w onto the result when they exist (best-effort)
    try:
        tmpfcm._r = self.r
    except AttributeError:
        pass
    try:
        tmpfcm._w = self.w
    except AttributeError:
        pass
    # advance to the next dataset in a multi-dataset file
    if 'nextdata' in text:
        self.cur_offset = int(text['nextdata'])
    return tmpfcm
def setUp(self):
    """Build a two-member FCMcollection of identical small FCMdata objects."""
    events = array([[1, 1, 1], [5, 5, 5]])
    channel_names = ['fsc', 'ssc', 'cd3']
    members = [
        FCMdata('test_fcm1', events, channel_names, [0, 1]),
        FCMdata('test_fcm2', events, channel_names, [0, 1]),
    ]
    self.fcms = FCMcollection('fcms', members)
def setUp(self):
    """Create a minimal two-event, three-channel FCMdata fixture."""
    channel_names = ['fsc', 'ssc', 'cd3']
    self.pnts = array([[0, 1, 2],
                       [3, 4, 5]])
    self.fcm = FCMdata('test_fcm', self.pnts, channel_names, [0, 1])
def setUp(self):
    """Create a minimal FCMdata fixture with one fluorescence channel."""
    channel_names = ['fsc', 'ssc', 'fl-1']
    self.pnts = array([[0, 1, 2],
                       [3, 4, 5]])
    self.fcm = FCMdata('test_fcm', self.pnts, channel_names,
                       scatters=[0, 1])