def test_set_filter_masked_wrong(clean_ui):
    """Calling set_filter with a mask whose length does not match the
    (filtered) dataset must raise a DataErr."""
    xvals = np.asarray([10, 20, 30])
    yvals = np.asarray([2, 3, 4])
    ui.load_arrays(1, xvals, yvals)
    ui.ignore(lo=15, hi=45)

    # A two-element mask can not be applied to a three-element dataset.
    with pytest.raises(DataErr) as err:
        ui.set_filter(np.asarray([True, False]))

    assert str(err.value) == "size mismatch between 3 and 2"
def test_set_filter_masked(clean_ui):
    """Check set_filter on an already-filtered dataset.

    The asserts below show the new mask being combined with the
    existing one (points kept by either stay included).
    """
    xvals = np.asarray([10, 20, 30, 40, 50])
    yvals = np.asarray([2, 3, 4, 5, 6])
    ui.load_arrays(1, xvals, yvals)
    ui.ignore(lo=15, hi=45)

    dset = ui.get_data()
    assert dset.mask == pytest.approx([True, False, False, False, True])

    ui.set_filter(np.asarray([True, False, True, False, False]))
    assert dset.mask == pytest.approx([True, False, True, False, True])
def test_user_model1d_fit():
    """A user-defined 1D model can be used in a fit."""
    mname = "test_model"
    ui.load_user_model(um_line, mname)
    ui.add_user_pars(mname, ["slope", "intercept"], parvals=[1.0, 1.0])
    mdl = ui.get_model_component(mname)

    # Synthetic data scattered about y = -0.2 x + 2.8.  The per-point
    # offsets sum to zero except for the central point, which is
    # excluded from the fit below.
    slope_true = -0.2
    intercept_true = 2.8
    xvals = numpy.asarray([-2.4, 2.3, 5.4, 8.7, 12.3])
    offsets = numpy.asarray([0.1, -0.2, 0.14, -0.1, 0.2])
    yvals = xvals * slope_true + intercept_true + offsets

    ui.load_arrays(1, xvals, yvals)
    ui.set_source(mname)
    ui.ignore(5.0, 6.0)  # drop the central bin
    ui.set_stat('leastsq')
    ui.set_method('simplex')
    ui.fit()

    fres = ui.get_fit_results()
    assert fres.succeeded
    assert fres.parnames == ('test_model.slope', 'test_model.intercept')
    assert fres.numpoints == 4
    assert fres.dof == 2

    # Loose tolerances (tuned to pass locally): the point is only that
    # the parameters have changed from their default values.
    assert fres.parvals[0] == pytest.approx(slope_true, abs=0.01)
    assert fres.parvals[1] == pytest.approx(intercept_true, abs=0.05)

    # These should be the same values, so no need for pytest.approx
    # (unless some internal type translation happens somewhere).
    assert mdl.slope.val == fres.parvals[0]
    assert mdl.intercept.val == fres.parvals[1]
def fit_evol(dateglob='20?????', rootdir='darkhist_peaknorm', outroot='',
             xmin=25.0, xmax=4000, conf=True, gauss=False):
    """Fit the smooth-broken-powerlaw dark-current model to each
    histogram file matching ``dateglob`` in ``rootdir``.

    Parameters
    ----------
    dateglob : str
        Glob pattern for the date portion of the ``<date>.dat`` files.
    rootdir : str
        Directory holding the input files; plots/pickles are written here.
    outroot : str or None
        Prefix for output file names.  If None, no plot or per-date
        pickle output is written.
    xmin, xmax : float
        Fit range; data outside [xmin, xmax] is ignored during the fit.
    conf : bool
        If True, run ``ui.conf()`` and record the confidence results.
    gauss : bool
        If True use ``fit_gauss_sbp()``, otherwise ``fit_sbp()``.

    Returns
    -------
    dict
        Maps each 7-digit file date to a dict with keys ``x``, ``y``,
        ``y_fit`` and, when ``conf`` is True, the confidence attributes
        ``parnames``/``parmins``/``parvals``/``parmaxes``.
    """
    results = {}
    fileglob = os.path.join(rootdir, '{}.dat'.format(dateglob))
    for filename in glob(fileglob):
        filedate = re.search(r'(\d{7})', filename).group(1)
        print("\n\n*************** {} *****************".format(filename))

        plt.figure(1)
        ui.load_data(1, filename, 2)
        data = ui.get_data()
        ui.ignore(None, xmin)
        ui.ignore(xmax, None)
        dark_models.xall = data.x

        # Initial guesses / bounds for the smooth broken power law.
        sbp.gamma1 = 0.05
        sbp.gamma2 = 3.15
        sbp.gamma2.min = 2.
        sbp.gamma2.max = 4.
        sbp.x_b = 130.
        sbp.x_b.min = 100.
        sbp.x_b.max = 160.
        sbp.x_r = 50.
        # Seed the amplitude from the mean level in the 40-60 band.
        ok = (data.x > 40) & (data.x < 60)
        sbp.ampl1 = np.mean(data.y[ok])

        if gauss:
            fit_gauss_sbp()
        else:
            fit_sbp()

        pars = (sbp.gamma1.val, sbp.gamma2.val, sbp.x_b.val, sbp.x_r.val,
                sbp.ampl1.val)
        fit_y = dark_models.smooth_broken_pow(pars, data.x)

        # Bug fix: ``result`` was previously only created inside the
        # ``if conf:`` branch, raising NameError when conf=False.
        result = {'x': data.x, 'y': data.y, 'y_fit': fit_y}
        if conf:
            ui.set_conf_opt('numcores', 1)
            ui.conf()
            res = ui.get_conf_results()
            for attr in ('parnames', 'parmins', 'parvals', 'parmaxes'):
                result[attr] = getattr(res, attr)
        results[filedate] = result

        if outroot is not None:
            ui.notice(0, xmax)
            ui.set_xlog()
            ui.set_ylog()
            ui.plot_fit()
            plt.xlim(1, 1e4)
            plt.ylim(0.5, 1e5)
            plt.grid(True)
            plt.xlabel('Dark current (e-/sec)')
            outfile = os.path.join(rootdir,
                                   '{}{}.png'.format(outroot, filedate))
            print('Writing', outfile)
            plt.savefig(outfile)

            if conf:
                outfile = os.path.join(rootdir,
                                       '{}{}.pkl'.format(outroot, filedate))
                print('Writing', outfile)
                # protocol=-1 is a binary pickle protocol: the file must
                # be opened in 'wb' mode (the old 'w' text mode fails on
                # Python 3), and the with-block closes the handle.
                with open(outfile, 'wb') as fh:
                    pickle.dump(result, fh, protocol=-1)

    if outroot is not None:
        outfile = os.path.join(rootdir, '{}fits.pkl'.format(outroot))
        print('Writing', outfile)
        with open(outfile, 'wb') as fh:
            pickle.dump(results, fh, protocol=-1)

    return results