import copy
from itertools import count

import numpy as np

# Assumed context for this excerpt: `misc`, `gen`, `sims`, `cal` and `data` are
# project-local packages imported elsewhere in this file, `cartesian` is a
# product-style iteration helper (itertools.product would be a drop-in
# substitute), and `S_TAU`, `DRY_RUN`, `make_config`, `bits_to_required_eff`,
# `characterize_uncertanty` and `characterize_noise` are module-level
# definitions not shown here.


def characterize(args, adcs, seed):
    with misc.push_random_state():
        np.random.seed(seed)
        seeds = [np.random.randint(0, 4294967296) for _ in range(3)]

    assert not args.relative_snr_ref, "TODO implement relative"
    assert not args.relative_snr_thres, "TODO implement relative"

    min_snr_ref_v = args.min_snr_ref_v
    min_snr_thres_v = args.min_snr_thres_v

    # Total pipeline resolution: integer bits of every stage plus the bits
    # resolved by the tail thresholds.
    fsr = adcs[0].stages[0].meta.fsr
    n_bits = int(np.sum([np.floor(stage.meta.n_bits) for stage in adcs[0].stages]))
    n_bits += int(gen.infer_thres_bits(adcs[0].tail)[0])
    lsb = gen.compute_lsb(n_bits, *fsr)

    if min_snr_ref_v is None:
        min_snr_ref_v = lsb / 2
    if min_snr_thres_v is None:
        min_snr_thres_v = lsb / 2

    # Noise sigmas are swept linearly and converted to power-ratio SNR values;
    # the sigma == 0 entry would be an infinite ratio and is stored as 0
    # instead, matching the simulator's noise-free default.
    snr_ref_inv = np.linspace(0, min_snr_ref_v, args.samples_snr_ref)
    snr_thres_inv = np.linspace(0, min_snr_thres_v, args.samples_snr_thres)
    snr_ref = np.power((fsr[1] - fsr[0]), 2) / np.power(snr_ref_inv, 2)
    snr_thres = np.power((fsr[1] - fsr[0]), 2) / np.power(snr_thres_inv, 2)
    snr_ref[0] = 0
    snr_thres[0] = 0

    real_thres_s = data.at_least_ndarray(args.real_thres_s)
    shape = (np.size(real_thres_s), np.size(adcs),)
    adcs_sweep = np.tile(np.array(adcs, dtype=object), shape[:-1] + (1,))

    # Sweep thres: perturb every ADC copy with gaussian threshold (and tail)
    # errors scaled by the corresponding real_thres_s entry.
    with misc.push_random_state():
        np.random.seed(seed)
        for idx in cartesian(*tuple(range(s) for s in shape)):
            r_thres_s = real_thres_s[idx[0]]
            adcs_sweep[idx] = copy.deepcopy(adcs_sweep[idx])
            for stage in adcs_sweep[idx].stages:
                r_thres_s_local = r_thres_s * stage.meta.lsb
                stage._thres = np.random.normal(stage.thres, r_thres_s_local)
            adcs_sweep[idx]._tail = np.random.normal(adcs_sweep[idx].tail, r_thres_s)

    uncertain = characterize_uncertanty(args, adcs_sweep, seed, real_thres_s)
    ref = characterize_noise(args, adcs_sweep, seed, snr_ref, real_thres_s, ref=True)
    thres = characterize_noise(args, adcs_sweep, seed, snr_thres, real_thres_s,
                               ref=False)

    return uncertain, ref, thres
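# Hedged sketch (not part of the original module): how `characterize` turns a
# linear sweep of noise sigmas into the power-ratio SNR values handed to
# `characterize_noise`.  The `fsr`, `min_sigma` and `samples` arguments are
# made-up example values; the sigma == 0 entry would otherwise divide by zero
# and is forced to 0 as the noise-free sentinel, mirroring snr_ref[0] = 0 above.
def _sketch_snr_grid(fsr=(-1.0, 1.0), min_sigma=1e-3, samples=8):
    sigmas = np.linspace(0, min_sigma, samples)
    with np.errstate(divide="ignore"):
        snr = np.power(fsr[1] - fsr[0], 2) / np.power(sigmas, 2)
    snr[0] = 0  # sigma == 0 encodes "no noise injected"
    return snr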
def calibrate_single(args, adc, lsb_scale, seed, snr_ref, snr_thres):
    assert len(args.samples) == 1
    assert args.n_test == 0

    delta_sigma = adc.as_delta_sigma()
    adc_ideal = adc.as_ideal()
    ideal_delta_sigma = adc.as_delta_sigma()

    calibrated_stages = []
    trully_calibrated = True

    with misc.push_random_state():
        np.random.seed(seed)
        seeds = [np.random.randint(0, 4294967296) for _ in range(len(delta_sigma))]

    for_iter = zip(seeds, delta_sigma, ideal_delta_sigma, adc_ideal.stages)
    for seed, ds_stage, ideal, pipe_ideal in for_iter:
        meta = ds_stage.meta
        ins = np.zeros((0, meta.n_diff,))

        cargs = copy.deepcopy(args)
        cargs.seed = seed
        conf = make_config(meta, cargs, False)
        tb = sims.StageTestbench.Scalar(ds_stage, ins, conf)
        simulator = sims.Simulator(seed, snr_ref, snr_thres)

        if DRY_RUN:
            calibrated = copy.deepcopy(ds_stage)
        else:
            codes, _ = tb.simulate(simulator)
            system = cal.CalibrationSystem(
                ideal, conf, codes,
                mask=cal.json_dicts_to_mask(ideal, [{"parameter": "thres"}]),
                sum_conf=True, use_bands=True)
            calibrated, _ = system.run_calibration(
                samples_step=args.samples[0] + 1, lsb_scale=lsb_scale)
            x = system.map_in(calibrated, ins)
            trully_calibrated = (trully_calibrated and system.system(
                x, scalar=True, lsb_scale=lsb_scale) == 0)

        calibrated._thres = pipe_ideal.thres
        calibrated_stages.append(calibrated)

    calibrated = gen.PipeParameters(calibrated_stages, adc_ideal.tail)
    return calibrated, trully_calibrated
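# Hedged sketch (illustration only): the seed bookkeeping used by
# `calibrate_single` and `gen_adc`.  `misc.push_random_state` is assumed to
# behave like the context manager below: save the global numpy RNG state, let
# the body reseed and draw sub-seeds, then restore the outer state on exit.
import contextlib


@contextlib.contextmanager
def _sketch_push_random_state():
    saved = np.random.get_state()
    try:
        yield
    finally:
        np.random.set_state(saved)


def _sketch_sub_seeds(master_seed, n):
    # One master seed deterministically yields n independent sub-seeds
    # without disturbing the caller's RNG.
    with _sketch_push_random_state():
        np.random.seed(master_seed)
        return [np.random.randint(0, 4294967296) for _ in range(n)]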
def gen_adc(bits, seed, args, n_adcs=1):
    assert len(bits) > 1
    adcs = []

    with misc.push_random_state():
        np.random.seed(seed)
        seeds = [np.random.randint(0, 4294967296) for _ in range(n_adcs)]

    for seed in seeds:
        with misc.push_random_state():
            np.random.seed(seed)
            sub_seeds = [np.random.randint(0, 4294967296) for _ in range(len(bits))]

        stages = []
        for ii, cbits, sseed in zip(count(), bits[:-1], sub_seeds):
            _, half_bit = gen.parse_bits(cbits)
            n_refs = 3 if half_bit else 2

            eff = bits_to_required_eff(cbits)
            eff = max(eff, 0.95)

            meta = gen.StageMeta(cbits, n_refs, eff=eff, seed=sseed)

            s_ref = args.s_ref
            if s_ref is None:
                s_ref = 0.5 * meta.lsb / 3

            stage = meta.generate_gaussian(S_TAU, s_cap=args.s_cap, s_refs=s_ref,
                                           s_thres=0, s_cm=0)
            stages.append(stage)

        cbits, half_bit = gen.parse_bits(bits[-1])
        n_refs = 3 if half_bit else 2
        tail_meta = gen.StageMeta(cbits, n_refs, half_bit=half_bit,
                                  seed=sub_seeds[-1])
        # Ideal tail thresholds come from the tail's own meta, not from the
        # last pipeline stage's meta.
        tail = tail_meta.generate_ideal().thres

        adc = gen.PipeParameters(stages, tail)
        adcs.append(adc)

    return adcs
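# Hedged sketch (illustration only): the stage-resolution convention assumed
# by `gen_adc`.  `gen.parse_bits` is expected to split a resolution such as
# 1.5 into its integer bit count and a half-bit flag; half-bit stages then get
# three reference levels and full-bit stages two, matching the n_refs logic
# above.  The helper name and exact return contract here are assumptions.
def _sketch_parse_bits(bits):
    n_bits = int(np.floor(bits))
    half_bit = not np.isclose(bits, n_bits)
    return n_bits, half_bit


# Example under that assumption: _sketch_parse_bits(1.5) -> (1, True), so a
# 1.5-bit stage would use n_refs = 3.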
def __init__(self, seed, ref_snr=0, thres_snr=0, in_snr=0, u_history=True,
             data_location=None):
    super().__init__(data_location)

    self._seed = seed
    self._u_history = u_history

    self._ref_snr = ref_snr
    self._thres_snr = thres_snr
    self._in_snr = in_snr

    with push_random_state() as state_store:
        np.random.seed(self._seed)
    self._random_state = state_store
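# Hedged sketch (illustration only): the intent of the seeded-state capture in
# __init__ above.  `push_random_state` is assumed to expose the state that was
# active inside the block, so the simulator can later replay noise draws from
# `_random_state` without disturbing the caller's global numpy RNG.
def _sketch_capture_state(seed):
    outer = np.random.get_state()     # caller's RNG, restored below
    np.random.seed(seed)
    captured = np.random.get_state()  # reproducible per-simulator state
    np.random.set_state(outer)
    return captured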
def calib(meta, args, interlace, use_full_range=None):
    n_caps = meta.n_caps
    n_refs = meta.n_refs
    n_diff = meta.n_diff

    n_cs = (n_caps - 1) // 2
    n_cf = n_caps - n_cs
    use_full_range = misc.default(use_full_range, n_cs < 2)

    ds_samples = args.samples
    if args.n_test > 0:
        raise ValueError("Minimal does not support test inputs.")

    # Enumerate the sampling-capacitor selections and the pairs of reference
    # assignments used to drive them.
    comb_cs = misc.iterate_combinations(n_caps, n_cs)
    if args.full:
        comb_cs = [tuple(misc.iterate_permutations(cs)) for cs in comb_cs]
        comb_cs = [elem for tlp in comb_cs for elem in tlp]

    slice_ = slice(None) if use_full_range else slice(1, -1)
    comb_refs = gen.ds_map(n_cs, n_refs, n_cs * (n_refs - 1) + 1)
    comb_refs = np.transpose(comb_refs[:, slice_], (1, 0, 2,))
    comb_refs = comb_refs.tolist()
    comb_refs = [(comb_refs[ii], comb_refs[jj],)
                 for ii in range(len(comb_refs))
                 for jj in range(ii + 1, len(comb_refs))]

    comb_cs = list(comb_cs)
    comb_refs = list(comb_refs)

    even_configs = []
    even_ins = []
    ics = []

    with misc.push_random_state():
        seed = None if args.seed is None else int(args.seed)
        np.random.seed(seed)

        for cs_ii, refs_ii in cartesian(comb_cs, comb_refs):
            even_configs.append(gen.Configuration(meta, cs_ii))
            top_ii, bot_ii = refs_ii

            if args.inputs == "":
                sub_seed = np.random.randint(0, 4294967296)
                even_ins.append(gen.InternalRandom(meta, np.size(cs_ii), sub_seed))
            else:
                top = gen.InternalDC(meta, top_ii)
                bot = gen.InternalDC(meta, bot_ii)
                even_ins.append(gen.ACCombinator(meta, top, bot, args.period))

            # Initial conditions: invert the top reference indices for the
            # sampling capacitors and pad the remaining n_cf - n_cs
            # capacitors with a fixed reference pair.
            inv = [[n_refs - 1 - iii for iii in ii] for ii in top_ii]
            inv = inv + [[n_refs // 2, n_refs - n_refs // 2][:n_diff]] * (n_cf - n_cs)
            ics.append(gen.InitialCondition(meta, inv))

    if interlace:
        # Odd-phase configurations keep only the middle half of the sampling
        # capacitors and mask the corresponding inputs.
        n_cs_h = n_cs // 2
        assert n_cs_h > 0, "Not enough capacitors to decrease bits."

        odd_configs = []
        odd_ins = []
        for conf, in_ in zip(even_configs, even_ins):
            left = (n_cs - n_cs_h) // 2
            cs_range = range(left, left + n_cs_h)
            mask = np.zeros((n_cs,), dtype=bool)
            mask[cs_range] = 1

            odd_configs.append(gen.Configuration(conf.meta, conf.cs[cs_range, :]))
            odd_ins.append(gen.InputMask(in_.meta, in_, mask))
    else:
        odd_ins = even_ins
        odd_configs = even_configs

    # Alternate even/odd configuration sets over the requested sample counts.
    conf_sets = []
    parity = 0
    for samples in ds_samples:
        if parity == 0:
            configs = even_configs
            inputs = even_ins
        else:
            configs = odd_configs
            inputs = odd_ins

        conf_sets.append(gen.ConfigurationSet(samples, inputs, configs))
        parity = (parity + 1) % 2

    if args.ic == "clear":
        ics = [gen.InitialCondition.Discharged(meta, n_cf)] * len(odd_ins)
    elif args.ic == "precharge":
        pass
    else:
        raise ValueError("ic type {} not supported".format(args.ic))

    return gen.ConfigurationSequence(ics, conf_sets * args.loop)
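# Hedged sketch (illustration only): the centered capacitor mask built in the
# `interlace` branch of `calib`.  For n_cs sampling capacitors, the odd-phase
# configurations keep only the middle n_cs // 2 of them.
def _sketch_interlace_mask(n_cs):
    n_cs_h = n_cs // 2
    left = (n_cs - n_cs_h) // 2
    mask = np.zeros((n_cs,), dtype=bool)
    mask[left:left + n_cs_h] = True
    return mask


# Example: _sketch_interlace_mask(5) -> array([False,  True,  True, False, False])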