def init_seq_idx(conf_seq, data):
    """Build the multi-indexes that select the initial-condition charge.

    Parameters
    ----------
    conf_seq : configuration sequence providing ``initial_conditions`` and
        ``configuration_sets``.
    data : simulation data dict as produced by ``simulate_setup``.

    Returns
    -------
    dict
        ``{"m_refs_idx": ..., "m_caps_idx": ...}`` — multi-indexes into the
        extended ``refs`` and ``caps`` arrays, consumed by ``init_seq``.
    """
    # Only the indexing sub-dict is actually needed here; the original
    # also unpacked "extended"/"n" values that were never used.
    idx_dct = get(data, "indexing")[0]
    diff_ii = get(idx_dct, "diff_ii")[0]
    ic = conf_seq.initial_conditions
    # Reference selection of each initial condition,
    # shape (n_conf, n_cf, n_diff,)
    ic_ii = np.array([iic.ref_ii for iic in ic], dtype=int)
    # Feedback-capacitor selection of the first configuration set.
    cf_ii = conf_seq.configuration_sets[0].cf
    m_refs_idx = (
        Ellipsis,
        cf_ii,
        ic_ii,
        diff_ii,
    )
    m_caps_idx = (
        Ellipsis,
        cf_ii,
        diff_ii,
    )
    return {"m_refs_idx": m_refs_idx, "m_caps_idx": m_caps_idx}
def u_thres_to_code(u, thres, data):
    """Quantize residual voltages *u* against the thresholds *thres*.

    When ``s_thres`` (from ``data["std"]``) is positive, the thresholds are
    first perturbed with gaussian noise.  Returns the integer code: the
    count of thresholds each (possibly differential) level exceeds.
    """
    std_dct = get(data, "std")[0]
    noise_sd = get(std_dct, "s_thres")[0]
    if noise_sd > 0:
        thres = np.random.normal(thres, noise_sd, np.shape(thres))
    # A trailing axis of size 2 is a differential pair: compare its
    # difference; otherwise compare the values directly.
    if np.size(u, -1) == 2:
        levels = np.diff(u, axis=-1)
    else:
        levels = u
    # Broadcast thres to (..., n_thres, n_diff) and count crossings.
    return np.sum(levels >= thres[..., np.newaxis, :], axis=-1)
def recreate_cache(eff, caps, refs, thres, ins, common_mode, c_seq, codes, scalar=None):
    """Rebuild the per-set simulation cache from previously produced codes.

    Walks the configuration sets of *c_seq*, replaying the code stream to
    regenerate set data, transition indexes and du indexes for each set.
    Returns ``{"data": ..., "seq_idx": ..., "sets": [(set_data, trans_idx,
    du_idx), ...]}``.
    """
    sim = sims.Simulator
    scalar = default(scalar, len(np.shape(eff)) == 0)
    if scalar:
        # Insert the configuration axis that scalar callers omit.
        codes = codes[:, np.newaxis, ...]
    data = sim.simulate_setup(eff, caps, refs, thres, ins, common_mode,
                              c_seq, scalar=scalar)
    ext_dct = get(data, "extended")[0]
    eff, thres, cm = get(ext_dct, "eff", "thres", "cm")
    # shape (..., n_conf, n_diff,)
    seq_idx = sim.init_seq_idx(c_seq, data)
    per_set = []
    result = {"data": data, "seq_idx": seq_idx, "sets": per_set}
    set_data, du_idx, trans_idx = None, None, None
    for c_set in c_seq.configuration_sets:
        set_data = sim.init_set(c_set, data, set_data, du_idx)
        if set_data["previous"] is not None:
            # One code sample is consumed by the set transition itself.
            start = data["ds_offset"]
            trans_idx = sim.transition_step_idx(
                c_set, set_data, data, codes[start:start + 1, ...])
        start = data["ds_offset"]
        du_idx = sim.du_indexes(
            codes[start:start + c_set.ds_samples, ...], c_set, set_data, data)
        per_set.append((
            set_data,
            trans_idx,
            du_idx,
        ))
    return result
def du_indexes(code, conf_set, set_data, data):
    """Build the index structures used by ``du_compute`` for a sample block.

    Consumes ``n_samples`` entries of the input stream (advances
    ``data["ds_offset"]``) and maps each sample's digital *code* to the
    reference selections of the current configuration set.

    Returns a dict with:
      - "r_ref": raveled indexes into ``refs`` (built with mode='wrap'),
      - "m_in_ref" / "m_in_ins": multi-indexes into ``refs`` / ``ins`` for
        the input sampling network.
    """
    n_samples = np.size(code, 0)
    idx_dct, ext_dct, n_dct = get(data, "indexing", "extended", "n")
    idx_set, n_set, ds_map = get(set_data, "indexing", "n", "ds_map")
    refs, ins = get(ext_dct, "refs", "ins")
    diff_ii, diff_ii_ext, cap_axis, base_ii, base_len = get(
        idx_dct, "diff_ii", "diff_ii_ext", "cap_axis", "base_ii", "base_len")
    cs_ii, cs_ii_base, = get(idx_set, "cs_ii", "cs_ii_base")
    # Consume n_samples entries of the input generator stream.
    ds_offset = data["ds_offset"]
    in_ref_ii, in_ins_ii = conf_set.generate_in(n_samples, ds_offset)
    data["ds_offset"] = ds_offset + n_samples
    # Using take instead of indexing because for some reason it's faster
    ref_ii = ds_map.take(code, axis=1)
    # Permutation: sample axis to the front, the n_cs axis (old axis 0)
    # to second-to-last, n_diff stays last.
    ds_map_transpose = tuple(range(1, base_len + 1 + 2)) + (
        0,
        -1,
    )
    # shape = (n_samples, base_shape, n_conf, n_cs, n_diff)
    ref_ii = np.transpose(ref_ii, ds_map_transpose)
    # Store the last sample's reference selection; presumably read by later
    # stages outside this chunk -- TODO confirm against callers.
    set_data["indexing"]["ref_ii"] = ref_ii[-1, ...]
    # Prepend a broadcast (sample) axis to the per-set index arrays.
    ext_idx = (
        np.newaxis,
        Ellipsis,
    )
    ext_base_ii = tuple(bb[ext_idx] for bb in base_ii)
    m_ref = ext_base_ii + (
        cs_ii_base[ext_idx],
        ref_ii,
        diff_ii_ext[ext_idx],
    )
    m_in_ref = (Ellipsis, ) + (
        cs_ii[ext_idx],
        in_ref_ii,
        diff_ii[ext_idx],
    )
    m_in_ins = (Ellipsis, ) + (
        in_ins_ii,
        diff_ii[ext_idx],
    )
    ravel = np.ravel_multi_index
    # mode='wrap' wraps out-of-range indexes instead of raising.
    r_ref = ravel(m_ref, refs.shape, mode='wrap')
    return {"r_ref": r_ref, "m_in_ref": m_in_ref, "m_in_ins": m_in_ins}
def du_compute(indexes, conf_set, set_data, data):
    """Compute the per-sample residual increment ``du`` for one set.

    ``indexes`` is the dict produced by ``du_indexes``.  The increment is a
    capacitor-ratio (cs/CF) weighted sum of (sampled input refs + input
    signal - selected refs), with optional gaussian noise on references
    (s_ref) and inputs (s_in) and a common-mode correction when
    ``n_diff == 2``.
    """
    idx_dct, ext_dct, n_dct = get(data, "indexing", "extended", "n")
    caps, refs, ins, cm = get(ext_dct, "caps", "refs", "ins", "cm")
    cap_axis, base_len, base_ii, diff_ii = get(
        idx_dct, "cap_axis", "base_len", "base_ii", "diff_ii")
    n_diff = get(n_dct, "n_diff")[0]
    r_ref, m_in_ref, m_in_ins = get(indexes, "r_ref", "m_in_ref", "m_in_ins")
    cs_ii, cf_ii = get(get(set_data, "indexing")[0], "cs_ii", "cf_ii")
    std = get(data, "std")[0]
    s_ref, s_in = get(std, "s_ref", "s_in")
    # NOTE: cs, CF, ref, in_ref and in_ins are referenced BY NAME inside the
    # ne.evaluate() string below -- do not rename them.
    cs = caps[np.newaxis, ..., cs_ii, diff_ii]  # used in du
    cf = caps[np.newaxis, ..., cf_ii, diff_ii]
    CF = np.sum(cf, axis=-2, keepdims=True)  # used in du
    # Using take instead of indexing because for some reason it's faster
    ref = refs.ravel().take(r_ref)
    if s_ref > 0:
        ref = np.random.normal(ref, s_ref, size=np.shape(ref))
    in_ref = refs[m_in_ref]
    if s_ref > 0:
        in_ref = np.random.normal(in_ref, s_ref, size=np.shape(in_ref))
    in_ins = ins[m_in_ins]
    if s_in > 0:
        in_ins = np.random.normal(in_ins, s_in, size=np.shape(in_ins))
    # Move axis -4 (presumably the sample axis added by the advanced
    # indexing above -- TODO confirm) to the front, keeping relative order
    # of the remaining axes.
    in_ref = np.transpose(in_ref, (-4, ) + tuple(range(base_len)) + (
        -3,
        -2,
        -1,
    ))
    in_ins = np.transpose(in_ins, (-4, ) + tuple(range(base_len)) + (
        -3,
        -2,
        -1,
    ))
    # Sum done in numpy (outside numexpr) to work around numexpr issue 79.
    du = np.sum(ne.evaluate("(cs/CF)*(in_ref + in_ins - ref)"), cap_axis + 1)
    if n_diff == 2:
        # Common-mode feedback: re-center the differential pair around cm.
        du += cm[np.newaxis, ...] - np.mean(du, axis=-1, keepdims=True)
    return du
def init_seq(indexes, data):
    """Compute the initial residual ``u`` from the initial-condition indexes.

    ``indexes`` is the dict from ``init_seq_idx``.  The initial residual is
    the feedback-capacitance weighted average of the selected references,
    optionally noised (s_ref) and common-mode corrected (n_diff == 2).
    Returns ``u`` with a leading sample axis of size 1.
    """
    idx_dct, ext_dct, n_dct, std = get(data, "indexing", "extended", "n",
                                       "std")
    base_shape = get(idx_dct, "base_shape")[0]
    caps, refs, cm = get(ext_dct, "caps", "refs", "cm")
    n_conf, n_diff = get(n_dct, "n_conf", "n_diff")
    s_ref = get(std, "s_ref")[0]
    m_refs_idx, m_caps_idx = get(indexes, "m_refs_idx", "m_caps_idx")
    # Accumulate into zeros so broadcasting fixes the final shape.
    u = np.zeros(base_shape + (
        n_conf,
        n_diff,
    ))
    init_refs = refs[m_refs_idx]  # shape (base_shape, n_conf, n_cf, n_diff,)
    if s_ref > 0:
        init_refs = np.random.normal(init_refs, s_ref,
                                     size=np.shape(init_refs))
    cap_sel = caps[m_caps_idx]
    weights = cap_sel / np.sum(cap_sel, axis=-2, keepdims=True)
    u += np.sum(weights * init_refs, axis=-2)
    if n_diff == 2:
        # Common-mode feedback for the differential pair.
        u += cm - np.mean(u, axis=-1, keepdims=True)
    return u[np.newaxis, ...]
def init_set(conf_set, data, prev_set_data, prev_du_idx):
    """Prepare the per-configuration-set bookkeeping dictionary.

    Carries forward the previous set's sampling/feedback selections and its
    last raveled reference index (``prev_du_idx["r_ref"]``) under the
    "previous" key, or ``None`` for the first set.
    """
    idx_dct, ext_dct, n_dct = get(data, "indexing", "extended", "n")
    n_refs, n_codes = get(n_dct, "n_refs", "n_codes")
    base_len, diff_ii = get(idx_dct, "base_len", "diff_ii")
    n_cs = conf_set.n_cs
    sampling_ii = conf_set.cs
    feedback_ii = conf_set.cf
    # Broadcast the sampling selection over the base shape.
    sampling_ii_base = sampling_ii[(np.newaxis, ) * base_len + (Ellipsis, )]
    ds_map = gen.ds_map(n_cs, n_refs, n_codes, conf_set.meta.differential)
    if prev_du_idx is None:
        prev_dct = None
    else:
        prev_idx = prev_set_data["indexing"]
        prev_dct = {
            "r_ref": prev_du_idx["r_ref"],
            "cs_ii": prev_idx["cs_ii"],
            "cf_ii": prev_idx["cf_ii"],
        }
    return {
        "indexing": {
            "cs_ii": sampling_ii,
            "cf_ii": feedback_ii,
            "cs_ii_base": sampling_ii_base,
        },
        "n": {
            "n_cs": n_cs
        },
        "previous": prev_dct,
        "ds_map": ds_map,
    }
def recreate(eff, caps, refs, thres, ins, common_mode, c_seq, cache,
             scalar=None, limit_samples=None):
    """Recreate the residual-voltage history from a ``recreate_cache`` cache.

    Instead of re-quantizing, this replays the cached per-set indexes and
    runs the residual recursion u[k] = eff*u[k-1] + du[k] + (1-eff)*cm as an
    IIR filter per base-shape element.

    Parameters
    ----------
    cache : dict from ``recreate_cache`` (its "data" entry is rebuilt here).
    limit_samples : stop after this many samples (default ``c_seq.samples``;
        +1 accounts for the prepended initial-condition sample).

    Returns
    -------
    ndarray of residuals, shape (samples, [base_shape,] n_conf, n_diff) with
    the configuration axis dropped in scalar mode.
    """
    fun = sims.Simulator
    scalar = default(scalar, len(np.shape(eff)) == 0)
    # +1 for the initial-condition sample prepended below.
    limit_samples = default(limit_samples, c_seq.samples) + 1
    samples = 0
    cache = dict(cache)
    cache["data"] = fun.simulate_setup(eff, caps, refs, thres, ins,
                                       common_mode, c_seq, scalar=scalar)
    data = cache["data"]
    seq_idx = cache["seq_idx"]
    u_history = []
    # shape (..., n_conf, n_diff,)
    u = fun.init_seq(seq_idx, data)
    u_history.append(u)
    samples += 1
    dct_idx, dct_ext, dct_n = get(data, "indexing", "extended", "n")
    n_conf, n_diff = get(dct_n, "n_conf", "n_diff")
    eff, cm = get(dct_ext, "eff", "cm")
    base_shape, base_len = get(dct_idx, "base_shape", "base_len")
    c_sets = c_seq.configuration_sets
    # Simulate each configuration set.
    # FIX: removed the dead `local_du_idx` / `multi_idx_filter` code that
    # filtered its own output on every loop iteration (self-corrupting) and
    # whose result was never consumed -- `du` is computed once below from
    # the unfiltered `du_idx`.
    for c_set, (set_data, trans_idx, du_idx) in zip(c_sets, cache["sets"]):
        if set_data["previous"] is not None:
            u = fun.transition_step(trans_idx, u[-1:, ...], set_data, data)
            u_history.append(u)
            samples += 1
        r_du = (1 - eff) * cm
        n_u = np.empty((c_set.ds_samples, ) + base_shape + (
            n_conf,
            n_diff,
        ))
        # used in u
        du = fun.du_compute(du_idx, c_set, set_data, data)
        # lfilter's denominator coefficient must be a scalar (`.item()`),
        # so run the recursion once per base-shape element.
        for idx in cartesian(*tuple(range(ss) for ss in base_shape)):
            local_idx = tuple(slice(ii, ii + 1) for ii in idx) + (Ellipsis, )
            ext_local_idx = (slice(None), ) + local_idx
            # Initial filter state: last residual scaled by charge transfer.
            zi = u[(slice(-1, None), ) + local_idx] * eff[local_idx]
            local_u = lfilter([1], [1, -eff[local_idx].item()],
                              du[ext_local_idx] +
                              r_du[(np.newaxis, ) + local_idx],
                              zi=zi,
                              axis=0)[0]
            n_u[ext_local_idx] = local_u
        u = n_u
        u_history.append(u)
        samples += np.size(u, 0)
        if samples >= limit_samples:
            break
    u_history = np.concatenate(u_history, axis=0)
    u_history = u_history[:limit_samples, ...]
    if scalar:
        assert np.size(u_history, 1) == 1
        u_history = u_history[:, 0, ...]
    return u_history
def transition_step(indexes, u, set_data, data):
    """Apply one configuration-set transition to the residual ``u``.

    ``u`` must hold exactly one sample (axis 0 of size 1).  Capacitors are
    partitioned by how their role changes between the previous and current
    set (cf_cs, cs_cf, cs_cs, cf_cf -- orientation per
    ``transition_cx_change``; TODO confirm which index is "from" vs "to").
    Charge is redistributed accordingly, with gain error, charge loss
    (eff), optional gaussian noise and common-mode feedback applied.
    """
    idx_dct, ext_dct, n_dct = get(data, "indexing", "extended", "n")
    cap_axis = get(idx_dct, "cap_axis")[0]
    n_diff = get(n_dct, "n_diff")[0]
    eff, caps, refs, thres, ins, cm = get(ext_dct, "eff", "caps", "refs",
                                          "thres", "ins", "cm")
    m_cf_cs_idx, m_cs_cf_idx, m_cs_cs_idx, m_cf_cf_idx = get(
        indexes, "m_cf_cs_idx", "m_cs_cf_idx", "m_cs_cs_idx", "m_cf_cf_idx")
    r_this_r_ref, r_prev_r_ref = get(indexes, "r_this_r_ref", "r_prev_r_ref")
    m_this_in_ref, m_this_in_ins = get(indexes, "m_this_in_ref",
                                       "m_this_in_ins")
    std = get(data, "std")[0]
    s_ref, s_in = get(std, "s_ref", "s_in")
    # NOTE: many locals below (cs_cs, cf_cs, cs_cf, cf_cf, this_ref,
    # prev_ref, this_in_ref, this_in_ins, u_gain, CF, u, eff, cm, du) are
    # referenced BY NAME inside ne.evaluate() strings -- do not rename.
    cf_cs = caps[m_cf_cs_idx]
    cs_cf = caps[m_cs_cf_idx]
    cs_cs = caps[m_cs_cs_idx]  # used in du
    cf_cf = caps[m_cf_cf_idx]
    # Using take instead of plain indexing (see du_compute): faster.
    prev_ref = refs.ravel().take(r_prev_r_ref)
    if s_ref > 0:
        prev_ref = np.random.normal(prev_ref, s_ref, size=np.shape(prev_ref))
    this_ref = refs.ravel().take(r_this_r_ref)
    if s_ref > 0:
        this_ref = np.random.normal(this_ref, s_ref, size=np.shape(this_ref))
    this_in_ref = refs[m_this_in_ref]  # used in du
    if s_ref > 0:
        this_in_ref = np.random.normal(this_in_ref, s_ref,
                                       size=np.shape(this_in_ref))
    this_in_ins = ins[m_this_in_ins]  # used in du
    if s_in > 0:
        this_in_ins = np.random.normal(this_in_ins, s_in,
                                       size=np.shape(this_in_ins))
    # used in du
    # Gain from the change of total feedback capacitance across the
    # transition.
    u_gain = (np.sum(cf_cf, axis=-2) + np.sum(cf_cs, axis=-2)) / (
        np.sum(cf_cf, axis=-2) + np.sum(cs_cf, axis=-2))
    # Sum on next_cs shaped
    du_stmt = ("sum(cs_cs*(this_in_ref + this_in_ins)"
               " - (cf_cs + cs_cs)*(this_ref), axis={})").format(cap_axis)
    du = ne.evaluate(du_stmt)
    # Sum on prev_cs shaped
    du_stmt = ("sum(cs_cf*prev_ref, axis={})").format(cap_axis)
    du += ne.evaluate(du_stmt)
    assert np.size(u, 0) == 1, "Only one sample."
    CF = np.sum(cf_cf, axis=cap_axis) + np.sum(cs_cf, axis=cap_axis)
    # Apply gain and charge loss
    u = ne.evaluate("u*u_gain*eff + (1-eff)*cm + du/CF")
    # common mode feedback
    if n_diff == 2:
        u += cm - np.mean(u, axis=-1, keepdims=True)[np.newaxis, ...]
    return u
def transition_step_idx(conf_set, set_data, data, code):
    """Build the index structures consumed by ``transition_step``.

    ``code`` must contain exactly one sample; it selects the incoming set's
    references, while the previous set's last raveled reference index
    (``set_data["previous"]["r_ref"]``) supplies the outgoing charge.
    Consumes one entry of the input stream (advances ``data["ds_offset"]``).
    """
    assert np.size(code, 0) == 1
    idx_dct, ext_dct, n_dct = get(data, "indexing", "extended", "n")
    idx_set, pre_set, n_set, ds_map = get(set_data, "indexing", "previous",
                                          "n", "ds_map")
    cs_ii, cf_ii, cs_ii_base = get(idx_set, "cs_ii", "cf_ii", "cs_ii_base")
    prev_cs_ii, prev_cf_ii = get(pre_set, "cs_ii", "cf_ii")
    # Last sample of the previous set's raveled reference index.
    prev_r_ref = get(pre_set, "r_ref")[0][-1, ...]
    diff_ii, diff_ii_ext, cap_axis = get(idx_dct, "diff_ii", "diff_ii_ext",
                                         "cap_axis")
    base_shape, base_len, base_ii = get(idx_dct, "base_shape", "base_len",
                                        "base_ii")
    eff, caps, refs, thres, ins, cm = get(ext_dct, "eff", "caps", "refs",
                                          "thres", "ins", "cm")
    n_conf, n_diff = get(n_dct, "n_conf", "n_diff")
    # Compute transition
    # Partition capacitor indexes by how roles change across the transition.
    cs_cf_ii, cf_cs_ii, cs_cs_ii, cf_cf_ii = transition_cx_change(
        prev_cs_ii, prev_cf_ii, cs_ii, cf_ii)
    # shape(n_cs, ..., n_conf, n_diff)
    this_ref_ii = ds_map[:, code, :]
    # shape(..., n_conf, n_cs, n_diff)
    ds_map_transpose = tuple(range(1, base_len + 1 + 2)) + (
        0,
        -1,
    )
    # shape = (n_samples, base_shape, n_conf, n_cs, n_diff) (before idx)
    this_ref_ii = np.transpose(this_ref_ii, ds_map_transpose)[0, ...]
    # Consume one input-stream entry for the transition sample.
    ds_offset = data["ds_offset"]
    in_ref_ii, in_ins_ii = conf_set.generate_in(1, ds_offset)
    data["ds_offset"] = ds_offset + 1
    m_this_ref_idx = base_ii + (
        cs_ii_base,
        this_ref_ii,
        diff_ii_ext,
    )
    r_this_ref_idx = np.ravel_multi_index(m_this_ref_idx, refs.shape)
    transition_idx = {
        "m_cf_cs_idx": (
            Ellipsis,
            cf_cs_ii,
            diff_ii,
        ),
        "m_cs_cf_idx": (
            Ellipsis,
            cs_cf_ii,
            diff_ii,
        ),
        "m_cs_cs_idx": (
            Ellipsis,
            cs_cs_ii,
            diff_ii,
        ),
        "m_cf_cf_idx": (
            Ellipsis,
            cf_cf_ii,
            diff_ii,
        ),
        "r_this_r_ref": r_this_ref_idx,
        "r_prev_r_ref": prev_r_ref,
        "m_this_in_ref": (
            Ellipsis,
            cs_ii,
            in_ref_ii[0, ...],
            diff_ii,
        ),
        "m_this_in_ins": (
            Ellipsis,
            in_ins_ii[0, ...],
            diff_ii,
        )
    }
    return transition_idx
def simulate(self, eff, caps, refs, thres, ins, common_mode, c_seq,
             scalar=None, raise_=False):
    """Run the full conversion: quantize and update the residual per sample.

    Parameters
    ----------
    eff : charge-transfer efficiency; a scalar selects scalar mode
        (``scalar`` defaults to ``len(np.shape(eff)) == 0``).
    caps, refs, thres, ins, common_mode : element values forwarded to
        ``simulate_setup``.
    c_seq : configuration sequence to simulate.
    scalar : force/deny scalar mode; None = auto-detect from ``eff``.
    raise_ : if True, raise ValueError when the residual leaves the
        full-scale range; otherwise only warn.

    Returns
    -------
    (codes, u_history) : conversion codes and the residual history
    (``u_history`` is only populated when ``self.u_history`` is enabled).
    """
    scalar = default(scalar, len(np.shape(eff)) == 0)
    meta = c_seq.meta
    data = self.simulate_setup(eff, caps, refs, thres, ins, common_mode,
                               c_seq, scalar=scalar)
    self._standard_deviations(meta, data)
    idx_dct, ext_dct, n_dct = get(data, "indexing", "extended", "n")
    eff, thres, cm = get(ext_dct, "eff", "thres", "cm")
    n_diff = get(n_dct, "n_diff")[0]
    u_history = []
    # Run under the managed random state so noise draws are reproducible.
    with self._random_state as _:
        # shape (..., n_conf, n_diff,)
        seq_idx = self.init_seq_idx(c_seq, data)
        u = self.init_seq(seq_idx, data)
        if self.u_history:
            u_history.append(u)
        # Simulate each configuration set
        codes = []
        du_idx = None
        set_data = None
        n_sets = len(c_seq.configuration_sets)
        for ii_set, c_set in enumerate(c_seq.configuration_sets):
            set_data = self.init_set(c_set, data, set_data, du_idx)
            if set_data["previous"] is not None:
                # Set transition: quantize the last residual, then
                # redistribute charge between the two sets.
                code = self.u_thres_to_code(u[-1, ...], thres, data)
                code = code[np.newaxis, ...]
                trans_idx = self.transition_step_idx(
                    c_set, set_data, data, code)
                u = self.transition_step(trans_idx, u[-1:, ...], set_data,
                                         data)
                codes.append(code)
                if self.u_history:
                    u_history.append(u)
            for sample in range(c_set.ds_samples):
                # print(" {}/{} Sample {}/{} ({:0.2f}%)".format(
                #     ii_set+1, n_sets, sample+1, c_set.ds_samples, 100*sample/c_set.ds_samples),
                #     end='\r')
                code = self.u_thres_to_code(u[-1, ...], thres, data)
                code = code[np.newaxis, ...]
                codes.append(code)
                du_idx = self.du_indexes(code, c_set, set_data, data)
                # used in u
                du = self.du_compute(du_idx, c_set, set_data, data)
                # NOTE: u, eff, cm and du are resolved by name inside
                # ne.evaluate() -- do not rename these locals.
                u = ne.evaluate("u*eff + (1-eff)*cm + du")
                # common mode feedback
                if n_diff == 2:
                    u += cm[np.newaxis, ...] - np.mean(
                        u, axis=-1, keepdims=True)
                if self.u_history:
                    u_history.append(u)
    codes = np.concatenate(codes, axis=0)
    if self.u_history:
        u_history = np.concatenate(u_history, axis=0)
    if scalar:
        assert np.size(codes, 1) == 1
        # NOTE(review): when self.u_history is falsy, u_history is still the
        # empty list here, so np.size(u_history, 1) and the range check
        # below operate on a plain list -- looks like this path assumes
        # u_history is always enabled; confirm against callers.
        assert np.size(u_history, 1) == 1
        codes = codes[:, 0, ...]
        u_history = u_history[:, 0, ...]
    # Flag residuals outside full scale (with one-LSB margin).
    if ((u_history < meta.fsr[0] - meta.lsb).any()
            or (u_history > meta.fsr[1] + meta.lsb).any()):
        message = "Residual out of range."
        if raise_:
            raise ValueError(message)
        else:
            warnings.warn(message)
    return codes, u_history