def infer_inadequate(self, interaction_matrix):
    """
    Who needs a description when you can have a picture?

           COSY
        H-------H
    HSQC|       |HSQC
        C< INAD >C

    implies => INAD bond between CC
    """
    for i in range(len(interaction_matrix)):
        cosy_i = [x for x, y in enumerate(interaction_matrix[i])
                  if y == InteractionValues.COSY]
        hsqc_i = [x for x, y in enumerate(interaction_matrix[i])
                  if y == InteractionValues.HSQC]
        for b, c in cartesian(cosy_i, hsqc_i):
            d_s = [x for x, y in enumerate(interaction_matrix[b])
                   if y == InteractionValues.HSQC]
            for c_, d in cartesian([c], d_s):
                if c_ != d:
                    interaction_matrix[c_][d] = InteractionValues.INAD
    return interaction_matrix
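# Toy sketch of the rule above with a hypothetical 4-atom system (H0, H1,
# C2, C3): H0-H1 share a COSY coupling and each proton HSQC-couples to one
# carbon, so a C2-C3 INADEQUATE bond is inferred. IV is a stand-in enum,
# not the real InteractionValues.
from itertools import product as cartesian
from enum import IntEnum

class IV(IntEnum):
    NONE = 0; COSY = 1; HSQC = 2; INAD = 3

m = [[IV.NONE] * 4 for _ in range(4)]
m[0][1] = m[1][0] = IV.COSY  # H0 - H1
m[0][2] = m[2][0] = IV.HSQC  # H0 - C2
m[1][3] = m[3][1] = IV.HSQC  # H1 - C3

for i in range(4):
    cosy = [x for x, y in enumerate(m[i]) if y == IV.COSY]
    hsqc = [x for x, y in enumerate(m[i]) if y == IV.HSQC]
    for b, c in cartesian(cosy, hsqc):
        for d in (x for x, y in enumerate(m[b]) if y == IV.HSQC):
            if c != d:
                m[c][d] = IV.INAD

assert m[2][3] == m[3][2] == IV.INAD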
def __init__(self, polarities, neutral, product_table):
    self.P = polarities
    self.N = neutral
    self.table = product_table
    self.is_monotone = True
    self.is_final = True
    self.is_declarative = True
    self.is_dynamic = bool(set(self.P) > set(self.N) and self.N)

    g = nx.DiGraph()
    for a in self.table:
        for prod in self.table[a].values():
            if a != prod:
                g.add_edge(a, prod)
    try:
        # list() so the result can be sliced (topological_sort returns a
        # generator in networkx >= 2)
        order = list(nx.topological_sort(g))
        if set(self.N) != set(order[-len(self.N):]):
            self.is_final = False
    except nx.NetworkXUnfeasible:
        self.is_monotone = False
        self.is_final = False

    for a, b, c in cartesian(self.table, repeat=3):
        try:
            x = self.table[self.table[a][b]][c]
            y = self.table[self.table[b][c]][a]
            if x != y:
                self.is_declarative = False
                break
        except KeyError:
            continue
def plot_stats(nucs, structure, param, violation_padding):
    import matplotlib.pyplot as plt

    stat_names = {"scale": {}, "violations": {'padding': violation_padding}}
    stats = defaultdict(lambda: np.empty((len(nucs), 3), dtype='float'))

    fig, axs = plt.subplots(len(stat_names), 1)
    for (i, nuc), (stat, kwargs) in cartesian(enumerate(nucs),
                                              stat_names.items()):
        with HDFFile(nuc, "r") as f:
            param_value = f['structures'][structure]['calculation'].attrs[param]
            if param == "particle_sizes":
                # the attr is a sequence of sizes; keep only the final one
                param_value = param_value[-1]
            stat_values = globals()[stat](f, structure, **kwargs)
            stats[stat][i] = [param_value, np.mean(stat_values),
                              np.std(stat_values)]
    for ax, (stat_name, data) in zip(axs, stats.items()):
        ax.set_ylabel(stat_name)
        ax.set_xlabel(param)
        data = np.sort(data, axis=0)
        ax.errorbar(data.T[0], data.T[1], yerr=data.T[2])
    fig.tight_layout()
    plt.show()
def characterize_noise(args, adcs_sweep, seed, snr_noise, est_thres_s, ref,
                       match_noise=False):
    shape = np.shape(adcs_sweep)
    shape = (shape[0], np.size(snr_noise), shape[1],)

    real = np.stack((adcs_sweep,) * shape[1], axis=1)
    calibrated = np.copy(real)
    per_snr = np.zeros(shape)
    cal_snr = np.zeros(shape)
    nav_snr = np.zeros(shape)

    confidence = args.confidence
    sigmas = np.sqrt(2) * erfinv(confidence)

    # Sweep thres
    for idx in cartesian(*tuple(range(s) for s in shape[:-1])):
        e_thres_s = est_thres_s[idx[0]]
        noise = snr_noise[idx[1]]
        idx = idx + (slice(None),)

        lsb_scale = (1 + 2 * (e_thres_s * sigmas)) if match_noise else 1
        mod_adcs = real[idx].tolist()

        aargs = (noise, 0,) if ref else (0, noise,)
        calibrated_, per_snr_, cal_snr_, nav_snr_ = \
            characterize_point(args, mod_adcs, seed, lsb_scale, *aargs)

        calibrated[idx] = np.array(calibrated_, dtype=object)
        per_snr[idx] = np.array(per_snr_)
        cal_snr[idx] = np.array(cal_snr_)
        nav_snr[idx] = np.array(nav_snr_)

    return real, calibrated, per_snr, cal_snr, nav_snr
def iter_stages_idx(self):
    if len(self.shape) == 0:
        yield tuple()
    else:
        for idx in cartesian(*tuple(range(ss) for ss in self.shape)):
            yield idx
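# Sketch of the idiom used above (and in the other iter_*_idx helpers):
# cartesian over per-axis ranges enumerates every multi-index of an array,
# equivalent to numpy.ndindex.
from itertools import product as cartesian
import numpy as np

shape = (2, 3)
assert list(cartesian(*(range(s) for s in shape))) == list(np.ndindex(*shape))
# -> [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)]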
def __init__(self, neighbor_order, *args, **kwargs):
    super(NthNeighborNodeHistogramCalculator, self).__init__(*args, **kwargs)
    self.labels_to_consider = kwargs.get('labels_to_consider',
                                         self._gnx.graph["node_labels"])
    self._num_classes = len(self.labels_to_consider)
    self._neighbor_order = neighbor_order
    self._relation_types = [
        "".join(x) for x in cartesian(*(["io"] * self._neighbor_order))
    ]
    self._print_name += "_%d" % (neighbor_order,)
    counter = {i: 0 for i in range(self._num_classes)}
    if self._gnx.is_directed():
        self._features = {
            node: {rtype: counter.copy() for rtype in self._relation_types}
            for node in self._gnx
        }
        self._reg_features = {
            node: {rtype: [] for rtype in self._relation_types}
            for node in self._gnx
        }
    else:
        self._features = {node: counter.copy() for node in self._gnx}
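# Sketch of the relation-type labels built above: for neighbor_order = 2 the
# product over ["io"] * 2 yields every in/out pattern along a 2-step walk.
from itertools import product as cartesian

assert ["".join(x) for x in cartesian(*(["io"] * 2))] == [
    'ii', 'io', 'oi', 'oo']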
def characterize_uncertanty(args, adcs_sweep, seed, est_thres_s):
    shape = np.shape(adcs_sweep)
    shape = (shape[0], np.size(est_thres_s), shape[1],)

    real = np.stack((adcs_sweep,) * shape[1], axis=1)
    calibrated = np.copy(real)
    per_snr = np.zeros(shape)
    cal_snr = np.zeros(shape)
    nav_snr = np.zeros(shape)

    confidence = args.confidence
    sigmas = np.sqrt(2) * erfinv(confidence)

    # Sweep thres
    for idx in cartesian(*tuple(range(s) for s in shape[:-1])):
        e_thres_s = est_thres_s[idx[1]]
        idx = idx + (slice(None),)

        lsb_scale = 1 + 2 * (e_thres_s * sigmas)
        mod_adcs = real[idx].tolist()

        calibrated_, per_snr_, cal_snr_, nav_snr_ = \
            characterize_point(args, mod_adcs, seed, lsb_scale, 0, 0)

        calibrated[idx] = np.array(calibrated_, dtype=object)
        per_snr[idx] = np.array(per_snr_)
        cal_snr[idx] = np.array(cal_snr_)
        nav_snr[idx] = np.array(nav_snr_)

    return real, calibrated, per_snr, cal_snr, nav_snr
def sweep_parameters(self, sweep_dicts):
    def sweep(dct):
        sw_type = dct.get("type", "linear")
        if sw_type == "linear":
            values = np.linspace(dct["start"], dct["end"], dct["samples"])
        elif sw_type == "log":
            values = np.logspace(dct["start"], dct["end"], dct["samples"])
        else:
            raise ValueError("sweep type {} not recognized.".format(sw_type))

        def gen_dict(value):
            copy_keys = ("parameter", "index",)
            result = {key: dct[key] for key in copy_keys}
            result["value"] = value
            return result

        return [gen_dict(value) for value in values]

    values_axes = tuple(sweep(dct) for dct in sweep_dicts)
    shape = tuple(len(axis) for axis in values_axes)

    ins = np.zeros(shape + self.shape, dtype=int)
    stages = np.zeros(shape + self.shape, dtype=object)
    for idx in cartesian(*tuple(range(ss) for ss in shape)):
        val = tuple(vals[ii] for ii, vals in zip(idx, values_axes))
        this_stages = copy.deepcopy(self.stages)
        in_ = np.array(self.ins)

        new_val = tuple()
        for vall in val:
            if vall["parameter"] == "test":
                in_[(Ellipsis, vall["index"], slice(None),)] = vall["value"]
            else:
                new_val = (vall,) + new_val

        for sub_idx in this_stages.iter_idx():
            # index with the inner loop variable, not the outer idx
            this_stages[sub_idx] = \
                this_stages[sub_idx].create_modified(new_val)

        ins[idx + (Ellipsis,)] = in_
        stages[idx + (Ellipsis,)] = this_stages

    return values_axes, StageTestbench(
        stages.tolist(), ins, self.configuration_sequence,
        shape=shape + self.shape)
def iter_idx(self):
    if self.dims == 0:
        yield tuple()
    else:
        for idx in cartesian(*tuple(
                range(ss) for ss in np.shape(self.data))):
            yield idx
def ambiguous(self):
    from itertools import product as cartesian

    try:
        new_bases = self.ambiguous_bases.get(self[0], (self[0],))
    except IndexError:
        yield Sequence('')
    else:
        yield from map(Sequence('').join,
                       cartesian(new_bases, self[1:].ambiguous))
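# Illustrative, standalone sketch of what `ambiguous` computes (the mapping
# below is a hypothetical subset of IUPAC codes, not the class attribute):
from itertools import product as cartesian

ambiguous_bases = {'N': 'ACGT', 'R': 'AG'}
seq = 'ART'
expanded = {''.join(bases)
            for bases in cartesian(*(ambiguous_bases.get(b, b) for b in seq))}
assert expanded == {'AAT', 'AGT'}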
def location_set(q):
    # location_set is an iterable which enumerates the elements of the
    # rectangle [0, q) (in Z^d); the map is essentially just the q-adic
    # representation of the numbers [0, prod(q))
    if type(q) == int:
        q = (q,)
    return [
        tuple(reversed(k))
        for k in cartesian(*[range(qj) for qj in reversed(q)])
    ]
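# Quick check of the enumeration order (a sketch, not part of the module):
# for q = (2, 3) the first coordinate varies fastest, walking [0,2) x [0,3).
assert location_set((2, 3)) == [(0, 0), (1, 0), (0, 1), (1, 1), (0, 2), (1, 2)]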
def characterize(args, adcs, seed):
    with misc.push_random_state():
        np.random.seed(seed)
        seeds = [np.random.randint(0, 4294967296) for _ in range(3)]

    assert not args.relative_snr_ref, "TODO implement relative"
    assert not args.relative_snr_thres, "TODO implement relative"

    min_snr_ref_v = args.min_snr_ref_v
    min_snr_thres_v = args.min_snr_thres_v

    fsr = adcs[0].stages[0].meta.fsr
    n_bits = int(np.sum([np.floor(stage.meta.n_bits)
                         for stage in adcs[0].stages]))
    n_bits += int(gen.infer_thres_bits(adcs[0].tail)[0])
    lsb = gen.compute_lsb(n_bits, *fsr)

    if min_snr_ref_v is None:
        min_snr_ref_v = lsb / 2
    if min_snr_thres_v is None:
        min_snr_thres_v = lsb / 2

    snr_ref_inv = np.linspace(0, min_snr_ref_v, args.samples_snr_ref)
    snr_thres_inv = np.linspace(0, min_snr_thres_v, args.samples_snr_thres)

    snr_ref = np.power((fsr[1] - fsr[0]), 2) / np.power(snr_ref_inv, 2)
    snr_thres = np.power((fsr[1] - fsr[0]), 2) / np.power(snr_thres_inv, 2)
    snr_ref[0] = 0
    snr_thres[0] = 0

    real_thres_s = data.at_least_ndarray(args.real_thres_s)
    shape = (np.size(real_thres_s), np.size(adcs),)
    adcs_sweep = np.tile(np.array(adcs, dtype=object), shape[:-1] + (1,))

    # Sweep thres
    with misc.push_random_state():
        np.random.seed(seed)
        for idx in cartesian(*tuple(range(s) for s in shape)):
            r_thres_s = real_thres_s[idx[0]]
            adcs_sweep[idx] = copy.deepcopy(adcs_sweep[idx])
            for stage in adcs_sweep[idx].stages:
                r_thres_s_local = r_thres_s * stage.meta.lsb
                stage._thres = np.random.normal(stage.thres, r_thres_s_local)
            adcs_sweep[idx]._tail = np.random.normal(adcs_sweep[idx].tail,
                                                     r_thres_s)

    uncertain = characterize_uncertanty(args, adcs_sweep, seed, real_thres_s)
    ref = characterize_noise(args, adcs_sweep, seed, snr_ref, real_thres_s,
                             ref=True)
    thres = characterize_noise(args, adcs_sweep, seed, snr_thres,
                               real_thres_s, ref=False)

    return uncertain, ref, thres
def array_location_set(k):
    # returns an iterable which enumerates the indices of a matrix from the
    # bottom-left corner, reading left->right, bottom->top, consistent with
    # R^n enumeration; the 2nd-to-last coordinate needs to be the x-axis,
    # the last coordinate is the y-axis but counts backwards from num_rows
    if type(k) == int:
        return [(j,) for j in range(k)]
    elif len(k) == 1:
        return [(j,) for j in range(k[0])]
    else:
        return [
            tuple(reversed((j[-1],) + (-j[-2] - 1,) + j[:-2]))
            for j in cartesian(*[range(kj) for kj in reversed(k)])
        ]
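# Sketch of the order for a 2x2 matrix: bottom row first, left to right,
# with negative row indices counted back from the number of rows.
assert array_location_set((2, 2)) == [(-1, 0), (-1, 1), (-2, 0), (-2, 1)]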
def generate(a, b):
    grammar = a.grammar
    a, b = [x.node.values() for x in [a, b]]
    # materialize both so they can be concatenated and iterated repeatedly
    pairs = list(filter_non_viable(cartesian(a, b), grammar))
    intra = map(lambda s: cartesian(s, repeat=2), (a, b))
    intra = list(filter_non_viable(chain(*intra), grammar))
    viable_pairs = set(map(frozenset, pairs + intra))

    stacks = chain.from_iterable(
        combinations(pairs, n) for n in range(1, len(pairs) + 1))
    implic = tuple(chain(*(implications(p, grammar) for p in pairs)))
    for stack in map(list, stacks):
        stack_ = set(map(frozenset, stack))
        if any(stack_ & {x, i} == {x} for x, i in implic):
            continue
        if any(not viable_pairs & {s} for s in indirect(stack)):
            continue
        yield stack
def __init__(self, *args, **kwargs):
    super(DirectedNthNeighborNodeHistogramCalculator,
          self).__init__(*args, **kwargs)
    counter = {i: 0 for i in range(self._num_classes)}
    self._print_name += "_%d" % (self._neighbor_order,)
    self._relation_types = [
        "".join(x) for x in cartesian(*(["io"] * self._neighbor_order))
    ]
    self._features = {
        node: {rtype: counter.copy() for rtype in self._relation_types}
        for node in self._gnx
    }
def letterCombinations(self, digits: str) -> List[str]:
    if not digits:
        return []
    phone = {
        '2': 'abc', '3': 'def', '4': 'ghi', '5': 'jkl',
        '6': 'mno', '7': 'pqrs', '8': 'tuv', '9': 'wxyz'
    }
    return [''.join(i) for i in cartesian(*[phone[d] for d in digits])]
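# Usage sketch: letterCombinations("23") is exactly product('abc', 'def')
# joined into strings.
from itertools import product as cartesian

assert [''.join(p) for p in cartesian('abc', 'def')] == [
    'ad', 'ae', 'af', 'bd', 'be', 'bf', 'cd', 'ce', 'cf']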
def __init__(self, neighbor_order, *args, **kwargs):
    super(NthNeighborNodeEdgeHistogramCalculator,
          self).__init__(*args, **kwargs)
    # self._num_classes = len(self._gnx.graph["node_labels"])
    self._neighbor_order = neighbor_order
    self._relation_types = [
        "".join(x) for x in cartesian(*(["io"] * self._neighbor_order))
    ]
    self._print_name += "_%d" % (neighbor_order,)
    counter = {i: 0 for i in self._gnx.graph["edge_labels"]}
    self._features = {
        node: {rtype: counter.copy() for rtype in self._relation_types}
        for node in self._gnx
    }
def __call__(self, data):
    supported_types = set([OPINION_BROKEN.short, OPINION_ISSUE.short,
                           OPINION_PRAISE.short])
    for key, value in recombined(data):
        self.comments_in += 1
        m_id, ts, type, product, version, platform, locale, \
            manufacturer, device, url, message = value.split("\t", 10)
        if not url or type not in supported_types:
            continue
        app = "<%s>" % product
        site = normalize_url(url)
        out_keys = cartesian((version,), (site,), (app, platform, None),
                             (type,))
        out_value = (m_id, message)
        self.comments_out += 1
        for out_key in out_keys:
            yield (out_key, out_value)
def prepickle_data(dat, path_context, directory, name):
    real, calibrated, per_snr, cal_snr, nav_snr = dat
    real_str = np.empty_like(real)
    calibrated_str = np.empty_like(calibrated)

    for idx in cartesian(*tuple(range(s) for s in np.shape(real))):
        lname = "{}[{}]".format(name, ','.join(str(ii) for ii in idx))

        real_location = data.DataLocation(path_context, directory,
                                          lname + ".real", ".json")
        real_str[idx] = data.save(real[idx], real_location)

        calibrated_location = data.DataLocation(path_context, directory,
                                                lname + ".calibrated",
                                                ".json")
        calibrated_str[idx] = data.save(calibrated[idx], calibrated_location)

    return real_str, calibrated_str, per_snr, cal_snr, nav_snr
def decompose(self):
    def sum_eq(a, b, c):
        return tuple(map(add, a, b)) == c

    length = ceil(log(len(self.P), 2))
    configs = cartesian([0, 1], repeat=length)

    prob = constraint.Problem()
    prob.addVariables(self.P, list(configs))
    prob.addConstraint(constraint.AllDifferentConstraint())
    for p1 in self.table:
        for p2 in self.table[p1]:
            prob.addConstraint(sum_eq, (p1, p2, self.table[p1][p2]))
    return prob.getSolution()
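# Sketch of the candidate assignments built above: with, say, len(self.P) == 3
# polarities, length = ceil(log2(3)) = 2, so each polarity is assigned a
# distinct 0/1 vector drawn from:
from itertools import product as cartesian

assert list(cartesian([0, 1], repeat=2)) == [(0, 0), (0, 1), (1, 0), (1, 1)]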
def spacedRotations(D, N):
    from math import pi, sin, cos, sqrt
    from itertools import product as cartesian, repeat
    from .util import frange

    if D == 2:
        yield from zip(frange(-pi, pi, 2 * pi / N))
    elif D == 3:
        # Ken Shoemake
        # Graphics Gems III, pp 124-132
        from .quaternion import Quaternion

        for X, *theta in cartesian(frange(0, 1, 1 / N),
                                   *repeat(frange(0, 2 * pi, 2 * pi / N), 2)):
            R = (sqrt(1 - X), sqrt(X))
            yield Quaternion(sin(theta[0]) * R[0], cos(theta[0]) * R[0],
                             sin(theta[1]) * R[1],
                             cos(theta[1]) * R[1]).axis_angle
    else:
        raise NotImplementedError(
            "Only defined for D in [2..3], not {}".format(D))
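# Sanity sketch of Shoemake's construction used above, standalone and with a
# plain norm check in place of the Quaternion class: the 4-vector built from
# (X, theta_0, theta_1) always has unit norm, so it is a valid rotation.
from math import sin, cos, sqrt, pi
from itertools import product as cartesian

for X, t0, t1 in cartesian((0.0, 0.25, 0.5), (0.0, pi / 3), (0.0, pi / 2)):
    r = (sqrt(1 - X), sqrt(X))
    q = (sin(t0) * r[0], cos(t0) * r[0], sin(t1) * r[1], cos(t1) * r[1])
    assert abs(sum(c * c for c in q) - 1.0) < 1e-12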
def postpickle_data(dat, path_context):
    if dat is None:
        return None

    real_str, calibrated_str, per_snr, cal_snr, nav_snr = dat
    real = np.empty_like(real_str)
    calibrated = np.empty_like(calibrated_str)

    memo = {}
    for idx in cartesian(*tuple(range(s) for s in np.shape(real_str))):
        real_path = real_str[idx]
        real_dl = data.DataLocation.Parse(real_path, path_context,
                                          None_on_fail=False)
        real[idx] = data.load(gen.PipeParameters, real_dl, memo)

        calibrated_path = calibrated_str[idx]
        calibrated_dl = data.DataLocation.Parse(calibrated_path, path_context,
                                                None_on_fail=False)
        calibrated[idx] = data.load(gen.PipeParameters, calibrated_dl, memo)

    return real, calibrated, per_snr, cal_snr, nav_snr
from itertools import islice, product as cartesian, starmap
from coherent_point_drift.geometry import (rigidXform, rotationMatrix, RMSD,
                                           randomRotations)
from coherent_point_drift.align import globalAlignment
import matplotlib.pyplot as plt
from collections import defaultdict
import numpy as np

fig, axs = plt.subplots(2, 1)
for ndim, ax in zip((2, 3), axs):
    ax.set_title("{}D".format(ndim))
    nstepss = range(1, 8)
    pointss = np.random.random((10, 12, ndim))
    rotations = starmap(rotationMatrix, islice(randomRotations(ndim), 10))

    rmsds = defaultdict(list)
    for nsteps, points, rotation in cartesian(nstepss, pointss, rotations):
        degraded = rigidXform(points, R=rotation)
        xform = globalAlignment(points, degraded, w=0.1, nsteps=nsteps)
        rmsds[nsteps].append(RMSD(points, rigidXform(degraded, *xform)))

    labels = sorted(rmsds.keys())
    ax.violinplot([rmsds[i] for i in labels], labels)
fig.tight_layout()
plt.show()
def test_matrix_alignment(self):
    block = InterfaceBlock('Foo', layout=BlockLayout.std140)
    for r, c in cartesian(range(2, 5), repeat=2):
        mat = 'mat{}x{}'.format(r, c)
        i = InterfaceBlockMember(block, 'foo', mat)
        self.assertEqual(i.alignment, 16)
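# Sketch of what the product enumerates: the nine GLSL matrix types
# mat2x2 .. mat4x4, each of which the test above expects std140 to align to
# 16 bytes (every column is padded out to a vec4).
from itertools import product as cartesian

mats = ['mat{}x{}'.format(r, c) for r, c in cartesian(range(2, 5), repeat=2)]
assert len(mats) == 9 and mats[0] == 'mat2x2' and mats[-1] == 'mat4x4'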
def __init__(self, stages, ins, configuration_sequence, shape=None,
             data_location=None):
    super().__init__(data_location)
    NestedStages = data.nested_lists_of(gen.StageParameters)
    NestedSequences = data.nested_lists_of(gen.ConfigurationSequence)

    if not isinstance(configuration_sequence, NestedSequences):
        conf_shape = np.shape(configuration_sequence)
        configuration_sequence = NestedSequences(configuration_sequence,
                                                 len(conf_shape))
    conf_shape = np.shape(configuration_sequence.data)

    if not isinstance(stages, NestedStages):
        shape = default(shape, np.shape(stages))
        dims = len(shape)

        cur = stages
        root_arr = stages
        # Check a valid 0,0,0...
        for dd in range(dims):
            assert isinstance(cur, (tuple, list,))
            cur = cur[0]
        # Check elements
        for idx in cartesian(*tuple(range(ss) for ss in shape)):
            assert isinstance(misc.getitem(stages, idx), gen.StageParameters)

        # Check regularity
        def rec_check(lst, shape):
            valid = ((not isinstance(lst, list) and len(shape) == 0)
                     or len(lst) == shape[0])
            if len(shape) > 1:
                sub_shape = shape[1:]
                valid = valid and all(
                    [rec_check(llst, sub_shape) for llst in lst])
            return valid

        assert rec_check(stages, shape)
    else:
        stages_shape = np.shape(stages.data)
        shape = default(shape, stages_shape)
        assert shape == stages_shape
        dims = len(shape)
        root_arr = stages.data

    ref_element = misc.getitem(root_arr, (0,) * dims)
    ins = data.at_least_ndarray(ins)
    if len(np.shape(ins)) == 1:
        ins = ins[..., np.newaxis]

    # Broadcast ins
    if len(np.shape(ins)) == 2:
        ins = ins[(np.newaxis,) * dims + (Ellipsis,)]
        ins = np.tile(ins, shape + (1, 1,))

    assert len(np.shape(ins)) == dims + 2

    if np.size(ins, -1) != ref_element.meta.n_diff:
        cm = ref_element.meta.common_mode
        ins = np.concatenate((cm - ins, cm + ins,), axis=1)

    # All meta the same
    for idx in cartesian(*tuple(range(ss) for ss in shape)):
        c_element = misc.getitem(root_arr, idx)
        assert c_element.meta == ref_element.meta

    self._stages = NestedStages.EnsureIsInstance(stages)
    self._shape = shape
    self._ins = ins
    self._configuration_sequence = configuration_sequence
output_file_name_base_str = "output/" + input_file_name[
    input_file_name.rfind("/") + 1:input_file_name.index(".wav")] + "/"
try:
    mkdir(path=output_file_name_base_str)
except FileExistsError:
    pass

window_sizes = [int(sys.argv[2])]          # e.g. [4, 8, 16]
max_depth_sizes = [int(sys.argv[3])]       # e.g. [16, 4, 8, 32]
random_state_sizes = [0]                   # e.g. [2, 1, 0]
num_estimators_sizes = [int(sys.argv[4])]  # e.g. [100, 50, 200, 500]

for window_size, MAX_DEPTH, RANDOM_STATE, N_ESTIMATORS in cartesian(
        window_sizes, max_depth_sizes, random_state_sizes,
        num_estimators_sizes):
    print("\n\n-----NEW TRIAL-----")
    print((window_size, MAX_DEPTH, RANDOM_STATE, N_ESTIMATORS))
    gc.collect()
    # distorted input, distorted output, center window
    output_file_name_restored = (
        output_file_name_base_str + "w_" + str(window_size) + "_c_di_do_md_"
        + str(MAX_DEPTH) + "_rs_" + str(RANDOM_STATE) + "_ne_"
        + str(N_ESTIMATORS) + ".wav")
def iter_idx(self):
    return cartesian(self.iter_conf_idx(), self.iter_stages_idx())
def calib(meta, args, interlace, use_full_range=None):
    n_caps = meta.n_caps
    n_refs = meta.n_refs
    n_diff = meta.n_diff

    n_cs = (n_caps - 1) // 2
    n_cf = n_caps - n_cs
    use_full_range = misc.default(use_full_range, n_cs < 2)

    ds_samples = args.samples
    if args.n_test > 0:
        raise ValueError("Minimal does not support test inputs.")

    comb_cs = misc.iterate_combinations(n_caps, n_cs)
    if args.full:
        comb_cs = [tuple(misc.iterate_permutations(cs)) for cs in comb_cs]
        comb_cs = [elem for tlp in comb_cs for elem in tlp]

    slice_ = slice(None) if use_full_range else slice(1, -1)
    comb_refs = gen.ds_map(n_cs, n_refs, n_cs * (n_refs - 1) + 1)
    comb_refs = np.transpose(comb_refs[:, slice_], (1, 0, 2,))
    comb_refs = comb_refs.tolist()
    comb_refs = [(comb_refs[ii], comb_refs[jj],)
                 for ii in range(len(comb_refs))
                 for jj in range(ii + 1, len(comb_refs))]

    comb_cs = list(comb_cs)
    comb_refs = list(comb_refs)

    even_configs = []
    even_ins = []
    ics = []
    with misc.push_random_state():
        seed = None if args.seed is None else int(args.seed)
        np.random.seed(seed)
        for cs_ii, refs_ii in cartesian(comb_cs, comb_refs):
            even_configs.append(gen.Configuration(meta, cs_ii))
            top_ii, bot_ii = refs_ii
            if args.inputs == "":
                sub_seed = np.random.randint(0, 4294967296)
                even_ins.append(
                    gen.InternalRandom(meta, np.size(cs_ii), sub_seed))
            else:
                top = gen.InternalDC(meta, top_ii)
                bot = gen.InternalDC(meta, bot_ii)
                even_ins.append(gen.ACCombinator(meta, top, bot, args.period))
            inv = [[n_refs - 1 - iii for iii in ii] for ii in top_ii]
            inv = inv + ([[n_refs // 2, n_refs - n_refs // 2][:n_diff]]
                         * (n_cf - n_cs))
            ics.append(gen.InitialCondition(meta, inv))

    if interlace:
        n_cs_h = n_cs // 2
        assert n_cs_h > 0, "Not enough capacitors to decrease bits."
        odd_configs = []
        odd_ins = []
        for conf, in_ in zip(even_configs, even_ins):
            left = (n_cs - n_cs_h) // 2
            cs_range = range(left, left + n_cs_h)
            mask = np.zeros((n_cs,), dtype=bool)
            mask[cs_range] = 1
            odd_configs.append(
                gen.Configuration(conf.meta, conf.cs[cs_range, :]))
            odd_ins.append(gen.InputMask(in_.meta, in_, mask))
    else:
        odd_ins = even_ins
        odd_configs = even_configs

    conf_sets = []
    parity = 0
    for samples in ds_samples:
        if parity == 0:
            configs = even_configs
            inputs = even_ins
        else:
            configs = odd_configs
            inputs = odd_ins
        conf_sets.append(gen.ConfigurationSet(samples, inputs, configs))
        parity = (parity + 1) % 2

    if args.ic == "clear":
        ics = [gen.InitialCondition.Discharged(meta, n_cf)] * len(odd_ins)
    elif args.ic == "precharge":
        pass
    else:
        raise ValueError("ic type {} not supported".format(args.ic))

    return gen.ConfigurationSequence(ics, conf_sets * args.loop)
def __init__(self, value):
    self.prefix = self.__prefixes__[self.name]
    self.machine_type = self.__machine_types__[self.name]
    self.scalar_type = self
    self.opaque = False

scalar_doc = Scalar.__doc__
Scalar = Enum('Scalar', ((s, s) for s in scalar_types), type=Scalar)
Scalar.__doc__ = scalar_doc

floating_point_scalars = {Scalar.float, Scalar.double}

sampler_dims = range(1, 4)
sampler_data_types = {Scalar.float, Scalar.int, Scalar.uint}
sampler_types = [
    "{}sampler{}D".format(scalar_type.prefix, ndim)
    for scalar_type, ndim in cartesian(sampler_data_types, sampler_dims)
]

class Sampler(str, BasicType, Enum):
    '''The GLSL sampler types.

    Samplers define the following attributes:

    *opaque*
       Whether the datatype is an opaque type (:py:obj:`True`)
    '''
    __ndims__ = {
        "{}sampler{}D".format(scalar_type.prefix, ndim): ndim
        for scalar_type, ndim in cartesian(sampler_data_types, sampler_dims)
    }

    def __init__(self, value):
        self.ndim = self.__ndims__[self.name]
        self.opaque = True

sampler_doc = Sampler.__doc__
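# Sketch of the names generated above, with a hypothetical prefix table that
# mirrors scalar_type.prefix ('' for float, 'i' for int, 'u' for uint):
from itertools import product as cartesian

prefixes = ('', 'i', 'u')
names = sorted("{}sampler{}D".format(p, n)
               for p, n in cartesian(prefixes, range(1, 4)))
assert names[0] == 'isampler1D' and 'sampler2D' in names and len(names) == 9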
def recreate(eff, caps, refs, thres, ins, common_mode, c_seq, cache,
             scalar=None, limit_samples=None):
    fun = sims.Simulator
    scalar = default(scalar, len(np.shape(eff)) == 0)
    limit_samples = default(limit_samples, c_seq.samples) + 1
    samples = 0

    cache = dict(cache)
    cache["data"] = fun.simulate_setup(eff, caps, refs, thres, ins,
                                       common_mode, c_seq, scalar=scalar)

    data = cache["data"]
    seq_idx = cache["seq_idx"]

    u_history = []
    # shape (..., n_conf, n_diff,)
    u = fun.init_seq(seq_idx, data)
    u_history.append(u)
    samples += 1

    # Simulate each configuration set
    dct_idx, dct_ext, dct_n = get(data, "indexing", "extended", "n")
    n_conf, n_diff = get(dct_n, "n_conf", "n_diff")
    eff, cm = get(dct_ext, "eff", "cm")
    base_shape, base_len = get(dct_idx, "base_shape", "base_len")

    c_sets = c_seq.configuration_sets

    def multi_idx_filter(idx_tuple, sub_idx):
        return tuple(ii[sub_idx] if hasattr(ii, "__getitem__") else ii
                     for ii in idx_tuple)

    for ii_set, c_set, data_trans_du in zip(range(len(c_sets)), c_sets,
                                            cache["sets"]):
        set_data, trans_idx, du_idx = data_trans_du
        if set_data["previous"] is not None:
            u = fun.transition_step(trans_idx, u[-1:, ...], set_data, data)
            u_history.append(u)
            samples += 1

        r_du = (1 - eff) * cm
        n_u = np.empty((c_set.ds_samples,) + base_shape + (n_conf, n_diff,))

        du = fun.du_compute(du_idx, c_set, set_data, data)
        for idx in cartesian(*tuple(range(ss) for ss in base_shape)):
            # fresh copy per index: the entries below are overwritten with
            # sliced views, so the copy must not persist across iterations
            local_du_idx = dict(du_idx)  # used in u
            local_idx = tuple(slice(ii, ii + 1) for ii in idx) + (Ellipsis,)
            ext_local_idx = (slice(None),) + local_idx

            local_du_idx["r_ref"] = local_du_idx["r_ref"][ext_local_idx]
            local_du_idx["m_in_ref"] = multi_idx_filter(
                local_du_idx["m_in_ref"], ext_local_idx)
            local_du_idx["m_in_ins"] = multi_idx_filter(
                local_du_idx["m_in_ins"], ext_local_idx)

            zi = u[(slice(-1, None),) + local_idx] * eff[local_idx]
            local_u = lfilter([1], [1, -eff[local_idx].item()],
                              du[ext_local_idx]
                              + r_du[(np.newaxis,) + local_idx],
                              zi=zi, axis=0)[0]
            n_u[ext_local_idx] = local_u

        u = n_u
        u_history.append(u)
        samples += np.size(u, 0)
        if samples >= limit_samples:
            break

    u_history = np.concatenate(u_history, axis=0)
    u_history = u_history[:limit_samples, ...]
    if scalar:
        assert np.size(u_history, 1) == 1
        u_history = u_history[:, 0, ...]
    return u_history
def build_report(ideal, real, ideal_title="IDEAL", real_title="REAL"):
    assert ideal.meta == real.meta

    title_len = 24
    index_len = 6
    ideal_len = 10
    real_len = 10
    error_len = 10

    def title(header):
        return ("- {} ".format(header)
                + '-' * max(title_len - 3 - len(header), 0))

    def index(idx):
        assert len(idx) > 0
        if len(idx) == 1:
            return "{}".format(idx)
        else:
            return "({})".format(','.join([str(ii) for ii in idx]))

    head = " {{:^{}}} {{:^{}}} {{:^{}}} {{:^{}}}".format(
        index_len, ideal_len, real_len, error_len)
    line = " {{:^{}}} {{:{}.{}f}} {{:{}.{}f}} {{:{}.{}f}}".format(
        index_len, ideal_len, ideal_len - 3, real_len, real_len - 3,
        error_len, error_len - 3)

    cm, eff, cap, cs_CF, ref = compare_adcs(ideal, real)

    result = []
    result.append(title("CHARGE TRANSFER"))
    result.append(head.format("INDEX", ideal_title, real_title, "ERROR (%)"))
    for idx in cartesian(*tuple(range(ss) for ss in np.shape(eff))):
        result.append(line.format('-', ideal.eff, real.eff, 100 * eff))
    result.append('')

    result.append(title("COMMON MODE"))
    result.append(head.format("INDEX", ideal_title, real_title,
                              "ERROR (LSB)"))
    for idx in cartesian(*tuple(range(ss) for ss in np.shape(cm))):
        r_cm = real.common_mode[idx]
        i_cm = ideal.common_mode[idx]
        result.append(line.format('-', i_cm, r_cm, cm[idx]))
    result.append('')

    result.append(title("CAPACITOR"))
    result.append(head.format("INDEX", ideal_title, real_title, "ERROR (%)"))
    for idx in cartesian(*tuple(range(ss) for ss in np.shape(cap))):
        r_cap = real.caps[idx]
        i_cap = ideal.caps[idx]
        result.append(line.format(index(idx), i_cap, r_cap, 100 * cap[idx]))
    result.append('')

    result.append(title("CAPACITOR (Cs/CF)"))
    result.append(head.format("INDEX", ideal_title, real_title, "ERROR (%)"))
    for idx in cartesian(*tuple(range(ss) for ss in np.shape(cs_CF))):
        r_cap = real.caps[idx] / np.sum(real.caps)
        i_cap = ideal.caps[idx] / np.sum(ideal.caps)
        result.append(line.format(index(idx), i_cap, r_cap, 100 * cs_CF[idx]))
    result.append('')

    result.append(title("REFERENCE"))
    result.append(head.format("INDEX", ideal_title, real_title,
                              "ERROR (LSB)"))
    for idx in cartesian(*tuple(range(ss) for ss in np.shape(ref))):
        r_ref = real.refs[idx]
        i_ref = ideal.refs[idx]
        result.append(line.format(index(idx), i_ref, r_ref, ref[idx]))

    return '\n'.join(result)
        return np.append(N, min(1, N[-1]))

    N = list(reversed(get_N(I)))
    return linregress(range(len(N)), list(map(log2, N)))[0]

zoom_in = lambda x, scale: scale * (x % (1. / scale))

def invert(A, t, mode='<='):
    if mode == '<=':
        return (A <= t).astype(int)
    else:
        return (A >= t).astype(int)

matrix_range = lambda m, n: map(np.array, cartesian(range(m), range(n)))
indices = lambda shape: cartesian(range(shape[0]), range(shape[1]))
pad0 = lambda a, n: np.pad(a, (0, n), mode='constant', constant_values=0)

def make_fractal_image(S, n, stopshort=False):
    img = S
    listS = S.tolist()
    values = set.union(*(set(row) for row in listS))
    if stopshort:
        m = n // len(S)
    else:
        m = n
def products(max_val):
    for a, b in cartesian(multiplicands(max_val), repeat=2):
        yield a * b
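# Usage sketch with a hypothetical multiplicands() yielding 2..max_val
# (the real helper is not shown in this snippet):
from itertools import product as cartesian

def multiplicands(max_val):
    return range(2, max_val + 1)

assert sorted(set(products(3))) == [4, 6, 9]  # 2*2, 2*3, 3*3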