def compute_normalised_by_order(self, ap, n, aa=None) -> ApproximationErrorCode:
    """ Generates normalised transfer function prioritising the fixed order """
    # Computing needed constants
    zeros, poles, gain = ss.ellipap(n, ap, aa)
    self.h_aux = ss.ZerosPolesGain(zeros, poles, gain)
    return ApproximationErrorCode.OK
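
# Illustrative, standalone sketch (assumed example values, not part of the class above)
# of the scipy call the method wraps, to make the parameter mapping explicit:
# ap -> passband ripple rp [dB], aa -> stopband attenuation rs [dB], n -> order.
import scipy.signal as ss

n, ap, aa = 4, 1.0, 40.0                       # order, passband ripple [dB], stopband attenuation [dB]
zeros, poles, gain = ss.ellipap(n, ap, aa)     # normalised elliptic (Cauer) low-pass prototype
h_aux = ss.ZerosPolesGain(zeros, poles, gain)
print(h_aux)
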
if aprox_name == 'Butterworth':
    z, p, k = sig.buttap(this_order)
    # Rescale so that the attenuation at w = 1 equals this_ripple instead of 3 dB
    eps = np.sqrt(10**(this_ripple / 10) - 1)
    num, den = sig.zpk2tf(z, p, k)
    num, den = sig.lp2lp(num, den, eps**(-1 / this_order))
    z, p, k = sig.tf2zpk(num, den)
elif aprox_name == 'Chebyshev1':
    z, p, k = sig.cheb1ap(this_order, this_ripple)
elif aprox_name == 'Chebyshev2':
    # Note: cheb2ap's second argument is the minimum stopband attenuation (rs)
    z, p, k = sig.cheb2ap(this_order, this_ripple)
elif aprox_name == 'Bessel':
    z, p, k = sig.besselap(this_order, norm='mag')
elif aprox_name == 'Cauer':
    z, p, k = sig.ellipap(this_order, this_ripple, this_att)

num, den = sig.zpk2tf(z, p, k)
all_sys.append(sig.TransferFunction(num, den))
filter_names.append(aprox_name + '_ord_' + str(this_order)
                    + '_rip_' + str(this_ripple)
                    + '_att_' + str(this_att))

analyze_sys(all_sys, filter_names)
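
# Quick, self-contained check (assumed example values) that the eps**(-1/this_order)
# rescaling above places exactly this_ripple dB of attenuation at w = 1 rad/s,
# matching the normalisation of the other approximations.
import numpy as np
import scipy.signal as sig

this_order, this_ripple = 4, 0.5                     # assumed example values
eps = np.sqrt(10**(this_ripple / 10) - 1)
z, p, k = sig.buttap(this_order)
num, den = sig.zpk2tf(z, p, k)
num, den = sig.lp2lp(num, den, eps**(-1 / this_order))
_, h = sig.freqs(num, den, worN=[1.0])
print(-20 * np.log10(np.abs(h[0])))                  # ~0.5 dB
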
elif aprox_name == 'Chebyshev1':
    z, p, k = sig.cheb1ap(order2analyze, ripple)
elif aprox_name == 'Chebyshev2':
    z, p, k = sig.cheb2ap(order2analyze, ripple)
elif aprox_name == 'Bessel':
    z, p, k = sig.besselap(order2analyze, norm='mag')
elif aprox_name == 'Cauer':
    z, p, k = sig.ellipap(order2analyze, ripple, attenuation)

num, den = sig.zpk2tf(z, p, k)

# Denormalise to the cut-off frequency wc = 2*pi*fc
num, den = sig.lp2lp(num, den, 2 * np.pi * fc)

my_analog_filter = sig.TransferFunction(num, den)
my_analog_filter_desc = aprox_name + '_ord_' + str(order2analyze) + '_analog'

all_sys.append(my_analog_filter)
filter_names.append(my_analog_filter_desc)

analog_filters.append(my_analog_filter)
analog_filters_names.append(my_analog_filter_desc)
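
# Quick, self-contained check (assumed example values) that the lp2lp denormalisation
# above moves the prototype band edge w = 1 to fc: for a Chebyshev-I prototype the
# attenuation at f = fc equals the passband ripple.
import numpy as np
import scipy.signal as sig

order2analyze, ripple, fc = 4, 1.0, 1000.0           # assumed example values
z, p, k = sig.cheb1ap(order2analyze, ripple)
num, den = sig.zpk2tf(z, p, k)
num, den = sig.lp2lp(num, den, 2 * np.pi * fc)
_, h = sig.freqs(num, den, worN=[2 * np.pi * fc])
print(-20 * np.log10(np.abs(h[0])))                  # ~1.0 dB at f = fc
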
if aprox_name == 'Butterworth':
    z, p, k = sig.buttap(ii)
elif aprox_name == 'Chebyshev1':
    z, p, k = sig.cheb1ap(ii, ripple)
elif aprox_name == 'Chebyshev2':
    z, p, k = sig.cheb2ap(ii, ripple)
elif aprox_name == 'Bessel':
    z, p, k = sig.besselap(ii, norm='mag')
elif aprox_name == 'Cauer':
    z, p, k = sig.ellipap(ii, ripple, attenuation)

num, den = sig.zpk2tf(z, p, k)
all_sys.append(sig.TransferFunction(num, den))
filter_names.append(aprox_name + '_ord_' + str(ii))

analyze_sys(all_sys, filter_names)
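
# Self-contained sketch of what a comparison like the loop above could produce
# without the external analyze_sys() helper, plotting prototype magnitude responses
# directly with scipy/matplotlib (assumed example ripple and orders 2..5).
import numpy as np
import matplotlib.pyplot as plt
import scipy.signal as sig

ripple = 1.0                                          # assumed passband ripple [dB]
plt.figure()
for ii in range(2, 6):
    z, p, k = sig.cheb1ap(ii, ripple)
    num, den = sig.zpk2tf(z, p, k)
    w, h = sig.freqs(num, den, worN=np.logspace(-1, 1, 500))
    plt.semilogx(w, 20 * np.log10(np.abs(h)), label='Chebyshev1_ord_' + str(ii))
plt.xlabel('Angular frequency [rad/s]')
plt.ylabel('|H(jw)| [dB]')
plt.legend()
plt.show()
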
elif aprox_name == 'Bessel':
    if force_order > 0:
        this_order = force_order
    else:
        this_order = besselord(omega_p, omega_s, alfa_max, alfa_min, omega_d, max_pc_delay)
    z, p, k = sig.besselap(this_order, norm='mag')
elif aprox_name == 'Cauer':
    if force_order > 0:
        this_order = force_order
    else:
        this_order, _ = sig.ellipord(omega_p, omega_s, alfa_max, alfa_min, analog=True)
    z, p, k = sig.ellipap(this_order, alfa_max, alfa_min)

if tipo_de_filtro == "Pasa_Alto":
    # Map the normalised low-pass prototype to a high-pass (wo = 1)
    z, p, k = sig.lp2hp_zpk(z, p, k)

this_lti = sig.ZerosPolesGain(z, p, k).to_tf()

pretty_print_lti(this_lti)
analyze_sys([this_lti], [aprox_name], [ideal_filter])
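
# Standalone sketch of the Cauer path above with assumed example specs: minimum
# order via ellipord, normalised prototype via ellipap, then the low-pass to
# high-pass mapping with lp2hp_zpk.
import scipy.signal as sig

omega_p, omega_s = 1.0, 2.0          # assumed normalised passband / stopband edges [rad/s]
alfa_max, alfa_min = 0.5, 40.0       # assumed max passband / min stopband attenuation [dB]

this_order, _ = sig.ellipord(omega_p, omega_s, alfa_max, alfa_min, analog=True)
z, p, k = sig.ellipap(this_order, alfa_max, alfa_min)
z, p, k = sig.lp2hp_zpk(z, p, k)     # low-pass prototype -> high-pass at wo = 1 rad/s
this_lti = sig.ZerosPolesGain(z, p, k).to_tf()
print(this_lti)
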
def main():
    random.seed(7)
    np.random.seed(7)

    fs = 200        # Hz
    fo = 40         # Hz
    rp = 1          # dB
    rs = 80         # dB
    fpass = fo - 5  # Hz
    fstop = fo + 5  # Hz

    # N, fo = signal.cheb1ord(fpass, fstop, rp, rs, fs=fs)
    # N, fo = signal.cheb2ord(fpass, fstop, rp, rs, fs=fs)
    # N, fo = signal.buttord(fpass, fstop, rp, rs, fs=fs)
    N, fo = signal.ellipord(fpass, fstop, rp, rs, fs=fs)  # minimum order and natural frequency [Hz] (overwrites fo)

    # Pre-warp the natural frequency for the bilinear transform
    wp = 2 * fs * np.tan(np.pi * fo / fs)

    # prototype = signal.lti(*signal.cheb1ap(N, rp))
    # prototype = signal.lti(*signal.cheb2ap(N, rs))
    # prototype = signal.lti(*signal.buttap(N))
    prototype = signal.lti(*signal.ellipap(N=N, rp=rp, rs=rs))

    model = lowpass_to_lowpass(prototype, wo=wp)
    model = discretize(model, dt=1 / fs)
    print(model)

    # Use PSD output to white noise input PSD ratio as response
    window = signal.get_window('blackman', 256)
    psd = functools.partial(signal.welch, scaling='density', window=window, fs=fs)

    input_range = interval(lower_bound=-0.5, upper_bound=0.5)
    input_noise_power_density = 0.0005
    input_noise = np.random.normal(
        scale=np.sqrt(input_noise_power_density * fs / 2), size=512)
    assert np.max(input_noise) < ProcessingUnit.active().rinfo().max
    assert np.min(input_noise) > ProcessingUnit.active().rinfo().min
    input_noise = np.array([fixed(n) for n in input_noise])

    _, outputs = model.output(input_noise.astype(float), t=None)
    output_noise = outputs.T[0]
    freq, output_noise_power_density = psd(output_noise)
    expected_response = 10 * np.log10(
        output_noise_power_density / input_noise_power_density + 1 / inf)

    # Take quantization noise into account
    noise_floor = -6.02 * (ProcessingUnit.active().wordlength - 1) - 1.76
    expected_response = np.maximum(expected_response, noise_floor)

    # Formulate GP problem
    toolbox = solvers.gp.formulate(
        prototype,
        transforms=[
            functools.partial(lowpass_to_lowpass, wo=wp),
            functools.partial(discretize, dt=1 / fs)
        ],
        evaluate=functools.partial(
            evaluate,
            input_range=input_range,
            input_noise=input_noise,
            input_noise_power_density=input_noise_power_density,
            psd=psd,
            expected_response=expected_response),
        weights=(1., 1., 1., 1., -1., -1., -1., -1.),
        forms=[DirectFormI, DirectFormII],
        variants=range(1000),
        dtype=fixed,
        tol=1e-6)

    # Solve GP problem
    only_visualize = False
    if not only_visualize:
        with multiprocessing.Pool() as pool:
            try:
                toolbox.register('map', pool.map)
                stats = deap.tools.Statistics(
                    key=lambda code: code.fitness.values)
                stats.register('avg', np.mean, axis=0)
                stats.register('med', np.median, axis=0)
                stats.register('min', np.min, axis=0)
                pareto_front = deap.tools.ParetoFront()
                population = toolbox.population(512)
                population, logbook = solvers.gp.nsga2(population,
                                                       toolbox,
                                                       mu=512,
                                                       lambda_=128,
                                                       cxpb=0.5,
                                                       mutpb=0.05,
                                                       ngen=25,
                                                       stats=stats,
                                                       halloffame=pareto_front,
                                                       verbose=True)
            finally:
                toolbox.register('map', map)
        with open('front.pkl', 'wb') as f:
            pickle.dump(pareto_front, f)
        with open('logbook.pkl', 'wb') as f:
            pickle.dump(logbook, f)
    else:
        with open('front.pkl', 'rb') as f:
            pareto_front = pickle.load(f)
        with open('logbook.pkl', 'rb') as f:
            logbook = pickle.load(f)

    # Keep only feasible codes from the Pareto front
    codes = []
    criteria = []
    for code in pareto_front:
        crt = Criteria(*code.fitness.values)
        if crt.frequency_response_error >= inf:
            continue
        if crt.stability_margin <= 0:
            continue
        if crt.overflow_margin < 0:
            continue
        if crt.underflow_margin < 0:
            continue
        codes.append(code)
        criteria.append(crt)

    frequency_response_error = np.array(
        [crt.frequency_response_error for crt in criteria])
    arithmetic_snr = np.array([crt.arithmetic_snr for crt in criteria])
    stability_margin = np.array([crt.stability_margin for crt in criteria])
    memory_size = np.array([crt.size_of_memory for crt in criteria])
    memory_size_in_bytes = memory_size * ProcessingUnit.active().wordlength / 8

    plt.figure()
    gen, med = logbook.select('gen', 'med')
    crt = Criteria(*np.array(med).T)

    def fit_most_within_unit_interval(values):
        values = np.asarray(values)
        med = np.median(values)
        mad = np.median(np.absolute(values - med))
        if not mad:
            return values
        return (values - med) / (4. * mad)

    plt.plot(gen, fit_most_within_unit_interval(crt.overflow_margin),
             label='Med[$M_o$]')
    plt.plot(gen, fit_most_within_unit_interval(crt.underflow_margin),
             label='Med[$M_u$]')
    plt.plot(gen, fit_most_within_unit_interval(crt.stability_margin),
             label='Med[$M_s$]')
    plt.plot(gen, fit_most_within_unit_interval(crt.arithmetic_snr),
             label='Med[$SNR_{arit}$]')
    plt.plot(gen, fit_most_within_unit_interval(crt.frequency_response_error),
             label='Med[$E_2$]')
    plt.plot(gen, fit_most_within_unit_interval(crt.number_of_adders),
             label='Med[$N_a$]')
    plt.plot(gen, fit_most_within_unit_interval(crt.number_of_multipliers),
             label='Med[$N_m$]')
    plt.plot(gen, fit_most_within_unit_interval(crt.size_of_memory),
             label='Med[$N_e$]')
    plt.ylabel('Normalized evolution')
    plt.xlabel('Generations')
    plt.ylim([-10, 10])
    plt.legend(loc='upper right')
    plt.savefig('population_evolution.png')

    plt.figure()
    plt.plot(*logbook.select('gen', 'nunfeas'))
    plt.ylabel('Unfeasibles')
    plt.xlabel('Generations')
    plt.savefig('unfeasibles.png')

    def pareto2d(ax, x, y):
        ax.scatter(x, y, marker='o', facecolors='none', edgecolors='r')
        idx = np.argsort(x)
        ax.plot(x[idx], y[idx], 'k--')

    fig = plt.figure()
    ax = fig.add_subplot(3, 1, 1)
    idx = solvers.gp.argnondominated(-frequency_response_error, stability_margin)
    pareto2d(ax, frequency_response_error[idx], stability_margin[idx])
    ax.set_ylabel('Margin $M_s$')
    ax.invert_xaxis()

    ax = fig.add_subplot(3, 1, 2)
    idx = solvers.gp.argnondominated(-frequency_response_error, arithmetic_snr)
    pareto2d(ax, frequency_response_error[idx], arithmetic_snr[idx])
    ax.set_ylabel('$SNR_{arit}$ [dBr]')
    ax.invert_xaxis()

    ax = fig.add_subplot(3, 1, 3)
    idx = solvers.gp.argnondominated(-frequency_response_error, -memory_size_in_bytes)
    pareto2d(ax, frequency_response_error[idx], memory_size_in_bytes[idx])
    ax.set_xlabel('Error $E_2$ [dBr]')
    ax.set_ylabel('Total $N_e$ @ Q15 [bytes]')
    ax.invert_xaxis()
    ax.invert_yaxis()
    plt.savefig('pareto_fronts.png')

    fig = plt.figure()
    ax = fig.add_subplot(projection='3d')
    idx = solvers.gp.argnondominated(-frequency_response_error, arithmetic_snr,
                                     -memory_size_in_bytes)
    scatter = ax.scatter(frequency_response_error[idx],
                         arithmetic_snr[idx],
                         memory_size_in_bytes[idx],
                         c=stability_margin[idx],
                         cmap='jet_r')
    fig.colorbar(scatter, ax=ax, label='Margin $M_s$')
    ax.set_xlabel('Error $E_2$ [dBr]')
    ax.set_ylabel('$SNR_{arit}$ [dBr]')
    ax.set_zlabel('Total $N_e$ @ Q15 [bytes]')
    ax.invert_xaxis()
    ax.invert_zaxis()
    plt.savefig('pareto_sampling.png')

    # Compile two realizations picked by sorted frequency response error
    index = np.argsort(frequency_response_error)[0]
    implement = toolbox.compile(codes[index])
    optimized_implementation_a = implement(prototype)

    index = np.argsort(frequency_response_error)[3]
    implement = toolbox.compile(codes[index])
    optimized_implementation_b = implement(prototype)

    print(memory_size_in_bytes[np.argsort(frequency_response_error)])
    print(stability_margin[np.argsort(frequency_response_error)])
    print(arithmetic_snr[np.argsort(frequency_response_error)])

    pretty(optimized_implementation_a).draw('optimized_a.png')
    pretty(optimized_implementation_b).draw('optimized_b.png')

    func = signal_processing_function(optimized_implementation_a)
    output_noise = func(input_noise).T[0].astype(float)
    _, output_noise_power_density = psd(output_noise)
    optimized_implementation_a_response = \
        10 * np.log10(output_noise_power_density / input_noise_power_density + 1 / inf)

    func = signal_processing_function(optimized_implementation_b)
    output_noise = func(input_noise).T[0].astype(float)
    _, output_noise_power_density = psd(output_noise)
    optimized_implementation_b_response = \
        10 * np.log10(output_noise_power_density / input_noise_power_density + 1 / inf)

    show_biquad_cascade = True
    if show_biquad_cascade:
        biquad_cascade = series_diagram([
            DirectFormI.from_model(signal.dlti(section[:3], section[3:], dt=model.dt),
                                   dtype=fixed)
            for section in signal.zpk2sos(model.zeros, model.poles, model.gain)
        ], simplify=False)
        pretty(biquad_cascade).draw('biquad.png')

        func = signal_processing_function(biquad_cascade)
        output_noise = func(input_noise).T[0].astype(float)
        _, output_noise_power_density = psd(output_noise)
        biquad_cascade_response = \
            10 * np.log10(output_noise_power_density / input_noise_power_density + 1 / inf)

    plt.figure()
    plt.plot(freq, expected_response, label='Model')
    if show_biquad_cascade:
        plt.plot(freq, biquad_cascade_response, label='Biquad cascade')
    plt.plot(freq, optimized_implementation_a_response,
             label='Optimized realization A')
    plt.plot(freq, optimized_implementation_b_response,
             label='Optimized realization B')
    plt.xlabel('Frequency $f$ [Hz]')
    plt.ylabel('Response $|H(f)|$ [dBr]')
    plt.legend()
    plt.savefig('response.png')
    plt.show()
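
# Self-contained sketch (illustrative values, generic scipy filter in place of the
# project-specific model) of the noise-PSD response estimate used above: drive a
# known filter with white noise whose PSD is input_noise_power_density and compare
# the Welch-estimated response against freqz.
import numpy as np
import scipy.signal as signal

fs = 200                                              # Hz
rng = np.random.default_rng(7)
b, a = signal.ellip(4, 1, 80, 35, fs=fs)              # assumed example digital elliptic low-pass
input_noise_power_density = 0.0005
x = rng.normal(scale=np.sqrt(input_noise_power_density * fs / 2), size=4096)
y = signal.lfilter(b, a, x)

window = signal.get_window('blackman', 256)
freq, pyy = signal.welch(y, fs=fs, window=window, scaling='density')
estimated = 10 * np.log10(pyy / input_noise_power_density)

_, h = signal.freqz(b, a, worN=freq, fs=fs)
exact = 20 * np.log10(np.abs(h))

band = freq < 30                                      # compare inside the passband only
print(np.max(np.abs(estimated[band] - exact[band])))  # rough agreement, within a few dB
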