def main():
    """Run a read-eval-print loop over the calculator grammar.

    Passing ``-ast`` as the first CLI argument prints the parse tree
    instead of the evaluated result. ``exit`` or EOF ends the loop.
    """
    # Single-expression flag check replaces the original assign-then-set.
    show_ast = len(sys.argv) > 1 and sys.argv[1] == "-ast"
    while True:
        try:
            line = input("> ")
        except EOFError:
            break
        if line == "exit":
            break
        try:
            tree = parse(line)
            print(tree if show_ast else evaluate(tree))
        except Exception as err:
            # Report parse/eval failures without killing the REPL.
            print("Error: " + str(err))
def worker_clustering(_clustering_info: dict) -> None:
    """Download, extract data and forward the content.

    Fetches the payload at ``source_url``, spools it to a temporary file,
    parses it with ``custom_parser`` and POSTs the result to ``dest_url``.
    Prometheus counters record the outcome of each stage.

    NOTE(review): ``source_url``, ``source_id``, ``dest_url`` and
    ``b64_identity`` appear to be module-level names — confirm upstream.
    """
    thread = current_thread()
    logger.debug('%s: Worker started', thread.name)

    # Fetch data
    prometheus_metrics.METRICS['gets'].inc()
    try:
        resp = _retryable('get', source_url, stream=True)
    except requests.HTTPError as exception:
        logger.error(
            '%s: Unable to fetch source data for "%s": %s',
            thread.name, source_id, exception
        )
        prometheus_metrics.METRICS['get_errors'].inc()
        return
    prometheus_metrics.METRICS['get_successes'].inc()

    # Spool the response body to disk in chunks; delete=False because the
    # file is re-opened by custom_parser.parse after the handle closes.
    file_name = None
    try:
        with NamedTemporaryFile(delete=False) as tmp_file:
            file_name = tmp_file.name
            for chunk in filter(None, resp.iter_content(chunk_size=CHUNK)):
                tmp_file.write(chunk)
    except IOError as exception:
        logger.error(
            '%s: Unable to create temp file for "%s": %s',
            thread.name, source_id, exception
        )
        # BUG FIX: the original returned here and leaked the partially
        # written temp file (delete=False keeps it on disk).
        if file_name is not None:
            with suppress(IOError):
                os.remove(file_name)
        return

    try:
        # Unpack data and stream it
        # Build the POST data object
        data = {
            'id': source_id,
            'data': custom_parser.parse(file_name),
        }

        # Pass to next service
        prometheus_metrics.METRICS['posts'].inc()
        try:
            resp = _retryable(
                'post', f'http://{dest_url}', json=data,
                headers={"x-rh-identity": b64_identity}
            )
            prometheus_metrics.METRICS['post_successes'].inc()
        except requests.HTTPError as exception:
            logger.error(
                '%s: Failed to pass data for "%s": %s',
                thread.name, source_id, exception
            )
            prometheus_metrics.METRICS['post_errors'].inc()
    finally:
        # Cleanup — BUG FIX: runs even if custom_parser.parse raises,
        # where the original would have leaked the temp file.
        with suppress(IOError):
            os.remove(file_name)

    logger.debug('%s: Done, exiting', thread.name)
def test_failure_atom(self):
    """A bare unknown token must be rejected by the parser."""
    self.assertRaises(Exception, parse, "idk")
def test_failure_mult(self):
    """A multiplication missing its right operand must be rejected."""
    self.assertRaises(Exception, parse, "2*")
def test_failure_add(self):
    """An addition missing its right operand must be rejected."""
    self.assertRaises(Exception, parse, "2+")
def test_parentheses(self):
    """Parentheses force the sum to bind tighter than the product."""
    expected = Binary(Times, Binary(Plus, N(2), N(1)), N(3))
    self.assertEqual(parse("(2+1)*3"), expected)
def test_operator_precedence(self):
    """Without parentheses, multiplication binds tighter than addition."""
    expected = Binary(Plus, N(2), Binary(Times, N(1), N(3)))
    self.assertEqual(parse("2+1*3"), expected)
def test_add(self):
    """Additive operators parse to the matching binary node."""
    for text, op in (("1+3", Plus), ("1-3", Minus)):
        self.assertEqual(parse(text), Binary(op, N(1), N(3)))
+ elem(m1, n1, 0, 2) * elem(m2, n2, 2, 0)) \
    + 0.500 * d * elem(m1, n1, 1, 1) * elem(m2, n2, 1, 1) \
    + 0.250 * d * (elem(m1, n1, 1, 3) * elem(m2, n2, 0, 0)
                   + elem(m1, n1, 3, 1) * elem(m2, n2, 0, 0)
                   + elem(m1, n1, 0, 0) * elem(m2, n2, 1, 3)
                   + elem(m1, n1, 0, 0) * elem(m2, n2, 3, 1)
                   + elem(m1, n1, 0, 2) * elem(m2, n2, 1, 1)
                   + elem(m1, n1, 2, 0) * elem(m2, n2, 1, 1)
                   + elem(m1, n1, 1, 1) * elem(m2, n2, 0, 2)
                   + elem(m1, n1, 1, 1) * elem(m2, n2, 2, 0)) \
    + 0.0625 * d * (elem(m1, n1, 4, 0) * elem(m2, n2, 0, 0)
                    + elem(m1, n1, 0, 4) * elem(m2, n2, 0, 0)
                    + elem(m1, n1, 0, 0) * elem(m2, n2, 4, 0)
                    + elem(m1, n1, 0, 0) * elem(m2, n2, 0, 4)
                    + 2 * elem(m1, n1, 2, 0) * elem(m2, n2, 2, 0)
                    + 2 * elem(m1, n1, 0, 2) * elem(m2, n2, 0, 2))
# NOTE(review): the lines above continue a matrix-element expression whose
# beginning lies outside this chunk — presumably building the Hamiltonian H
# from elem(...) terms scaled by coupling d. Confirm against the full file.
end = timer()
# Report wall-clock time for the Hamiltonian construction.
print('hamilt: ', end - start)
# Persist H compressed so later runs can reload instead of recomputing.
np.savez_compressed('hamilt.npz', H=H)
return H
# print(H)


if __name__ == '__main__':
    # Sweep every (b, d, n) combination from the CLI; the fourth parsed
    # value is deliberately discarded. First argument 1 — meaning not
    # visible from this chunk; confirm against main()'s signature.
    B, D, N, _ = parse()
    for b in B:
        for d in D:
            for n in N:
                main(1, b, d, n)
def test_atom(self):
    """A lone integer literal parses to a number node."""
    node = parse("2")
    self.assertEqual(node, N(2))
import os

import custom_parser
from project_setup import Project
from genetic_algorithm import GeneticAlgorithm
from visualization import visualize
from project_stats import plot_evolution_over_time


if __name__ == "__main__":
    # Resolve sibling directories relative to this script, not the CWD.
    # Hoisted: the original recomputed dirname(abspath(__file__)) twice.
    base_dir = os.path.dirname(os.path.abspath(__file__))
    csv_path = os.path.join(base_dir, '..', 'data', 'activities.csv')
    component_list = custom_parser.parse(csv_path=csv_path)

    # Baseline planning and its fitness bounds before optimisation.
    project = Project(component_list)
    project.print_planning_days()
    print("Min:", project.calc_min_fitness())
    print("Max:", project.calc_max_fitness())

    # Evolve for 300 generations.
    gen_algo = GeneticAlgorithm(component_list)
    gen_algo.run(300)

    # Hoisted: the best project was fetched four times in the original;
    # no state changes between the calls, so one lookup is equivalent.
    best_project = gen_algo.get_best_project()
    print("Best project planning:")
    best_project.print_planning_days()
    best_project.print_to_csv('best_project')
    plot_evolution_over_time(best_project)

    visualization_path = os.path.join(base_dir, '..', 'example', 'index.html')
    visualize(best_project, visualization_path)
k = index[sort_idx].sum(axis=1) # n1 + n2 w = 1 / (k + 1) - 0.01 # bar widths r = [j for i in range(n + 1) for j in range(i)] x = k - 0.5 + r / (k + 1) # positions d_no = 0 # number of duplicate states clean_dir('eigenvectors') for i in range(eigenvectors.shape[0]): eigv_len = no_signif_el(eigenvectors[i]) minor_ticks = np.arange(0, eigv_len, 0.5) # Plot label label = 'E = ' + str(E[i]) + '\n' + '$\\left|' + \ str(ket[i][0]) + '\\,' + str(ket[i][1]) + '\\right\\rangle$\t' + \ ir_str[i] fname = str(ket[i][0]) + ' ' + str(ket[i][1]) # filename index_plot(eigenvectors[i], eigv_len, label, index, sort_idx, fname, d_no) energy_plot(eigenvectors[i], eigv_len, x, w, label, index, sort_idx, fname, d_no) if __name__ == '__main__': B, D, N = parse() for b in B: for d in D: for n in N: main(b, d, n)
('_max_e_' + str(max_energy) + '.pdf' if max_energy else '.pdf')
# NOTE(review): the line above completes a filename expression started
# outside this chunk (presumably an fname assignment) — confirm upstream.
# Cumulative level-spacing histogram I(s) with a fitted curve.
histogram(rel_sp, cumulative=True, bins=np.linspace(0, 4, count), fit=True,
          ylabel=r'$I(s)$', xlabel='$s$', weights=w, count=count,
          label=[rnames[i] for i in reps], fname=fname,
          ylim=(0, 1.05), figsize=(5.8, 3.7))

# Version
with open('version.txt', 'w') as f:
    f.write('1.5.0')

# Return to the scripts directory; earlier code evidently chdir'd away.
os.chdir("../../Scripts")


if __name__ == '__main__':
    # Full parameter sweep, including one run per max-energy cutoff.
    B, D, N, delta_n, st_epsilon, lvl_epsilon, reselect, cut, bin_size, \
        max_energy = parse(advanced=True, select=True, hist_bin=True,
                           max_e=True)
    for b in B:
        for d in D:
            for n in N:
                for max_e in max_energy:
                    main(b, d, n, delta_n, st_epsilon, lvl_epsilon,
                         reselect, cut, bin_size, max_e)
# NOTE(review): this chunk continues a function started above it — f is an
# open LaTeX file handle and files/start come from the unseen part.
# Four-column longtable: the energy level plus one column per *.dat source.
f.write("\\begin{longtable}{" + " | ".join(["c"] * 4) + "}\n")
f.write("energy levels & rebde.dat & reuna.dat & reuns.dat\t\\\\\n")
f.write("\\hline\n\\endfirsthead\n")
for row in range(files.shape[1]):
    # Find the representation of the energy level
    c = 0
    for x in range(1, 4):
        if files[x + 3][row]:
            c = x
    # Colour the level by its representation, then each file's value by
    # its own flag column (offset +6) — 18 decimals throughout.
    line = color(c, True) + '{:.18f}'.format(files[0][row]) + " & " + \
        " & ".join(color(x, files[x + 6][row]) +
                   '{:.18f}'.format(files[x][row]) for x in range(1, 4))
    f.write('\t' + line + " \\\\\n")
f.write("\\end{longtable}")
f.write("\n\n\\end{document}")

# Return to the scripts directory and report total wall-clock time.
os.chdir("../../Scripts")
end = timer()
print('total: ', end - start)


if __name__ == '__main__':
    # Sweep every (b, d, n) combination with the advanced CLI options.
    B, D, N, delta_n, st_epsilon, lvl_epsilon = parse(advanced=True)
    for b in B:
        for d in D:
            for n in N:
                main(b, d, n, delta_n, st_epsilon, lvl_epsilon)
def test_failure_parentheses(self):
    """An unclosed parenthesis is a parse error."""
    self.assertRaises(Exception, parse, "2+(3*5")
# NOTE(review): this chunk continues a plotting function started above it —
# ax, fig, B, N, max_energy and energy_plot come from the unseen part.
ax.set_ylabel('$\\alpha$')
# x-axis is either the energy difference or the field strength B.
if energy_plot:
    ax.set_xlabel('$\\Delta E$')
else:
    ax.set_xlabel('$B$')
ax.set_ylim([0, 1.1])
if not energy_plot:
    ax.set_xlim([0, 1])
ax.legend()
# plt.show()
plt.tight_layout(pad=0.3)
if energy_plot:
    # Filename embeds the list of B values, two significant digits each.
    fig.savefig('../Statistics/alpha_e_B[' +
                ', '.join('{:.2}' for i in B).format(*B) + ']_N' + str(N) +
                '.pdf', dpi=400)
else:
    # Append the max-energy cutoff only when one was actually given.
    fig.savefig(
        '../Statistics/alpha_N' + str(N) +
        ('_max_e_' + str(max_energy) + '.pdf'
         if len(max_energy) > 1 or max_energy[0] else '.pdf'),
        dpi=400)
plt.close()


if __name__ == '__main__':
    B, D, N, max_energy, energy_plot, small_plot = \
        parse(max_e=True, e_plot=True, s_plot=True)
    main(B, D, N, max_energy, energy_plot, small_plot)
def test_mult(self):
    """Multiplicative operators parse to the matching binary node."""
    for text, op in (("1*3", Times), ("1/3", Div)):
        self.assertEqual(parse(text), Binary(op, N(1), N(3)))