def ga_main01(out='result', clear_directory=False):
    ''' GA test & plot '''
    if clear_directory and os.path.isdir(out):
        shutil.rmtree(out)

    problem = zdt1
    epoch = 250
    save_trigger = lambda i: i == epoch  # save only at the last epoch

    with base.MOEAD_ENV(problem) as optimizer:
        with ut.stopwatch('main'):
            # Start the GA
            # Generate the initial population
            optimizer.init_population()

            # Evolution loop
            for i in range(1, epoch + 1):
                population = optimizer.advance()
                print('epoch:', i, 'popsize:', len(population), end='\r')
                # print(len(set([x.id for x in optimizer.get_individuals()])))
                if save_trigger(i):
                    optimizer.save(file=os.path.join(out, f'epoch{i}.pickle'))

        # elite = optimizer.get_elite()
        # history = optimizer.history
        # def best(pop):
        #     return [x for x in pop if x.rank == 1][0]()
        # bests = np.array([best(pop) for pop in history])
        # first_population = optimizer[0]

        last_population = optimizer.get_individuals()
        last_population.sort(key=lambda x: x.value)
        optimal_front = base.get_optomal_front(
            f'pareto_front/{problem.__name__}_front.json')
        print(len(set([ind.id for ind in last_population])))

        ### TEMP: check stat ###
        print("Convergence: ", base.convergence(last_population, optimal_front))
        print("Diversity: ",
              base.diversity(last_population, optimal_front[0], optimal_front[-1]))
        ########################

        ### TEMP: plot front ###
        x, y = np.array([ind.value for ind in last_population]).T
        plt.scatter(optimal_front[:, 0], optimal_front[:, 1], c='r')
        plt.scatter(x, y, c='b')
        plt.axis("tight")
        # plt.xlim((0, 1))
        # plt.ylim((0, 1))
        plt.show()
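# For reference, `zdt1` above is the standard two-objective ZDT1 benchmark.
# The sketch below shows its conventional evaluation (assuming a real-valued
# genome in [0, 1]); the actual `zdt1` consumed by base.MOEAD_ENV may use a
# different signature or return type.
import numpy as np

def zdt1_sketch(x):
    """ZDT1: f1 = x1,  g = 1 + 9*mean(x2..xn),  f2 = g*(1 - sqrt(f1/g)).

    Its Pareto-optimal front is g = 1, i.e. f2 = 1 - sqrt(f1), which is the
    curve that pareto_front/zdt1_front.json is compared against above.
    """
    x = np.asarray(x, dtype=float)
    f1 = float(x[0])
    g = 1.0 + 9.0 * np.sum(x[1:]) / (len(x) - 1)
    f2 = g * (1.0 - np.sqrt(f1 / g))
    return f1, f2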
def main():
    args = get_args()

    if args.run:
        exe_path = f'bin/{args.exe}'
        exe_ext = '.exe' if os.environ.get('OS') == 'Windows_NT' else '.out'
        if not exe_path.endswith(exe_ext):
            exe_path += exe_ext
        with ut.stopwatch('CFD'):
            run_cfd(exe_path, resume=args.resume)

    elif args.res:
        with ut.stopwatch('RES'):
            if args.dest:
                collect_result(args.dest)
            else:
                for d in os.listdir('.'):
                    if os.path.isdir(d) and os.path.isfile(d + '/in2d.txt'):
                        print(d)
                        collect_result(d)

    elif args.pack:
        pack_data()

    elif args.test:
        __test__()
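# `run_cfd` is defined elsewhere in the project. Purely as an illustration,
# a hypothetical stand-in that launches the selected executable could look
# like this; whether the real solver accepts a resume argument, and in what
# form, is an assumption.
import subprocess

def run_cfd_sketch(exe_path, resume=None):
    """Hypothetical wrapper: run the compiled CFD solver and wait for it."""
    cmd = [exe_path]
    if resume:
        cmd += ['--resume', str(resume)]  # assumed flag, not confirmed
    subprocess.run(cmd, check=True)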
def main():
    global DEVICE, PROGRESSBAR

    args = get_args()
    if args.gpu >= 0:
        DEVICE = f'/gpu:{args.gpu}'
    PROGRESSBAR = not args.no_progress

    if args.out:
        out = f'result/{args.out}'
    else:
        out = f'result/{FILENAME}'
    clear = args.clear

    if args.test:
        __test__()
        return

    if args.mode == '0':
        with ut.stopwatch('sample0'):
            train_main(out=out, clear=clear)
def main():
    args = get_args()

    if args.gpu:
        C_.DEVICE = args.gpu
    C_.SHOW_PROGRESSBAR = not args.no_progress

    # out = args.out
    out = f'result/{SRC_FILENAME}'

    if args.test:
        # print(vars(args))
        __test__()

    elif args.mode:
        taskname = 'task' + args.mode
        f_ = globals().get(taskname)
        if f_:
            with ut.stopwatch(taskname) as sw:
                f_(**vars(args), sw=sw)
def ga_main2(out='result', clear_directory=False):
    ''' GA test & plot (statistics over 100 runs) '''
    if clear_directory and os.path.isdir(out):
        shutil.rmtree(out)

    epoch = 250
    save_trigger = lambda i: i == epoch  # save only at the last epoch
    optimal_front = get_optomal_front()
    stat = []

    with NSGA2_ENV() as optimizer:
        for rep in range(100):
            with ut.stopwatch(f'epoch{epoch+1}'):
                optimizer.create_initial_population()
                for i in range(1, epoch + 1):
                    optimizer.advance()
                    print('epoch:', i, 'popsize:', len(optimizer.population),
                          end='\r')

            last_population = optimizer.get_individuals()
            last_population.sort(key=lambda x: x.value)

            conv = convergence(last_population, optimal_front)
            div = diversity(last_population, optimal_front[0],
                            optimal_front[-1])
            stat.append((conv, div))
            print("Convergence: ", conv)
            print("Diversity: ", div)

    print('=' * 20, 'Average', '=' * 20)
    print("Convergence: ", np.mean([x[0] for x in stat]))
    print("Diversity: ", np.mean([x[1] for x in stat]))
def ga_main02(out='result', clear_directory=False):
    ''' GA test & plot (average over 50 runs) '''
    if clear_directory and os.path.isdir(out):
        shutil.rmtree(out)

    problem = zdt1
    epoch = 250
    save_trigger = lambda i: i == epoch  # save only at the last epoch
    optimal_front = base.get_optomal_front('pareto_front/zdt1_front.json')
    stat = []

    for rep in range(50):
        with base.NSGA2_ENV(problem) as optimizer:
            with ut.stopwatch(f'epoch{epoch+1}'):
                optimizer.init_population()
                for i in range(1, epoch + 1):
                    population = optimizer.advance()
                    print('epoch:', i, 'popsize:', len(population), end='\r')
                optimizer.save(file=os.path.join(out, f'main02_rep{rep}.pkl'))

            last_population = optimizer.get_individuals()
            last_population.sort(key=lambda x: x.value)

            conv = base.convergence(last_population, optimal_front)
            div = base.diversity(last_population, optimal_front[0],
                                 optimal_front[-1])
            stat.append((conv, div))
            print("Convergence: ", conv)
            print("Diversity: ", div)

    print('=' * 20, 'Average', '=' * 20)
    print("Convergence: ", np.mean([x[0] for x in stat]))
    print("Diversity: ", np.mean([x[1] for x in stat]))
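# `base.convergence` and `base.diversity` are not shown here. The sketch
# below follows Deb et al.'s NSGA-II convergence and spread metrics (as also
# implemented in deap.benchmarks.tools) and assumes each individual exposes
# its objective vector as `ind.value`, matching the sort key used above.
# The project's own implementations may differ in detail.
import numpy as np

def convergence_sketch(front, optimal_front):
    """Mean Euclidean distance from each obtained solution to its nearest
    point on the reference Pareto front (lower is better)."""
    opt = np.asarray(optimal_front, dtype=float)
    dists = [np.min(np.linalg.norm(opt - np.asarray(ind.value), axis=1))
             for ind in front]
    return float(np.mean(dists))

def diversity_sketch(front, first_extreme, last_extreme):
    """Deb's spread metric over a front sorted along one objective:
    (d_f + d_l + sum|d_i - d_mean|) / (d_f + d_l + (N-1)*d_mean),
    where d_i are consecutive gaps and d_f, d_l are the distances to the
    extreme reference points."""
    pts = np.asarray([ind.value for ind in front], dtype=float)
    d = np.linalg.norm(pts[1:] - pts[:-1], axis=1)
    df = np.linalg.norm(pts[0] - np.asarray(first_extreme, dtype=float))
    dl = np.linalg.norm(pts[-1] - np.asarray(last_extreme, dtype=float))
    dm = d.mean() if d.size else 0.0
    denom = df + dl + d.size * dm
    return float((df + dl + np.sum(np.abs(d - dm))) / denom) if denom else 0.0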
def main(opts=None):
    parser = argparse.ArgumentParser()
    parser.add_argument('--cycle', '-n', type=int, default=None,
                        help='number of cycles')
    parser.add_argument('--out', '-o', type=str, default=None,
                        help='output directory name')
    parser.add_argument('--resume', '-r', type=str, default=None,
                        help='filename for resume calculation')
    args = parser.parse_args(args=opts)

    conf = read_inputfile('in2d.txt')

    if args.cycle:
        cycle = args.cycle
    else:
        cycle = conf.cycle
    if args.out:
        out = args.out
    else:
        out = conf.dest

    itr_max = conf.nitr
    save_interval = conf.save

    nx = np.array(conf.nx, dtype=np.int32)
    ny = np.array(conf.ny, dtype=np.int32)
    u = np.zeros((2, ny+2, nx+1), dtype=np.float64)  # velocity in x direction
    v = np.zeros((2, ny+1, nx+2), dtype=np.float64)  # velocity in y direction
    p = np.zeros((ny+2, nx+2), dtype=np.float64)     # pressure
    t = np.zeros((ny+2, nx+2), dtype=np.float64)     # temperature
    f = np.ones((ny+2, nx+2), dtype=np.float64)      # flow-field flags (0 => on the body, 1 => fluid)
    m = np.zeros((120,), dtype=np.uint8)             # buffer for solver messages
    flg = np.array(0, dtype=np.int32)                # convergence flag for the pressure iteration

    itr_hist = []
    save_count = 0

    register_functions()
    f_read_inputfile()
    f_initialize()

    with open('grid.csv') as fp:
        f[1:-1, 1:-1] = np.array(list(csv.reader(fp)), dtype=np.float64)

    if args.resume:
        u[0], v[0], p[:] = load_value(args.resume)

    for file in ut.iglobm('image/*.png'):
        os.remove(file)

    with ut.stopwatch('calc'):
        with ut.chdir(out):
            with tqdm(total=cycle, mininterval=1) as bar:
                for i in range(1, cycle+1):
                    f_calc_velociry(u[0], v[0], p, t, u[1], v[1], nx, ny)
                    f_bind_velocity(u[1], v[1], f, nx, ny)

                    for j in range(1, itr_max+1):
                        itr = np.array(j, dtype=np.int32)
                        m.fill(ord(' '))
                        f_calc_pressure(u[1], v[1], p, itr, flg, nx, ny, m)
                        f_bind_velocity(u[1], v[1], f, nx, ny)
                        if j % 100 == 0:
                            msg = ''.join(map(chr, m)).rstrip()
                            bar.write(f'cycle={i} {msg}')
                        if flg == 0:
                            if i % 100 == 0:
                                msg = ''.join(map(chr, m)).rstrip()
                                bar.write(f'cycle={i} {msg}')
                            break
                    itr_hist.append(j)

                    u[0] = u[1]
                    v[0] = v[1]

                    if i % save_interval == 0:
                        k = save_count
                        with ut.chdir('result'):
                            dump_data(f'out_{k:05d}.npy', u[0], v[0], p)
                        with ut.chdir('image'):
                            plot_w(vorticity(u[0], v[0]), f'out_{k:05d}.png')
                            plot_nitr(itr_hist, 'nitr.png')
                        save_count += 1

                    bar.update()
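# `vorticity` is defined elsewhere. As a sketch only: given the staggered
# (MAC) arrays allocated above (u at vertical cell faces, v at horizontal
# cell faces), vorticity can be evaluated at cell corners as dv/dx - du/dy.
# The unit grid spacing is an assumption; the real solver presumably reads
# it from in2d.txt.
import numpy as np

def vorticity_sketch(u, v, dx=1.0, dy=1.0):
    """omega = dv/dx - du/dy at cell corners.

    u: shape (ny+2, nx+1), v: shape (ny+1, nx+2)  ->  result (ny+1, nx+1)
    """
    dvdx = (v[:, 1:] - v[:, :-1]) / dx
    dudy = (u[1:, :] - u[:-1, :]) / dy
    return dvdx - dudy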
def analyze_out():
    grid_img = get_grid_image()

    # === Plot ===
    fig, axs_1 = plt.subplots(2, 2)
    fig.subplots_adjust(left=0, bottom=0, right=1, top=1,
                        wspace=0.1, hspace=0)
    axs = axs_1.flatten()

    def init_ax():
        for ax in axs:
            ax.tick_params(left=False, bottom=False,
                           labelleft=False, labelbottom=False)

    def clear_ax():
        for ax in axs:
            ax.cla()

    color_list = [(0, "blue"), (0.5, "white"), (1, "red")]
    cmap = plc.LinearSegmentedColormap.from_list('custom_cmap', color_list)
    # === Plot end ===

    key = "180730_4"
    data = "train"
    fps = 2
    log_dir = f"logs_{key}"
    video_file = f"cae_{key}_{data}_fps{fps}.mp4"
    batch_size = 20
    total_batch = 1000

    with run_env() as sess:
        model = create_model_set()
        batch_it = get_data()

        output = model["output"]
        feed_dict_f = model["feed_dict_f"]
        saver = model["saver"]

        errs = []

        def update():
            def f():
                with last_model(sess, saver, log_dir):
                    for x in batch_it(batch_size, data=data):
                        y = sess.run(output, feed_dict=feed_dict_f(x, False))
                        for i in range(batch_size):
                            err = np.sum((x[i] - y[i])**2) / 2
                            errs.append(err)
                            print("err:", err)
                            # yield err
                            if i % 5 > 0:
                                continue
                            clear_ax()
                            axs[0].imshow(x[i, :, :, 0], cmap=cmap, vmin=0.6, vmax=0.9)
                            axs[1].imshow(y[i, :, :, 0], cmap=cmap, vmin=0.6, vmax=0.9)
                            axs[2].imshow(x[i, :, :, 1], cmap=cmap, vmin=0.3, vmax=0.7)
                            axs[3].imshow(y[i, :, :, 1], cmap=cmap, vmin=0.3, vmax=0.7)
                            for ax in axs:
                                ax.imshow(grid_img)
                            axs[3].annotate(f"Error {err:.3f}", xy=(0.9, -0.1),
                                            xycoords="axes fraction", fontsize=10,
                                            horizontalalignment="right",
                                            verticalalignment="top")
                            yield
            gen = f()
            return lambda i: print(f"export: {i} / {total_batch}",
                                   end=" " * 10 + "\r") or gen.__next__()

        with ut.stopwatch("anim"):
            cv.show_anim(fig, update(), frames=total_batch // 5,
                         init_func=init_ax, file=video_file, fps=fps)

        print("mean error:", np.mean(errs))
        np.save(f"cae_errs_{key}_{data}.npy", errs)
        return

        # Unreachable: kept from an earlier error-only run without animation.
        with ut.stopwatch("err"):
            errs = [x for x in update()]
        np.save(f"errs_{key}_{data}", errs)
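# The per-sample reconstruction errors saved above can be inspected offline.
# A minimal follow-up sketch; the filename mirrors the key/data values
# hard-coded in analyze_out().
import numpy as np

errs = np.load("cae_errs_180730_4_train.npy")
print("samples:     ", errs.size)
print("mean error:  ", errs.mean())
print("median error:", np.median(errs))
print("max error:   ", errs.max())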
    args = parser.parse_args()
    return args


def main():
    global DEVICE, PROGRESSBAR

    args = get_args()
    if args.gpu >= 0:
        DEVICE = f'/gpu:{args.gpu}'
    PROGRESSBAR = not args.no_progress

    if args.out:
        out = f'result/{args.out}'
    else:
        out = f'result/{FILENAME}'
    clear = args.clear

    if args.test:
        __test__()
        return

    if args.mode == '0':
        with ut.stopwatch('sample0'):
            train_main(out=out, clear=clear)


if __name__ == '__main__':
    with ut.stopwatch("main"):
        main()