def write_grid(self, grid, path):
    # check for support
    if grid._class not in ("Grid", "FlagGrid", "LevelsetGrid", "MACGrid"):
        assert False, "grid._class {} is not supported".format(grid._class)
        return
    # make sure the output directory exists
    make_dir(os.path.dirname(path))
    # find buffer handle
    handle = self._get_buffer_handle(grid)
    vec3content = False
    # copy grid to buffer
    if grid._class == "Grid":
        if grid._T == "Real":
            copyGridToArrayReal(grid, self._buffer_list[handle])
        elif grid._T == "Vec3":
            copyGridToArrayVec3(grid, self._buffer_list[handle])
            vec3content = True
    elif grid._class == "FlagGrid":
        copyGridToArrayFlag(grid, self._buffer_list[handle])
    elif grid._class == "LevelsetGrid":
        copyGridToArrayLevelset(grid, self._buffer_list[handle])
    elif grid._class == "MACGrid":
        copyGridToArrayMAC(grid, self._buffer_list[handle])
        vec3content = True
    else:
        print("Grid is not supported")
        self._print_grid_info(grid)
        assert False
    # store buffer in npz
    grid_desc = self._get_grid_description(grid)
    np_grid = self._buffer_list[handle]
    is2d = (np_grid.shape[0] == 1)
    if vec3content and is2d:
        np_grid = np_grid[..., 0:2]  # remove z component of vectors
    if is2d:
        np_grid = np.squeeze(np_grid, axis=0)  # remove z axis
    np.savez_compressed(path, data=np_grid, header=grid_desc)
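# Illustrative sketch, not part of the original module: how the .npz archives written by
# write_grid above could be read back. The helper name read_grid_npz is ours, and we
# assume np.savez_compressed appended the ".npz" suffix to the given path.
def read_grid_npz(path):
    """Load a grid archive written by write_grid and return (data, header)."""
    if not path.endswith(".npz"):
        path += ".npz"  # np.savez_compressed adds the suffix automatically
    with np.load(path, allow_pickle=True) as archive:
        data = archive["data"]
        # the header was pickled into a 0-d object array, hence .item()
        header = archive["header"].item()
    return data, header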
def on_sim_step(scene, t):
    if args.no_output:
        return

    # screenshot of the mantaflow gui
    if t == args.warmup and scene.show_gui:
        scene._gui.screenshot(output_path + "/screenshots/screen_{:06d}.jpg".format(scene.file_num))

    # save all steps for rendering
    if args.output_render_data:
        if t == 0:
            make_dir(output_path + "/scene_{:04d}/uni/".format(stored_scenes_num))
            scene.pressure.save(output_path + "/scene_{:04d}/uni/ref_flipParts_0000.uni".format(stored_scenes_num))
        scene.pp.save(output_path + "/scene_{:04d}/uni/flipParts_{:04d}.uni".format(stored_scenes_num, t))
        scene.phi_fluid.save(output_path + "/scene_{:04d}/uni/levelset_{:04d}.uni".format(stored_scenes_num, t))

    # some steps should not be written to file
    if t < args.warmup or t % args.skip_steps != 0:
        return

    # zoom
    if zoom_resolution:
        if args.dimension == 2:
            zoom_mask = [1.0, args.zoom, args.zoom, 1.0]
            np_real_temp = np.empty(shape=[1, args.resolution, args.resolution, 1], order='C')
            np_vec_temp = np.empty(shape=[1, args.resolution, args.resolution, 3], order='C')
        else:
            zoom_mask = [1.0, args.zoom, args.zoom, args.zoom, 1.0]
            np_real_temp = np.empty(shape=[1, args.resolution, args.resolution, args.resolution, 1], order='C')
            np_vec_temp = np.empty(shape=[1, args.resolution, args.resolution, args.resolution, 3], order='C')

    # write grids to a file
    scene.file_num = scene.file_num + 1
    output_name = "{}_{:07d}".format(scene.resolution, scene.file_num)

    for grid_name in grids:
        # it was already checked if the attribute is present in the scene
        grid = getattr(scene, grid_name)
        if zoom_resolution:
            grid_zoom = getattr(scene, grid_name + "_zoom")
            grid_type = MantaGridType(grid.getGridType())
            if grid_type in (MantaGridType.TypeReal, MantaGridType.TypeLevelset, MantaGridType.TypeLevelsetReal):
                copyGridToArrayReal(grid, np_real_temp)
                # if grid_name == "pressure":
                #     fig, ax1 = plt.subplots(1, 1, figsize=(8, 8))
                #     fig.tight_layout(pad=0.1)
                #     im1 = ax1.imshow(np_real_temp[0,:,:,0], vmin=None, vmax=None, cmap='viridis', interpolation='nearest')
                #     divider = make_axes_locatable(ax1)
                #     cax = divider.append_axes('right', size='5%', pad=0.05)
                #     fig.colorbar(im1, cax=cax, orientation='vertical')
                #     ax1.set_title("Reference", fontsize=20)
                #     ax1.set_xlim(0, np_real_temp.shape[2]-1)
                #     ax1.set_ylim(0, np_real_temp.shape[1]-1)
                #     ax1.get_xaxis().set_visible(True)
                #     ax1.get_yaxis().set_visible(True)
                #     plt.show(block=True)

                # pressure: force per area (m^2)
                if "pressure" in grid_name:
                    scale_factor = args.zoom * args.zoom
                # density: mass per volume (m^3)
                elif "density" in grid_name:
                    scale_factor = args.zoom * args.zoom * args.zoom
                else:
                    scale_factor = args.zoom
                np_zoomed = ndimage.zoom(np_real_temp, zoom_mask) * scale_factor
                copyArrayToGridReal(np_zoomed, grid_zoom)
            elif grid_type == MantaGridType.TypeInt:
                assert False, "Not supported"
            elif grid_type in (MantaGridType.TypeVec3, MantaGridType.TypeMAC, MantaGridType.TypeMACVec3):
                copyGridToArrayVec3(grid, np_vec_temp)
                np_zoomed = ndimage.zoom(np_vec_temp, zoom_mask) * args.zoom
                copyArrayToGridVec3(np_zoomed, grid_zoom)
            # save the zoomed grid to a uni file (will later be converted to .npz)
            grid_zoom.save(output_path + "/" + grid_name + "/" + output_name + ".uni")
            grid.save(output_path + "/" + grid_name + "_orig/" + output_name + ".uni")
        else:
            # save normally: write the grid to a uni file (will later be converted to .npz)
            grid.save(output_path + "/" + grid_name + "/" + output_name + ".uni")
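# Illustrative sketch, not part of the original script: scipy.ndimage.zoom takes one zoom
# factor per axis, which is why zoom_mask above keeps the batch and channel axes at 1.0
# and only rescales the spatial axes. The shapes used here are made up for the example.
def _zoom_mask_example():
    import numpy as np
    from scipy import ndimage
    np_real_2d = np.random.rand(1, 64, 64, 1)              # [z=1, y, x, channels]
    zoomed = ndimage.zoom(np_real_2d, [1.0, 2.0, 2.0, 1.0])
    return zoomed.shape                                     # -> (1, 128, 128, 1)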
nargs='+', help="Custom legend colors to replace the default with.")

# Arguments
#----------------------------------------------------------------------------
args = parser.parse_args()

output_path = "."
if args.output:
    output_path = str(pathlib.Path(args.output).resolve())
else:
    output_path = find_dir("predictions", 2)
predictions_dir = output_path + "/"
output_path += "/" + args.comparison_dir + args.name + "/"
#output_path = get_uniqe_path(output_path) + "/"
output_path += "levelset/"
make_dir(output_path)
print("Output path: {}".format(output_path))

with open(output_path + "arguments.json", 'w') as f:
    json.dump(vars(args), f, indent=4)

graph_limits = args.y_limits
if graph_limits is None or len(graph_limits) != 4:
    graph_limits = [None] * 4
else:
    for i, l in enumerate(graph_limits):
        graph_limits[i] = float(l)

#----------------------------------------------------------------------------
#predictions_dir = find_dir("predictions", 2) + "/"
assert os.path.exists(predictions_dir), (
#--------------------------------
if not args.no_output:
    # get the datasets directory, in which all datasets should reside
    if not args.datasets_path:
        output_path = find_dir("datasets", 2)
    else:
        output_path = args.datasets_path
    assert os.path.exists(output_path), ("Datasets directory {} does not exist".format(output_path))

    # set output parent directory name
    output_path += "/" + args.name
    if debug:
        output_path += "_DEBUG"
    output_path = get_uniqe_path(output_path)

    # create the directories
    for grid_name in args.grids:
        make_dir(output_path + "/" + grid_name)
        if zoom_resolution:
            make_dir(output_path + "/" + grid_name + "_orig")
    if args.gui:
        make_dir(output_path + "/screenshots/")
    if args.output_render_data:
        make_dir(output_path + "/uni/")

    # keep track of stored scenes
    stored_scenes_num = 0
    print("Output path: {}".format(output_path))

# Dataset description
#--------------------------------
description = {}
description["version"] = git_version
description["creation_date"] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
# Data Set
#--------------------------------
if not args.datasets_path:
    dataset_path = find_dir("datasets", 2)
else:
    dataset_path = args.datasets_path
assert os.path.exists(dataset_path), (
    "Datasets directory {} does not exist".format(dataset_path))

# set the dataset directory name
dataset_path += "/" + args.dataset + "/"

if args.video:
    dataset_video_path = dataset_path + "dataset_display/"
    make_dir(dataset_video_path)

print("Reading data set from: {}".format(dataset_path))

# Description
#--------------------------------
dataset_desc = load_description(dataset_path)

# Grids
#--------------------------------
# keep only the requested grids that are actually present in the dataset
dataset_grids = [grid for grid in args.grids if grid in dataset_desc["grids"]]
args = parser.parse_args()

if args.benchmark != -1:
    if args.name == "":
        args.name = args.project + "_Bench{}".format(args.benchmark)
    else:
        args.name += "_Bench{}".format(args.benchmark)

output_path = "."
if args.output:
    output_path = str(pathlib.Path(args.output).resolve())
else:
    output_path = find_dir("predictions", 2)
output_path += "/" + args.name
output_path = get_uniqe_path(output_path) + "/"
make_dir(output_path)
print("Output path: {}".format(output_path))

if args.output_render_data or args.output_benchmark_data:
    make_dir(output_path + "uni/")
if args.output_sequence:
    output_seq_path = output_path + "sequence/"
    make_dir(output_seq_path)

description = {"args": vars(args)}

#----------------------------------------------------------------------------
project_dir = find_dir("projects", 2) + "/" + args.project
assert os.path.exists(project_dir), (
    "The specified project directory does not exist or could not be found")
def evaluate_scenes(bench):
    # Dataset Property File
    #----------------------------------------------------------------------------
    dataset_properties = None
    if args.property_file_path:
        dataset_properties = load_properties(args.property_file_path)

    # Handle input directories
    #----------------------------------------------------------------------------
    reference_dir = predictions_dir + args.reference + args.suffix.format(bench)
    assert os.path.exists(reference_dir), (
        "The specified reference directory ({}) does not exist or could not be found".format(reference_dir))
    print(reference_dir)
    reference_desc = load_description(reference_dir)
    print(reference_desc)
    reference_dir += "/uni/"
    assert os.path.exists(reference_dir), (
        "The specified reference directory ({}) does not exist or could not be found".format(reference_dir))

    comparison_dirs = []
    comparison_desc = []
    for comp in args.comparison:
        comp_dir = predictions_dir + args.comparison_dir + comp + args.suffix.format(bench)
        assert os.path.exists(comp_dir), (
            "The specified comparison directory ({}) does not exist or could not be found".format(comp_dir))
        comparison_desc.append(load_description(comp_dir))
        print(comparison_desc[-1])
        comp_dir += "/uni/"
        print(comp_dir)
        comparison_dirs.append(comp_dir)

    # Scene
    #----------------------------------------------------------------------------
    # use the warmup step count of the comparisons (the last one wins) and ask before
    # continuing if they do not all agree
    warmup_steps = reference_desc["args"]["warmup_steps"]
    first_desc_warmup_step = comparison_desc[0]["args"]["warmup_steps"]
    for desc in comparison_desc:
        warmup_steps = desc["args"]["warmup_steps"]
        if desc["args"]["warmup_steps"] != first_desc_warmup_step:
            if input("Warmup steps do not match. Continue? y/n") == "n":
                return

    res = reference_desc["scene"]["resolution"]
    scene = PressureComparisonScene(res, reference_desc["scene"]["dimension"])
    np_comp = np.empty(shape=[1, res, res, res, 1], order='C')
    np_comp_grad = np.empty(shape=[1, res, res, res, 3], order='C')
    # track the gradient value range across frames
    np_comp_grad_min = float("inf")
    np_comp_grad_max = float("-inf")

    # Main Loop
    #----------------------------------------------------------------------------
    # metrics store data sequentially for each comparison
    mae = [None] * len(comparison_dirs)
    mse = [None] * len(comparison_dirs)
    psnr_grad = [None] * len(comparison_dirs)
    psnr = [None] * len(comparison_dirs)

    # loop
    frame = warmup_steps
    endFrame = len(glob.glob(reference_dir + "*.uni"))
    # note: comp_dir still points at the last comparison directory from the loop above
    endFrame_compdir = len(glob.glob(comp_dir + "*.uni"))
    endFrame = min(endFrame, endFrame_compdir)
    if args.max_frames > 0:
        endFrame = min(endFrame, frame + args.max_frames)

    while frame < endFrame:
        reference_path = reference_dir + args.reference_file_name.format(frame)
        if os.path.isfile(reference_path):
            # store in ref grid
            scene.reference.load(reference_path)
            getGradientGrid(scene.reference, scene.pressure_gradient_ref)

            # comparison field
            for i, comp_dir in enumerate(comparison_dirs):
                comp_path = comp_dir + args.comparison_file_name.format(frame)
                if os.path.isfile(comp_path):
                    # store in comp grid
                    scene.comparison.load(comp_path)
                    getGradientGrid(scene.comparison, scene.pressure_gradient_comp)

                    # store example images of predictions
                    if args.output_sequence:
                        pressure_example_dir = output_path + args.comparison[i] + "/bench_{}/".format(bench)
                        if not os.path.exists(pressure_example_dir):
                            make_dir(pressure_example_dir)

                        # pressure storage
                        copyGridToArrayReal(scene.comparison, np_comp)
                        np_selected_slice = np.squeeze(np_comp[0, res // 2])[::-1, ::-1]
                        #print("Comp Shape: {} -> {}".format(np_comp.shape, np_selected_slice.shape))
                        scipy.misc.toimage(np_selected_slice, cmin=-0.5, cmax=1.5).save(
                            pressure_example_dir + "pressure_{}.png".format(frame))

                        # gradient storage
                        copyGridToArrayVec3_New(scene.pressure_gradient_comp, np_comp_grad)
                        np_selected_slice = np.squeeze(np_comp_grad[0, res // 2])[::-1, ::-1]
                        np_comp_grad_min = min(np_comp_grad_min, np_selected_slice.min())
                        np_comp_grad_max = max(np_comp_grad_max, np_selected_slice.max())
                        print("Comp Shape: {} -> {}; {} {}".format(
                            np_comp_grad.shape, np_selected_slice.shape,
                            np_comp_grad_min, np_comp_grad_max))
                        scipy.misc.toimage(np_selected_slice, cmin=-0.2, cmax=0.15).save(
                            pressure_example_dir + "pressure_grad_{}.png".format(frame))

                    # calculate metrics
                    # mae
                    mean_abs = meanAbsoluteError(scene.reference, scene.comparison)
                    if mae[i] is None:
                        mae[i] = [mean_abs]
                    else:
                        mae[i].append(mean_abs)
                    # mse
                    mean_squared = meanSquaredError(scene.reference, scene.comparison)
                    if mse[i] is None:
                        mse[i] = [mean_squared]
                    else:
                        mse[i].append(mean_squared)
                    # psnr_grad
                    max_pressure_grad_ref = (dataset_properties["pressure"]["Max"]["Total"]
                                             - dataset_properties["pressure"]["Min"]["Total"])
                    psnr_grad_temp = peakSignalToNoiseRatioVec3(
                        scene.pressure_gradient_ref, scene.pressure_gradient_comp,
                        0.0, max_pressure_grad_ref)
                    if psnr_grad[i] is None:
                        psnr_grad[i] = [psnr_grad_temp]
                    else:
                        psnr_grad[i].append(psnr_grad_temp)
                    # psnr
                    psnr_temp = peakSignalToNoiseRatio(
                        scene.reference, scene.comparison,
                        dataset_properties["pressure"]["Min"]["Total"],
                        dataset_properties["pressure"]["Max"]["Total"])
                    if psnr[i] is None:
                        psnr[i] = [psnr_temp]
                    else:
                        psnr[i].append(psnr_temp)
                #else:
                #    assert False, ("Comparison file ({}) not found".format(comp_path))

        frame += 1

    return mae, mse, psnr_grad, psnr
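# Illustrative sketch, not part of the original script: numpy equivalents of the metrics
# collected above, assuming the mantaflow plugins (meanAbsoluteError, meanSquaredError,
# peakSignalToNoiseRatio) follow the standard definitions. PSNR uses the value range
# (max - min) as the peak signal, mirroring how the dataset properties are passed in.
def _metrics_example(reference, comparison, value_min, value_max):
    import numpy as np
    diff = np.asarray(reference, dtype=np.float64) - np.asarray(comparison, dtype=np.float64)
    mae = np.mean(np.abs(diff))        # mean absolute error
    mse = np.mean(diff ** 2)           # mean squared error
    value_range = value_max - value_min
    psnr = 10.0 * np.log10(value_range ** 2 / mse) if mse > 0 else np.inf
    return mae, mse, psnr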
        # it was already checked if the attribute is present in the scene
        grid = getattr(scene, grid_name)
        # save the grid to a npz file
        grid_io.write_grid(grid, output_path + "/" + grid_name + "_" + output_name)

# reset seed -> same starting conditions for scene creation -> reproducibility
np.random.seed(seed=args.seed)
random.seed(args.seed)

for scene_num in range(args.num_scenes):
    # Output dirs, one per simulation
    #--------------------------------
    if not args.no_output:
        output_path = dataset_path + "/sim"
        output_path = get_unique_path(output_path)
        make_dir(output_path)
        if args.gui:
            make_dir(output_path + "/screenshots/")

    # keep track of stored scenes
    stored_scenes_num = 0

    # Dataset description
    #--------------------------------
    description = {}
    description["version"] = "v0.01"
    description["creation_date"] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    description["grids"] = grids
    description["resolution_x"] = args.resolution_x
    description["resolution_y"] = args.resolution_y
    description["resolution_z"] = args.resolution_z
    description.update(vars(args))  # insert args