################################################### ## test_summaries_img = [0.0]*len(ind_img) # datasets_img) disp_out = np.empty((WIDTH * HEIGHT), dtype=np.float32) dbg_cost_nw = np.empty((WIDTH * HEIGHT), dtype=np.float32) dbg_cost_w = np.empty((WIDTH * HEIGHT), dtype=np.float32) dbg_d = np.empty((WIDTH * HEIGHT), dtype=np.float32) dbg_avg_disparity = np.empty((WIDTH * HEIGHT), dtype=np.float32) dbg_gt_disparity = np.empty((WIDTH * HEIGHT), dtype=np.float32) dbg_offs = np.empty((WIDTH * HEIGHT), dtype=np.float32) for ntest in ind_img: # datasets_img): dataset_img = qsf.readImageData(image_data=image_data, files=files, indx=ntest, cluster_radius=CLUSTER_RADIUS, tile_layers=TILE_LAYERS, tile_side=TILE_SIDE, width=IMG_WIDTH, replace_nans=True) sess.run(iterator_tt.initializer, feed_dict={ corr2d_train_placeholder: dataset_img['corr2d'], target_disparity_train_placeholder: dataset_img['target_disparity'], gt_ds_train_placeholder: dataset_img['gt_ds'] }) for start_offs in range(0, disp_out.shape[0], BATCH_SIZE): end_offs = min(start_offs + BATCH_SIZE, disp_out.shape[0])
# Inference pass over every image: set up TensorBoard summaries and an
# optional text log, then run stage 1 of the inferred network per image
# (later stages presumably follow outside this chunk).
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter(ROOT_PATH, sess.graph)
lf = None
if LOGPATH:
    # NOTE(review): opened without 'with'/close visible here - confirm the
    # file is closed later in the script.
    lf=open(LOGPATH,"w") #overwrite previous (or make it "a"? for append)
for nimg,_ in enumerate(image_data):
    # cluster_radius=0: inference uses single tiles (no neighbor clusters),
    # unlike training which uses CLUSTER_RADIUS.
    dataset_img = qsf.readImageData(
        image_data =     image_data,
        files =          files,
        indx =           nimg,
        cluster_radius = 0, # CLUSTER_RADIUS,
        tile_layers =    TILE_LAYERS,
        tile_side =      TILE_SIDE,
        width =          IMG_WIDTH,
        replace_nans =   True,
        infer =          True,
        keep_gt =        True) # to generate same output files
    img_corr2d = dataset_img['corr2d']           # (?,324)
    img_target = dataset_img['target_disparity'] # (?,1)
    img_ntile =  dataset_img['ntile'].reshape([-1]) # (?) - 0...78k int32
    #run first stage network
    qsf.print_time("Running inferred model, stage1", end=" ")
    # stage1done is only run for its side effect (presumably caching
    # intermediate results for stage 2); the fetched value is discarded.
    _ = sess.run([stage1done],
                 feed_dict={ph_corr2d:           img_corr2d,
                            ph_target_disparity: img_target,
                            ph_ntile:            img_ntile })
    qsf.print_time("Done.")