def inference(args, epoch, data_loader, model, offset=0):
    """Run one inference pass over ``data_loader`` and optionally save flows.

    Predicted flows are written one ``.flo`` file per frame under
    ``<args.save>/inference/<name>.epoch-<epoch>-flow-field/<video>/<frame>.flo``.
    Loss averages (norms of the predicted flow when no ground truth exists)
    are reported through the tqdm progress bar.  Returns None.
    """
    model.eval()
    if args.save_flow or args.render_validation:
        flow_folder = "{}/inference/{}.epoch-{}-flow-field".format(
            args.save, args.name.replace('/', '.'), epoch)
        if not os.path.exists(flow_folder):
            os.makedirs(flow_folder)

    # A negative inference_n_batches means "run the whole loader".
    args.inference_n_batches = np.inf if args.inference_n_batches < 0 else args.inference_n_batches

    progress = tqdm(data_loader, ncols=100,
                    total=np.minimum(len(data_loader), args.inference_n_batches),
                    desc='Inferencing ', leave=True, position=offset)

    statistics = []
    total_loss = 0
    for batch_idx, (data, target, video_name, frame_id) in enumerate(progress):
        if args.cuda:
            # FIX: `async=True` is a SyntaxError on Python >= 3.7 (`async` is a
            # keyword); the replacement keyword argument is `non_blocking=True`.
            data, target = [d.cuda(non_blocking=True) for d in data], \
                           [t.cuda(non_blocking=True) for t in target]
        data, target = [Variable(d) for d in data], [Variable(t) for t in target]

        # When ground-truth flows are not available for the inference dataset,
        # the targets are set to all zeros; the "losses" are then L1/L2 norms
        # of the computed flows, depending on the loss norm passed in.
        with torch.no_grad():
            if not args.no_loss:
                losses, output = model(data[0], target[0], inference=True)
            else:
                output = model(data[0], [], inference=True)
                losses = None

        if losses is not None:
            losses = [torch.mean(loss_value) for loss_value in losses]
            loss_val = losses[0]  # first loss is the headline metric
            # FIX: `tensor.data[0]` was removed in modern PyTorch; use .item().
            total_loss += loss_val.item()
            loss_values = [v.item() for v in losses]
            # Gather loss_labels explicitly; returning them from the model hits
            # a recursion-limit error as gather looks for variables to gather.
            loss_labels = list(model.module.loss.loss_labels)
            statistics.append(loss_values)
            progress.set_description(
                'Inference Averages for Epoch {}: '.format(epoch) +
                tools.format_dictionary_of_losses(
                    loss_labels, np.array(statistics).mean(axis=0)))
        progress.update(1)

        if args.save_flow or args.render_validation:
            for i in range(args.inference_batch_size):
                _pflow = output[i].data.cpu().numpy().transpose(1, 2, 0)
                # One sub-folder per video; the frame id names the .flo file.
                if not os.path.isdir(join(flow_folder, video_name[0][i])):
                    os.mkdir(join(flow_folder, video_name[0][i]))
                flow_utils.writeFlow(
                    join(flow_folder, video_name[0][i], frame_id[0][i] + '.flo'),
                    _pflow)

        if batch_idx == (args.inference_n_batches - 1):
            break

    progress.close()
    return
def inference(args, model):
    """Run pairwise flow inference over the ``.jpg`` frames in ``args.input_dir``.

    Consecutive frame pairs (0,1), (2,3), ... are resized to 512x512 (the net
    expects power-of-two spatial sizes), pushed through the model, and the
    predicted flow is optionally written as a ``.flo`` file and — when
    ``args.save_img`` is also set — rendered to a color image resized back to
    the original frame resolution.  Returns None.
    """
    model.eval()
    if args.save_flow:
        flow_folder = "{}".format(args.save)
        if not os.path.exists(flow_folder):
            os.makedirs(flow_folder)
    # NOTE(review): assumes args.input_dir ends with a path separator —
    # otherwise the glob pattern concatenation misses the directory boundary.
    input_image_list = glob(args.input_dir + '*.jpg')
    input_image_list.sort()
    print(args.input_dir, "len: ", len(input_image_list))
    for i in range(0, len(input_image_list) - 1, 2):
        print("img1: ", input_image_list[i])
        print("img2: ", input_image_list[i + 1])
        img1 = frame_utils.read_gen(input_image_list[i])
        img2 = frame_utils.read_gen(input_image_list[i + 1])
        # resize to 512 — inputs of the net are 256/512/1024...
        img1_in = imresize(img1, (512, 512))
        img2_in = imresize(img2, (512, 512))
        images = [img1_in, img2_in]
        # (2, H, W, C) pair -> (C, 2, H, W), then add a batch dimension.
        images = np.array(images).transpose(3, 0, 1, 2)
        images = torch.from_numpy(images.astype(np.float32))
        images = torch.unsqueeze(images, 0)
        images = [images]
        if args.cuda:
            data = [d.cuda() for d in images]
        else:
            # FIX: `data` was previously only assigned on the CUDA path, so
            # CPU inference crashed with a NameError.
            data = images
        data = [Variable(d) for d in data]
        with torch.no_grad():
            output = model(data[0])
        if args.save_flow:
            _pflow = output[0].data.cpu().numpy().transpose(1, 2, 0)
            frame_name = input_image_list[i].split('/')[-1]
            flow_path = join(flow_folder, '{}.flo'.format(frame_name))
            print("flow saved as: ", flow_path)
            flow_utils.writeFlow(flow_path, _pflow)
            if args.save_img:
                # ... and saved as an image.
                flow = flow_utils.readFlow(flow_path)
                if not os.path.exists(flow_folder + '_img'):
                    os.makedirs(flow_folder + '_img')
                img_path = join(flow_folder + '_img', frame_name)
                print('img saved as: ', img_path)
                img = flow_to_image(flow)
                img = imresize(img, (img1.shape[0], img1.shape[1]))
                imsave(img_path, img)
    return
def inference(self, data_loader, model, offset=0,):
    """Run inference in either 'flow_estimation' or 'warping' mode.

    In 'flow_estimation' mode the predicted flows are saved as ``.flo`` files
    (plus x/y component heat maps when ``self.flow_visualize`` is set); in
    'warping' mode the warped frames are saved as PNGs.  Returns None.
    """
    model.eval()
    if self.mode == 'flow_estimation':
        if (self.args.save_flow):
            flow_folder = self.args.inference_dir
            if not os.path.exists(flow_folder):
                os.makedirs(flow_folder)
    # A negative inference_n_batches means "run the whole loader".
    self.args.inference_n_batches = np.inf if self.args.inference_n_batches < 0 else self.args.inference_n_batches
    progress = tqdm(data_loader, ncols=100,
                    total=np.minimum(len(data_loader), self.args.inference_n_batches),
                    desc='Inferencing ', leave=True, position=offset)
    for batch_idx, (data, target) in enumerate(progress):
        if self.args.cuda:
            # FIX: `async=True` is a SyntaxError on Python >= 3.7; the
            # replacement keyword argument is `non_blocking=True`.
            data, target = [d.cuda(non_blocking=True) for d in data], \
                           [t.cuda(non_blocking=True) for t in target]
        # FIX: Variable(..., volatile=True) was removed from PyTorch; the
        # torch.no_grad() context below provides the same no-autograd behavior.
        data, target = [Variable(d) for d in data], [Variable(t) for t in target]
        with torch.no_grad():
            if self.mode == 'flow_estimation':
                losses, output = model(data[0], target[0], inference=True)
                if self.args.save_flow:
                    for i in range(self.args.inference_batch_size):
                        _pflow = output[i].data.cpu().numpy().transpose(1, 2, 0)
                        flow_utils.writeFlow(
                            join(flow_folder, '%06d.flo' % (batch_idx * self.args.inference_batch_size + i)),
                            _pflow)
                        if self.flow_visualize:
                            # Save the x and y flow components as separate heat maps.
                            flowX = _pflow[:, :, 0]
                            plt.imshow(flowX)
                            plt.savefig(fname=join(flow_folder, '%06d_x.png' % (batch_idx * self.args.inference_batch_size + i)))
                            flowY = _pflow[:, :, 1]
                            plt.imshow(flowY)
                            plt.savefig(fname=join(flow_folder, '%06d_y.png' % (batch_idx * self.args.inference_batch_size + i)))
            elif self.mode == 'warping':
                warped_data, losses = model(data[0], target[0])
                for i in range(self.args.inference_batch_size):
                    # FIX: the loop used to rebind `warped_data` itself, so any
                    # batch size > 1 crashed on the second iteration (indexing
                    # into an already-converted numpy array).
                    # NOTE(review): the filename only encodes batch_idx, so
                    # frames within a batch still overwrite each other.
                    warped_frame = warped_data[i].data.cpu().numpy().transpose(1, 2, 0)
                    misc.imsave('warped_image' + str(batch_idx) + '.png', warped_frame)
        progress.update(1)
    progress.close()
    return
def inference(args, epoch, data_loader, model, loss, offset=0):
    """Run inference with an external ``loss`` callable and save ``.flo`` files.

    When the loader yields no target, the prediction is scored against itself,
    so the reported loss reduces to a norm of the predicted flow.  Loss
    averages are shown on the tqdm bar together with GPU memory usage.
    Returns None.
    """
    model.eval()
    if args.save_flow or args.render_validation:
        flow_folder = "{}/{}.epoch-{}-flow-field".format(
            args.inference_dir, args.name.replace('/', '.'), epoch)
        if not os.path.exists(flow_folder):
            os.makedirs(flow_folder)
    gpu_mem = tools.gpumemusage()
    # A negative inference_n_batches means "run the whole loader".
    args.inference_n_batches = np.inf if args.inference_n_batches < 0 else args.inference_n_batches
    progress = tqdm(data_loader, ncols=100,
                    total=np.minimum(len(data_loader), args.inference_n_batches),
                    desc='Inferencing %s' % (gpu_mem), leave=True, position=offset)
    statistics = []
    total_loss = 0
    for batch_idx, (data, target) in enumerate(progress):
        if args.cuda:
            # FIX: `async=True` is a SyntaxError on Python >= 3.7; use
            # `non_blocking=True` instead.
            data, target = [d.cuda(non_blocking=True) for d in data], \
                           [t.cuda(non_blocking=True) for t in target]
        # FIX: Variable(..., volatile=True) was removed; torch.no_grad()
        # below is the modern equivalent.
        data, target = [Variable(d) for d in data], [Variable(t) for t in target]
        with torch.no_grad():
            output = [model(data[0])]
            if len(target) == 0:
                # No ground truth: compare the prediction to itself.
                target = [output[0]]
            loss_labels, loss_values = loss(output[0], target[0])
        loss_val = loss_values[0]
        # FIX: `tensor.data[0]` was removed in modern PyTorch; use .item().
        total_loss += loss_val.item()
        statistics.append([v.item() for v in loss_values])
        # (N, 2, H, W) -> (N, H, W, 2) for .flo serialization.
        _pflow = output[0].data.cpu().numpy().transpose(0, 2, 3, 1)
        if args.save_flow or args.render_validation:
            for i in range(args.inference_batch_size):
                flow_utils.writeFlow(
                    join(flow_folder, '%06d.flo' % (batch_idx * args.inference_batch_size + i)),
                    _pflow[i])
        progress.set_description(
            'Inference {} Averages for Epoch {}: '.format(tools.gpumemusage(), epoch) +
            tools.format_dictionary_of_losses(
                loss_labels, np.array(statistics).mean(axis=0)))
        progress.update(1)
        if batch_idx == (args.inference_n_batches - 1):
            break
    progress.close()
    return
def inference(args, epoch, data_path, data_loader, model, offset=0):
    """Compute forward and backward flow ``.flo`` files for every batch.

    The backward pair is built by rotating the temporal axis of the input.
    Existing files are skipped, making the pass resumable.  Output flows are
    cropped back to the dataset's original frame size.  Returns None.
    """
    model.eval()
    if args.save_flow or args.render_validation:
        flow_folder = "{}/flo".format(data_path)
        flow_back_folder = "{}/flo_back".format(data_path)
        if not os.path.exists(flow_folder):
            os.makedirs(flow_folder)
        if not os.path.exists(flow_back_folder):
            os.makedirs(flow_back_folder)
    # visualization folders
    if args.inference_visualize:
        flow_vis_folder = "{}/flo_vis".format(data_path)
        if not os.path.exists(flow_vis_folder):
            os.makedirs(flow_vis_folder)
        flow_back_vis_folder = "{}/flo_back_vis".format(data_path)
        if not os.path.exists(flow_back_vis_folder):
            os.makedirs(flow_back_vis_folder)
    # A negative inference_n_batches means "run the whole loader".
    args.inference_n_batches = np.inf if args.inference_n_batches < 0 else args.inference_n_batches
    progress = tqdm(data_loader, ncols=100,
                    total=np.minimum(len(data_loader), args.inference_n_batches),
                    desc='Inferencing ', leave=True, position=offset)
    for batch_idx, (data) in enumerate(progress):
        data = data[0]
        # Backward pair: swap frame order along the temporal axis (dim 2).
        data_back = torch.cat((data[:, :, 1:, :, :], data[:, :, :1, :, :]), dim=2)
        # FIX: `data_forward` was only assigned on the CUDA path, so CPU
        # inference raised a NameError.
        data_forward = data
        if args.cuda:
            data_forward = data_forward.cuda(non_blocking=True)
            data_back = data_back.cuda(non_blocking=True)
        data_forward = Variable(data_forward)
        data_back = Variable(data_back)
        flo_path = join(flow_folder, '%06d.flo' % (batch_idx))
        flo_back_path = join(flow_back_folder, '%06d.flo' % (batch_idx))
        frame_size = data_loader.dataset.frame_size
        if not os.path.exists(flo_path):  # skip work already on disk
            with torch.no_grad():
                # Crop padding back to the original frame size.
                output = model(data_forward)[:, :, :frame_size[0], :frame_size[1]]
            if args.save_flow or args.render_validation:
                _pflow = output[0].data.cpu().numpy().transpose(1, 2, 0)
                flow_utils.writeFlow(flo_path, _pflow)
                if args.inference_visualize:
                    flow_utils.visulize_flow_file(
                        join(flow_folder, '%06d.flo' % (batch_idx)), flow_vis_folder)
        if not os.path.exists(flo_back_path):  # skip work already on disk
            with torch.no_grad():
                output = model(data_back)[:, :, :frame_size[0], :frame_size[1]]
            if args.save_flow or args.render_validation:
                _pflow = output[0].data.cpu().numpy().transpose(1, 2, 0)
                flow_utils.writeFlow(flo_back_path, _pflow)
                if args.inference_visualize:
                    flow_utils.visulize_flow_file(
                        join(flow_back_folder, '%06d.flo' % (batch_idx)), flow_back_vis_folder)
        progress.update(1)
        if batch_idx == (args.inference_n_batches - 1):
            break
    progress.close()
    return
def inference(args, epoch, data_loader, logger, model, offset=0):
    """Run inference, log rendered flows/inputs to ``logger`` (TensorBoard-style),
    and optionally save raw ``.flo`` files plus rendered PNGs.

    Returns None; loss averages are reported on the tqdm bar.
    """
    model.eval()
    if args.save_flow or args.render_validation:
        flow_folder = "{}/{}.epoch-{}-flow-field".format(
            args.inference_dir, args.name.replace('/', '.'), epoch)
        rendered_flow_folder = "{}/{}.epoch-{}-rendered-flow-field".format(
            args.inference_dir, args.name.replace('/', '.'), epoch)
        if not os.path.exists(flow_folder):
            os.makedirs(flow_folder)
        if not os.path.exists(rendered_flow_folder):
            os.makedirs(rendered_flow_folder)
    # A negative inference_n_batches means "run the whole loader".
    args.inference_n_batches = np.inf if args.inference_n_batches < 0 else args.inference_n_batches
    progress = tqdm(data_loader, ncols=100,
                    total=np.minimum(len(data_loader), args.inference_n_batches),
                    desc='Inferencing ', leave=True, position=offset)
    statistics = []
    total_loss = 0
    for batch_idx, (data, target) in enumerate(progress):
        if args.cuda:
            # FIX: `async=True` is a SyntaxError on Python >= 3.7; use
            # `non_blocking=True` instead.
            data, target = [d.cuda(non_blocking=True) for d in data], \
                           [t.cuda(non_blocking=True) for t in target]
        # FIX: Variable(..., volatile=True) was removed; torch.no_grad()
        # below is the modern equivalent.
        data, target = [Variable(d) for d in data], [Variable(t) for t in target]
        # When ground-truth flows are not available for the inference dataset,
        # the targets are set to all zeros; the "losses" are then L1/L2 norms
        # of the computed flows, depending on the loss norm passed in.
        with torch.no_grad():
            losses, output = model(data[0], target[0], inference=True)
        losses = [torch.mean(loss_value) for loss_value in losses]
        loss_val = losses[0]  # first loss is the headline metric
        # FIX: `tensor.data[0]` was removed in modern PyTorch; use .item().
        total_loss += loss_val.item()
        loss_values = [v.item() for v in losses]
        # Gather loss_labels explicitly; returning them from the model hits a
        # recursion-limit error as gather looks for variables to gather.
        loss_labels = list(model.module.loss.loss_labels)
        statistics.append(loss_values)
        if args.save_flow or args.render_validation:
            for i in range(args.inference_batch_size):
                _pflow = output[i].data.cpu().numpy().transpose(1, 2, 0)
                ground_truth = target[0][i].data.cpu().numpy().transpose(1, 2, 0)
                # Render flows to RGB, move channels first, scale to [0, 1].
                render_img = tools.flow_to_image(_pflow).transpose(2, 0, 1)
                true_img = tools.flow_to_image(ground_truth).transpose(2, 0, 1)
                render_img = torch.Tensor(render_img) / 255.0
                true_img = torch.Tensor(true_img) / 255.0
                input_img = data[0][i, :, 0, :, :].data.cpu() / 255.0
                logger.add_image('renderimg',
                                 torchvision.utils.make_grid(render_img),
                                 batch_idx * args.inference_batch_size + i)
                logger.add_image('ground_truth',
                                 torchvision.utils.make_grid(true_img),
                                 batch_idx * args.inference_batch_size + i)
                logger.add_image('input_img',
                                 torchvision.utils.make_grid(input_img),
                                 batch_idx * args.inference_batch_size + i)
                if args.save_flow:
                    # NOTE(review): scipy.misc.imsave was removed in SciPy 1.2;
                    # this requires a pinned older SciPy (or a migration to
                    # imageio.imwrite).
                    scipy.misc.imsave(
                        join(rendered_flow_folder,
                             '%06d.png' % (batch_idx * args.inference_batch_size + i)),
                        render_img.numpy().transpose(1, 2, 0))
                    flow_utils.writeFlow(
                        join(flow_folder,
                             '%06d.flo' % (batch_idx * args.inference_batch_size + i)),
                        _pflow)
        progress.set_description(
            'Inference Averages for Epoch {}: '.format(epoch) +
            tools.format_dictionary_of_losses(
                loss_labels, np.array(statistics).mean(axis=0)))
        progress.update(1)
        if batch_idx == (args.inference_n_batches - 1):
            break
    progress.close()
    return
def inference(args, epoch, data_loader, model, offset=0):
    """Run one inference pass over ``data_loader``.

    Optionally saves predicted flows as numbered ``.flo`` files and, when
    ``args.inference_visualize`` is set, color-coded visualizations of them.
    Loss averages are reported on the tqdm bar.  Returns None.
    """
    model.eval()
    if args.save_flow or args.render_validation:
        flow_folder = "{}/inference/{}.epoch-{}-flow-field".format(args.save, args.name.replace("/", "."), epoch)
        if not os.path.exists(flow_folder):
            os.makedirs(flow_folder)
    # visualization folder
    if args.inference_visualize:
        flow_vis_folder = "{}/inference/{}.epoch-{}-flow-vis".format(args.save, args.name.replace("/", "."), epoch)
        if not os.path.exists(flow_vis_folder):
            os.makedirs(flow_vis_folder)
    # A negative inference_n_batches means "run the whole loader".
    args.inference_n_batches = np.inf if args.inference_n_batches < 0 else args.inference_n_batches
    progress = tqdm(
        data_loader,
        ncols=100,
        total=np.minimum(len(data_loader), args.inference_n_batches),
        desc="Inferencing ",
        leave=True,
        position=offset,
    )
    statistics = []
    total_loss = 0
    for batch_idx, (data, target) in enumerate(progress):
        if args.cuda:
            data, target = [d.cuda(non_blocking=True) for d in data], [t.cuda(non_blocking=True) for t in target]
        data, target = [Variable(d) for d in data], [Variable(t) for t in target]
        # When ground-truth flows are not available for the inference dataset,
        # the targets are set to all zeros; the "losses" are then L1/L2 norms
        # of the computed flows, depending on the type of loss norm passed in.
        with torch.no_grad():
            losses, output = model(data[0], target[0], inference=True)
        losses = [torch.mean(loss_value) for loss_value in losses]
        loss_val = losses[0]  # Collect first loss for weight update
        total_loss += loss_val.item()
        loss_values = [v.item() for v in losses]
        # Gather loss_labels explicitly; a direct return leads to a
        # recursion-limit error as it looks for variables to gather.
        loss_labels = list(model.module.loss.loss_labels)
        statistics.append(loss_values)
        if args.save_flow or args.render_validation:
            for i in range(args.inference_batch_size):
                # (2, H, W) -> (H, W, 2) for .flo serialization.
                _pflow = output[i].data.cpu().numpy().transpose(1, 2, 0)
                flow_utils.writeFlow(
                    join(flow_folder, "%06d.flo" % (batch_idx * args.inference_batch_size + i)), _pflow
                )
                # You can comment out the plt block in visulize_flow_file()
                # for real-time visualization.
                if args.inference_visualize:
                    flow_utils.visulize_flow_file(
                        join(flow_folder, "%06d.flo" % (batch_idx * args.inference_batch_size + i)),
                        flow_vis_folder,
                    )
        progress.set_description(
            "Inference Averages for Epoch {}: ".format(epoch)
            + tools.format_dictionary_of_losses(loss_labels, np.array(statistics).mean(axis=0))
        )
        progress.update(1)
        if batch_idx == (args.inference_n_batches - 1):
            break
    progress.close()
    return
def inference(args, epoch, data_loader, model, offset=0):
    """Run one inference pass, optionally saving flows, rendered frames, and a
    per-file loss log.

    Outputs (all optional, flag-controlled): numbered ``.flo`` files, flow
    visualizations, predicted frames as PNGs (``args.save_frames``), and a
    ``log.json`` with per-file losses (``args.save_inferenceLog``).
    Returns None.
    """
    # Hoisted out of the per-batch loop where they were re-imported each
    # iteration / buried at the end of the function.
    from PIL import Image
    import json

    model.eval()
    if args.save_flow or args.render_validation:
        flow_folder = "{}/inference/{}.epoch-{}-flow-field".format(
            args.save, args.name.replace('/', '.'), epoch)
        if not os.path.exists(flow_folder):
            os.makedirs(flow_folder)
    # visualization folder
    if args.inference_visualize:
        flow_vis_folder = "{}/inference/{}.epoch-{}-flow-vis".format(
            args.save, args.name.replace('/', '.'), epoch)
        if not os.path.exists(flow_vis_folder):
            os.makedirs(flow_vis_folder)
    if args.save_frames or args.save_inferenceLog:
        inference_folder = "{}/{}.epoch-{}".format(
            args.save, args.name.replace('/', '.'), epoch)
        if not os.path.exists(inference_folder):
            os.makedirs(inference_folder)
    # A negative inference_n_batches means "run the whole loader".
    args.inference_n_batches = np.inf if args.inference_n_batches < 0 else args.inference_n_batches
    progress = tqdm(data_loader, ncols=100,
                    total=np.minimum(len(data_loader), args.inference_n_batches),
                    desc='Inferencing ', leave=True, position=offset)
    print('[LOG] We assume that "inference_batch_size" arg is always 1')
    # FIX: use `is None` rather than `== None` for the identity test.
    if data_loader.dataset.ref_names is None:
        f_names = [f'{f_idx:06d}.png' for f_idx in range(len(data_loader))]
    else:
        f_names = data_loader.dataset.ref_names
    if args.save_inferenceLog:
        log_labels = ['filename'] + list(model.module.loss.loss_labels)
        log_dict = {l: {} for l in log_labels}
        for i in range(len(data_loader)):
            log_dict['filename'][i] = f_names[i]
    statistics = []
    total_loss = 0
    for batch_idx, (data, target) in enumerate(progress):
        if args.cuda:
            data, target = [d.cuda(non_blocking=True) for d in data], \
                           [t.cuda(non_blocking=True) for t in target]
        data, target = [Variable(d) for d in data], [Variable(t) for t in target]
        # When ground-truth flows are not available for the inference dataset,
        # the targets are set to all zeros; the "losses" are then L1/L2 norms
        # of the computed flows, depending on the loss norm passed in.
        with torch.no_grad():
            pred_losses, output = model(data[0], target[0], inference=True)
        losses = [torch.mean(loss_value) for loss_value in pred_losses]
        loss_val = losses[0]  # first loss is the headline metric
        total_loss += loss_val.item()
        loss_values = [v.item() for v in losses]
        # Gather loss_labels explicitly; a direct return leads to a
        # recursion-limit error as it looks for variables to gather.
        loss_labels = list(model.module.loss.loss_labels)
        statistics.append(loss_values)
        if args.save_flow or args.render_validation:
            for i in range(args.inference_batch_size):
                _pflow = output[i].data.cpu().numpy().transpose(1, 2, 0)
                flow_utils.writeFlow(
                    join(flow_folder, '%06d.flo' % (batch_idx * args.inference_batch_size + i)),
                    _pflow)
                # You can comment out the plt block in visulize_flow_file()
                # for real-time visualization.
                if args.inference_visualize:
                    flow_utils.visulize_flow_file(
                        join(flow_folder, '%06d.flo' % (batch_idx * args.inference_batch_size + i)),
                        flow_vis_folder)
        if args.save_frames:
            # Save the (assumed single) predicted frame as an 8-bit PNG.
            _pframe = output[0].data.cpu().numpy().transpose(1, 2, 0)
            _pframe = (_pframe).clip(min=0, max=255).astype(np.uint8)
            f_name = f_names[batch_idx]
            png_data = Image.fromarray(_pframe)
            png_data.save(f'{inference_folder}/{f_name}')
        if args.save_inferenceLog:
            for label, loss in zip(loss_labels, pred_losses):
                log_dict[label][batch_idx] = str(loss.cpu().numpy())
        progress.set_description(
            'Inference Averages for Epoch {}: '.format(epoch) +
            tools.format_dictionary_of_losses(
                loss_labels, np.array(statistics).mean(axis=0)))
        progress.update(1)
        if batch_idx == (args.inference_n_batches - 1):
            break
    progress.close()
    if args.save_inferenceLog:
        with open(f'{inference_folder}/log.json', 'w') as fp:
            json.dump(log_dict, fp, sort_keys=True, indent=4)
    return
], [t.cuda(async=True) for t in target] data, target = [Variable(d) for d in data], [Variable(t) for t in target] # Actual forward pass through the network with torch.no_grad(): output = model(data[0]) # Saving the outputs for example_idx in range(output.shape[0]): flow_single = output[example_idx].data.cpu().numpy().transpose( 1, 2, 0) output_file = os.path.join( flow_subdir_out, '{:06d}'.format(batch_idx * args.batch_size + example_idx)) if args.flow_output_type == 'flo': output_file += '.flo' flow_utils.writeFlow(output_file, flow_single) else: output_file += '.jpg' minmax_curr = flow_utils.writeFlowJPEG( output_file, flow_single) minmax_arr.append(minmax_curr) # After every batch, save all the min/max values to disk minmax_file = os.path.join(flow_subdir_out, "minmax_values.npy") np.save(minmax_file, np.asarray(minmax_arr, np.float32)) print('#' * 60)
def inference(args, data_loader, model, offset=0):
    """Run one inference pass, cropping dataset padding off saved flows.

    NOTE(review): relies on module-level globals not visible here —
    `out_path` (output directory) and `inference_dataset` (provides the
    `ph`/`pw` padding amounts); confirm they are defined before this is called.
    """
    model.eval()
    if args.save_flow or args.render_validation:
        flow_folder = out_path  # "./output/flo_rev" if args.reverse else "./output/flo"
        if not os.path.exists(flow_folder):
            os.makedirs(flow_folder)
    # visualization folder
    if args.inference_visualize:
        flow_vis_folder = out_path + "/" + "png/"
        if not os.path.exists(flow_vis_folder):
            os.makedirs(flow_vis_folder)
    # A negative inference_n_batches means "run the whole loader".
    args.inference_n_batches = np.inf if args.inference_n_batches < 0 else args.inference_n_batches
    progress = tqdm(data_loader, ncols=100,
                    total=np.minimum(len(data_loader), args.inference_n_batches),
                    desc='Inferencing ', leave=True, position=offset)
    statistics = []
    total_loss = 0
    # Vertical/horizontal padding added by the dataset; stripped from the
    # predicted flow before saving.
    ph, pw = inference_dataset.ph, inference_dataset.pw
    for batch_idx, (data, target) in enumerate(progress):
        if args.cuda:
            data, target = [d.cuda(non_blocking=True) for d in data], \
                           [t.cuda(non_blocking=True) for t in target]
        data, target = [Variable(d) for d in data], [Variable(t) for t in target]
        # When ground-truth flows are not available for the inference dataset,
        # the targets are set to all zeros; the "losses" are then L1/L2 norms
        # of the computed flows, depending on the type of loss norm passed in.
        with torch.no_grad():
            losses, output = model(data[0], target[0], inference=True)
        losses = [torch.mean(loss_value) for loss_value in losses]
        loss_val = losses[0]  # Collect first loss for weight update
        total_loss += loss_val.item()
        loss_values = [v.item() for v in losses]
        statistics.append(loss_values)
        if args.save_flow or args.render_validation:
            for i in range(args.inference_batch_size):
                _pflow = output[i].data.cpu().numpy().transpose(1, 2, 0)
                # Crop the padding off (a 0 pad would make [0:-0] empty,
                # hence the guards).
                if ph != 0:
                    _pflow = _pflow[ph:-ph, :, :]
                if pw != 0:
                    _pflow = _pflow[:, pw:-pw, :]
                flow_utils.writeFlow(
                    join(flow_folder, '%06d.flo' % (batch_idx * args.inference_batch_size + i)),
                    _pflow)
                # You can comment out the plt block in visulize_flow_file()
                # for real-time visualization.
                if args.inference_visualize:
                    flow_utils.visulize_flow_file(
                        join(flow_folder, '%06d.flo' % (batch_idx * args.inference_batch_size + i)),
                        flow_vis_folder)
        progress.update(1)
        if batch_idx == (args.inference_n_batches - 1):
            break
    progress.close()
    return
def inference(args, epoch, data_loader, model, offset=0):
    """Run inference for a model predicting multiple stacked flows per sample.

    The output channel axis holds flows in (x, y) channel pairs; each pair j
    is saved as ``<batch>_<j>.flo`` together with its matching target as
    ``<batch>_<j>_target.flo``, plus an optional side-by-side visualization.
    Returns None.
    """
    model.eval()
    if args.save_flow or args.render_validation:
        flow_folder = "{}/inference/{}.epoch-{}-flow-field".format(
            args.save, args.name.replace('/', '.'), epoch)
        if not os.path.exists(flow_folder):
            os.makedirs(flow_folder)
    # visualization folder
    if args.inference_visualize:
        flow_vis_folder = "{}/inference/{}.epoch-{}-flow-vis".format(
            args.save, args.name.replace('/', '.'), epoch)
        if not os.path.exists(flow_vis_folder):
            os.makedirs(flow_vis_folder)
    # A negative inference_n_batches means "run the whole loader".
    args.inference_n_batches = np.inf if args.inference_n_batches < 0 else args.inference_n_batches
    progress = tqdm(data_loader, ncols=200,
                    total=np.minimum(len(data_loader), args.inference_n_batches),
                    desc='Inferencing ', leave=True, position=offset)
    statistics = []
    total_loss = 0
    for batch_idx, (data, target) in enumerate(progress):
        if args.cuda:
            data, target = [d.cuda(non_blocking=True) for d in data], \
                           [t.cuda(non_blocking=True) for t in target]
        data, target = [Variable(d) for d in data], [Variable(t) for t in target]
        # When ground-truth flows are not available for the inference dataset,
        # the targets are set to all zeros; the "losses" are then L1/L2 norms
        # of the computed flows, depending on the loss norm passed in.
        with torch.no_grad():
            losses, output = model(data[0], target[0], inference=True)
        losses = [torch.mean(loss_value) for loss_value in losses]
        loss_val = losses[0]  # first loss is the headline metric
        total_loss += loss_val.item()
        loss_values = [v.item() for v in losses]
        # Gather loss_labels explicitly; a direct return leads to a
        # recursion-limit error as it looks for variables to gather.
        loss_labels = list(model.module.loss.loss_labels)
        statistics.append(loss_values)
        if args.save_flow or args.render_validation:
            for i in range(args.inference_batch_size):
                _pflow_all = output[i].data.cpu().numpy().transpose(1, 2, 0)
                _tflow_all = target[0][i].data.cpu().numpy()
                if len(_tflow_all.shape) == 4:
                    # (F, 2, H, W) -> (2, H, W, F): per-flow target stack.
                    _tflow_all = _tflow_all.transpose(1, 2, 3, 0)
                elif len(_tflow_all.shape) == 3:
                    _tflow_all = _tflow_all.transpose(1, 2, 0)
                else:
                    # FIX: the ValueError was constructed but never raised,
                    # so unsupported shapes fell through silently.
                    raise ValueError('Unsupported dimensions of _tflow_all')
                # The channel axis packs flows as (x, y) pairs.
                for j in range(0, output.shape[1], 2):
                    _pflow = _pflow_all[:, :, j:j + 2]
                    if len(_tflow_all.shape) == 4:
                        _tflow = _tflow_all[int(j / 2), :, :, :]
                    elif len(_tflow_all.shape) == 3:
                        _tflow = _tflow_all
                    else:
                        # FIX: previously not raised (see above).
                        raise ValueError('Unsupported dimensions of _tflow_all')
                    flow_filename_base = '%06d_%06d' % (
                        batch_idx * args.inference_batch_size + i, int(j / 2))
                    flow_utils.writeFlow(
                        join(flow_folder, flow_filename_base) + '.flo', _pflow)
                    flow_utils.writeFlow(
                        join(flow_folder, flow_filename_base + '_target.flo'), _tflow)
                    if args.inference_visualize:
                        # Side-by-side prediction/target visualization; cv2
                        # expects BGR channel order on disk.
                        results_image = visualize_results(_pflow, _tflow, data[0][i])
                        cv2.imwrite(
                            join(flow_vis_folder, flow_filename_base + '_vis.png'),
                            cv2.cvtColor(results_image, cv2.COLOR_RGB2BGR))
        progress.set_description(
            'Inference Averages for Epoch {}: '.format(epoch) +
            tools.format_dictionary_of_losses(
                loss_labels, np.array(statistics).mean(axis=0)))
        progress.update(1)
        if batch_idx == (args.inference_n_batches - 1):
            break
    progress.close()
    return