Example #1
        def inference(args, epoch, data_loader, model, offset=0):
            model.eval()
            if args.save_flow or args.render_validation:
                flow_folder = "{}/inference/{}.epoch-{}-flow-field".format(args.save, args.name.replace('/', '.'),
                                                                           epoch)
                if not os.path.exists(flow_folder):
                    os.makedirs(flow_folder)

            # visualization folder
            if args.inference_visualize:
                flow_vis_folder = "{}/inference/{}.epoch-{}-flow-vis".format(args.save, args.name.replace('/', '.'),
                                                                             epoch)
                if not os.path.exists(flow_vis_folder):
                    os.makedirs(flow_vis_folder)

            args.inference_n_batches = np.inf if args.inference_n_batches < 0 else args.inference_n_batches

            progress = tqdm(data_loader, ncols=100, total=np.minimum(len(data_loader), args.inference_n_batches),
                            desc='Inferencing ',
                            leave=True, position=offset)

            statistics = []
            total_loss = 0

            for batch_idx, (data, target, city_name, video_name, image_list) in enumerate(progress):
                city_name = city_name[0]
                video_name = video_name[0]
            
                if args.cuda:
                    data, target = [d.cuda(non_blocking=True) for d in data], [t.cuda(non_blocking=True) for t in
                                                                               target]
                data, target = [Variable(d) for d in data], [
                    Variable(t) for t in target]

                # when ground-truth flows are not available for inference_dataset,
                # the targets are set to all zeros. Thus, the losses are actually L1 or L2 norms of the computed optical flows,
                # depending on the type of loss norm passed in
                with torch.no_grad():
                    losses, output = model(data[0], target[0], inference=True)

                losses = [torch.mean(loss_value) for loss_value in losses]
                loss_val = losses[0]  # Collect first loss for weight update
                total_loss += loss_val.item()
                loss_values = [v.item() for v in losses]

                # gather loss_labels; returning them directly leads to a recursion limit error as it looks for variables to gather
                loss_labels = list(model.module.loss.loss_labels)

                statistics.append(loss_values)
                # import IPython; IPython.embed()
                if args.save_flow or args.render_validation:
                    for i in range(args.inference_batch_size):
                        _pflow = output[i].data.cpu().numpy().transpose(1, 2, 0)
                        # print(_pflow.shape)
                        img0 = scipy.misc.toimage(_pflow[:, :, 0])
                        img1 = scipy.misc.toimage(_pflow[:, :, 1])

                        #id = name[batch_idx][15:-4]
                        id = image_list[1][0].split('/')[-1]
                        #print('Saving : ', id)
                        os.makedirs(crop_image_path + '/horizontal/' + city_name + '/' + video_name, exist_ok=True)
                        os.makedirs(crop_image_path + '/vertical/' + city_name + '/' + video_name, exist_ok=True)
                        #block.log('?????? %s '%(name[batch_idx * args.effective_inference_batch_size + i]))
                        img0.save(
                            crop_image_path + '/horizontal/' + city_name + '/' + video_name + '/' + id)
                        img1.save(crop_image_path + '/vertical/' + city_name +
                                  '/' + video_name + '/' + id)
                        # img0.save(
                        #     '/home/wangsen/flownet2_testpic/optical_flow/horizontal/' + str(id) + '_' + str(
                        #         batch_idx) + '.png')
                        # img1.save(
                        #     '/home/wangsen/flownet2_testpic/optical_flow/vertical/' + str(id) + '_' + str(
                        #         batch_idx) + '.png')
                        #flow_utils.writeFlow(join(flow_folder, '%06d.flo' % (batch_idx * args.effective_inference_batch_size + i)),
                         #                     _pflow)

                        # You can comment out the plt block in visulize_flow_file() for real-time visualization
                        if args.inference_visualize:
                            flow_utils.visulize_flow_file(
                                join(flow_folder, '%06d.flo' %
                                     (batch_idx * args.inference_batch_size + i)),
                                flow_vis_folder)

                progress.set_description(
                    'Inference Averages for Epoch {}: '.format(epoch) + tools.format_dictionary_of_losses(loss_labels,
                                                                                                          np.array(
                                                                                                              statistics).mean(
                                                                                                              axis=0)))
                progress.update(1)

                if batch_idx == (args.inference_n_batches - 1):
                    break

            progress.close()

            return
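A note on Example #1: scipy.misc.toimage has been removed from recent SciPy releases, so the per-channel image saving above only runs against an old SciPy pin. A minimal sketch of an equivalent save using PIL (save_flow_channel is a hypothetical helper, not part of the original code; the scaling mimics toimage's default byte scaling):

import numpy as np
from PIL import Image

def save_flow_channel(channel, path):
    # rescale the float flow channel to 0-255, as scipy.misc.toimage did by default
    cmin, cmax = float(channel.min()), float(channel.max())
    if cmax > cmin:
        scaled = ((channel - cmin) * (255.0 / (cmax - cmin))).astype(np.uint8)
    else:
        scaled = np.zeros_like(channel, dtype=np.uint8)
    Image.fromarray(scaled).save(path)

# e.g. save_flow_channel(_pflow[:, :, 0], horizontal_path) in place of scipy.misc.toimage(...).save(...)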
Example #2
def inference(args, epoch, data_path, data_loader, model, offset=0):

    model.eval()
    
    if args.save_flow or args.render_validation:
        flow_folder = "{}/flo".format(data_path)
        flow_back_folder = "{}/flo_back".format(data_path)
        if not os.path.exists(flow_folder):
            os.makedirs(flow_folder)
        if not os.path.exists(flow_back_folder):
            os.makedirs(flow_back_folder)
    
    # visualization folder
    if args.inference_visualize:
        flow_vis_folder = "{}/flo_vis".format(data_path)
        if not os.path.exists(flow_vis_folder):
            os.makedirs(flow_vis_folder)
        flow_back_vis_folder = "{}/flo_back_vis".format(data_path)
        if not os.path.exists(flow_back_vis_folder):
            os.makedirs(flow_back_vis_folder)
    
    args.inference_n_batches = np.inf if args.inference_n_batches < 0 else args.inference_n_batches

    progress = tqdm(data_loader, ncols=100, total=np.minimum(len(data_loader), args.inference_n_batches), desc='Inferencing ', 
        leave=True, position=offset)

    for batch_idx, (data) in enumerate(progress):
        data = data[0]
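        # reverse the order of the two frames along dim 2 (the temporal dimension),
        # so a second forward pass through the model yields the backward flow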
        data_back = torch.cat((data[:,:,1:,:,:], data[:,:,:1,:,:]), dim = 2)
        if args.cuda:
            data_forward = data.cuda(non_blocking=True)
            data_back = data_back.cuda(non_blocking=True)
        data_forward = Variable(data_forward)
        data_back = Variable(data_back)

        flo_path = join(flow_folder, '%06d.flo'%(batch_idx))
        flo_back_path = join(flow_back_folder, '%06d.flo'%(batch_idx))
        frame_size = data_loader.dataset.frame_size
        if not os.path.exists(flo_path):
            with torch.no_grad():
                output = model(data_forward)[:,:,:frame_size[0], :frame_size[1]]
            if args.save_flow or args.render_validation:
                _pflow = output[0].data.cpu().numpy().transpose(1, 2, 0)
                flow_utils.writeFlow( flo_path,  _pflow)
                if args.inference_visualize:
                    flow_utils.visulize_flow_file(
                        join(flow_folder, '%06d.flo' % (batch_idx)),flow_vis_folder)

        if not os.path.exists(flo_back_path):
            with torch.no_grad():
                output = model(data_back)[:,:,:frame_size[0], :frame_size[1]]
            if args.save_flow or args.render_validation:
                _pflow = output[0].data.cpu().numpy().transpose(1, 2, 0)
                flow_utils.writeFlow( flo_back_path,  _pflow)
                if args.inference_visualize:
                    flow_utils.visulize_flow_file(
                        join(flow_back_folder, '%06d.flo' % (batch_idx)), flow_back_vis_folder)
                
        progress.update(1)

        if batch_idx == (args.inference_n_batches - 1):
            break
    progress.close()
    return
Example #3
    def inference(args, epoch, data_loader, model, offset=0):

        model.eval()

        if args.save_flow or args.render_validation:
            flow_folder = "{}/inference/{}.epoch-{}-flow-field".format(args.save, args.name.replace("/", "."), epoch)
            if not os.path.exists(flow_folder):
                os.makedirs(flow_folder)

        # visualization folder
        if args.inference_visualize:
            flow_vis_folder = "{}/inference/{}.epoch-{}-flow-vis".format(args.save, args.name.replace("/", "."), epoch)
            if not os.path.exists(flow_vis_folder):
                os.makedirs(flow_vis_folder)

        args.inference_n_batches = np.inf if args.inference_n_batches < 0 else args.inference_n_batches

        progress = tqdm(
            data_loader,
            ncols=100,
            total=np.minimum(len(data_loader), args.inference_n_batches),
            desc="Inferencing ",
            leave=True,
            position=offset,
        )

        statistics = []
        total_loss = 0
        for batch_idx, (data, target) in enumerate(progress):
            if args.cuda:
                data, target = [d.cuda(non_blocking=True) for d in data], [t.cuda(non_blocking=True) for t in target]
            data, target = [Variable(d) for d in data], [Variable(t) for t in target]

            # when ground-truth flows are not available for inference_dataset,
            # the targets are set to all zeros. Thus, the losses are actually L1 or L2 norms of the computed optical flows,
            # depending on the type of loss norm passed in
            with torch.no_grad():
                losses, output = model(data[0], target[0], inference=True)

            losses = [torch.mean(loss_value) for loss_value in losses]
            loss_val = losses[0]  # Collect first loss for weight update
            total_loss += loss_val.item()
            loss_values = [v.item() for v in losses]

            # gather loss_labels; returning them directly leads to a recursion limit error as it looks for variables to gather
            loss_labels = list(model.module.loss.loss_labels)

            statistics.append(loss_values)
            # import IPython; IPython.embed()
            if args.save_flow or args.render_validation:
                for i in range(args.inference_batch_size):
                    _pflow = output[i].data.cpu().numpy().transpose(1, 2, 0)
                    flow_utils.writeFlow(
                        join(flow_folder, "%06d.flo" % (batch_idx * args.inference_batch_size + i)), _pflow
                    )

                    # You can comment out the plt block in visulize_flow_file() for real-time visualization
                    if args.inference_visualize:
                        flow_utils.visulize_flow_file(
                            join(flow_folder, "%06d.flo" % (batch_idx * args.inference_batch_size + i)),
                            flow_vis_folder,
                        )

            progress.set_description(
                "Inference Averages for Epoch {}: ".format(epoch)
                + tools.format_dictionary_of_losses(loss_labels, np.array(statistics).mean(axis=0))
            )
            progress.update(1)

            if batch_idx == (args.inference_n_batches - 1):
                break

        progress.close()

        return
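A note that applies to these snippets generally: they read model.module.loss.loss_labels, which assumes the network was wrapped in torch.nn.DataParallel before being passed to inference(); DataParallel exposes the wrapped network via its .module attribute. A minimal, self-contained sketch of that relationship (DummyFlowNet and DummyLoss are illustrative stand-ins, not the real classes):

import torch.nn as nn

class DummyLoss(nn.Module):
    loss_labels = ['L1', 'EPE']  # stand-in labels mirroring what the snippets read

class DummyFlowNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.loss = DummyLoss()

model = nn.DataParallel(DummyFlowNet())
print(model.module.loss.loss_labels)  # reachable via .module once wrapped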
Example #4
    def inference(args, epoch, data_loader, model, offset=0):

        model.eval()

        if args.save_flow or args.render_validation:
            flow_folder = "{}/inference/{}.epoch-{}-flow-field".format(
                args.save, args.name.replace('/', '.'), epoch)
            if not os.path.exists(flow_folder):
                os.makedirs(flow_folder)

        # visualization folder
        if args.inference_visualize:
            flow_vis_folder = "{}/inference/{}.epoch-{}-flow-vis".format(
                args.save, args.name.replace('/', '.'), epoch)
            if not os.path.exists(flow_vis_folder):
                os.makedirs(flow_vis_folder)

        if args.save_frames or args.save_inferenceLog:
            inference_folder = "{}/{}.epoch-{}".format(
                args.save, args.name.replace('/', '.'), epoch)
            if not os.path.exists(inference_folder):
                os.makedirs(inference_folder)

        args.inference_n_batches = np.inf if args.inference_n_batches < 0 else args.inference_n_batches

        progress = tqdm(data_loader,
                        ncols=100,
                        total=np.minimum(len(data_loader),
                                         args.inference_n_batches),
                        desc='Inferencing ',
                        leave=True,
                        position=offset)

        print('[LOG] We assume that "inference_batch_size" arg is always 1')
        if data_loader.dataset.ref_names is None:
            f_names = [f'{f_idx:06d}.png' for f_idx in range(len(data_loader))]
        else:
            f_names = data_loader.dataset.ref_names

        if args.save_inferenceLog:
            log_labels = ['filename'] + list(model.module.loss.loss_labels)
            log_dict = {l: {} for l in log_labels}
            for i in range(len(data_loader)):
                log_dict['filename'][i] = f_names[i]

        statistics = []
        total_loss = 0
        for batch_idx, (data, target) in enumerate(progress):
            if args.cuda:
                data, target = [d.cuda(non_blocking=True) for d in data
                                ], [t.cuda(non_blocking=True) for t in target]
            data, target = [Variable(d)
                            for d in data], [Variable(t) for t in target]

            # when ground-truth flows are not available for inference_dataset,
            # the targets are set to all zeros. Thus, the losses are actually L1 or L2 norms of the computed optical flows,
            # depending on the type of loss norm passed in
            with torch.no_grad():
                pred_losses, output = model(data[0], target[0], inference=True)

            losses = [torch.mean(loss_value) for loss_value in pred_losses]
            loss_val = losses[0]  # Collect first loss for weight update
            total_loss += loss_val.item()
            loss_values = [v.item() for v in losses]

            # gather loss_labels; returning them directly leads to a recursion limit error as it looks for variables to gather
            loss_labels = list(model.module.loss.loss_labels)

            statistics.append(loss_values)
            # import IPython; IPython.embed()
            if args.save_flow or args.render_validation:
                for i in range(args.inference_batch_size):
                    _pflow = output[i].data.cpu().numpy().transpose(1, 2, 0)
                    flow_utils.writeFlow(
                        join(
                            flow_folder, '%06d.flo' %
                            (batch_idx * args.inference_batch_size + i)),
                        _pflow)

                    # You can comment out the plt block in visulize_flow_file() for real-time visualization
                    if args.inference_visualize:
                        flow_utils.visulize_flow_file(
                            join(
                                flow_folder, '%06d.flo' %
                                (batch_idx * args.inference_batch_size + i)),
                            flow_vis_folder)

            if args.save_frames:
                from PIL import Image
                _pframe = output[0].data.cpu().numpy().transpose(1, 2, 0)
                _pframe = (_pframe).clip(min=0, max=255).astype(np.uint8)
                f_name = f_names[batch_idx]
                png_data = Image.fromarray(_pframe)
                png_data.save(f'{inference_folder}/{f_name}')

            if args.save_inferenceLog:
                for label, loss in zip(loss_labels, pred_losses):
                    log_dict[label][batch_idx] = str(loss.cpu().numpy())

            progress.set_description(
                'Inference Averages for Epoch {}: '.format(epoch) +
                tools.format_dictionary_of_losses(
                    loss_labels,
                    np.array(statistics).mean(axis=0)))
            progress.update(1)

            if batch_idx == (args.inference_n_batches - 1):
                break

        progress.close()
        if args.save_inferenceLog:
            import json
            with open(f'{inference_folder}/log.json', 'w') as fp:
                json.dump(log_dict, fp, sort_keys=True, indent=4)
        return
Example #5
    def inference(args, data_loader, model, offset=0):
        model.eval()
        if args.save_flow or args.render_validation:
            flow_folder = out_path  # "./output/flo_rev" if args.reverse else "./output/flo"
            if not os.path.exists(flow_folder):
                os.makedirs(flow_folder)

        # visualization folder
        if args.inference_visualize:
            flow_vis_folder = out_path + "/" + "png/"
            if not os.path.exists(flow_vis_folder):
                os.makedirs(flow_vis_folder)

        args.inference_n_batches = np.inf if args.inference_n_batches < 0 else args.inference_n_batches

        progress = tqdm(data_loader,
                        ncols=100,
                        total=np.minimum(len(data_loader),
                                         args.inference_n_batches),
                        desc='Inferencing ',
                        leave=True,
                        position=offset)

        statistics = []
        total_loss = 0
        ph, pw = inference_dataset.ph, inference_dataset.pw
        for batch_idx, (data, target) in enumerate(progress):
            if args.cuda:
                data, target = [d.cuda(non_blocking=True) for d in data
                                ], [t.cuda(non_blocking=True) for t in target]
            data, target = [Variable(d)
                            for d in data], [Variable(t) for t in target]

            # when ground-truth flows are not available for inference_dataset,
            # the targets are set to all zeros. Thus, the losses are actually L1 or L2 norms of the computed optical flows,
            # depending on the type of loss norm passed in
            with torch.no_grad():
                losses, output = model(data[0], target[0], inference=True)

            losses = [torch.mean(loss_value) for loss_value in losses]
            loss_val = losses[0]  # Collect first loss for weight update
            total_loss += loss_val.item()
            loss_values = [v.item() for v in losses]

            statistics.append(loss_values)
            # import IPython; IPython.embed()
            if args.save_flow or args.render_validation:
                for i in range(args.inference_batch_size):
                    _pflow = output[i].data.cpu().numpy().transpose(1, 2, 0)
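                    # crop away the ph-row / pw-column padding added for the network input,
                    # so the saved flow matches the original frame size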
                    if ph != 0:
                        _pflow = _pflow[ph:-ph, :, :]
                    if pw != 0:
                        _pflow = _pflow[:, pw:-pw, :]
                    flow_utils.writeFlow(
                        join(
                            flow_folder, '%06d.flo' %
                            (batch_idx * args.inference_batch_size + i)),
                        _pflow)

                    # You can comment out the plt block in visulize_flow_file() for real-time visualization
                    if args.inference_visualize:
                        flow_utils.visulize_flow_file(
                            join(
                                flow_folder, '%06d.flo' %
                                (batch_idx * args.inference_batch_size + i)),
                            flow_vis_folder)

            progress.update(1)
            if batch_idx == (args.inference_n_batches - 1):
                break
        progress.close()
        return
Example #6
from utils import flow_utils  # assuming the flownet2-pytorch repo layout (utils/flow_utils.py)

def main():
    flow_filename = '../flownet2_validation_examples/NBA2K19_2019.01.31_23.50.52_frame124678.pred.flo'
    save_dir = '../'
    flow_utils.visulize_flow_file(flow_filename, save_dir)
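Example #6 only visualizes an existing .flo file via flow_utils. If you need the raw flow values, a minimal sketch of a reader for the Middlebury .flo format (read_flo is a hypothetical helper, not part of flow_utils; the format stores a float32 magic number 202021.25, int32 width and height, then interleaved u/v displacements):

import numpy as np

def read_flo(path):
    with open(path, 'rb') as f:
        magic = np.fromfile(f, np.float32, count=1)[0]
        assert magic == 202021.25, 'not a valid .flo file'
        w = int(np.fromfile(f, np.int32, count=1)[0])
        h = int(np.fromfile(f, np.int32, count=1)[0])
        data = np.fromfile(f, np.float32, count=2 * w * h)
    return data.reshape(h, w, 2)  # [..., 0] = horizontal (u), [..., 1] = vertical (v)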