Example #1
0
    def __init__(self, args):
        """Wrap a pretrained FlowNet2S flow predictor plus a warping layer."""
        super(DummyModel, self).__init__()
        self.args = args

        # Flow predictor, restored from the released FlowNet2-S weights.
        self.flownet = FlowNet2S(args)
        state = torch.load("./checkpoints/FlowNet2-S_checkpoint.pth.tar")
        self.flownet.load_state_dict(state['state_dict'])
        # self.flownet.training = False
        # for param in self.flownet.parameters():
        #     param.requires_grad = False

        # Warping layer; in fp16 mode the warp runs in fp32 via the
        # tofp32/tofp16 wrappers.
        self.resample1 = (nn.Sequential(tofp32(), Resample2d(), tofp16())
                          if args.fp16 else Resample2d())

        self.rgb_max = 255
Example #2
0
    def __init__(self, args, batchNorm=False, div_flow=20.):
        """FlowNet2CSS: a FlowNetC block refined by two stacked FlowNetS blocks.

        Args:
            args: namespace providing at least `rgb_max` and `fp16`.
            batchNorm: whether the sub-networks use batch normalization.
            div_flow: scale factor applied to the predicted flow.
        """
        super(FlowNet2CSS, self).__init__()
        self.batchNorm = batchNorm
        self.div_flow = div_flow
        self.rgb_max = args.rgb_max
        self.args = args

        self.channelnorm = ChannelNorm()

        # First Block (FlowNetC)
        self.flownetc = FlowNetC.FlowNetC(args, batchNorm=self.batchNorm)
        self.upsample1 = nn.Upsample(scale_factor=4,
                                     mode='bilinear',
                                     align_corners=True)

        # In fp16 mode the warp is computed in fp32 (tofp32/tofp16 wrappers).
        if args.fp16:
            self.resample1 = nn.Sequential(tofp32(), Resample2d(), tofp16())
        else:
            self.resample1 = Resample2d()

        # Block (FlowNetS1)
        self.flownets_1 = FlowNetS.FlowNetS(args, batchNorm=self.batchNorm)
        self.upsample2 = nn.Upsample(scale_factor=4,
                                     mode='bilinear',
                                     align_corners=True)
        if args.fp16:
            self.resample2 = nn.Sequential(tofp32(), Resample2d(), tofp16())
        else:
            self.resample2 = Resample2d()

        # Block (FlowNetS2)
        self.flownets_2 = FlowNetS.FlowNetS(args, batchNorm=self.batchNorm)
        self.upsample3 = nn.Upsample(scale_factor=4, mode='nearest')

        # Initialize conv/deconv weights (Xavier) and biases (uniform).
        # FIX: use the in-place `uniform_` / `xavier_uniform_` initializers;
        # the non-underscore variants are deprecated (and removed in recent
        # PyTorch). This also matches the FlowNet2 init loop in this file.
        for m in self.modules():
            if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
                if m.bias is not None:
                    init.uniform_(m.bias)
                init.xavier_uniform_(m.weight)
def detect_occlusion(fw_flow, bw_flow):
    """Estimate an occlusion mask from a forward/backward flow pair.

    Args:
        fw_flow: flow img1 -> img2 (H x W x 2 array).
        bw_flow: flow img2 -> img1 (H x W x 2 array).

    Returns:
        H x W array with 1 at pixels judged occluded, 0 elsewhere.
    """
    # Warp the forward flow into img2's frame; no gradients are needed.
    with torch.no_grad():
        warp = Resample2d().cuda()
        fw_t = img2tensor(fw_flow).cuda()
        bw_t = img2tensor(bw_flow).cuda()
        fw_flow_w = tensor2img(warp(fw_t, bw_t))

    # Forward-backward consistency: at a consistent pixel the warped forward
    # flow and the backward flow should roughly cancel.
    sum_mag = compute_flow_magnitude(fw_flow_w + bw_flow)
    fw_w_mag = compute_flow_magnitude(fw_flow_w)
    bw_mag = compute_flow_magnitude(bw_flow)
    inconsistent = sum_mag > 0.01 * (fw_w_mag + bw_mag) + 0.5

    # Motion boundaries: large spatial gradients of the backward flow.
    fx_du, fx_dv, fy_du, fy_dv = compute_flow_gradients(bw_flow)
    grad_mag = (fx_du ** 2 + fx_dv ** 2) + (fy_du ** 2 + fy_dv ** 2)
    boundary = grad_mag > 0.01 * bw_mag + 0.002

    # Either criterion marks the pixel as occluded.
    occluded = np.logical_or(inconsistent, boundary)
    occlusion = np.zeros((fw_flow.shape[0], fw_flow.shape[1]))
    occlusion[occluded] = 1
    return occlusion
    # NOTE(review): fragment of a warp-error evaluation routine; its `def`
    # line is not visible in this chunk and the final statement is truncated.
    output_dir = os.path.join(opts.data_dir, opts.phase, opts.method,
                              opts.task, opts.dataset)

    ## print average if result already exists
    metric_filename = os.path.join(output_dir, "WarpError.txt")
    if os.path.exists(metric_filename) and not opts.redo:
        print("Output %s exists, skip..." % metric_filename)

        # Show the last line (the stored summary) of the existing metric file.
        cmd = 'tail -n1 %s' % metric_filename
        utils.run_cmd(cmd)
        sys.exit()

    ## flow warping layer
    device = torch.device("cuda" if opts.cuda else "cpu")
    flow_warping = Resample2d().to(device)

    ### load video list
    list_filename = os.path.join(opts.list_dir,
                                 "%s_%s.txt" % (opts.dataset, opts.phase))
    with open(list_filename) as f:
        video_list = [line.rstrip() for line in f.readlines()]

    ### start evaluation
    # One accumulated error value per video.
    err_all = np.zeros(len(video_list))

    for v in range(len(video_list)):

        video = video_list[v]

        # NOTE(review): truncated mid-call — continuation lines are missing.
        frame_dir = os.path.join(opts.data_dir, opts.phase, opts.method,
    def __init__(self, args, batchNorm=False, div_flow=20.):
        """FlowNet2: FlowNetC + 2x FlowNetS, fused with FlowNetSD via FlowNetFusion.

        Args:
            args: namespace providing at least `rgb_max` and `fp16`.
            batchNorm: whether the sub-networks use batch normalization.
            div_flow: scale factor applied to the predicted flow.
        """
        super(FlowNet2, self).__init__()
        self.batchNorm = batchNorm
        self.div_flow = div_flow
        self.rgb_max = args.rgb_max
        self.args = args

        self.channelnorm = ChannelNorm()

        def make_resample():
            # Warping layer; in fp16 mode the warp is computed in fp32 via
            # the tofp32/tofp16 wrappers. Extracted because the original
            # repeated this construction four times.
            if args.fp16:
                return nn.Sequential(tofp32(), Resample2d(), tofp16())
            return Resample2d()

        # First Block (FlowNetC)
        self.flownetc = FlowNetC.FlowNetC(args, batchNorm=self.batchNorm)
        self.upsample1 = nn.Upsample(scale_factor=4, mode='bilinear')
        self.resample1 = make_resample()

        # Block (FlowNetS1)
        self.flownets_1 = FlowNetS.FlowNetS(args, batchNorm=self.batchNorm)
        self.upsample2 = nn.Upsample(scale_factor=4, mode='bilinear')
        self.resample2 = make_resample()

        # Block (FlowNetS2)
        self.flownets_2 = FlowNetS.FlowNetS(args, batchNorm=self.batchNorm)

        # Block (FlowNetSD)
        self.flownets_d = FlowNetSD.FlowNetSD(args, batchNorm=self.batchNorm)
        self.upsample3 = nn.Upsample(scale_factor=4, mode='nearest')
        self.upsample4 = nn.Upsample(scale_factor=4, mode='nearest')
        self.resample3 = make_resample()
        self.resample4 = make_resample()

        # Block (FlowNetFusion)
        self.flownetfusion = FlowNetFusion.FlowNetFusion(args, batchNorm=self.batchNorm)

        # Initialize conv/deconv weights (Xavier) and biases (uniform),
        # in place; the two original branches were identical, so they are
        # merged into a single isinstance check.
        for m in self.modules():
            if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
                if m.bias is not None:
                    init.uniform_(m.bias)
                init.xavier_uniform_(m.weight)
Example #6
0
from networks.resample2d_package.resample2d import Resample2d
import networks

class Object(object):
    """Empty mutable container used as an ad-hoc argparse-style namespace."""
""" Flownet """
args = Object()
args.rgb_max = 1.0
args.fp16 = False
FlowNet = networks.FlowNet2(args, requires_grad=False)
model_filename = os.path.join("pretrained_models", "FlowNet2_checkpoint.pth.tar")
checkpoint = torch.load(model_filename)
FlowNet.load_state_dict(checkpoint['state_dict'])
FlowNet = FlowNet.cuda()
""" Submodules """
flow_warping = Resample2d().cuda()
downsampler = nn.AvgPool2d((2, 2), stride=2).cuda()

def norm(t):
    """Squared L2 norm over dim 1 (channels), keeping that dim in the shape."""
    squared = t * t
    return squared.sum(dim=1, keepdim=True)

def repackage_hidden(h):
    """Wraps hidden states in new Variables, to detach them from their history.

    A tensor is detached directly; any other input is assumed iterable and
    processed recursively, with the result returned as a tuple.
    """
    if not isinstance(h, torch.Tensor):
        return tuple(repackage_hidden(part) for part in h)
    return h.detach()

def train_lstm_epoch(epoch, data_loader, model, criterion_L1, criterion_ssim, optimizer, opt):
    # NOTE(review): this definition is truncated in the visible chunk — only
    # the loss-weight configuration below survives; do not restyle blindly.

    # Weights for the short-term, long-term, and flow loss terms.
    opt.w_ST, opt.w_LT, opt.w_Flow = 1.0, 1.0, 10.0
Example #7
0
    def __init__(self, args):
        """InterpolNetOld: frozen FlowNet2 flow estimator + residual refinement.

        Args:
            args: namespace providing at least `fp16` (plus whatever the
                FlowNet2 constructor reads).
        """
        super(InterpolNetOld, self).__init__()
        self.args = args

        # flownet predictor: restore released FlowNet2 weights and freeze them.
        self.flownet = FlowNet2(args)
        checkpoint = torch.load("./checkpoints/FlowNet2_checkpoint.pth.tar")
        self.flownet.load_state_dict(checkpoint['state_dict'])
        # NOTE(review): setting .training directly flags only the top module;
        # self.flownet.eval() would also switch submodules — confirm intent
        # before changing, so the original behavior is preserved here.
        self.flownet.training = False
        for param in self.flownet.parameters():
            param.requires_grad = False

        # Warping layer; in fp16 mode the warp runs in fp32 (tofp32/tofp16).
        if args.fp16:
            self.resample1 = nn.Sequential(tofp32(), Resample2d(), tofp16())
        else:
            self.resample1 = Resample2d()

        res_kernel_number = 128

        def conv3x3(in_channels, out_channels):
            # 3x3, stride-1, padding-1 conv (spatial size preserved); the
            # original spelled this construction out twelve times.
            return nn.Conv2d(in_channels, out_channels,
                             kernel_size=3, stride=1, padding=1)

        # Residual refinement branch for the first input frame.
        self.convResIn_img1 = conv3x3(3, res_kernel_number)
        self.resBlock_img1 = BasicResBlock(res_kernel_number)
        self.convResOut_img1 = conv3x3(res_kernel_number, 3)

        # Residual refinement branch for the second input frame.
        self.convResIn_img2 = conv3x3(3, res_kernel_number)
        self.resBlock_img2 = BasicResBlock(res_kernel_number)
        self.convResOut_img2 = conv3x3(res_kernel_number, 3)

        # Residual refinement branch for the intermediate frame.
        self.convResIn_mid = conv3x3(3, res_kernel_number)
        self.resBlock_mid = BasicResBlock(res_kernel_number)
        self.convResOut_mid = conv3x3(res_kernel_number, 3)

        # Fusion branch: consumes the three 3-channel streams concatenated
        # (9 input channels) and emits the final 3-channel image.
        self.convResIn_final = conv3x3(9, res_kernel_number)
        self.resBlock_final = BasicResBlock(res_kernel_number)
        self.convResOut_final = conv3x3(res_kernel_number, 3)

        self.relu = nn.ReLU()

        self.rgb_max = 255