Code example #1
0
File: raft.py  Project: christian-rauch/RAFT
    def __init__(self, args):
        """Build the RAFT optical-flow model.

        Args:
            args: Namespace-like config. ``args.small`` selects the compact
                variant; ``corr_levels``/``corr_radius`` are written back onto
                it, and ``dropout``/``alternate_corr`` are defaulted if absent.
        """
        super(RAFT, self).__init__()
        self.args = args

        # Variant-specific feature dimensions and correlation-volume settings.
        # NOTE: corr_levels/corr_radius are stored back on args (mutates the
        # caller's namespace), matching the upstream RAFT convention.
        if args.small:
            self.hidden_dim = hdim = 96
            self.context_dim = cdim = 64
            args.corr_levels, args.corr_radius = 4, 3
        else:
            self.hidden_dim = hdim = 128
            self.context_dim = cdim = 128
            args.corr_levels, args.corr_radius = 4, 4

        # Fill in optional settings that older configs may not carry.
        for name, default in (('dropout', 0), ('alternate_corr', False)):
            if name not in self.args:
                setattr(self.args, name, default)

        # Feature network, context network, and update block.
        if args.small:
            self.fnet = SmallEncoder(output_dim=128, norm_fn='instance',
                                     dropout=args.dropout)
            self.cnet = SmallEncoder(output_dim=hdim + cdim, norm_fn='none',
                                     dropout=args.dropout)
            self.update_block = SmallUpdateBlock(self.args, hidden_dim=hdim)
        else:
            self.fnet = BasicEncoder(output_dim=256, norm_fn='instance',
                                     dropout=args.dropout)
            self.cnet = BasicEncoder(output_dim=hdim + cdim, norm_fn='batch',
                                     dropout=args.dropout)
            self.update_block = BasicUpdateBlock(self.args, hidden_dim=hdim)
Code example #2
0
    def __init__(self, args):
        """Build the RAFT model, optionally loading pretrained weights.

        Args:
            args: Namespace-like config. Reads ``small``, ``load_pretrained``
                (checkpoint path or None) and ``freeze_raft``; writes
                ``corr_levels``/``corr_radius`` back onto it and defaults
                ``dropout`` to 0 when the attribute is missing.
        """
        super(RAFT, self).__init__()
        self.args = args

        # Variant-specific feature dimensions and correlation-volume settings.
        if args.small:
            self.hidden_dim = hdim = 96
            self.context_dim = cdim = 64
            args.corr_levels = 4
            args.corr_radius = 3

        else:
            self.hidden_dim = hdim = 128
            self.context_dim = cdim = 128
            args.corr_levels = 4
            args.corr_radius = 4

        # BUG FIX: the original tested `'dropout' not in args._get_kwargs()`.
        # _get_kwargs() returns a list of (name, value) tuples, so the string
        # 'dropout' is never a member and the test was always True — any
        # user-supplied args.dropout was silently overwritten with 0. Use the
        # public attribute check instead.
        if not hasattr(args, 'dropout'):
            args.dropout = 0

        # Feature network, context network, and update block.
        if args.small:
            self.fnet = SmallEncoder(output_dim=128,
                                     norm_fn='instance',
                                     dropout=args.dropout)
            self.cnet = SmallEncoder(output_dim=hdim + cdim,
                                     norm_fn='none',
                                     dropout=args.dropout)
            self.update_block = SmallUpdateBlock(self.args, hidden_dim=hdim)

        else:
            self.fnet = BasicEncoder(output_dim=256,
                                     norm_fn='instance',
                                     dropout=args.dropout)
            self.cnet = BasicEncoder(output_dim=hdim + cdim,
                                     norm_fn='batch',
                                     dropout=args.dropout)
            self.update_block = BasicUpdateBlock(self.args, hidden_dim=hdim)

        # Load pretrained weights, stripping the leading 'module.' that
        # nn.DataParallel adds to every key (presumably the checkpoint was
        # saved from a DataParallel-wrapped model — verify against the
        # checkpoint producer).
        if args.load_pretrained is not None:
            state_dict = torch.load(args.load_pretrained)
            from collections import OrderedDict
            new_state_dict = OrderedDict()
            for k, v in state_dict.items():
                name = k[7:]  # remove `module.`
                new_state_dict[name] = v
            # load params
            self.load_state_dict(new_state_dict)

        # Disable the learned convex-upsampling mask head by replacing it
        # with an identity (the custom upsampler below is used instead).
        self.update_block.mask = nn.Sequential()

        # Optionally freeze every RAFT parameter so only the upsampler trains.
        if args.freeze_raft:
            for p in self.parameters():
                p.requires_grad = False

        # Upsampler
        self.upsampler = get_upsampler(2, 128, args)
        self.data_idx = 0
Code example #3
0
    def __init__(self, requires_grad=False):
        """Build a fixed-size (non-small) RAFT model.

        Args:
            requires_grad: When False (default), every parameter is frozen
                so the model acts as a fixed feature/flow extractor.
        """
        super(RAFT, self).__init__()

        # Hard-wired "large" RAFT configuration (no args namespace here).
        hdim, cdim = 128, 128
        self.hidden_dim = hdim
        self.context_dim = cdim
        corr_levels, corr_radius = 4, 4
        self.corr_radius = corr_radius

        # Feature network, context network, and update block.
        self.fnet = BasicEncoder(output_dim=256, norm_fn='instance', dropout=0)
        self.cnet = BasicEncoder(output_dim=hdim + cdim,
                                 norm_fn='batch',
                                 dropout=0)
        self.update_block = BasicUpdateBlock(corr_levels,
                                             corr_radius,
                                             hidden_dim=hdim)

        # Freeze all weights unless gradients were explicitly requested.
        if not requires_grad:
            for param in self.parameters():
                param.requires_grad = False