Example #1
 def __init__(
     self,
     backbone: nn.Module,
     mlp: Optional[nn.Module] = None,
     neg_size: int = 4096,
     temperature: float = 0.07,
     bank_size: int = 1280000,
     dim: int = 2048,
     mmt: float = 0.999,
 ) -> None:
     """
     Args:
         backbone (nn.Module): backbone used to forward the input.
         mlp (nn.Module): multi-layer perceptron used in the memory bank
             instance discrimination model.
         neg_size (int): number of negative samples per instance.
         temperature (float): temperature to use for contrastive learning.
         bank_size (int): size of the memory bank, expected to be the same size as
             the training set.
         dim (int): channel dimension of the features.
         mmt (float): momentum used when updating the memory bank.
     """
     super().__init__()
     set_attributes(self, locals())
     self._init_mem_bank(bank_size, dim)
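For context, a minimal sketch of what a `_init_mem_bank(bank_size, dim)` helper typically sets up in instance-discrimination models: a `(bank_size, dim)` buffer of L2-normalized random embeddings, one row per training example. The initialization shown here is an assumption, not the source implementation.

import torch
import torch.nn as nn

class MemBankSketch(nn.Module):
    def __init__(self, bank_size: int = 16, dim: int = 8) -> None:
        super().__init__()
        stdv = 1.0 / (dim ** 0.5)
        bank = torch.rand(bank_size, dim).mul_(2 * stdv).add_(-stdv)
        # A buffer moves with .to(device) but is not updated by the optimizer;
        # the model would refresh rows manually using momentum `mmt`.
        self.register_buffer("memory", nn.functional.normalize(bank, dim=1))

bank = MemBankSketch()
print(bank.memory.shape)  # torch.Size([16, 8])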
Example #2
 def __init__(
     self,
     *,
     conv_a: Optional[nn.Module] = None,
     norm_a: Optional[nn.Module] = None,
     act_a: Optional[nn.Module] = None,
     conv_b: Optional[nn.Module] = None,
     norm_b: Optional[nn.Module] = None,
     act_b: Optional[nn.Module] = None,
     conv_c: Optional[nn.Module] = None,
     norm_c: Optional[nn.Module] = None,
 ) -> None:
     """
     Args:
         conv_a (nn.Module): convolutional module.
         norm_a (nn.Module): normalization module.
         act_a (nn.Module): activation module.
         conv_b (nn.Module): convolutional module.
         norm_b (nn.Module): normalization module.
         act_b (nn.Module): activation module.
         conv_c (nn.Module): convolutional module.
         norm_c (nn.Module): normalization module.
     """
     super().__init__()
     set_attributes(self, locals())
     assert all(op is not None for op in (self.conv_a, self.conv_b, self.conv_c))
     if self.norm_c is not None:
         # This flag is used for weight initialization.
         self.norm_c.block_final_bn = True
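A usage sketch for this block with concrete modules (the channel sizes and the a -> b -> c forward order are assumptions; the constructor itself only stores the modules):

import torch
import torch.nn as nn

conv_a = nn.Conv3d(64, 16, kernel_size=(3, 1, 1), padding=(1, 0, 0), bias=False)
norm_a, act_a = nn.BatchNorm3d(16), nn.ReLU(inplace=True)
conv_b = nn.Conv3d(16, 16, kernel_size=(1, 3, 3), padding=(0, 1, 1), bias=False)
norm_b, act_b = nn.BatchNorm3d(16), nn.ReLU(inplace=True)
conv_c = nn.Conv3d(16, 64, kernel_size=1, bias=False)
norm_c = nn.BatchNorm3d(64)  # would carry block_final_bn = True

x = torch.randn(1, 64, 4, 56, 56)  # (batch, channel, time, height, width)
y = act_a(norm_a(conv_a(x)))
y = act_b(norm_b(conv_b(y)))
y = norm_c(conv_c(y))  # no activation after the final norm
print(y.shape)  # torch.Size([1, 64, 4, 56, 56])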
Example #3
 def __init__(
     self,
     *,
     pre_conv: Optional[nn.Module] = None,
     pre_norm: Optional[nn.Module] = None,
     pre_act: Optional[nn.Module] = None,
     pool: Optional[nn.Module] = None,
     post_conv: Optional[nn.Module] = None,
     post_norm: Optional[nn.Module] = None,
     post_act: Optional[nn.Module] = None,
 ) -> None:
     """
     Args:
         pre_conv (nn.Module): convolutional module.
         pre_norm (nn.Module): normalization module.
         pre_act (nn.Module): activation module.
         pool (nn.Module): pooling module.
         post_conv (nn.Module): convolutional module.
         post_norm (nn.Module): normalization module.
         post_act (nn.Module): activation module.
     """
     super().__init__()
     set_attributes(self, locals())
     assert self.pre_conv is not None
     assert self.pool is not None
     assert self.post_conv is not None
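A sketch of plausible inputs for this pooled stem (the kernel sizes and the pre -> pool -> post forward order are assumptions):

import torch
import torch.nn as nn

pre_conv = nn.Conv3d(3, 64, kernel_size=(1, 7, 7), stride=(1, 2, 2),
                     padding=(0, 3, 3), bias=False)
pre_norm, pre_act = nn.BatchNorm3d(64), nn.ReLU(inplace=True)
pool = nn.MaxPool3d(kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1))
post_conv = nn.Conv3d(64, 64, kernel_size=(3, 1, 1), padding=(1, 0, 0), bias=False)

x = torch.randn(1, 3, 8, 224, 224)
y = post_conv(pool(pre_act(pre_norm(pre_conv(x)))))
print(y.shape)  # torch.Size([1, 64, 8, 56, 56])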
Example #4
 def __init__(
     self,
     *,
     conv_a: nn.Module,
     norm_a: nn.Module,
     act_a: nn.Module,
     conv_b: nn.ModuleList,
     norm_b: nn.ModuleList,
     act_b: nn.ModuleList,
     conv_c: nn.Module,
     norm_c: nn.Module,
     reduce_method: str = "sum",
 ) -> None:
     """
     Args:
         conv_a (nn.Module): convolutional module.
         norm_a (nn.Module): normalization module.
         act_a (nn.Module): activation module.
         conv_b (nn.ModuleList): convolutional module(s).
         norm_b (nn.ModuleList): normalization module(s).
         act_b (nn.ModuleList): activation module(s).
         conv_c (nn.Module): convolutional module.
         norm_c (nn.Module): normalization module.
         reduce_method (str): if multiple conv_b branches are used, reduce their
             outputs with `sum` or `cat`.
     """
     super().__init__()
     set_attributes(self, locals())
     assert all(
         op is not None for op in (self.conv_b, self.conv_c)
     ), "conv_b and conv_c must not be None"
     assert reduce_method in ["sum", "cat"]
     if self.norm_c is not None:
         # This flag is used for weight initialization.
         self.norm_c.block_final_bn = True
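A sketch of the multi-branch middle stage and the two reduce options (how the stored `nn.ModuleList` is consumed in `forward` is an assumption):

import torch
import torch.nn as nn

conv_b = nn.ModuleList(
    [nn.Conv3d(16, 16, kernel_size=(1, 3, 3), padding=(0, 1, 1), bias=False)
     for _ in range(2)]
)
x = torch.randn(1, 16, 4, 28, 28)
outs = [branch(x) for branch in conv_b]
summed = torch.stack(outs).sum(dim=0)  # reduce_method="sum": channels stay 16
catted = torch.cat(outs, dim=1)        # reduce_method="cat": channels become 32
print(summed.shape, catted.shape)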
Example #5
 def __init__(
     self,
     mlp: nn.Module,
     backbone: Optional[nn.Module] = None,
     temperature: float = 0.07,
 ) -> None:
     super().__init__()
     set_attributes(self, locals())
Example #6
 def __init__(
     self,
     mlp: nn.Module,
     backbone: Optional[nn.Module] = None,
     temperature: float = 0.07,
 ) -> None:
     super().__init__()

     torch._C._log_api_usage_once("PYTORCHVIDEO.model.SimCLR.__init__")

     set_attributes(self, locals())
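The logging call identifies this as pytorchvideo's SimCLR module. A sketch of a typical projection head and how the temperature scales contrastive logits (the 2048-d input width and the loss details are assumptions; only mlp, backbone, and temperature come from the constructor above):

import torch
import torch.nn as nn

mlp = nn.Sequential(
    nn.Linear(2048, 2048),
    nn.ReLU(inplace=True),
    nn.Linear(2048, 128),
)
# With backbone=None the module would project precomputed features directly.
feats = nn.functional.normalize(mlp(torch.randn(8, 2048)), dim=1)
logits = feats @ feats.t() / 0.07  # temperature sharpens the similarity distribution
print(logits.shape)  # torch.Size([8, 8])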
Example #7
 def __init__(
     self,
     *,
     multipathway_blocks: nn.ModuleList,
     multipathway_fusion: Optional[nn.Module],
 ) -> None:
     """
     Args:
         multipathway_blocks (nn.ModuleList): list of blocks, one per pathway.
         multipathway_fusion (nn.Module): fusion module.
     """
     super().__init__()
     set_attributes(self, locals())
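A sketch of the multipathway convention: block i consumes input i, and a fusion module (when present) combines the results (the per-index forward shown is an assumption):

import torch
import torch.nn as nn

multipathway_blocks = nn.ModuleList(
    [nn.Conv3d(64, 64, kernel_size=1), nn.Conv3d(8, 8, kernel_size=1)]
)
inputs = [
    torch.randn(1, 64, 4, 56, 56),  # Slow pathway: more channels, fewer frames
    torch.randn(1, 8, 32, 56, 56),  # Fast pathway: fewer channels, more frames
]
outputs = [block(x) for block, x in zip(multipathway_blocks, inputs)]
print([tuple(o.shape) for o in outputs])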
Example #8
 def __init__(
     self,
     conv_fast_to_slow: nn.Module,
     norm: Optional[nn.Module] = None,
     activation: Optional[nn.Module] = None,
 ) -> None:
     """
     Args:
         conv_fast_to_slow (nn.Module): convolution used to perform fusion.
         norm (nn.Module): normalization module.
         activation (nn.Module): activation module.
     """
     super().__init__()
     set_attributes(self, locals())
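A sketch of a typical fast-to-slow fusion convolution: a time-strided Conv3d brings the Fast features to the Slow frame rate before concatenation (the alpha=4 temporal stride and the channel sizes are assumptions):

import torch
import torch.nn as nn

conv_fast_to_slow = nn.Conv3d(
    8, 16, kernel_size=(7, 1, 1), stride=(4, 1, 1), padding=(3, 0, 0), bias=False
)
slow = torch.randn(1, 64, 4, 56, 56)  # (B, C, T, H, W)
fast = torch.randn(1, 8, 16, 56, 56)
fused = torch.cat([slow, conv_fast_to_slow(fast)], dim=1)
print(fused.shape)  # torch.Size([1, 80, 4, 56, 56])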
Example #9
 def __init__(
     self,
     retain_list: bool = False,
     pool: Optional[nn.ModuleList] = None,
     dim: int = 1,
 ) -> None:
     """
     Args:
         retain_list (bool): if True, return the concatenated tensor in a list.
         pool (nn.ModuleList): if not None, list of pooling modules, one per
             pathway, applied before concatenation.
         dim (int): dimension along which to perform concatenation.
     """
     super().__init__()
     set_attributes(self, locals())
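A sketch of pooling each pathway before concatenating along dim=1 (the pooling choice is an assumption):

import torch
import torch.nn as nn

pool = nn.ModuleList([nn.AdaptiveAvgPool3d(1), nn.AdaptiveAvgPool3d(1)])
inputs = [torch.randn(1, 64, 4, 7, 7), torch.randn(1, 8, 16, 7, 7)]
out = torch.cat([p(x) for p, x in zip(pool, inputs)], dim=1)
# retain_list=True would return [out] instead of out.
print(out.shape)  # torch.Size([1, 72, 1, 1, 1])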
Example #10
 def __init__(
     self,
     *,
     multipathway_blocks: nn.ModuleList,
     multipathway_fusion: Optional[nn.Module],
     inplace: bool = True,
 ) -> None:
     """
     Args:
         multipathway_blocks (nn.ModuleList): list of blocks, one per pathway.
         multipathway_fusion (nn.Module): fusion module.
         inplace (bool): if True, update the input list in place instead of
             making a copy.
     """
     super().__init__()
     set_attributes(self, locals())
Example #11
 def __init__(
     self,
     pool: Optional[nn.Module] = None,
     dropout: Optional[nn.Module] = None,
     proj: Optional[nn.Module] = None,
     activation: Optional[nn.Module] = None,
     output_pool: Optional[nn.Module] = None,
 ) -> None:
     """
     Args:
         pool (nn.Module): pooling module.
         dropout (nn.Module): dropout module.
         proj (nn.Module): projection module.
         activation (nn.Module): activation module.
         output_pool (nn.Module): pooling module for the output.
     """
     super().__init__()
     set_attributes(self, locals())
     assert self.proj is not None
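A sketch of a concrete head built from these pieces (the pool -> dropout -> proj order and the sizes are assumptions; only proj is mandatory per the assert):

import torch
import torch.nn as nn

pool = nn.AdaptiveAvgPool3d(1)
dropout = nn.Dropout(0.5)
proj = nn.Linear(2048, 400)  # e.g. a 400-class output

x = torch.randn(2, 2048, 4, 7, 7)
logits = proj(dropout(pool(x).flatten(1)))  # (B, 2048) -> (B, 400)
print(logits.shape)  # torch.Size([2, 400])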
Example #12
 def __init__(
     self,
     branch1_conv: Optional[nn.Module] = None,
     branch1_norm: Optional[nn.Module] = None,
     branch2: Optional[nn.Module] = None,
     activation: Optional[nn.Module] = None,
     branch_fusion: Optional[Callable] = None,
 ) -> None:
     """
     Args:
         branch1_conv (nn.Module): convolutional module in branch1.
         branch1_norm (nn.Module): normalization module in branch1.
         branch2 (nn.Module): bottleneck block module in branch2.
         activation (nn.Module): activation module.
         branch_fusion (Callable): a callable or layer that combines the
             branch1 and branch2 outputs.
     """
     super().__init__()
     set_attributes(self, locals())
     assert self.branch2 is not None
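A sketch of the residual pattern these modules describe: branch2 transforms the input, branch1 is the (here, identity) shortcut, and branch_fusion combines them before the activation (this forward logic is an assumption):

import torch
import torch.nn as nn

branch2 = nn.Conv3d(64, 64, kernel_size=1, bias=False)  # stands in for a bottleneck

def branch_fusion(a, b):  # additive fusion, as in standard residual nets
    return a + b

activation = nn.ReLU(inplace=True)

x = torch.randn(1, 64, 4, 56, 56)
y = activation(branch_fusion(x, branch2(x)))  # branch1_conv=None: identity shortcut
print(y.shape)  # torch.Size([1, 64, 4, 56, 56])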
Example #13
 def __init__(
     self,
     *,
     conv_t: Optional[nn.Module] = None,
     norm: Optional[nn.Module] = None,
     activation: Optional[nn.Module] = None,
     conv_xy: Optional[nn.Module] = None,
     conv_xy_first: bool = False,
 ) -> None:
     """
     Args:
         conv_t (nn.Module): temporal convolution module.
         norm (nn.Module): normalization module.
         activation (nn.Module): activation module.
         conv_xy (nn.Module): spatial convolution module.
         conv_xy_first (bool): if True, the spatial convolution comes before
             the temporal convolution.
     """
     super().__init__()
     set_attributes(self, locals())
     assert self.conv_t is not None
     assert self.conv_xy is not None
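A sketch of the (2+1)D factorization: a full 3x3x3 convolution split into a temporal 3x1x1 and a spatial 1x3x3 convolution with norm/activation in between (the 64-channel width is an assumption):

import torch
import torch.nn as nn

conv_t = nn.Conv3d(64, 64, kernel_size=(3, 1, 1), padding=(1, 0, 0), bias=False)
norm = nn.BatchNorm3d(64)
activation = nn.ReLU(inplace=True)
conv_xy = nn.Conv3d(64, 64, kernel_size=(1, 3, 3), padding=(0, 1, 1), bias=False)

x = torch.randn(1, 64, 8, 28, 28)
# Default conv_xy_first=False: temporal conv, then norm/act, then spatial conv.
y = conv_xy(activation(norm(conv_t(x))))
print(y.shape)  # torch.Size([1, 64, 8, 28, 28])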
Example #14
 def __init__(
     self,
     slowfast_channel_reduction_ratio: int,
     conv_fusion_channel_ratio: float,
     conv_kernel_size: Tuple[int, ...],
     conv_stride: Tuple[int, ...],
     norm: Callable = nn.BatchNorm3d,
     norm_eps: float = 1e-5,
     norm_momentum: float = 0.1,
     activation: Callable = nn.ReLU,
     max_stage_idx: int = 3,
 ) -> None:
     """
     Given a list of two tensors from the Slow and Fast pathways, fuses information
     from the Fast pathway into the Slow pathway through a convolution followed by
     concatenation, then returns the fused list of tensors from the Slow and Fast
     pathways in order.
     Args:
         slowfast_channel_reduction_ratio (int): channel reduction ratio between the
             Slow and Fast pathways, used to compute
             conv_dim_in = fusion_dim_in // slowfast_channel_reduction_ratio.
         conv_fusion_channel_ratio (float): channel ratio for the convolution used to
             fuse from the Fast pathway to the Slow pathway.
         conv_kernel_size (Tuple[int, ...]): kernel size of the convolution used to
             fuse from the Fast pathway to the Slow pathway.
         conv_stride (Tuple[int, ...]): stride of the convolution used to fuse from
             the Fast pathway to the Slow pathway.
         norm (callable): a callable that constructs normalization layer, examples
             include nn.BatchNorm3d, None (not performing normalization).
         norm_eps (float): normalization epsilon.
         norm_momentum (float): normalization momentum.
         activation (callable): a callable that constructs activation layer, examples
             include: nn.ReLU, nn.Softmax, nn.Sigmoid, and None (not performing
             activation).
         max_stage_idx (int): returns an identity module for stage indices beyond
             this value.
     """
     set_attributes(self, locals())
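A worked example of the channel arithmetic the docstring describes (fusion_dim_in=64 is an illustrative assumption):

fusion_dim_in = 64
slowfast_channel_reduction_ratio = 8
conv_fusion_channel_ratio = 2.0

conv_dim_in = fusion_dim_in // slowfast_channel_reduction_ratio  # Fast channels: 8
conv_dim_out = int(conv_dim_in * conv_fusion_channel_ratio)      # fused channels: 16
print(conv_dim_in, conv_dim_out)  # 8 16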
Example #15
 def __init__(
     self,
     *,
     conv_theta: nn.Module,
     conv_phi: nn.Module,
     conv_g: nn.Module,
     conv_out: nn.Module,
     pool: Optional[nn.Module] = None,
     norm: Optional[nn.Module] = None,
     instantiation: str = "dot_product",
 ) -> None:
     super().__init__()
     set_attributes(self, locals())
     assert None not in (conv_theta, conv_phi, conv_g, conv_out)
     assert instantiation in (
         "dot_product",
         "softmax",
     ), "Unknown instantiation type {}".format(instantiation)
     assert (len({
         self.conv_theta.out_channels,
         self.conv_phi.out_channels,
         self.conv_g.out_channels,
         self.conv_out.in_channels,
     }) == 1), "Nonlocal convolution's input/ output dimension mismatch."