Example #1
    def __init__(self, in_shape, id, normalize):
        super().__init__(in_shape, id)
        bias = not normalize
        self._in_shape = in_shape
        self._out_shape = None
        self.conv1 = Conv2d(in_shape[0], 32, 7, stride=2, padding=1, bias=bias)
        self.conv2 = Conv2d(32, 32, 3, stride=2, padding=1, bias=bias)
        self.conv3 = Conv2d(32, 32, 3, stride=2, padding=1, bias=bias)
        self.conv4 = Conv2d(32, 32, 3, stride=2, padding=1, bias=bias)

        if normalize == "bn":
            self.bn1 = BatchNorm2d(32)
            self.bn2 = BatchNorm2d(32)
            self.bn3 = BatchNorm2d(32)
            self.bn4 = BatchNorm2d(32)
        elif normalize == "gn":
            self.bn1 = GroupNorm(8, 32)
            self.bn2 = GroupNorm(8, 32)
            self.bn3 = GroupNorm(8, 32)
            self.bn4 = GroupNorm(8, 32)
        else:
            self.bn1 = Identity()
            self.bn2 = Identity()
            self.bn3 = Identity()
            self.bn4 = Identity()

        relu_gain = init.calculate_gain("relu")
        self.conv1.weight.data.mul_(relu_gain)
        self.conv2.weight.data.mul_(relu_gain)
        self.conv3.weight.data.mul_(relu_gain)
        self.conv4.weight.data.mul_(relu_gain)
Example #2
 def __init__(self, num_channels):
     super(GroupNormBlock, self).__init__()
     if n_divs == 1:  # net_type is real
         # self.gn = BatchNorm2d(num_features=num_channels, **bnArgs)
         self.gn = GroupNorm(num_groups=32, num_channels=num_channels, eps=bnArgs['eps'])
     else:
         self.gn = GroupNorm(num_groups=n_divs, num_channels=num_channels, eps=bnArgs['eps'])
     self.num_features = num_channels
Example #3
    def __init__(self,
                 in_channels,
                 bottleneck_channels,
                 out_channels,
                 num_groups=1,
                 stride_in_1x1=True,
                 stride=1,
                 padding=1,
                 dilation=1):
        super(DeformConvBottleneckWithGroupNorm, self).__init__()

        self.downsample = None
        if in_channels != out_channels:
            self.downsample = nn.Sequential(
                Conv2d(in_channels,
                       out_channels,
                       kernel_size=1,
                       stride=stride,
                       bias=False),
                GroupNorm(Global_Group_Num, out_channels),
            )

        # The original MSRA ResNet models have stride in the first 1x1 conv
        # The subsequent fb.torch.resnet and Caffe2 ResNe[X]t implementations have
        # stride in the 3x3 conv
        stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride)

        self.conv1 = Conv2d(
            in_channels,
            bottleneck_channels,
            kernel_size=1,
            stride=stride_1x1,
            bias=False,
        )
        self.gn1 = GroupNorm(Global_Group_Num, bottleneck_channels)
        # TODO: specify init for the above

        self.conv2 = DeformConv2d(bottleneck_channels,
                                  bottleneck_channels,
                                  kernel_size=3,
                                  stride=stride_3x3,
                                  padding=padding,
                                  dilation=dilation,
                                  use_bias=False)
        self.gn2 = GroupNorm(Global_Group_Num, bottleneck_channels)

        self.conv3 = Conv2d(bottleneck_channels,
                            out_channels,
                            kernel_size=1,
                            bias=False)
        self.gn3 = GroupNorm(Global_Group_Num, out_channels)

        self.reset_parameters()
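The comments above describe two conventions for where the stride goes in a bottleneck block. A minimal hedged sketch of that choice, using only the tuple-unpacking line from the constructor (the helper name is hypothetical):

def split_stride(stride, stride_in_1x1):
    # MSRA ResNet: stride in the first 1x1 conv; fb.torch / Caffe2 ResNe[X]t: stride in the 3x3 conv
    return (stride, 1) if stride_in_1x1 else (1, stride)

print(split_stride(2, True))   # (2, 1)
print(split_stride(2, False))  # (1, 2)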
Example #4
    def __init__(self, device, size, getRawData=False, mode='udacity'):
        super(Challenge, self).__init__()
        if mode == 'udacity':
            self.fc1 = Linear(8295, 128)
            self.fc2 = Linear(1938, 128)
            self.fc3 = Linear(408, 128)
            self.fc4 = Linear(4480, 128)
            self.fc5 = Linear(4480, 1024)
        else:
            self.fc1 = Linear(6195, 128)
            self.fc2 = Linear(1428, 128)
            self.fc3 = Linear(288, 128)
            self.fc4 = Linear(2560, 128)
            self.fc5 = Linear(2560, 1024)
        self.conv1 = Conv3d(size,
                            64,
                            kernel_size=(3, 12, 12),
                            stride=(1, 6, 6))
        self.conv2 = Conv2d(64, 64, kernel_size=(5, 5), stride=(2, 2))
        self.conv3 = Conv2d(64, 64, kernel_size=(5, 5), stride=(2, 2))
        self.conv4 = Conv2d(64, 64, kernel_size=(5, 5), stride=(2, 2))

        self.fc6 = Linear(1024, 512)
        self.fc7 = Linear(512, 256)
        self.fc8 = Linear(256, 128)
        self.fc9 = Linear(258, 1)
        self.lstm1 = LSTM(130, 128, 32)

        self.h1 = torch.zeros(32, 1, 128).to(device)
        self.c1 = torch.zeros(32, 1, 128).to(device)
        self.drop = Dropout3d(.25)
        self.elu = ELU()
        self.relu = ReLU()
        self.laynorm = GroupNorm(1, 128)
        self.getRawData = getRawData
Example #5
    def __init__(self, device, size, outNum, batch=None):
        super(Challenge, self).__init__()
        self.fc1 = Linear(8295, 128)
        self.fc2 = Linear(1938, 128)
        self.fc3 = Linear(408, 128)
        self.fc4 = Linear(4480, 128)
        self.fc5 = Linear(4480, 1024)

        self.conv1 = Conv3d(size,
                            64,
                            kernel_size=(3, 12, 12),
                            stride=(1, 6, 6))
        self.conv2 = Conv2d(64, 64, kernel_size=(5, 5), stride=(2, 2))
        self.conv3 = Conv2d(64, 64, kernel_size=(5, 5), stride=(2, 2))
        self.conv4 = Conv2d(64, 64, kernel_size=(5, 5), stride=(2, 2))

        self.fc6 = Linear(1024, 512)
        self.fc7 = Linear(512, 256)
        self.fc8 = Linear(256, 128)
        self.fc9 = Linear(258, outNum)
        self.lstm1 = LSTM(130, 128, 32)

        self.h1 = (torch.rand((32, 1, 128)) / 64).to(device)
        self.c1 = (torch.rand((32, 1, 128)) / 64).to(device)
        self.drop = Dropout3d(.25)
        self.elu = ELU()
        self.relu = ReLU()
        self.laynorm = GroupNorm(1, 128)
Example #6
 def block(in_channels, out_channels, kernel_size, stride, padding):
     return torch.nn.Sequential(
         Conv2d(in_channels, out_channels, kernel_size, stride, padding,
                bias=False),
         GroupNorm(1, out_channels),
         LeakyReLU(0.2))
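A hedged usage sketch of the helper above, assuming Conv2d, GroupNorm and LeakyReLU are imported from torch.nn; GroupNorm(1, out_channels) puts all channels in a single group, i.e. a LayerNorm-style normalization:

import torch

layer = block(3, 64, kernel_size=4, stride=2, padding=1)
out = layer(torch.randn(1, 3, 32, 32))
print(out.shape)  # torch.Size([1, 64, 16, 16])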
Example #7
 def __init__(self,
              in_channels: int,
              out_channels: int,
              normalization: bool = True,
              kernel_size: int = 3):
     super().__init__()
     # block = nn.ModuleList()
     # works for kernel size 5 and 3 at least
     self.conv = Conv3d(in_channels,
                        out_channels,
                        kernel_size,
                        padding=(kernel_size + 1) // 2 - 1)
     # self.conv = Conv3d(in_channels, out_channels, kernel_size, padding=1)
     self.gnorm = GroupNorm(num_groups=1, num_channels=out_channels)
     self.act = ReLU(inplace=True)
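A quick hedged check of the padding rule used above: for odd kernel sizes, (kernel_size + 1) // 2 - 1 equals (kernel_size - 1) // 2, which is the usual 'same' padding:

for k in (3, 5, 7):
    assert (k + 1) // 2 - 1 == (k - 1) // 2   # 1, 2, 3 respectively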
Example #8
def create_norm(num_features):
    """Creates a normalization layer.

    Note:
        The normalization is configured via :meth:`pytorch_layers.Config.norm`
        and :attr:`pytorch_layers.Config.norm_kwargs`, and the spatial dimension
        is configured via :attr:`pytorch_layers.Config.dim`.

    Args:
        num_features (int): The number of input channels.

    Returns:
        torch.nn.Module: The created normalization layer.

    """
    config = Config()
    if config.norm_mode is NormMode.GROUP:
        from torch.nn import GroupNorm
        kwargs = config.norm_kwargs.copy()
        num_groups = kwargs.pop('num_groups')
        return GroupNorm(num_groups, num_features, **kwargs)
    elif config.norm_mode is NormMode.NONE:
        from torch.nn import Identity
        return Identity()
    if config.dim is Dim.ONE:
        if config.norm_mode is NormMode.INSTANCE:
            from torch.nn import InstanceNorm1d
            return InstanceNorm1d(num_features, **config.norm_kwargs)
        elif config.norm_mode is NormMode.BATCH:
            from torch.nn import BatchNorm1d
            return BatchNorm1d(num_features, **config.norm_kwargs)
    elif config.dim is Dim.TWO:
        if config.norm_mode is NormMode.INSTANCE:
            from torch.nn import InstanceNorm2d
            return InstanceNorm2d(num_features, **config.norm_kwargs)
        elif config.norm_mode is NormMode.BATCH:
            from torch.nn import BatchNorm2d
            return BatchNorm2d(num_features, **config.norm_kwargs)
    elif config.dim is Dim.THREE:
        if config.norm_mode is NormMode.INSTANCE:
            from torch.nn import InstanceNorm3d
            return InstanceNorm3d(num_features, **config.norm_kwargs)
        elif config.norm_mode is NormMode.BATCH:
            from torch.nn import BatchNorm3d
            return BatchNorm3d(num_features, **config.norm_kwargs)
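A hedged sketch of what the NormMode.GROUP branch reduces to, assuming Config.norm_kwargs holds a 'num_groups' entry plus any extra GroupNorm arguments (the values below are illustrative, not the library defaults):

import torch
from torch.nn import GroupNorm

kwargs = {'num_groups': 4, 'eps': 1e-5}   # assumed contents of config.norm_kwargs
num_groups = kwargs.pop('num_groups')
norm = GroupNorm(num_groups, 64, **kwargs)
print(norm(torch.randn(2, 64, 8, 8)).shape)   # torch.Size([2, 64, 8, 8])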
Example #9
    def __init__(self,
                 device,
                 size,
                 getRawData=False,
                 batch=1,
                 mode='udacity'):
        super(TSNENet, self).__init__()
        self.fc1 = Linear(8295, 128)  # 8374
        self.fc2 = Linear(475, 128)
        self.fc3 = Linear(88, 128)
        self.fc4 = Linear(512, 128)
        self.fc5 = Linear(512, 1024)

        self.conv1 = Conv3d(size,
                            64,
                            kernel_size=(3, 12, 12),
                            stride=(1, 6, 6))  # , padding=1)
        self.conv2 = Conv2d(64, 64, kernel_size=(5, 5), stride=(2, 2))
        self.conv3 = Conv2d(64, 64, kernel_size=(5, 5), stride=(2, 2))
        self.conv4 = Conv2d(64, 64, kernel_size=(5, 5), stride=(2, 2))

        self.fc6 = Linear(1024, 512)
        self.fc7 = Linear(512, 256)
        self.fc8 = Linear(256, 128)
        self.fc9 = Linear(258, 128)
        self.fc10 = Linear(128, 15)
        self.lstm1 = LSTM(130, 128, 32)

        self.h1 = (torch.rand((32, 1, 128)) / 64).to(device)
        self.c1 = (torch.rand((32, 1, 128)) / 64).to(device)
        self.drop = Dropout3d(.05)
        self.elu = ELU()
        self.relu = ReLU()
        self.laynorm = GroupNorm(1, 128)

        self.bnorm1 = BatchNorm3d(64)
        self.bnorm2 = BatchNorm2d(64)
        self.bnorm3 = BatchNorm2d(64)
        self.bnorm4 = BatchNorm2d(64)

        self.pool1 = MaxPool2d(2)
        self.pool2 = MaxPool2d(2)

        self.getRawData = getRawData
        self.batch = batch
Example #10
 def __init__(self, device):
     super(zModel, self).__init__()
     self.conv1 = Conv1d(1, 16, kernel_size=1, stride=1)
     self.conv2 = Conv1d(16, 16, kernel_size=2, stride=2)
     self.conv3 = Conv1d(16, 16, kernel_size=3, stride=2)
     self.fc1 = Linear(10, 32)
     self.fc2 = Linear(5, 32)
     self.fc3 = Linear(2, 32)
     self.fc4 = Linear(32, 128)
     self.fc5 = Linear(128, 64)
     self.fc6 = Linear(64, 32)
     self.fc7 = Linear(32, 1)
     self.lstm1 = LSTM(32, 16, 32)
     self.h1 = torch.zeros(32, 1, 16).to(device)
     self.c1 = torch.zeros(32, 1, 16).to(device)
     self.drop = Dropout(.1)
     self.elu = ELU()
     self.relu = ReLU()
     self.laynorm = GroupNorm(1, 32)
Example #11
class GroupNormBlock(nn.Module):
    def __init__(self, num_channels):
        super(GroupNormBlock, self).__init__()
        if n_divs == 1:  # net_type is real
            # self.gn = BatchNorm2d(num_features=num_channels, **bnArgs)
            self.gn = GroupNorm(num_groups=32, num_channels=num_channels, eps=bnArgs['eps'])
        else:
            self.gn = GroupNorm(num_groups=n_divs, num_channels=num_channels, eps=bnArgs['eps'])
        self.num_features = num_channels

    def forward(self, x):
        return self.gn(x)

    def name(self):
        return f'group_num_'

    def __repr__(self):
        return self.__class__.__name__ + '(' \
               + self.gn.__repr__() + '\n' + ')'
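GroupNormBlock reads n_divs and bnArgs from module-level configuration that is not shown in this example. A hedged usage sketch, with both names filled in as assumptions:

import torch
from torch.nn import GroupNorm   # plus the GroupNormBlock definition above

n_divs = 1                 # assumed: real-valued network -> 32 groups
bnArgs = {'eps': 1e-5}     # assumed epsilon forwarded to GroupNorm

block = GroupNormBlock(64)                       # 64 channels split into 32 groups of 2
print(block(torch.randn(2, 64, 16, 16)).shape)   # torch.Size([2, 64, 16, 16])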
Example #12
def MLP(channels, enable_group_norm=True):
    if enable_group_norm:
        num_groups = [0]
        for i in range(1, len(channels)):
            if channels[i] >= 32:
                num_groups.append(channels[i] // 32)
            else:
                num_groups.append(1)
        return Seq(*[
            Seq(torch.nn.utils.weight_norm(Lin(channels[i - 1], channels[i])),
                LeakyReLU(negative_slope=0.2),
                GroupNorm(num_groups[i], channels[i]))
            for i in range(1, len(channels))
        ])
    else:
        return Seq(*[
            Seq(torch.nn.utils.weight_norm(Lin(channels[i - 1], channels[i])),
                LeakyReLU(negative_slope=0.2))
            for i in range(1, len(channels))
        ])
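A hedged usage sketch of MLP, assuming the aliases used above (Seq = torch.nn.Sequential, Lin = torch.nn.Linear) and GroupNorm / LeakyReLU from torch.nn; GroupNorm accepts (N, C) input, so it can normalize plain feature vectors:

import torch

mlp = MLP([3, 64, 128])            # 64 channels -> 2 groups, 128 channels -> 4 groups
out = mlp(torch.randn(10, 3))
print(out.shape)                   # torch.Size([10, 128])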
Example #13
    def __init__(self,
                 device,
                 size,
                 getRawData=False,
                 batch=1,
                 mode='udacity'):
        super(TSNENet, self).__init__()
        self.fc1 = Linear(118, 128)
        self.fc2 = Linear(117, 128)
        self.fc3 = Linear(116, 128)
        self.fc4 = Linear(116, 128)
        self.fc5 = Linear(1856, 1024)

        self.conv1 = Conv1d(size, 32, kernel_size=3, stride=1)
        self.conv2 = Conv1d(32, 32, kernel_size=2, stride=1)
        self.conv3 = Conv1d(32, 32, kernel_size=2, stride=1)
        self.conv4 = Conv1d(32, 16, kernel_size=1, stride=1)

        self.fc6 = Linear(1024, 512)
        self.fc7 = Linear(512, 256)
        self.fc8 = Linear(256, 128)
        self.fc9 = Linear(256, 128)
        self.fc10 = Linear(128, 10)
        self.lstm1 = LSTM(128, 128, 16)

        self.h1 = (torch.rand((16, 1, 128)) / 64).to(device)
        self.c1 = (torch.rand((16, 1, 128)) / 64).to(device)
        self.drop = Dropout3d(.25)
        self.elu = ELU()
        self.relu = ReLU()
        self.laynorm = GroupNorm(1, 128)

        self.bnorm1 = BatchNorm1d(32)
        self.bnorm2 = BatchNorm1d(32)
        self.bnorm4 = BatchNorm1d(16)

        self.getRawData = getRawData
        self.batch = batch
Example #14
    def __init__(self,
                 num_input_channels,
                 num_out_channels=64,
                 num_layers=8,
                 kernel_size=4,
                 num_classification=10):
        """

        :param num_input_channels:
        :param num_out_channels:
        :param num_layers:
        :param kernel_size:

        Notes:
        Use leaky ReLU to allow small negative gradients to pass to the generator. https://sthalles.github.io/advanced_gans/
        Use group norm because it performs better than batch norm for small batches.
        """
        super(Discriminator, self).__init__()
        pad = 1
        seq = [
            Conv2d(num_input_channels,
                   num_out_channels,
                   kernel_size=kernel_size,
                   stride=2,
                   padding=1,
                   bias=True),
            LeakyReLU(0.2, True)
        ]
        for i in range(1, int(num_layers / 2)):
            seq += [
                Conv2d(num_out_channels * i,
                       num_out_channels * (i + 1),
                       kernel_size=kernel_size,
                       stride=2,
                       padding=pad,
                       bias=True),
                GroupNorm(num_groups=8,
                          num_channels=num_out_channels * (i + 1)),
                LeakyReLU(0.2, True),
                Conv2d(num_out_channels * (i + 1),
                       num_out_channels * (i + 1),
                       kernel_size=kernel_size,
                       stride=1,
                       padding=pad,
                       bias=True),
                GroupNorm(num_groups=8,
                          num_channels=num_out_channels * (i + 1)),
                LeakyReLU(0.2, True)
            ]
        seq += [
            Conv2d(num_out_channels * int(num_layers / 2),
                   num_out_channels * int(num_layers / 4),
                   kernel_size=kernel_size,
                   stride=1,
                   padding=pad,
                   bias=True),
            GroupNorm(num_groups=8,
                      num_channels=num_out_channels * int(num_layers / 4)),
            LeakyReLU(0.2, True),
            Conv2d(num_out_channels * int(num_layers / 4),
                   num_out_channels,
                   kernel_size=kernel_size,
                   stride=1,
                   padding=pad,
                   bias=True)
        ]
        self.conv = Sequential(*seq)
        self.gan_layer = Linear(num_out_channels, 2)
        self.classification_layer = Linear(num_out_channels,
                                           num_classification)
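Since the forward pass is not shown, a hedged sketch that only exercises the convolutional trunk assembled in __init__ (the 128x128 input resolution is an assumption):

import torch

d = Discriminator(num_input_channels=3)
feats = d.conv(torch.randn(1, 3, 128, 128))
print(feats.shape)   # torch.Size([1, 64, 4, 4]) with the default arguments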
Example #15
def custom_conv_layer(
    ni: int,
    nf: int,
    ks: int = 3,
    stride: int = 1,
    padding: int = None,
    bias: bool = None,
    is_1d: bool = False,
    norm_type: Optional[NormType] = NormType.Batch,
    use_activ: bool = True,
    leaky: float = None,
    transpose: bool = False,
    init: Callable = nn.init.kaiming_normal_,
    self_attention: bool = False,
    extra_bn: bool = False,
):
    "Create a sequence of convolutional (`ni` to `nf`), ReLU (if `use_activ`) and batchnorm (if `bn`) layers."
    if padding is None:
        padding = (ks - 1) // 2 if not transpose else 0
    bn = norm_type in (NormType.Batch, NormType.BatchZero) or extra_bn
    # gn = norm_type == NormType.GroupNorm
    if bias is None:
        bias = not bn
    conv_func = nn.ConvTranspose2d if transpose else nn.Conv1d if is_1d else nn.Conv2d
    conv = init_default(
        conv_func(ni,
                  nf,
                  kernel_size=ks,
                  bias=bias,
                  stride=stride,
                  padding=padding),
        init,
    )

    # spectral norm by default
    conv = spectral_norm(conv)

    layers = [conv]
    if use_activ:
        layers.append(relu(True, leaky=leaky))

    # elif norm_type == NormType.GroupNorm:
    #     # https://pytorch.org/docs/stable/nn.html#groupnorm
    #     # >>> input = torch.randn(20, 6, 10, 10)
    #     # >>> # Separate 6 channels into 3 groups
    #     # >>> m = nn.GroupNorm(3, 6)
    #     # >>> # Separate 6 channels into 6 groups (equivalent with InstanceNorm)
    #     # >>> m = nn.GroupNorm(6, 6)
    #     # >>> # Put all 6 channels into a single group (equivalent with LayerNorm)
    #     # >>> m = nn.GroupNorm(1, 6)
    #     # >>> # Activating the module
    #     # >>> output = m(input)

    #     # "We set G = 32 for GN by default."" (Wu, He 2018 - Group Norm paper)
    #     # torch.nn.GroupNorm(num_groups, num_channels, eps=1e-05, affine=True)
    #     group_norm = GroupNorm(32, 3)
    #     conv = group_norm(conv)

    if norm_type == NormType.Batch:
        layers.append((nn.BatchNorm1d if is_1d else nn.BatchNorm2d)(nf))
    elif norm_type == NormType.InstanceNorm:
        # one group per channel: GroupNorm behaves like InstanceNorm
        layers.append(GroupNorm(nf, nf))
    elif norm_type == NormType.LayerNorm:
        # a single group over all channels: GroupNorm behaves like LayerNorm
        layers.append(GroupNorm(1, nf))
    elif norm_type == NormType.GroupNorm:
        # if channels is divisible by 32, split into
        # 32 groups, otherwise, use simple grouping
        gs = 32 if nf % 32 == 0 else 1
        layers.append(GroupNorm(gs, nf))
    elif norm_type == NormType.BatchRenorm:
        layers.append(BatchRenormalization2D(nf))

    if self_attention:
        layers.append(SelfAttention(nf))
    return nn.Sequential(*layers)
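A hedged, standalone illustration of the group-size rule in the NormType.GroupNorm branch above (not part of the helper itself): channel counts divisible by 32 get 32 groups, anything else collapses to a single group, which is LayerNorm-like:

import torch
from torch.nn import GroupNorm

for nf in (64, 48):
    gs = 32 if nf % 32 == 0 else 1
    gn = GroupNorm(gs, nf)
    print(nf, gs, gn(torch.randn(2, nf, 7, 7)).shape)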
Example #16
    def __init__(self,
                 in_channels_primal,
                 in_channels_dual,
                 out_channels_primal,
                 out_channels_dual,
                 norm_layer_type,
                 num_groups_norm_layer,
                 single_dual_nodes,
                 undirected_dual_edges,
                 num_primal_edges_to_keep=None,
                 fraction_primal_edges_to_keep=None,
                 primal_attention_coeffs_threshold=None,
                 num_res_blocks=3,
                 heads=1,
                 concat_primal=True,
                 concat_dual=True,
                 negative_slope_primal=0.2,
                 negative_slope_dual=0.2,
                 dropout_primal=0,
                 dropout_dual=0,
                 bias_primal=False,
                 bias_dual=False,
                 add_self_loops_to_dual_graph=False,
                 allow_pooling_consecutive_edges=True,
                 aggr_primal_features_pooling='mean',
                 aggr_dual_features_pooling='mean',
                 use_decreasing_attention_coefficients=True,
                 log_ratio_new_old_primal_nodes=True,
                 log_ratio_new_old_primal_edges=False,
                 return_old_dual_node_to_new_dual_node=False,
                 return_graphs_before_pooling=False):
        super(DualPrimalResDownConv, self).__init__()
        self.__use_pooling = (num_primal_edges_to_keep is not None
                              or fraction_primal_edges_to_keep is not None
                              or primal_attention_coeffs_threshold is not None)
        self.__norm_layer_type = norm_layer_type

        if (self.__use_pooling):
            self.__log_ratio_new_old_primal_nodes = (
                log_ratio_new_old_primal_nodes)
            self.__log_ratio_new_old_primal_edges = (
                log_ratio_new_old_primal_edges)
        else:
            self.__log_ratio_new_old_primal_nodes = False
            self.__log_ratio_new_old_primal_edges = False
        self.__return_graphs_before_pooling = return_graphs_before_pooling

        # Add the convolution layer.
        self.conv = DualPrimalResConv(
            in_channels_primal=in_channels_primal,
            in_channels_dual=in_channels_dual,
            out_channels_primal=out_channels_primal,
            out_channels_dual=out_channels_dual,
            heads=heads,
            concat_primal=concat_primal,
            concat_dual=concat_dual,
            negative_slope_primal=negative_slope_primal,
            negative_slope_dual=negative_slope_dual,
            dropout_primal=dropout_primal,
            dropout_dual=dropout_dual,
            bias_primal=bias_primal,
            bias_dual=bias_dual,
            single_dual_nodes=single_dual_nodes,
            undirected_dual_edges=undirected_dual_edges,
            add_self_loops_to_dual_graph=add_self_loops_to_dual_graph,
            num_skips=num_res_blocks)
        # Optionally add a normalization layer.
        out_channels_primal_considering_heads = out_channels_primal
        if (concat_primal):
            out_channels_primal_considering_heads *= heads
        out_channels_dual_considering_heads = out_channels_dual
        if (concat_dual):
            out_channels_dual_considering_heads *= heads
        if (norm_layer_type == 'batch_norm'):
            self.norm_primal = BatchNorm(
                in_channels=out_channels_primal_considering_heads)
            self.norm_dual = BatchNorm(
                in_channels=out_channels_dual_considering_heads)
        elif (norm_layer_type == 'group_norm'):
            self.norm_primal = GroupNorm(
                num_groups=num_groups_norm_layer,
                num_channels=out_channels_primal_considering_heads)
            self.norm_dual = GroupNorm(
                num_groups=num_groups_norm_layer,
                num_channels=out_channels_dual_considering_heads)
        # Add pooling layer.
        if (self.__use_pooling):
            self.pool = DualPrimalEdgePooling(
                self_loops_in_output_dual_graph=add_self_loops_to_dual_graph,
                single_dual_nodes=single_dual_nodes,
                undirected_dual_edges=undirected_dual_edges,
                num_primal_edges_to_keep=num_primal_edges_to_keep,
                fraction_primal_edges_to_keep=fraction_primal_edges_to_keep,
                primal_att_coeff_threshold=primal_attention_coeffs_threshold,
                allow_pooling_consecutive_edges=allow_pooling_consecutive_edges,
                aggr_primal_features=aggr_primal_features_pooling,
                aggr_dual_features=aggr_dual_features_pooling,
                use_decreasing_attention_coefficient=(
                    use_decreasing_attention_coefficients),
                return_old_dual_node_to_new_dual_node=(
                    return_old_dual_node_to_new_dual_node),
                log_ratio_new_old_primal_nodes=log_ratio_new_old_primal_nodes,
                log_ratio_new_old_primal_edges=log_ratio_new_old_primal_edges)
Example #17
 def __call__(self, num_channels):
     if type(self.num_groups) is int:
         return GroupNorm(self.num_groups, num_channels, **self.kwargs)
     elif type(self.num_groups) is float:
         return GroupNorm(int(self.num_groups * num_channels), num_channels,
                          **self.kwargs)
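The __call__ above suggests a small factory object whose num_groups can be either an absolute group count (int) or a fraction of the channel count (float). A hedged sketch of such a wrapper; the class name and constructor are assumptions for illustration only:

from torch.nn import GroupNorm

class GroupNormFactory:
    def __init__(self, num_groups, **kwargs):
        self.num_groups = num_groups   # int -> fixed group count, float -> fraction of channels
        self.kwargs = kwargs           # forwarded to GroupNorm (e.g. eps, affine)

    def __call__(self, num_channels):
        if type(self.num_groups) is int:
            return GroupNorm(self.num_groups, num_channels, **self.kwargs)
        elif type(self.num_groups) is float:
            return GroupNorm(int(self.num_groups * num_channels), num_channels,
                             **self.kwargs)

make_norm = GroupNormFactory(0.25, eps=1e-5)
print(make_norm(64))   # GroupNorm(16, 64, eps=1e-05, affine=True)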
Example #18
 def __init__(self, groups, num_features, eps):
     super(MyGroupNorm, self).__init__()
     self.layer = GroupNorm(groups, num_features, eps=eps)