import mxnet as mx
from mxnet.gluon import nn


# NOTE: the nn.HybridBlock base is assumed from the Gluon layers used below;
# TailNegBlock is expected to be defined elsewhere in this file.
class NegConvBNRelu(nn.HybridBlock):
    def __init__(self, batchnorm_pool=False, **kwargs):
        super(NegConvBNRelu, self).__init__(**kwargs)
        self.conv1 = nn.Conv2D(channels=64, kernel_size=(3, 3), strides=(1, 1), use_bias=False)
        self.bn = nn.BatchNorm()
        self.act = nn.Activation('relu')
        self.pool = nn.AvgPool2D(pool_size=(4, 4))
        self.tailneg = TailNegBlock()
        # When True, pooling consumes the BatchNorm output instead of the ReLU output
        self.batchnorm_pool = batchnorm_pool
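
    # The forward pass is not part of this excerpt. A minimal sketch, assuming
    # `batchnorm_pool` selects whether pooling skips the ReLU, and that
    # TailNegBlock accepts the two branch outputs:
    def hybrid_forward(self, F, x):
        conv = self.conv1(x)
        bn = self.bn(conv)
        if self.batchnorm_pool:
            pool = self.pool(bn)           # pool straight off the BatchNorm
            return self.tailneg(bn, pool)
        relu = self.act(bn)
        pool = self.pool(relu)             # default path: pool off the ReLU
        return self.tailneg(relu, pool)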


class NegConvAdd(nn.HybridBlock):
    def __init__(self, **kwargs):
        super(NegConvAdd, self).__init__(**kwargs)
        self.conv1 = nn.Conv2D(channels=64, kernel_size=(3, 3), strides=(1, 1), use_bias=False)
        self.act = nn.Activation('relu')
        self.pool = nn.AvgPool2D(pool_size=(4, 4))
        self.tailneg = TailNegBlock()
        # Learnable tensor added to the convolution output; its shape is
        # deferred until the first forward pass
        self.add_value = mx.gluon.Parameter('add_value', init=mx.init.Xavier(magnitude=2.24),
                                            dtype='float32', allow_deferred_init=True)
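
    # hybrid_forward is missing from this excerpt. A minimal sketch, assuming
    # Gluon 1.x semantics, where the registered `add_value` parameter is
    # injected as a keyword argument:
    def hybrid_forward(self, F, x, add_value):
        conv = self.conv1(x)
        summed = F.broadcast_add(conv, add_value)  # elementwise add of the learned tensor
        pool = self.pool(self.act(summed))
        return self.tailneg(summed, pool)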


class NegConvBN(nn.HybridBlock):
    def __init__(self, **kwargs):
        super(NegConvBN, self).__init__(**kwargs)
        self.conv1 = nn.Conv2D(channels=64,
                               kernel_size=(3, 3),
                               strides=(1, 1),
                               use_bias=False)
        self.bn1 = nn.BatchNorm()
        self.pool = nn.AvgPool2D(pool_size=(4, 4))
        self.tailneg = TailNegBlock()
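
    # A minimal sketch of the missing forward pass, assuming the BatchNorm
    # output and its pooled version both feed the negative tail:
    def hybrid_forward(self, F, x):
        conv = self.conv1(x)
        bn = self.bn1(conv)
        pool = self.pool(bn)
        return self.tailneg(bn, pool)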


class NegFCReLU(nn.HybridBlock):
    def __init__(self, use_bias, flatten, **kwargs):
        super(NegFCReLU, self).__init__(**kwargs)
        self.fc = nn.Dense(units=64, use_bias=use_bias, flatten=flatten)
        self.act1 = nn.Activation('relu')
        self.act2 = nn.Activation('sigmoid')
        self.tail_neg = TailNegBlock()
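
    # A minimal sketch of the missing forward pass, assuming both activations
    # are applied to the same Dense output and feed the negative tail:
    def hybrid_forward(self, F, x):
        fc = self.fc(x)
        return self.tail_neg(self.act1(fc), self.act2(fc))


# Hypothetical usage: instantiate, initialize, hybridize, then run a batch.
# net = NegFCReLU(use_bias=True, flatten=True)
# net.initialize()
# net.hybridize()
# out = net(mx.nd.ones((4, 32)))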