def __init__(self):
    super(Binarizer, self).__init__()
    self.conv = nn.Conv2d(512, 32, kernel_size=1, bias=False)
    for param in self.conv.parameters():
        param.requires_grad = False
    self.sign = Sign()
def __init__(self):
    super(Binarizer, self).__init__()
    self.conv = nn.Conv2d(512, 32, kernel_size=1, bias=False)
    for param in self.conv.parameters():
        param.requires_grad = False
    self.customConv = BatchConv2dContext(512, 32, kernel_size=1, stride=1, padding=0, bias=False)
    self.sign = Sign()
def __init__(self, bits):
    super(Binarizer, self).__init__()
    self.conv = nn.Conv2d(512, bits, kernel_size=1, bias=False)
    self.sign = Sign()
def __init__(self):
    super(Binarizer, self).__init__()
    self.conv = nn.Conv2d(512, 32, kernel_size=1, bias=False)  # changed output channels from 32 to 16
    self.sign = Sign()
def __init__(self, bottleneck):
    super(Binarizer, self).__init__()
    self.conv = nn.Conv2d(512, bottleneck, kernel_size=1, bias=False)
    # self.conv = nn.Conv2d(32, 16, kernel_size=1, bias=False)
    self.tanh = nn.Tanh()
    self.sign = Sign()
def __init__(self):
    super(Binarizer, self).__init__()
    # self.conv1 = nn.Conv2d(NUM_FEAT1, NUM_FEAT1, kernel_size=1, bias=False)
    # self.conv4 = nn.Conv2d(NUM_FEAT2, NUM_FEAT2, kernel_size=1, bias=False)
    self.sign = Sign()
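All of the variants above delegate quantization to a Sign module whose definition is not shown here. Below is a minimal sketch of what such a module could look like, assuming the common stochastic-binarization scheme with a straight-through gradient used in recurrent image-compression binarizers; the name SignFunction and the exact forward/backward behaviour are assumptions, not taken from this repository.

import torch
import torch.nn as nn
from torch.autograd import Function


class SignFunction(Function):
    # Quantizes activations in [-1, 1] to {-1, +1} (hypothetical helper, see note above).
    @staticmethod
    def forward(ctx, x, is_training=True):
        if is_training:
            # Stochastic rounding: P(output = +1) = (1 + x) / 2, so E[output] = x.
            prob = torch.rand_like(x)
            return torch.where((1 - x) / 2 <= prob,
                               torch.ones_like(x),
                               -torch.ones_like(x))
        # At evaluation time, fall back to deterministic sign.
        return x.sign()

    @staticmethod
    def backward(ctx, grad_output):
        # Straight-through estimator: pass the gradient through unchanged,
        # no gradient for the is_training flag.
        return grad_output, None


class Sign(nn.Module):
    def forward(self, x):
        return SignFunction.apply(x, self.training)

Under these assumptions, stochastic rounding keeps the expected quantized value equal to the input, while the straight-through backward pass lets gradients flow through the otherwise non-differentiable sign operation during training.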