Example #1
    def __init__(self, features, num_classes, sobel):
        super(AlexNet, self).__init__()
        self.features = features
        self.classifier = nn.Sequential(nn.Dropout(0.5),
                                        nn.Linear(256 * 6 * 6, 4096),
                                        nn.ReLU(inplace=True),
                                        nn.Dropout(0.5),
                                        nn.Linear(4096, 4096),
                                        nn.ReLU(inplace=True))

        self.top_layer = nn.Linear(4096, num_classes)
        self.l2norm = Normalize(2)
        self._initialize_weights()

        if sobel:
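            # Fixed preprocessing: a 1x1 conv averages RGB to grayscale, then a
            # 3x3 conv holds the Sobel Gx/Gy kernels for vertical/horizontal edges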
            grayscale = nn.Conv2d(3, 1, kernel_size=1, stride=1, padding=0)
            grayscale.weight.data.fill_(1.0 / 3.0)
            grayscale.bias.data.zero_()
            sobel_filter = nn.Conv2d(1, 2, kernel_size=3, stride=1, padding=1)
            sobel_filter.weight.data[0, 0].copy_(
                torch.FloatTensor([[1, 0, -1], [2, 0, -2], [1, 0, -1]])
            )
            sobel_filter.weight.data[1, 0].copy_(
                torch.FloatTensor([[1, 2, 1], [0, 0, 0], [-1, -2, -1]])
            )
            sobel_filter.bias.data.zero_()
            self.sobel = nn.Sequential(grayscale, sobel_filter)
            for p in self.sobel.parameters():
                p.requires_grad = False
        else:
            self.sobel = None
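Every snippet on this page instantiates a Normalize module that the page itself never shows. A minimal sketch, assuming the common definition from instance-discrimination codebases (Lp normalization along the feature dimension, so Normalize(2) scales each row to unit L2 norm):

import torch
import torch.nn as nn

class Normalize(nn.Module):
    """Scale each row of a (batch, features) tensor to unit Lp norm."""

    def __init__(self, power=2):
        super(Normalize, self).__init__()
        self.power = power

    def forward(self, x):
        norm = x.pow(self.power).sum(1, keepdim=True).pow(1.0 / self.power)
        return x.div(norm)

With power=2 this projects every embedding onto the unit hypersphere, which is what the contrastive heads in the examples below rely on.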
Example #2
    def __init__(self, block, layers, low_dim=128):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3,
                               64,
                               kernel_size=7,
                               stride=2,
                               padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.fc = nn.Linear(512 * block.expansion, low_dim)
        self.l2norm = Normalize(2)

        for m in self.modules():
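            # He (fan-out) initialization for convolutions; BatchNorm starts as identity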
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
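For context, a forward pass consistent with this constructor (a hypothetical sketch mirroring the usual ImageNet ResNet flow, ending at the L2-normalized embedding) might look like:

    def forward(self, x):
        x = self.maxpool(self.relu(self.bn1(self.conv1(x))))
        x = self.layer4(self.layer3(self.layer2(self.layer1(x))))
        x = self.avgpool(x)                  # 7x7 -> 1x1 for 224x224 inputs
        x = self.fc(x.view(x.size(0), -1))   # project to low_dim
        return self.l2norm(x)                # unit-length embedding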
Example #3
    def __init__(self,
                 num_features=0,
                 out_planes=0,
                 norm=False,
                 dropout=0,
                 num_classes=0,
                 num_triplet_features=0):
        super(ShareNet, self).__init__()

        self.num_features = num_features
        self.norm = norm
        self.dropout = dropout
        self.has_embedding = num_features > 0
        self.num_classes = num_classes
        self.num_triplet_features = num_triplet_features

        self.l2norm = Normalize(2)

        # Append new layers
        if self.has_embedding:
            self.feat = nn.Linear(out_planes * 2, self.num_features)
            self.feat_bn = nn.BatchNorm1d(self.num_features)
            init.kaiming_normal_(self.feat.weight, mode='fan_out')
            init.constant_(self.feat.bias, 0)
            init.constant_(self.feat_bn.weight, 1)
            init.constant_(self.feat_bn.bias, 0)
        else:
            # Change the num_features to CNN output channels
            self.num_features = out_planes
        if self.dropout >= 0:
            self.drop = nn.Dropout(self.dropout)
        if self.num_classes > 0:
            self.classifier = nn.Linear(self.num_features, self.num_classes)
            init.normal_(self.classifier.weight, std=0.001)
            init.constant_(self.classifier.bias, 0)
Example #4
    def __init__(self,
                 out_size=128,
                 channels=128,
                 window_size=512,
                 stride=512,
                 embd_size=8,
                 log_stride=None):
        super(MalConv, self).__init__()
        self.embd = nn.Embedding(257, embd_size, padding_idx=0)
        if log_stride is not None:
            stride = 2**log_stride

        self.conv_1 = nn.Conv1d(embd_size,
                                channels,
                                window_size,
                                stride=stride,
                                bias=True)
        self.conv_2 = nn.Conv1d(embd_size,
                                channels,
                                window_size,
                                stride=stride,
                                bias=True)

        self.fc_1 = nn.Linear(channels, channels)
        self.fc_2 = nn.Linear(channels, out_size)
        self.l2norm = Normalize(2)
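The two identically configured 1-D convolutions are a gating pair in the style of MalConv (one stream modulated by a sigmoid over the other). A hypothetical forward consistent with this constructor, assuming torch.nn.functional is imported as F:

    def forward(self, x):
        x = self.embd(x.long())      # (batch, length, embd_size)
        x = x.transpose(1, 2)        # Conv1d expects (batch, channels, length)
        gated = self.conv_1(x) * torch.sigmoid(self.conv_2(x))
        pooled = F.adaptive_max_pool1d(gated, 1).squeeze(-1)  # global temporal max-pool
        out = self.fc_2(F.relu(self.fc_1(pooled)))
        return self.l2norm(out)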
Example #5
    def __init__(self, nfeatures=128, nclass=10, T=0.1):
        super(_Head_fc, self).__init__()
        self.nclass = nclass
        self.l2norm = Normalize(2)
        self.weight = nn.Parameter(torch.empty(nfeatures, nclass))
        self.softmax = nn.Softmax(dim=1)
        self.T = T
        nn.init.normal_(self.weight)
Example #6
    def __init__(self, low_dim, tau2):
        super(FeatureDecorrelation, self).__init__()
        self.low_dim = torch.tensor(low_dim)
        self.tau2 = torch.tensor(tau2)
        self.l2norm = Normalize(2)
        self.ce = torch.nn.CrossEntropyLoss()
        # torch.range is deprecated; torch.arange builds the 0..low_dim-1 index buffer directly
        self.seq = torch.arange(low_dim, dtype=torch.long, device=get_dev())
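If the loss usage matches these names, self.seq presumably supplies the diagonal target indices when the cross-entropy is applied to a feature-by-feature similarity matrix scaled by tau2, i.e. each feature dimension should be most similar to itself, which suppresses cross-feature correlation.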
Example #7
    def __init__(self, block, num_blocks, low_dim=128):
        super(ResNetwithSobel, self).__init__()
        self.in_planes = 64

        self.conv1 = nn.Conv2d(3,
                               64,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)

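        # Reset in_planes so the Sobel branch builds its own stack from 64 planes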
        self.in_planes = 64
        self.conv1_sobel = nn.Conv2d(2,
                                     64,
                                     kernel_size=3,
                                     stride=1,
                                     padding=1,
                                     bias=False)
        self.bn1_sobel = nn.BatchNorm2d(64)
        self.layer1_sobel = self._make_layer(block,
                                             64,
                                             num_blocks[0],
                                             stride=1)
        self.layer2_sobel = self._make_layer(block,
                                             128,
                                             num_blocks[1],
                                             stride=2)
        self.layer3_sobel = self._make_layer(block,
                                             256,
                                             num_blocks[2],
                                             stride=2)
        self.layer4_sobel = self._make_layer(block,
                                             512,
                                             num_blocks[3],
                                             stride=2)

        self.linear = nn.Linear(512 * 2 * block.expansion, low_dim)
        self.l2norm = Normalize(2)

        grayscale = nn.Conv2d(3, 1, kernel_size=1, stride=1, padding=0)
        grayscale.weight.data.fill_(1.0 / 3.0)
        grayscale.bias.data.zero_()
        sobel_filter = nn.Conv2d(1, 2, kernel_size=3, stride=1, padding=1)
        sobel_filter.weight.data[0, 0].copy_(
            torch.FloatTensor([[1, 0, -1], [2, 0, -2], [1, 0, -1]]))
        sobel_filter.weight.data[1, 0].copy_(
            torch.FloatTensor([[1, 2, 1], [0, 0, 0], [-1, -2, -1]]))
        sobel_filter.bias.data.zero_()
        self.sobel = nn.Sequential(grayscale, sobel_filter)
        for p in self.sobel.parameters():
            p.requires_grad = False
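A hypothetical forward for this two-branch network, assuming CIFAR-sized inputs (the 3x3, stride-1 conv1 points at 32x32 images, leaving 4x4 maps after layer4) and torch.nn.functional imported as F; the RGB and Sobel streams are concatenated, matching the 512 * 2 * block.expansion input width of self.linear:

    def forward(self, x):
        edges = self.sobel(x)  # frozen grayscale + Sobel Gx/Gy, 2 channels
        rgb = F.relu(self.bn1(self.conv1(x)))
        rgb = self.layer4(self.layer3(self.layer2(self.layer1(rgb))))
        sob = F.relu(self.bn1_sobel(self.conv1_sobel(edges)))
        sob = self.layer4_sobel(self.layer3_sobel(self.layer2_sobel(self.layer1_sobel(sob))))
        rgb = F.avg_pool2d(rgb, 4).flatten(1)
        sob = F.avg_pool2d(sob, 4).flatten(1)
        out = self.linear(torch.cat([rgb, sob], dim=1))
        return self.l2norm(out)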
Example #8
    def __init__(self, depth, pretrained=True, cut_at_pooling=False,
                 num_features=0, norm=False, dropout=0, num_classes=0, num_triplet_features=0):
        super(ResNet, self).__init__()

        self.depth = depth
        self.pretrained = pretrained
        self.cut_at_pooling = cut_at_pooling

        # Construct base (pretrained) resnet
        if depth not in ResNet.__factory:
            raise KeyError("Unsupported depth:", depth)
        self.base = ResNet.__factory[depth](pretrained=pretrained)

        # Fix layers [conv1 ~ layer2]
        fixed_names = []
        for name, module in self.base._modules.items():
            if name == "layer3":
                # assert fixed_names == ["conv1", "bn1", "relu", "maxpool", "layer1", "layer2"]
                break
            fixed_names.append(name)
            for param in module.parameters():
                param.requires_grad = False

        if not self.cut_at_pooling:
            self.num_features = num_features
            self.norm = norm
            self.dropout = dropout
            self.has_embedding = num_features > 0
            self.num_classes = num_classes
            self.num_triplet_features = num_triplet_features

            self.l2norm = Normalize(2)

            out_planes = self.base.fc.in_features

            # Append new layers
            if self.has_embedding:
                self.feat = nn.Linear(out_planes, self.num_features)
                self.feat_bn = nn.BatchNorm1d(self.num_features)
                init.kaiming_normal_(self.feat.weight, mode='fan_out')
                init.constant_(self.feat.bias, 0)
                init.constant_(self.feat_bn.weight, 1)
                init.constant_(self.feat_bn.bias, 0)
            else:
                # Change the num_features to CNN output channels
                self.num_features = out_planes
            if self.dropout >= 0:
                self.drop = nn.Dropout(self.dropout)
            if self.num_classes > 0:
                self.classifier = nn.Linear(self.num_features, self.num_classes)
                init.normal_(self.classifier.weight, std=0.001)
                init.constant_(self.classifier.bias, 0)

        if not self.pretrained:
            self.reset_params()
Example #9
    def __init__(self, block, num_blocks, low_dim=128):
        super(ResNet, self).__init__()
        self.in_planes = 64

        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512 * block.expansion, low_dim)
        self.l2norm = Normalize(2)
Example #10
    def __init__(self, block, num_blocks, low_dim=128, multitask=False):
        super(ResNet, self).__init__()
        self.multitask = multitask

        self.in_planes = 23
        self.conv1 = P4MConvZ2(3, 23, kernel_size=7, stride=2, padding=3, bias=False, batch_norm=True, max_pool=True)

        self.layer1 = self._make_layer(block, 23, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 45, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 91, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 181, num_blocks[3], stride=2)
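        # p4m feature maps carry 8 group copies per plane (4 rotations x 2 mirrors),
        # hence the 181 * 8 input width of the linear layer below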
        self.linear = nn.Linear(181 * 8 * block.expansion, low_dim)

        self.l2norm = Normalize(2)
Example #11
    def __init__(self,
                 block,
                 nblocks,
                 num_classes=[10],
                 final_pool=True,
                 mlp_depth=1,
                 stride=2,
                 color_channels=3,
                 color_temp=1,
                 num_color_classes=313,
                 normalize=False):
        super(ResNet, self).__init__()
        blocks = [block, block, block]
        factor = 1
        self.factor = factor
        self.in_planes = int(32 * factor)
        self.pre_layers_conv = conv_task(color_channels, int(32 * factor), 1)
        self.layer1 = self._make_layer(blocks[0],
                                       int(64 * factor),
                                       nblocks[0],
                                       stride=stride)
        self.layer2 = self._make_layer(blocks[1],
                                       int(128 * factor),
                                       nblocks[1],
                                       stride=stride)
        self.layer3 = self._make_layer(blocks[2],
                                       int(256 * factor),
                                       nblocks[2],
                                       stride=stride)
        self.end_bns = nn.Sequential(nn.BatchNorm2d(int(256 * factor)),
                                     nn.ReLU(True))
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.final_pool = final_pool
        self.stride = stride
        self.normalize = normalize
        if normalize:
            self.l2norm = Normalize(2)
        linear_ins = int(256 * factor) if final_pool else int(256 * 8 * 8 * factor)
        self.linears = self._mlp_layer(int(linear_ins),
                                       num_classes,
                                       depth=mlp_depth)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
Example #12
    def __init__(self,
                 block,
                 num_blocks,
                 low_dim=128,
                 medium_dim=128,
                 mlp=False,
                 pool_len=4,
                 normlinear=False):
        super(ResNet, self).__init__()
        self.pool_len = pool_len
        self.in_planes = 64
        linear_layer = NormedLinear if normlinear else nn.Linear

        self.conv1 = nn.Conv2d(3,
                               64,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.mlp = mlp
        if self.mlp:
            self.pre_fc = nn.Sequential(nn.Linear(512 * block.expansion, medium_dim))
            self.linear = linear_layer(medium_dim, low_dim)
        else:
            self.linear = linear_layer(512 * block.expansion, low_dim)
        self.l2norm = Normalize(2)

        self.groupDis = nn.Sequential(
            linear_layer(512 * block.expansion, low_dim), Normalize(2))
Example #13
    def __init__(self,
                 depth,
                 num_classes,
                 widen_factor=1,
                 dropRate=0.0,
                 norm=True):
        super(WideResNet, self).__init__()
        n_channels = [
            16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor
        ]
        assert ((depth - 4) % 6 == 0)
        n = (depth - 4) // 6  # blocks per group; integer division for Python 3
        block = BasicBlock
        # 1st conv before any network block
        self.conv1 = nn.Conv2d(3,
                               n_channels[0],
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               bias=False)
        # 1st block
        self.block1 = NetworkBlock(n, n_channels[0], n_channels[1], block, 1,
                                   dropRate)
        # 2nd block
        self.block2 = NetworkBlock(n, n_channels[1], n_channels[2], block, 2,
                                   dropRate)
        # 3rd block
        self.block3 = NetworkBlock(n, n_channels[2], n_channels[3], block, 2,
                                   dropRate)
        # global average pooling and classifier
        self.bn1 = nn.BatchNorm2d(n_channels[3])
        self.relu = nn.ReLU(inplace=True)
        self.fc = nn.Linear(n_channels[3], num_classes)
        self.nChannels = n_channels[3]

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()

        self.l2norm = Normalize(2)
        self.norm = norm
Example #14
    def wrapper(request, *args, **kwargs):
      validation = valid_dict

      if resolve:
        target = request.resolver_match.url_name.split('.')
      else:
        target = a.split('.')
      for t in target:
        validation = validation[t]

      data = {}
      document = dict(request.GET)
      schema = validation[request.method]

      if not document and request.method not in ['GET']:
        if isinstance(request._stream.stream, io.BufferedReader):
          data = request._stream.stream.peek()
        else:
          data = request._stream.stream.read()

        if re.match(r'^[0-9a-fA-F]{2}', data.decode()):
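          # Body looks like HTTP chunked transfer-encoding: keep the payload
          # chunks (odd indices after splitting on CRLF) and drop the size lines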
          split = data.split(b'\r\n')

          data = b''
          for i in range(len(split)):
            if i % 2:
              data += split[i]

        meta = request.META['CONTENT_TYPE']
        data = data.decode()
        parser = None

        if re.match(r'^(text/plain|application/json)', meta):
          parser = json.loads
        elif re.match(r'^application/yaml', meta):
          parser = yaml.safe_load
        elif re.match(r'^application/xml', meta):
          parser = lambda x: xmltodict.parse(x)['root']

        try:
          document.update(parser(data) if data else {})
        except Exception as e:
          return 'Failed parsing payload ({})'.format(e), 512

      converted = {}
      for k, v in schema['parameters'].items():
        converted[k] = v

        if 'coerce' not in converted[k]:
          converted[k]['coerce'] = Normalize(converted[k]['type'], converted[k]).convert

      v = BaseValidator(converted, update=True, purge_unknown=True)
      document = v.normalized(document)

      if document is None:
        return v.errors, 428

      if not v.validate(document):
        return v.errors, 428

      data = document
      return f(request, validated=data, *args, **kwargs)
Example #15
    def __init__(self, negM, T, batchSize, device):
        super(Criterion, self).__init__()
        self.negM = negM
        self.T = T
        # mask that zeroes the diagonal of the (2*batchSize) x (2*batchSize)
        # similarity matrix, so a sample is never counted as its own negative
        self.diag_mat = 1 - torch.eye(batchSize * 2).to(device)
        self.l2norm = Normalize(2)
Example #16
    def __init__(self,
                 block,
                 layers,
                 low_dim=128,
                 multitask=False,
                 showfeature=False,
                 finetune=False,
                 domain=False,
                 args=None):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3,
                               64,
                               kernel_size=7,
                               stride=2,
                               padding=3,
                               bias=False)
        # self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1,
        #                        bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7, stride=1)  # kernel size 7 assumes a 224x224 input
        self.fc = nn.Linear(512 * block.expansion, low_dim)
        self.l2norm = Normalize(2)
        self.saveembed = args.saveembed if args is not None else False  # args defaults to None

        self.showfeature = showfeature
        self.multitask = multitask
        self.finetune = finetune
        self.domain = domain
        if self.finetune:
            self.finetune_layer = nn.Sequential(
                Flatten(),
                nn.Linear(128, 128, bias=False),
                nn.BatchNorm1d(128),
                nn.ReLU(inplace=True),
                nn.Linear(128, 2, bias=False),
            )

        if self.multitask and self.domain:
            self.domain_classifier = nn.Linear(128, 2)
            self.pool = nn.MaxPool2d(kernel_size=3, stride=2)
            self.fc_block = nn.Sequential(
                Flatten(),
                # 3*3 if input is 224
                nn.Linear(512 * 3 * 3, 256, bias=False),
                nn.BatchNorm1d(256),
                nn.ReLU(inplace=True),
                nn.Linear(256, 256, bias=False),
                nn.BatchNorm1d(256),
                nn.ReLU(inplace=True),
            )

        if self.multitask:
            self.rotation_classifier = nn.Linear(128, 4)
            self.pool = nn.MaxPool2d(kernel_size=3, stride=2)
            self.fc_block = nn.Sequential(
                Flatten(),
                # 3*3 if input is 224
                nn.Linear(512 * 3 * 3, 256, bias=False),
                nn.BatchNorm1d(256),
                nn.ReLU(inplace=True),
                nn.Linear(256, 256, bias=False),
                nn.BatchNorm1d(256),
                nn.ReLU(inplace=True),
            )

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()