Example #1
    def __init__(self,
                 vgg_name,
                 nh=1,
                 n_h=512,
                 dropout_prob=0.25,
                 sm_type='softmax'):
        super(VGG, self).__init__()

        self.dropout_prob = dropout_prob
        self.n_hidden = nh
        self.hidden_size = n_h
        self.sm_type = sm_type

        self.features = self._make_layers(cfg[vgg_name])
        self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
        self.lin_proj = nn.Sequential(nn.Linear(512 * 7 * 7, 4096),
                                      nn.ReLU(True), nn.Dropout(),
                                      nn.Linear(4096, 512))

        if sm_type == 'softmax':
            self.out_proj = Softmax(input_features=512, output_features=64)
        elif sm_type == 'am_softmax':
            self.out_proj = AMSoftmax(input_features=512, output_features=64)
        else:
            raise NotImplementedError

        self.bin_classifier = self.make_bin_layers(n_in=2 * 512,
                                                   n_h_layers=nh,
                                                   h_size=n_h,
                                                   dropout_p=dropout_prob)
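All of these examples select the final classification head from a Softmax or AMSoftmax module according to sm_type. Neither class is part of torch.nn; they come from the repository these snippets were extracted from. As a rough reference, a minimal sketch of what such output-projection modules usually look like is given below. The constructor keywords (input_features, output_features) are taken from the call sites above; everything else (margin m, scale s, the label handling) is an assumption in the spirit of additive-margin softmax (CosFace-style), not the repository's actual code.

import torch
import torch.nn as nn
import torch.nn.functional as F


class Softmax(nn.Module):
    # Plain linear projection; the actual softmax is normally applied inside the loss.
    def __init__(self, input_features, output_features):
        super().__init__()
        self.w = nn.Linear(input_features, output_features)

    def forward(self, embeddings):
        return self.w(embeddings)


class AMSoftmax(nn.Module):
    # Additive-margin softmax: scaled cosine logits with a margin on the target class.
    def __init__(self, input_features, output_features, m=0.35, s=30.0):
        super().__init__()
        self.weight = nn.Parameter(torch.empty(output_features, input_features))
        nn.init.xavier_normal_(self.weight)
        self.m, self.s = m, s

    def forward(self, embeddings, labels=None):
        # Cosine similarity between L2-normalized embeddings and class weights.
        cos = F.linear(F.normalize(embeddings), F.normalize(self.weight))
        if labels is None:
            return self.s * cos  # inference: plain scaled cosine logits
        # Training: subtract the margin from the target-class cosine only.
        margin = torch.zeros_like(cos).scatter_(1, labels.unsqueeze(1), self.m)
        return self.s * (cos - margin)

Under an interface like this, Softmax(input_features=512, output_features=64) in Example #1 is simply a 512-to-64 linear layer, while the AMSoftmax variant yields margin-penalized cosine logits.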
Example #2
    def __init__(self, block, num_blocks, sm_type, num_classes=64):
        super(ResNet, self).__init__()
        self.in_planes = 64

        self.conv1 = nn.Conv2d(3,
                               64,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)

        if sm_type == 'softmax':
            self.out_proj = Softmax(input_features=512 * block.expansion,
                                    output_features=num_classes)
        elif sm_type == 'am_softmax':
            self.out_proj = AMSoftmax(input_features=512 * block.expansion,
                                      output_features=num_classes)
        else:
            raise NotImplementedError
Example #3
    def __init__(self,
                 vgg_name,
                 nh=1,
                 n_h=512,
                 dropout_prob=0.25,
                 sm_type='softmax'):
        super(VGG, self).__init__()

        self.dropout_prob = dropout_prob
        self.n_hidden = nh
        self.hidden_size = n_h
        self.sm_type = sm_type

        self.features = self._make_layers(cfg[vgg_name])

        if sm_type == 'softmax':
            self.out_proj = Softmax(input_features=512, output_features=10)
        elif sm_type == 'am_softmax':
            self.out_proj = AMSoftmax(input_features=512, output_features=10)
        else:
            raise NotImplementedError

        self.bin_classifier = self.make_bin_layers(n_in=2 * 512,
                                                   n_h_layers=nh,
                                                   h_size=n_h,
                                                   dropout_p=dropout_prob)
Example #4
	def __init__(self, block, num_blocks, nh, n_h, sm_type, num_classes=64, dropout_prob=0.25):
		super(ResNet, self).__init__()

		self.dropout_prob = dropout_prob
		self.n_hidden = nh
		self.hidden_size = n_h
		self.sm_type = sm_type

		self.in_planes = 64

		self.conv1 = nn.Conv2d(3, 64, kernel_size=5, stride=1, padding=1, bias=False)
		self.bn1 = nn.BatchNorm2d(64)
		self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
		self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
		self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
		self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
		self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)

		if sm_type=='softmax':
			self.out_proj=Softmax(input_features=512*block.expansion, output_features=num_classes)
		elif sm_type=='am_softmax':
			self.out_proj=AMSoftmax(input_features=512*block.expansion, output_features=num_classes)
		else:
			raise NotImplementedError

		self.classifier = self.make_bin_layers(n_in=2*512*block.expansion, n_h_layers=nh, h_size=n_h, dropout_p=dropout_prob)
Example #5
    def __init__(self,
                 block,
                 nblocks,
                 nh,
                 n_h,
                 sm_type,
                 growth_rate=12,
                 reduction=0.5,
                 num_classes=10,
                 dropout_prob=0.25):
        super(DenseNet, self).__init__()
        self.growth_rate = growth_rate

        num_planes = 2 * growth_rate
        self.conv1 = nn.Conv2d(3,
                               num_planes,
                               kernel_size=3,
                               padding=1,
                               bias=False)

        self.dense1 = self._make_dense_layers(block, num_planes, nblocks[0])
        num_planes += nblocks[0] * growth_rate
        out_planes = int(math.floor(num_planes * reduction))
        self.trans1 = Transition(num_planes, out_planes)
        num_planes = out_planes

        self.dense2 = self._make_dense_layers(block, num_planes, nblocks[1])
        num_planes += nblocks[1] * growth_rate
        out_planes = int(math.floor(num_planes * reduction))
        self.trans2 = Transition(num_planes, out_planes)
        num_planes = out_planes

        self.dense3 = self._make_dense_layers(block, num_planes, nblocks[2])
        num_planes += nblocks[2] * growth_rate
        out_planes = int(math.floor(num_planes * reduction))
        self.trans3 = Transition(num_planes, out_planes)
        num_planes = out_planes

        self.dense4 = self._make_dense_layers(block, num_planes, nblocks[3])
        num_planes += nblocks[3] * growth_rate

        self.bn = nn.BatchNorm2d(num_planes)

        if sm_type == 'softmax':
            self.out_proj = Softmax(input_features=num_planes,
                                    output_features=num_classes)
        elif sm_type == 'am_softmax':
            self.out_proj = AMSoftmax(input_features=num_planes,
                                      output_features=num_classes)
        else:
            raise NotImplementedError

        self.classifier = self.make_bin_layers(n_in=2 * num_planes,
                                               n_h_layers=nh,
                                               h_size=n_h,
                                               dropout_p=dropout_prob)
Example #6
    def __init__(self, vgg_name, sm_type='softmax'):
        super(VGG, self).__init__()
        self.features = self._make_layers(cfg[vgg_name])

        if sm_type == 'softmax':
            self.out_proj = Softmax(input_features=512, output_features=10)
        elif sm_type == 'am_softmax':
            self.out_proj = AMSoftmax(input_features=512, output_features=10)
        else:
            raise NotImplementedError
Example #7
	def __init__(self, block, layers, sm_type, num_classes=1000, zero_init_residual=False,
				 groups=1, width_per_group=64, replace_stride_with_dilation=None,
				 norm_layer=None):
		super(ResNet, self).__init__()
		if norm_layer is None:
			norm_layer = nn.BatchNorm2d
		self._norm_layer = norm_layer
		self.n_classes = num_classes

		self.inplanes = 64
		self.dilation = 1
		if replace_stride_with_dilation is None:
			# each element in the tuple indicates if we should replace
			# the 2x2 stride with a dilated convolution instead
			replace_stride_with_dilation = [False, False, False]
		if len(replace_stride_with_dilation) != 3:
			raise ValueError("replace_stride_with_dilation should be None "
							 "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
		self.groups = groups
		self.base_width = width_per_group
		self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
		self.bn1 = norm_layer(self.inplanes)
		self.relu = nn.ReLU(inplace=True)
		self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
		self.layer1 = self._make_layer(block, 64, layers[0])
		self.layer2 = self._make_layer(block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0])
		self.layer3 = self._make_layer(block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1])
		self.layer4 = self._make_layer(block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2])
		self.avgpool = nn.AdaptiveAvgPool2d((1, 1))

		self.lin_proj = nn.Sequential(nn.Linear(512*block.expansion, 128))

		if sm_type=='softmax':
			self.out_proj=Softmax(input_features=512*block.expansion, output_features=num_classes)
		elif sm_type=='am_softmax':
			self.out_proj=AMSoftmax(input_features=512*block.expansion, output_features=num_classes)
		else:
			raise NotImplementedError

		for m in self.modules():
			if isinstance(m, nn.Conv2d):
				nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
			elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
				nn.init.constant_(m.weight, 1)
				nn.init.constant_(m.bias, 0)

		# Zero-initialize the last BN in each residual branch,
		# so that the residual branch starts with zeros, and each residual block behaves like an identity.
		# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
		if zero_init_residual:
			for m in self.modules():
				if isinstance(m, Bottleneck):
					nn.init.constant_(m.bn3.weight, 0)
				elif isinstance(m, BasicBlock):
					nn.init.constant_(m.bn2.weight, 0)
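Example #7 follows the torchvision ResNet constructor with sm_type added on top. Assuming BasicBlock and Bottleneck are the usual torchvision-style residual blocks (expansion 1 and 4 respectively), a hypothetical instantiation would look like the lines below; the block classes and layer counts are assumptions, not taken from the snippet itself.

# Hypothetical usage, assuming torchvision-style BasicBlock / Bottleneck definitions.
resnet18_like = ResNet(BasicBlock, [2, 2, 2, 2], sm_type='softmax', num_classes=1000)
resnet50_like = ResNet(Bottleneck, [3, 4, 6, 3], sm_type='am_softmax', num_classes=1000)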
Example #8
    def __init__(self,
                 block,
                 nblocks,
                 sm_type,
                 growth_rate=12,
                 reduction=0.5,
                 num_classes=1000):
        super(DenseNet, self).__init__()
        self.growth_rate = growth_rate

        self.n_classes = num_classes

        num_planes = 2 * growth_rate
        self.conv1 = nn.Conv2d(3,
                               num_planes,
                               kernel_size=7,
                               padding=1,
                               bias=False)

        self.dense1 = self._make_dense_layers(block, num_planes, nblocks[0])
        num_planes += nblocks[0] * growth_rate
        out_planes = int(math.floor(num_planes * reduction))
        self.trans1 = Transition(num_planes, out_planes)
        num_planes = out_planes

        self.dense2 = self._make_dense_layers(block, num_planes, nblocks[1])
        num_planes += nblocks[1] * growth_rate
        out_planes = int(math.floor(num_planes * reduction))
        self.trans2 = Transition(num_planes, out_planes)
        num_planes = out_planes

        self.dense3 = self._make_dense_layers(block, num_planes, nblocks[2])
        num_planes += nblocks[2] * growth_rate
        out_planes = int(math.floor(num_planes * reduction))
        self.trans3 = Transition(num_planes, out_planes)
        num_planes = out_planes

        self.dense4 = self._make_dense_layers(block, num_planes, nblocks[3])
        num_planes += nblocks[3] * growth_rate

        self.bn = nn.BatchNorm2d(num_planes)

        self.lin_proj = nn.Sequential(nn.Linear(1024 * 6 * 6, 1024),
                                      nn.ReLU(True), nn.Dropout(0.1),
                                      nn.Linear(1024, 512))

        if sm_type == 'softmax':
            self.out_proj = Softmax(input_features=512,
                                    output_features=num_classes)
        elif sm_type == 'am_softmax':
            self.out_proj = AMSoftmax(input_features=512,
                                      output_features=num_classes)
        else:
            raise NotImplementedError
Example #9
    def __init__(self, vgg_name, sm_type='softmax'):
        super(VGG, self).__init__()
        self.features = self._make_layers(cfg[vgg_name])
        self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
        self.lin_proj = nn.Sequential(nn.Linear(512 * 7 * 7, 4096),
                                      nn.ReLU(True), nn.Dropout(),
                                      nn.Linear(4096, 512))

        if sm_type == 'softmax':
            self.out_proj = Softmax(input_features=512, output_features=600)
        elif sm_type == 'am_softmax':
            self.out_proj = AMSoftmax(input_features=512, output_features=600)
        else:
            raise NotImplementedError
Example #10
    def __init__(self, vgg_name, sm_type='softmax', n_classes=1000):
        super(VGG, self).__init__()

        self.n_classes = n_classes

        self.features = self._make_layers(cfg[vgg_name])
        self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
        self.lin_proj = nn.Sequential(nn.Linear(512 * 7 * 7, 128))

        if sm_type == 'softmax':
            self.out_proj = Softmax(input_features=512 * 7 * 7,
                                    output_features=self.n_classes)
        elif sm_type == 'am_softmax':
            self.out_proj = AMSoftmax(input_features=512 * 7 * 7,
                                      output_features=self.n_classes)
        else:
            raise NotImplementedError
Example #11
	def __init__(self, vgg_name, nh=1, n_h=512, emb_size=128, dropout_prob=0.25, sm_type='softmax', n_classes=1000, r_proj_size=0):
		super(VGG, self).__init__()

		self.dropout_prob = dropout_prob
		self.n_hidden = nh
		self.hidden_size = n_h
		self.sm_type = sm_type
		self.n_classes = n_classes
		self.emb_size = emb_size
		self.r_proj_size = r_proj_size

		self.features = self._make_layers(cfg[vgg_name])
		self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
		self.lin_proj = nn.Sequential(nn.Linear(512 * 7 * 7, self.emb_size))

		if sm_type=='softmax':
			self.out_proj=Softmax(input_features=self.emb_size, output_features=self.n_classes)
		elif sm_type=='am_softmax':
			self.out_proj=AMSoftmax(input_features=self.emb_size, output_features=self.n_classes)
		else:
			raise NotImplementedError

		self.bin_classifier = self.make_bin_layers(n_in=2*self.emb_size, n_h_layers=nh, h_size=n_h, dropout_p=dropout_prob)
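Examples #1, #3, #4, #5 and #11 also attach a binary verification head via self.make_bin_layers(...), which is defined elsewhere in the source repository. Judging only from the call sites (n_in = 2 * embedding size, n_h_layers, h_size, dropout_p), it builds an MLP that scores a pair of concatenated embeddings. The sketch below is a hypothetical reconstruction of such a method, not the repository's actual implementation.

import torch.nn as nn

# Hypothetical reconstruction; intended to live as a method on the model class.
def make_bin_layers(self, n_in, n_h_layers, h_size, dropout_p):
    # An MLP over two concatenated embeddings (hence n_in = 2 * embedding_size)
    # ending in a single sigmoid score for the same/different decision.
    layers = [nn.Linear(n_in, h_size), nn.LeakyReLU(0.1)]
    for _ in range(n_h_layers - 1):
        layers.extend([nn.Linear(h_size, h_size), nn.LeakyReLU(0.1)])
    layers.extend([nn.Dropout(p=dropout_p), nn.Linear(h_size, 1), nn.Sigmoid()])
    return nn.Sequential(*layers)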