def __init__(self, num_classes, pretrained="imagenet"):
    """Xception backbone with an SE-enhanced classification head.

    Args:
        num_classes: size of the final linear output.
        pretrained: weight set forwarded to ``xception`` (default "imagenet").
    """
    super().__init__()
    self.net = xception(pretrained=pretrained)
    # NOTE(review): this pool lives on ``self`` and is never wired into
    # ``self.net`` — presumably a custom forward() applies it; confirm, since
    # the head below expects the doubled concat-pooled feature width.
    self.avg_pool = AdaptiveConcatPool2d()
    head_width = 2048 * 2  # concat pooling doubles the 2048 backbone channels
    self.net.last_linear = nn.Sequential(
        Flatten(),
        SEBlock(head_width),
        nn.Dropout(),
        nn.Linear(head_width, num_classes),
    )
def __init__(self, num_classes, pretrained="imagenet"):
    """InceptionResNetV2 backbone with concat pooling and an SE head.

    Args:
        num_classes: size of the final linear output.
        pretrained: weight set forwarded to ``inceptionresnetv2``.
    """
    super().__init__()
    self.net = inceptionresnetv2(pretrained=pretrained)
    # Replace the network's own global pool so downstream features double.
    self.net.avgpool_1a = AdaptiveConcatPool2d()
    head_width = 1536 * 2  # concat pooling doubles the 1536 backbone channels
    self.net.last_linear = nn.Sequential(
        Flatten(),
        SEBlock(head_width),
        nn.Dropout(),
        nn.Linear(head_width, num_classes),
    )
def __init__(self, num_classes, encoder='efficientnet-b0', pool_type="avg"):
    """EfficientNet backbone with a configurable global pool and SE head.

    Args:
        num_classes: size of the final linear output.
        encoder: EfficientNet variant name ('efficientnet-b0' … 'b7').
        pool_type: one of "concat", "avg", "gem".

    Raises:
        ValueError: if ``pool_type`` is unsupported. (Previously an unknown
            pool_type left ``out_shape`` unbound and failed later with a
            confusing NameError when building the classifier.)
    """
    super().__init__()
    # Final feature-map channel count per EfficientNet variant.
    n_channels_dict = {
        'efficientnet-b0': 1280, 'efficientnet-b1': 1280,
        'efficientnet-b2': 1408, 'efficientnet-b3': 1536,
        'efficientnet-b4': 1792, 'efficientnet-b5': 2048,
        'efficientnet-b6': 2304, 'efficientnet-b7': 2560,
    }
    self.net = EfficientNet.from_pretrained(encoder)
    self.avg_pool = nn.AdaptiveAvgPool2d(1)
    # NOTE(review): sibling code (CustomEfficientNet) patches
    # ``self.net._avg_pooling`` for the same library — confirm that
    # ``avg_pool`` is the attribute this model's forward pass actually uses.
    if pool_type == "concat":
        self.net.avg_pool = AdaptiveConcatPool2d()
        out_shape = n_channels_dict[encoder] * 2
    elif pool_type == "avg":
        self.net.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
        out_shape = n_channels_dict[encoder]
    elif pool_type == "gem":
        self.net.avg_pool = GeM()
        out_shape = n_channels_dict[encoder]
    else:
        raise ValueError(f"Unsupported pool_type: {pool_type!r}")
    self.classifier = nn.Sequential(Flatten(),
                                    SEBlock(out_shape),
                                    nn.Dropout(),
                                    nn.Linear(out_shape, num_classes))
def __init__(self, in_channels=5):
    """Discriminator: stacked residual convs -> global max pool -> one logit.

    Args:
        in_channels: number of channels in the input tensor.
    """
    super(SDiscriminator, self).__init__()
    stages = [
        ResConv2d(in_channels, 64),
        ResConv2d(64, 128),
        ResConv2d(128, 256),
        ResConv2d(256, 512),
        nn.AdaptiveMaxPool2d((1, 1)),  # collapse spatial dims to 1x1
        Flatten(),
        nn.Linear(512, 1),
    ]
    self.model = nn.Sequential(*stages)
def __init__(self, num_classes, encoder="se_resnext50_32x4d",
             pretrained="imagenet", pool_type="concat"):
    """Generic classifier over the ``encoders`` registry with a pluggable pool.

    The original triplicated the pool/head construction across three
    encoder-family branches; here the pool and head are built once and only
    the attribute names differ per family.

    Args:
        num_classes: size of the final linear output.
        encoder: key into the module-level ``encoders`` registry.
        pretrained: weight set forwarded to the encoder factory.
        pool_type: one of "concat", "avg", "gem".

    Raises:
        ValueError: if ``pool_type`` is unsupported. (Previously this failed
            later with a confusing NameError on the unbound ``out_shape``.)
    """
    super().__init__()
    self.net = encoders[encoder]["encoder"](pretrained=pretrained)
    out_shape = encoders[encoder]["out_shape"]
    if pool_type == "concat":
        pool = AdaptiveConcatPool2d()
        out_shape *= 2  # concat pooling doubles the channel count
    elif pool_type == "avg":
        pool = nn.AdaptiveAvgPool2d((1, 1))
    elif pool_type == "gem":
        pool = GeM()
    else:
        raise ValueError(f"Unsupported pool_type: {pool_type!r}")
    head = nn.Sequential(Flatten(),
                         SEBlock(out_shape),
                         nn.Dropout(),
                         nn.Linear(out_shape, num_classes))
    # The pooling/classifier attribute names differ by encoder family.
    if encoder in ["resnet34", "resnet50"]:
        self.net.avgpool = pool
        self.net.fc = head
    elif encoder == "inceptionresnetv2":
        self.net.avgpool_1a = pool
        self.net.last_linear = head
    else:
        self.net.avg_pool = pool
        self.net.last_linear = head
def __init__(
    self,
    base="efficientnet-b0",
    pool_type="gem",
    in_ch=3,
    out_ch=1,
    pretrained=False,
):
    """EfficientNet with a replaceable pool/head and an adapted input stem.

    Args:
        base: EfficientNet variant ('efficientnet-b0' … 'b4').
        pool_type: one of "concat", "avg", "gem" ("avg" keeps the library's
            built-in pooling untouched).
        in_ch: number of input channels; if not 3, the stem conv is rebuilt
            and its pretrained RGB filters are tiled across the new channels.
        out_ch: size of the final linear output.
        pretrained: load ImageNet weights when True.
    """
    super(CustomEfficientNet, self).__init__()
    assert base in {
        "efficientnet-b0",
        "efficientnet-b1",
        "efficientnet-b2",
        "efficientnet-b3",
        "efficientnet-b4",
    }
    assert pool_type in {"concat", "avg", "gem"}
    self.base = base
    self.in_ch = in_ch
    self.out_ch = out_ch
    self.pretrained = pretrained
    if pretrained:
        self.net = EfficientNet.from_pretrained(base)
    else:
        self.net = EfficientNet.from_name(base)
    out_shape = self.net._fc.in_features
    if pool_type == "concat":
        self.net._avg_pooling = AdaptiveConcatPool2d()
        out_shape = out_shape * 2  # concat pooling doubles the channels
    elif pool_type == "gem":
        self.net._avg_pooling = GeM()
    self.net._fc = nn.Sequential(Flatten(),
                                 SEBlock(out_shape),
                                 nn.Dropout(),
                                 nn.Linear(out_shape, out_ch))
    if in_ch != 3:
        old_in_ch = 3
        old_conv = self.net._conv_stem
        # Tile the pretrained RGB filters along the input-channel axis and
        # trim to in_ch. This generalizes the original torch.cat approach,
        # which produced a wrong channel count (or an empty tensor) whenever
        # in_ch was not an exact multiple of 3.
        weight = old_conv.weight
        repeats = -(-self.in_ch // old_in_ch)  # ceil division
        new_weight = weight.repeat(1, repeats, 1, 1)[:, : self.in_ch]
        new_conv = nn.Conv2d(
            in_channels=self.in_ch,
            out_channels=old_conv.out_channels,
            kernel_size=old_conv.kernel_size,
            stride=old_conv.stride,
            padding=old_conv.padding,
            # BUG FIX: nn.Conv2d expects a bool here. Passing the old bias
            # Parameter itself would raise "Boolean value of Tensor ... is
            # ambiguous" whenever the stem conv actually has a bias.
            bias=old_conv.bias is not None,
        )
        self.net._conv_stem = new_conv
        self.net._conv_stem.weight = nn.Parameter(new_weight)
def __init__(self, num_classes, pretrained=False, net_cls=models.densenet121):
    """DenseNet backbone with a concat-pooled SE classification head.

    Args:
        num_classes: size of the final linear output.
        pretrained: forwarded to ``net_cls``.
        net_cls: torchvision model factory (default densenet121).
    """
    super().__init__()
    self.net = net_cls(pretrained=pretrained)
    # NOTE(review): kept on ``self`` rather than patched into ``self.net`` —
    # presumably a custom forward() applies it so the classifier sees the
    # doubled feature width; confirm against forward().
    self.avg_pool = AdaptiveConcatPool2d()
    head_width = 1024 * 2  # concat pooling doubles densenet121's 1024 channels
    self.net.classifier = nn.Sequential(
        Flatten(),
        SEBlock(head_width),
        nn.Dropout(),
        nn.Linear(head_width, num_classes),
    )
def _gen_layers(self):
    """Initialize parameters and build the conv-net layer pipeline.

    Reads ``self.x_train`` of shape (n_samples, n_channels, height, width);
    images are assumed square, so height doubles as the input size.
    Populates ``self.params`` (W1..b3), ``self.layers`` (ordered pipeline
    Conv1 -> Relu1 -> Pool1 -> Flatten1 -> Affine1 -> Relu2 -> Affine2 ->
    Last) and resets ``self.grads``.
    """
    self.n_train_samples, in_ch, side, _ = self.x_train.shape
    n_filters = self.conv_param['n_filters']
    k = self.conv_param['filter_size']
    stride = self.conv_param['stride']
    pad = self.conv_param['pad']
    pool = self.pool_param['pool_size']
    conv_out = get_output_size(side, k, stride, pad)
    # Flattened feature count after pooling; presumably conv_out divides
    # evenly by the pool size — confirm with callers.
    flat = int(n_filters * np.power(conv_out / pool, 2))
    hidden = self.layer_nodes['hidden']
    out = self.layer_nodes['output']
    std = self.weight_init_std
    # Parameter init (randn call order W1 -> W2 -> W3 preserved).
    self.params = {
        'W1': std * np.random.randn(n_filters, in_ch, k, k),
        'b1': np.zeros(n_filters),
        'W2': std * np.random.randn(flat, hidden),
        'b2': np.zeros(hidden),
        'W3': std * np.random.randn(hidden, out),
        'b3': np.zeros(out),
    }
    # Layer pipeline; insertion order defines forward order.
    self.layers = OrderedDict()
    self.layers['Conv1'] = Conv2D(self.params['W1'], self.params['b1'],
                                  stride, pad)
    self.layers['Relu1'] = Relu()
    self.layers['Pool1'] = MaxPooling2D(pool_h=pool, pool_w=pool, stride=pool)
    self.layers['Flatten1'] = Flatten()
    self.layers['Affine1'] = Affine(self.params['W2'], self.params['b2'])
    self.layers['Relu2'] = Relu()
    self.layers['Affine2'] = Affine(self.params['W3'], self.params['b3'])
    self.layers['Last'] = Softmax()
    self.grads = {}
def __init__(self, num_classes, pretrained=False, net_cls=models.resnet50):
    """ResNet backbone whose pooling and fc head are replaced in place.

    Args:
        num_classes: size of the final linear output.
        pretrained: forwarded to ``create_net``.
        net_cls: torchvision model factory (default resnet50).
    """
    super().__init__()
    self.net = create_net(net_cls, pretrained=pretrained)
    self.net.avgpool = AdaptiveConcatPool2d()
    head_width = 2048 * 2  # concat pooling doubles resnet50's 2048 channels
    self.net.fc = nn.Sequential(
        Flatten(),
        SEBlock(head_width),
        nn.Dropout(),
        nn.Linear(head_width, num_classes),
    )
def __init__(self, in_channels=5, hidden_channels=1024):
    """Residual discriminator producing a single logit.

    Args:
        in_channels: number of channels in the input tensor.
        hidden_channels: flattened feature count entering the final linear
            layer — must equal 16 * H * W of the last ResConv2d output, so it
            is presumably tied to a fixed input resolution; confirm with
            callers.
    """
    super(RDiscriminator, self).__init__()
    stages = [
        ResConv2d(in_channels, 32),
        ResConv2d(32, 32),
        ResConv2d(32, 16),
        Flatten(),
        nn.Linear(hidden_channels, 1),
    ]
    self.model = nn.Sequential(*stages)