Example #1
 def __init__(self):
     super(Net, self).__init__()
     self.conv1 = Conv2d(1, 10, kernel_size=5)
     self.conv2 = Conv2d(10, 20, kernel_size=5)
     self.conv2_drop = Dropout2d()
     self.fc1 = Linear(320, 50)
     self.fc2 = Linear(50, 10)
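This matches the layer stack of the classic PyTorch MNIST example. A minimal forward-pass sketch one might add to the class, assuming 28x28 single-channel inputs (the original forward method is not shown here):

import torch.nn.functional as F

def forward(self, x):
    # 28x28 -> conv1 (5x5) -> 24x24 -> 2x2 max pool -> 12x12
    x = F.relu(F.max_pool2d(self.conv1(x), 2))
    # 12x12 -> conv2 (5x5) -> 8x8 -> 2x2 max pool -> 4x4, so 20 * 4 * 4 = 320
    x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
    x = x.view(-1, 320)
    x = F.relu(self.fc1(x))
    return F.log_softmax(self.fc2(x), dim=1)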
Example #2
    def __init__(self,
                 state_space,
                 channels,
                 action_space,
                 epsilon=0.99,
                 epsilon_min=0.01,
                 epsilon_decay=0.99,
                 gamma=0.9,
                 learning_rate=0.01):
        super(Agent, self).__init__()
        self.action_space = action_space
        self.state_space = state_space
        self.channels = channels
        self.learning_rate = learning_rate
        self.epsilon = epsilon
        self.epsilon_min = epsilon_min
        self.epsilon_decay = epsilon_decay
        self.gamma = gamma

        self.conv1 = Conv2d(self.channels, 32, 8)
        self.conv2 = Conv2d(32, 64, 4)
        self.conv3 = Conv2d(64, 128, 3)
        self.fc1 = Linear(128 * 52 * 52, 64)
        self.fc2 = Linear(64, 32)
        self.output = Linear(32, action_space)

        self.loss_fn = MSELoss()
        self.optimizer = Adam(self.parameters(), lr=self.learning_rate)
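The hard-coded Linear(128 * 52 * 52, 64) pins down the expected input resolution: with the default stride of 1 and no padding, each convolution shrinks the feature map by kernel_size - 1, so a height of H comes out as H - 7 - 3 - 2 = H - 12, and 52 therefore implies 64x64 observations. A quick shape check, with hypothetical constructor values:

import torch

agent = Agent(state_space=(64, 64), channels=3, action_space=4)
x = torch.zeros(1, 3, 64, 64)
feat = agent.conv3(agent.conv2(agent.conv1(x)))
assert feat.shape == (1, 128, 52, 52)   # flattens to fc1's expected 128 * 52 * 52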
Example #3
 def __init__(self,
              in_channels,
              out_channels,
              kernel_size=3,
              dilation=1,
              stride=1,
              padding=1,
              activation='PReLU',
              bias=False,
              asymmetric=False,
              dropout_prob=0):
     super(RegularBottleNeck, self).__init__()
     internal_channels = in_channels // 4
     self.conv_down = Sequential(
         Conv2d(in_channels,
                internal_channels,
                kernel_size=1,
                stride=1,
                padding=0,
                bias=bias), BatchNorm2d(internal_channels),
         PReLU() if activation == 'PReLU' else ReLU())
     if not asymmetric:
         self.conv_main = Sequential(
             Conv2d(internal_channels,
                    internal_channels,
                    kernel_size=kernel_size,
                    dilation=dilation,
                    stride=stride,
                    padding=padding,
                    bias=bias), BatchNorm2d(internal_channels),
             PReLU() if activation == 'PReLU' else ReLU())
     else:
         self.conv_main = Sequential(
             Conv2d(internal_channels,
                    internal_channels,
                    kernel_size=(kernel_size, 1),
                    dilation=dilation,
                    stride=stride,
                    padding=(padding, 0),
                    bias=bias), BatchNorm2d(internal_channels),
             PReLU() if activation == 'PReLU' else ReLU(),
             Conv2d(internal_channels,
                    internal_channels,
                    kernel_size=(1, kernel_size),
                    dilation=dilation,
                    stride=stride,
                    padding=(0, padding),
                    bias=bias), BatchNorm2d(internal_channels),
             PReLU() if activation == 'PReLU' else ReLU())
     self.conv_up = Sequential(
         Conv2d(internal_channels,
                out_channels,
                kernel_size=1,
                stride=1,
                padding=0,
                bias=bias), BatchNorm2d(out_channels),
         PReLU() if activation == 'PReLU' else ReLU())
     self.regularizer = Dropout2d(p=dropout_prob)
     self.out_activation = PReLU() if activation == 'PReLU' else ReLU()
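This is an ENet-style bottleneck: a 1x1 projection down to in_channels // 4, a main convolution (optionally factored into kx1 and 1xk when asymmetric), and a 1x1 expansion back up. A plausible forward sketch under the usual ENet residual wiring (hedged; the original forward is not shown, and this assumes in_channels == out_channels so the skip connection lines up):

def forward(self, x):
    # extension branch: bottleneck down, main conv, bottleneck up, dropout
    ext = self.regularizer(self.conv_up(self.conv_main(self.conv_down(x))))
    # residual sum with the untouched main branch
    return self.out_activation(x + ext)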
Example #4
 def __init__(self, in_channels, out_channels, ksize=3, stride=2, padding=1, bias=True):
     super(dsc, self).__init__()
     # depthwise: one ksize x ksize filter per input channel (groups=in_channels)
     self.conv1 = Conv2d(in_channels=in_channels, out_channels=in_channels,
                         kernel_size=ksize, stride=stride, padding=padding,
                         bias=bias, groups=in_channels)
     # self.bn1 = BatchNorm2d(in_channels)
     # pointwise: 1x1 convolution mixes the depthwise outputs across channels
     self.conv2 = Conv2d(
         in_channels=in_channels, out_channels=out_channels, kernel_size=1,
         groups=1, bias=bias)
     self.bna1 = bna(out_channels)
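The depthwise plus pointwise split is what makes this a depthwise-separable convolution, and the savings are easy to verify. A rough parameter comparison, assuming the dsc class above and its bna helper are importable:

layer = dsc(in_channels=32, out_channels=64)
conv_params = (sum(p.numel() for p in layer.conv1.parameters())
               + sum(p.numel() for p in layer.conv2.parameters()))
# depthwise 32*1*3*3 + 32 = 320, pointwise 64*32*1*1 + 64 = 2112 -> 2432 total,
# versus 64*32*3*3 + 64 = 18496 for a dense Conv2d(32, 64, 3, bias=True)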
Example #5
File: cnn.py Project: makar21/core
 def __init__(self):
     super(Net, self).__init__()
     self.conv1 = Conv2d(3, 6, 5)
     self.pool = MaxPool2d(2, 2)
     self.conv2 = Conv2d(6, 16, 5)
     self.fc1 = Linear(16 * 5 * 5, 120)
     self.fc2 = Linear(120, 84)
     self.fc3 = Linear(84, 10)
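These appear to be the layers of the standard PyTorch CIFAR-10 tutorial network. A forward sketch consistent with the sizes above, assuming 3x32x32 inputs (not part of the original snippet):

import torch.nn.functional as F

def forward(self, x):
    x = self.pool(F.relu(self.conv1(x)))   # 32 -> 28 -> 14
    x = self.pool(F.relu(self.conv2(x)))   # 14 -> 10 -> 5, so 16 * 5 * 5 = 400
    x = x.view(-1, 16 * 5 * 5)
    x = F.relu(self.fc1(x))
    x = F.relu(self.fc2(x))
    return self.fc3(x)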
Example #6
 def __init__(self, in_channels, n_classes=13, bias=True, mode='B'):
     super(DFANet, self).__init__()
     channels = {'A': 72, 'B': 48}
     ch = channels[mode]
     self.conv1 = Sequential(Conv2d(in_channels, 8, 3, 2, 1, bias=bias),
                             bna(8))
     self.enc2_1 = enc(in_channels=8, stage=2, mode=mode, bias=bias)
     self.enc3_1 = enc(in_channels=ch, stage=3, mode=mode, bias=bias)
     self.enc4_1 = enc(in_channels=ch * 2, stage=4, mode=mode, bias=bias)
     self.fca1 = fca(ch * 4, ch * 4, bias=bias)
     self.enc2_2 = enc(in_channels=ch * 5, stage=2, mode=mode, bias=bias)
     self.enc3_2 = enc(in_channels=ch * 3, stage=3, mode=mode, bias=bias)
     self.enc4_2 = enc(in_channels=ch * 6, stage=4, mode=mode, bias=bias)
     self.fca2 = fca(ch * 4, ch * 4, bias=bias)
     self.enc2_3 = enc(in_channels=ch * 5, stage=2, mode=mode, bias=bias)
     self.enc3_3 = enc(in_channels=ch * 3, stage=3, mode=mode, bias=bias)
     self.enc4_3 = enc(in_channels=ch * 6, stage=4, mode=mode, bias=bias)
     self.fca3 = fca(ch * 4, ch * 4, bias=bias)
     self.de2_1 = Sequential(Conv2d(ch, ch // 2, 1, bias=bias),
                             bna(ch // 2))
     self.de2_2 = Sequential(Conv2d(ch, ch // 2, 1, bias=bias),
                             bna(ch // 2))
     self.de2_3 = Sequential(Conv2d(ch, ch // 2, 1, bias=bias),
                             bna(ch // 2))
     self.final = Sequential(Conv2d(ch // 2, n_classes, 1, bias=bias),
                             bna(n_classes))
     self.de4_1 = Sequential(Conv2d(ch * 4, n_classes, 1, bias=bias),
                             bna(n_classes))
     self.de4_2 = Sequential(Conv2d(ch * 4, n_classes, 1, bias=bias),
                             bna(n_classes))
     self.de4_3 = Sequential(Conv2d(ch * 4, n_classes, 1, bias=bias),
                             bna(n_classes))
Example #7
 def __init__(self,
              in_channels,
              out_channels,
              kernel_size=3,
              dilation=1,
              stride=2,
              padding=1,
              output_padding=1,
              activation='PReLU',
              bias=False,
              dropout_prob=0.1):
     super(UpsamplingBottleNeck, self).__init__()
     internal_channels = in_channels // 4
     self.conv_down = Sequential(
         Conv2d(in_channels,
                internal_channels,
                kernel_size=1,
                stride=1,
                padding=0,
                bias=bias), BatchNorm2d(internal_channels),
         PReLU() if activation == 'PReLU' else ReLU())
     self.conv_main = Sequential(
         ConvTranspose2d(internal_channels,
                         internal_channels,
                         kernel_size=kernel_size,
                         stride=stride,
                         padding=padding,
                         output_padding=output_padding,
                         dilation=dilation,
                         bias=bias), BatchNorm2d(internal_channels),
         PReLU() if activation == 'PReLU' else ReLU())
     self.conv_up = Sequential(
         Conv2d(internal_channels,
                out_channels,
                kernel_size=1,
                stride=1,
                padding=0,
                bias=bias), BatchNorm2d(out_channels),
         PReLU() if activation == 'PReLU' else ReLU())
     self.main_conv = Sequential(
         Conv2d(in_channels,
                out_channels,
                kernel_size=1,
                stride=1,
                padding=0,
                bias=bias), BatchNorm2d(out_channels))
     self.mainmaxunpool = MaxUnpool2d(kernel_size=2, stride=2, padding=0)
     self.regularizer = Dropout2d(p=dropout_prob)
     self.out_activation = PReLU() if activation == 'PReLU' else ReLU()
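Compared with the regular bottleneck, the extension branch here swaps the main convolution for a ConvTranspose2d, and the main branch gets its own 1x1 projection plus MaxUnpool2d so it can be upsampled with the pooling indices saved by the matching encoder stage. A plausible forward sketch in the ENet style (hedged; the original forward and the indices plumbing are not shown):

def forward(self, x, max_indices):
    # main branch: project, then unpool using the encoder's saved indices
    main = self.mainmaxunpool(self.main_conv(x), max_indices)
    # extension branch: 1x1 down, transposed-conv upsample, 1x1 up, dropout
    ext = self.regularizer(self.conv_up(self.conv_main(self.conv_down(x))))
    return self.out_activation(main + ext)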
Example #8
    def __init__(
        self,
        in_shape: Tuple[int, int, int],
        outdims: int,
        bias: bool,
        init_noise: float = 1e-3,
        attention_kernel: int = 1,
        attention_layers: int = 1,
        mean_activity: Optional[Mapping[str, float]] = None,
        feature_reg_weight: float = 1.0,
        gamma_readout: Optional[
            float] = None,  # deprecated, use feature_reg_weight instead
        **kwargs: Any,
    ) -> None:
        super().__init__()
        self.in_shape = in_shape
        self.outdims = outdims
        self.feature_reg_weight = self.resolve_deprecated_gamma_readout(
            feature_reg_weight, gamma_readout)  # type: ignore[no-untyped-call]
        self.mean_activity = mean_activity
        c, w, h = in_shape
        self.features = Parameter(torch.Tensor(self.outdims, c))

        attention = Sequential()
        for i in range(attention_layers - 1):
            attention.add_module(
                f"conv{i}",
                Conv2d(c, c, attention_kernel, padding=attention_kernel > 1),
            )
            attention.add_module(
                f"norm{i}", BatchNorm2d(c))  # type: ignore[no-untyped-call]
            attention.add_module(f"nonlin{i}", ELU())
        # note: this `else` is attached to the `for` loop above; with no `break`
        # in the loop it always runs once, adding the final projection layer
        else:
            attention.add_module(
                f"conv{attention_layers}",
                Conv2d(c,
                       outdims,
                       attention_kernel,
                       padding=attention_kernel > 1),
            )
        self.attention = attention

        self.init_noise = init_noise
        if bias:
            bias_param = Parameter(torch.Tensor(self.outdims))
            self.register_parameter("bias", bias_param)
        else:
            self.register_parameter("bias", None)
        self.initialize(mean_activity)
Example #9
def mini_vgg_features():
    model = Sequential(
        # 32 x 32
        Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
        ReLU(),
        Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
        ReLU(),
        MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False),

        # 16 x 16
        Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
        ReLU(),
        Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
        ReLU(),
        MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False),

        # 8 x 8
        Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
        ReLU(),
        Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
        ReLU(),
        Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
        ReLU(),
        MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False),

        # 4 x 4
    )
    return model
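Since every Conv2d here uses padding 1 with a 3x3 kernel (size-preserving) and each MaxPool2d halves the spatial size, a 32x32 input leaves this stack as a 256-channel 4x4 feature map:

import torch

feats = mini_vgg_features()
out = feats(torch.zeros(1, 3, 32, 32))
assert out.shape == (1, 256, 4, 4)   # 32 -> 16 -> 8 -> 4 across the three pools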
Example #10
 def __init__(self, in_channels, out_channels, stride, bias, down_sampling=False):
     super(block, self).__init__()
     # out_channels is a 3-element list: the widths of the three stacked dsc layers
     if down_sampling or in_channels != out_channels[2]:
         self.shortcut = Sequential(
             Conv2d(in_channels, out_channels[2], 1, stride, padding=0,
                    groups=1, bias=bias),
             bna(out_channels[2]))
     else:
         self.shortcut = None
     self.conv1 = dsc(in_channels, out_channels[0], 3, stride=1, bias=bias)
     self.conv2 = dsc(out_channels[0], out_channels[1], 3, stride=1, bias=bias)
     self.conv3 = dsc(out_channels[1], out_channels[2], 3, stride=stride, bias=bias)
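A plausible forward for this block, under the usual projection-shortcut residual pattern (not shown in the original snippet); the strides of conv3 and the shortcut match, so the two branches stay shape-compatible:

def forward(self, x):
    out = self.conv3(self.conv2(self.conv1(x)))
    identity = self.shortcut(x) if self.shortcut is not None else x
    return out + identity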
Example #11
File: farnet.py Project: wr19960001/FarSeg
    def __init__(self, backbone="ResNet50", num_classes=7, num_feature=256, pretrained=False, ignore_index=255, **kwargs):
        super(FarNet, self).__init__()

        self.num_classes = num_classes
        self.num_feature = num_feature
        self.ignore_index = ignore_index
        self.EPS = 1e-5
        self.current_step = 0
        self.annealing_step = 2000
        self.focal_factor = 4
        self.focal_z = 1.0

        self.backbone = BACKBONES[backbone](pretrained=pretrained)

        self.conv_c6 = Conv2d(2048, num_feature, 1)
        self.conv_c5 = Conv2d(2048, num_feature, 1)
        self.conv_c4 = Conv2d(1024, num_feature, 1)
        self.conv_c3 = Conv2d(512, num_feature, 1)
        self.conv_c2 = Conv2d(256, num_feature, 1)

        self.fs5 = FSModule(num_feature, num_feature)
        self.fs4 = FSModule(num_feature, num_feature)
        self.fs3 = FSModule(num_feature, num_feature)
        self.fs2 = FSModule(num_feature, num_feature)

        self.up5 = Decoder(num_feature, 8)
        self.up4 = Decoder(num_feature, 4)
        self.up3 = Decoder(num_feature, 2)
        self.up2 = Decoder(num_feature, 1)

        self.classify = Conv2d(num_feature, num_classes, 3, padding=1)
Example #12
 def __init__(self,
              in_channels,
              out_channels,
              activation='PReLU',
              bias=False):
     super(InitialBlock, self).__init__()
     self.conv = Conv2d(in_channels=in_channels,
                        out_channels=out_channels - 3,
                        kernel_size=3,
                        stride=2,
                        padding=1,
                        bias=bias)
     self.maxpooling = MaxPool2d(kernel_size=2, stride=2, padding=0)
     self.bnActivate = Sequential(
         BatchNorm2d(out_channels),
         PReLU() if activation == 'PReLU' else ReLU())
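The convolution branch produces out_channels - 3 maps while the max-pooling branch passes the (assumed 3-channel) input straight through, so concatenating the two yields exactly out_channels, which is what the shared BatchNorm2d expects. A forward sketch in the ENet InitialBlock style (hedged; not part of the snippet):

import torch

def forward(self, x):
    # concatenate the strided conv branch with the pooled input along channels
    out = torch.cat([self.conv(x), self.maxpooling(x)], dim=1)
    return self.bnActivate(out)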
Example #13
def test_degenerate_conv_with_dilation(dilation1: int) -> None:
    """
    Check if a 2D convolution with a degenerate kernel size along one dimension, and a
    dilation > 1 along the same dimension, works as expected.
    :return:
    """
    input = torch.zeros((1, 1, 10, 20))
    # Kernel is degenerate (size 1) across dim 0, even though dilation1 may be > 1 there
    feature_channels = 2
    conv = Conv2d(1, feature_channels, kernel_size=(1, 3), dilation=(dilation1, dilation1))
    output = conv(input)
    print("Input has size {}, output has size {}".format(input.shape, output.shape))
    # Expectation: the height (the degenerate kernel dimension) is unchanged even
    # though a dilation is specified there, while the width shrinks.
    assert output.shape[0] == input.shape[0]
    assert output.shape[1] == feature_channels
    assert output.shape[2] == input.shape[2]
    assert output.shape[3] < input.shape[3]
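With no padding and stride 1, the output size follows out = in - dilation * (kernel_size - 1), so the height (kernel 1) stays at 10 for any dilation while the width shrinks by 2 * dilation1. For example, calling the test with a hypothetical dilation of 2 should print an output size of torch.Size([1, 2, 10, 16]):

test_degenerate_conv_with_dilation(2)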
Example #14
 def __init__(self, in_channels, n_classes=13, bias=True, mode='B'):
     super(DFANet, self).__init__()
     channels = {'A': [48, 96, 192], 'B': [32, 64, 128]}
     ch = channels[mode]
     self.conv1 = Sequential(Conv2d(in_channels, 8, 3, 2, 1, bias=bias),
                             bna(8))
     self.enc2_1 = enc(in_channels=8, stage=2, mode=mode, bias=bias)
     self.enc3_1 = enc(in_channels=ch[0], stage=3, mode=mode, bias=bias)
     self.enc4_1 = enc(in_channels=ch[1], stage=4, mode=mode, bias=bias)
     self.fca1 = fca(ch[2], ch[2], bias=bias)
     self.enc2_2 = enc(in_channels=ch[2] + ch[0], stage=2, mode=mode, bias=bias)
     self.enc3_2 = enc(in_channels=ch[0] + ch[1], stage=3, mode=mode, bias=bias)
     self.enc4_2 = enc(in_channels=ch[1] + ch[2], stage=4, mode=mode, bias=bias)
     self.fca2 = fca(ch[2], ch[2], bias=bias)
     self.enc2_3 = enc(in_channels=ch[2] + ch[0], stage=2, mode=mode, bias=bias)
     self.enc3_3 = enc(in_channels=ch[0] + ch[1], stage=3, mode=mode, bias=bias)
     self.enc4_3 = enc(in_channels=ch[1] + ch[2], stage=4, mode=mode, bias=bias)
     self.fca3 = fca(ch[2], ch[2], bias=bias)
     self.de2_1 = Sequential(Conv2d(ch[0], ch[0] // 2, 1, bias=bias),
                             bna(ch[0] // 2))
     self.de2_2 = Sequential(Conv2d(ch[0], ch[0] // 2, 1, bias=bias),
                             bna(ch[0] // 2))
     self.de2_3 = Sequential(Conv2d(ch[0], ch[0] // 2, 1, bias=bias),
                             bna(ch[0] // 2))
     self.final = Sequential(Conv2d(ch[0] // 2, n_classes, 1, bias=bias),
                             bna(n_classes))
     self.de4_1 = Sequential(Conv2d(ch[2], n_classes, 1, bias=bias),
                             bna(n_classes))
     self.de4_2 = Sequential(Conv2d(ch[2], n_classes, 1, bias=bias),
                             bna(n_classes))
     self.de4_3 = Sequential(Conv2d(ch[2], n_classes, 1, bias=bias),
                             bna(n_classes))
Example #15
    def __init__(
        self, in_channels, out_channels, kernel_size, stride=1, padding=0,
        dilation=1, groups=1, bias=False, padding_mode='zeros',
    ) -> None:
        super().__init__()
        self.is_calculated: bool = False
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.delta: float = 1e-3

        self.conv_layer: Conv2d = Conv2d(
            in_channels, out_channels, kernel_size, stride, padding,
            dilation, groups, bias, padding_mode,
        )
        # Conv2d normalizes kernel_size to an (h, w) tuple, so indexing below is safe
        self.kernel_size: Tuple[int, int] = self.conv_layer.kernel_size

        self.freq = nn.Parameter(
            (math.pi / 2) * math.sqrt(2)
            ** (-torch.randint(0, 5, (self.out_channels, self.in_channels)))
            .type(torch.Tensor),
            requires_grad=True,
        )

        self.theta = nn.Parameter(
            (math.pi / 8) * torch.randint(0, 8,
                                          (self.out_channels, self.in_channels)).type(torch.Tensor),
            requires_grad=True,
        )

        self.sigma = nn.Parameter(math.pi / self.freq,
                                  requires_grad=True)

        self.psi = nn.Parameter(
            math.pi * torch.rand(self.out_channels, self.in_channels),
            requires_grad=True,
        )

        self.x0 = nn.Parameter(
            torch.ceil(torch.Tensor([self.kernel_size[0] / 2]))[0],
            requires_grad=True,
        )

        self.y0 = nn.Parameter(
            torch.ceil(torch.Tensor([self.kernel_size[1] / 2]))[0],
            requires_grad=True,
        )

        self.y, self.x = torch.meshgrid([
            torch.linspace(-self.x0 + 1, self.x0 + 0, self.kernel_size[0]),
            torch.linspace(-self.y0 + 1, self.y0 + 0, self.kernel_size[1]),
        ])
        self.y = nn.Parameter(self.y)
        self.x = nn.Parameter(self.x)

        self.weight = nn.Parameter(
            torch.empty(self.conv_layer.weight.shape, requires_grad=True),
            requires_grad=True,
        )

        self.register_parameter("freq", self.freq)
        self.register_parameter("theta", self.theta)
        self.register_parameter("sigma", self.sigma)
        self.register_parameter("psi", self.psi)
        self.register_parameter("x_shape", self.x0)
        self.register_parameter("y_shape", self.y0)
        self.register_parameter("y_grid", self.y)
        self.register_parameter("x_grid", self.x)
        self.register_parameter("weight", self.weight)
Example #16
File: farnet.py Project: wr19960001/FarSeg
 def __init__(self, c_in, c_out, filter_size, stride=1, padding=0, **kwargs):
     super(Conv2dBN, self).__init__()
     self.conv = Conv2d(c_in, c_out, filter_size, stride=stride, padding=padding, **kwargs)
     self.bn = BatchNorm2d(c_out)
     self.relu = ReLU()
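Presumably this wrapper simply chains its three members; a one-line forward sketch (not in the original snippet):

def forward(self, x):
    return self.relu(self.bn(self.conv(x)))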
Example #17
 def __init__(self, in_channels, out_channels, bias):
     super(fca, self).__init__()
     self.pool = AdaptiveAvgPool2d(1)
     self.fc = Linear(in_channels, 1000, bias=bias)
     self.conv = Conv2d(1000, out_channels, 1, bias=bias)
     self.bna = bna(out_channels)
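This looks like DFANet's fully-connected attention head: global average pooling to a vector, a 1000-way fully connected layer (presumably sized to reuse ImageNet-pretrained classifier weights, as in the paper), then a 1x1 convolution back to feature space. A plausible forward sketch, assuming the result reweights the input channels and in_channels == out_channels as in the usages above:

def forward(self, x):
    n, c = x.shape[:2]
    w = self.pool(x).view(n, c)           # global context vector
    w = self.fc(w).view(n, 1000, 1, 1)    # fc attention
    w = self.bna(self.conv(w))            # back to out_channels
    return x * w                          # channel-wise reweighting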
Example #18
    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        stride=1,
        padding=0,
        dilation=1,
        groups=1,
        bias=False,
        padding_mode="zeros",
    ):
        super().__init__()

        self.is_calculated = False

        self.conv_layer = Conv2d(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            padding,
            dilation,
            groups,
            bias,
            padding_mode,
        )
        self.kernel_size = self.conv_layer.kernel_size

        # small addition to avoid division by zero
        self.delta = 1e-3

        # freq, theta, sigma are set up according to S. Meshgini,
        # A. Aghagolzadeh and H. Seyedarabi, "Face recognition using
        # Gabor filter bank, kernel principal component analysis
        # and support vector machine"
        self.freq = Parameter(
            (math.pi / 2) * math.sqrt(2)
            **(-torch.randint(0, 5,
                              (out_channels, in_channels))).type(torch.Tensor),
            requires_grad=True,
        )
        self.theta = Parameter(
            (math.pi / 8) *
            torch.randint(0, 8,
                          (out_channels, in_channels)).type(torch.Tensor),
            requires_grad=True,
        )
        self.sigma = Parameter(math.pi / self.freq, requires_grad=True)
        self.psi = Parameter(math.pi * torch.rand(out_channels, in_channels),
                             requires_grad=True)

        self.x0 = Parameter(
            torch.ceil(torch.Tensor([self.kernel_size[0] / 2]))[0],
            requires_grad=False)
        self.y0 = Parameter(
            torch.ceil(torch.Tensor([self.kernel_size[1] / 2]))[0],
            requires_grad=False)

        self.y, self.x = torch.meshgrid([
            torch.linspace(-self.x0 + 1, self.x0 + 0, self.kernel_size[0]),
            torch.linspace(-self.y0 + 1, self.y0 + 0, self.kernel_size[1]),
        ])
        self.y = Parameter(self.y)
        self.x = Parameter(self.x)

        self.weight = Parameter(
            torch.empty(self.conv_layer.weight.shape, requires_grad=True),
            requires_grad=True,
        )

        self.register_parameter("freq", self.freq)
        self.register_parameter("theta", self.theta)
        self.register_parameter("sigma", self.sigma)
        self.register_parameter("psi", self.psi)
        self.register_parameter("x_shape", self.x0)
        self.register_parameter("y_shape", self.y0)
        self.register_parameter("y_grid", self.y)
        self.register_parameter("x_grid", self.x)
        self.register_parameter("weight", self.weight)