Example #1
import torch.nn as nn

class LeNet5(nn.Module):
    def __init__(self, n_classes):
        super(LeNet5, self).__init__()
        # Feature extractor: two conv -> tanh -> average-pool stages, then a
        # final 5x5 conv that collapses the spatial dimensions to 1x1.
        self.feature_extractor = nn.Sequential(
            nn.Conv2d(in_channels=1, out_channels=6, kernel_size=5, stride=1),
            nn.Tanh(),
            nn.AvgPool2d(kernel_size=2),
            nn.Conv2d(in_channels=6, out_channels=16, kernel_size=5, stride=1),
            nn.Tanh(),
            nn.AvgPool2d(kernel_size=2),
            nn.Conv2d(in_channels=16, out_channels=120, kernel_size=5, stride=1),
            nn.Tanh(),
        )
        # Fully connected classifier head.
        self.classifier = nn.Sequential(
            nn.Linear(in_features=120, out_features=84),
            nn.Tanh(),
            nn.Linear(in_features=84, out_features=n_classes),
        )
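
The classifier's in_features=120 only lines up if the extractor collapses the
spatial dimensions to 1x1, which holds for the classic 32x32 single-channel
LeNet-5 input (32 -> 28 -> 14 -> 10 -> 5 -> 1). A minimal usage sketch,
assuming a forward pass that flattens between the two stages (the snippet
omits forward):

import torch

model = LeNet5(n_classes=10)
x = torch.randn(8, 1, 32, 32)                 # batch of 32x32 grayscale images
feats = model.feature_extractor(x)            # -> (8, 120, 1, 1)
logits = model.classifier(feats.flatten(1))   # -> (8, 10)
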
Example #2
import math

import torch.nn as nn

# ResidualBlock and UpsampleBLock are helper modules defined elsewhere in the
# source repository.

class Generator(nn.Module):
    def __init__(self, scale_factor):
        # One UpsampleBLock per doubling of resolution, so scale_factor must
        # be a power of two.
        upsample_block_num = int(math.log(scale_factor, 2))

        super(Generator, self).__init__()
        self.block1 = nn.Sequential(nn.Conv2d(3, 64, kernel_size=9, padding=4),
                                    nn.PReLU())
        self.block2 = ResidualBlock(64)
        self.block3 = ResidualBlock(64)
        self.block4 = ResidualBlock(64)
        self.block5 = ResidualBlock(64)
        self.block6 = ResidualBlock(64)
        self.block7 = nn.Sequential(
            nn.Conv2d(64, 64, kernel_size=3, padding=1), nn.PReLU())
        block8 = [UpsampleBLock(64, 2) for _ in range(upsample_block_num)]
        block8.append(nn.Conv2d(64, 3, kernel_size=9, padding=4))
        block8.append(nn.Tanh())
        self.block8 = nn.Sequential(*block8)
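
Since each UpsampleBLock doubles the spatial resolution, scale_factor must be
a power of two: scale_factor=4 yields two upsample blocks, 8 yields three. A
minimal instantiation sketch (the snippet omits forward, which in the source
presumably chains block1 through block8):

model = Generator(scale_factor=4)  # int(math.log(4, 2)) = 2 upsample blocks
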
Example #3
import torch.nn as nn

def act_fun(act_type):
    """Map an activation name to the corresponding nn module."""
    if act_type == "relu":
        return nn.ReLU()

    if act_type == "tanh":
        return nn.Tanh()

    if act_type == "sigmoid":
        return nn.Sigmoid()

    if act_type == "leaky_relu":
        return nn.LeakyReLU(0.2)

    if act_type == "elu":
        return nn.ELU()

    if act_type == "softmax":
        # Log-probabilities rather than a plain softmax; pairs with NLLLoss.
        return nn.LogSoftmax(dim=1)

    if act_type == "linear":
        # LeakyReLU with slope 1 is the identity, i.e. no activation.
        return nn.LeakyReLU(1)

    # Fail loudly instead of silently returning None for unknown names.
    raise ValueError("Unsupported activation '{}'".format(act_type))
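
A quick usage sketch: the returned module can be dropped into an nn.Sequential
or called directly on a tensor.

import torch

act = act_fun("leaky_relu")     # nn.LeakyReLU(negative_slope=0.2)
y = act(torch.randn(4, 16))     # elementwise, output shape (4, 16)
identity = act_fun("linear")    # LeakyReLU(1): slope 1 everywhere, identity map
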
Example #4
import oneflow as flow
import oneflow.nn as nn

# The class name below is assumed; the snippet shows the constructor of a
# OneFlow RNN-style module.
class RNN(nn.Module):
    def __init__(
        self,
        input_size: int,
        hidden_size: int,
        num_layers: int = 1,
        nonlinearity: str = "tanh",
        bias: bool = True,
        batch_first: bool = False,
        dropout: float = 0,
        bidirectional: bool = False,
    ):

        super().__init__()

        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.nonlinearity = nonlinearity
        self.bias = bias
        self.batch_first = batch_first
        self.dropout = dropout
        self.bidirectional = bidirectional
        num_directions = 2 if bidirectional else 1
        gate_size = hidden_size
        self.drop = nn.Dropout(self.dropout)

        if self.nonlinearity == "tanh":
            self.act = nn.Tanh()
        elif self.nonlinearity == "relu":
            self.act = nn.ReLU()
        else:
            raise ValueError("Unknown nonlinearity '{}'".format(self.nonlinearity))

        for layer in range(num_layers):
            for direction in range(num_directions):

                real_hidden_size = hidden_size
                layer_input_size = (
                    input_size if layer == 0 else real_hidden_size * num_directions
                )

                # TODO: Modify after adding the stride attribute
                # w_ih = flow.nn.Parameter(flow.Tensor(gate_size, layer_input_size))
                # w_hh = flow.nn.Parameter(flow.Tensor(gate_size, real_hidden_size))
                # b_ih = flow.nn.Parameter(flow.Tensor(gate_size))
                # b_hh = flow.nn.Parameter(flow.Tensor(gate_size))

                w_ih = flow.nn.Parameter(flow.Tensor(layer_input_size, gate_size))
                w_hh = flow.nn.Parameter(flow.Tensor(real_hidden_size, gate_size))
                b_ih = flow.nn.Parameter(flow.Tensor(gate_size))
                b_hh = flow.nn.Parameter(flow.Tensor(gate_size))

                # Bias tensors are only registered when requested.
                layer_params = (w_ih, w_hh, b_ih, b_hh) if bias else (w_ih, w_hh)

                suffix = "_reverse" if direction == 1 else ""
                param_names = ["weight_ih_l{}{}", "weight_hh_l{}{}"]
                if bias:
                    param_names += ["bias_ih_l{}{}", "bias_hh_l{}{}"]
                param_names = [x.format(layer, suffix) for x in param_names]

                for name, param in zip(param_names, layer_params):
                    setattr(self, name, param)

        # reset_parameters() (defined elsewhere in the source module)
        # initializes all of the registered weights and biases.
        self.reset_parameters()
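
Registering each tensor with setattr under names such as weight_ih_l0 (with a
_reverse suffix for the backward direction) mirrors torch.nn.RNN's parameter
naming, so state dicts stay interchangeable; the TODO above notes that the
weight matrices are currently stored transposed relative to PyTorch's layout.
A small inspection sketch, assuming the class name RNN used above:

rnn = RNN(input_size=16, hidden_size=32, num_layers=2, bidirectional=True)
for name, p in rnn.named_parameters():
    print(name, tuple(p.shape))
# weight_ih_l0 (16, 32), weight_hh_l0 (32, 32), bias_ih_l0 (32,), ...
# weight_ih_l1 (64, 32): layer 1 consumes both directions' outputs.
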
Example #5
from typing import Callable
import torch.nn as nn

class Pooler(nn.Module):  # class name assumed; a BERT-style pooler
    def __init__(self, config: Callable[..., None]) -> None:
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()
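
The snippet omits forward, but in BERT-style encoders a pooler of this shape
is applied to the hidden state of the first ([CLS]) token. A hypothetical
sketch of that forward:

    def forward(self, hidden_states):
        # hidden_states: (batch, seq_len, hidden_size); pool the first token.
        first_token_tensor = hidden_states[:, 0]
        return self.activation(self.dense(first_token_tensor))
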
Example #6
import functools

import torch.nn as nn

# ResnetBlock is a helper module defined elsewhere in the source repository.

class ResnetGenerator(nn.Module):
    def __init__(
        self,
        input_nc=3,
        output_nc=3,
        ngf=64,
        norm_layer=nn.InstanceNorm2d,
        use_dropout=False,
        n_blocks=9,
        padding_type="reflect",
    ):
        """Construct a Resnet-based generator

        Parameters:
            input_nc (int)      -- the number of channels in input images
            output_nc (int)     -- the number of channels in output images
            ngf (int)           -- the number of filters in the last conv layer
            norm_layer          -- normalization layer
            use_dropout (bool)  -- if use dropout layers
            n_blocks (int)      -- the number of ResNet blocks
            padding_type (str)  -- the name of padding layer in conv layers: reflect | replicate | zero
        """
        assert n_blocks >= 0
        super(ResnetGenerator, self).__init__()
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d

        model = [
            nn.ReflectionPad2d(3),
            nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
            norm_layer(ngf),
            nn.ReLU(True),
        ]

        n_downsampling = 2
        for i in range(n_downsampling):  # add downsampling layers
            mult = 2**i
            model += [
                nn.Conv2d(
                    ngf * mult,
                    ngf * mult * 2,
                    kernel_size=3,
                    stride=2,
                    padding=1,
                    bias=use_bias,
                ),
                norm_layer(ngf * mult * 2),
                nn.ReLU(True),
            ]

        mult = 2**n_downsampling
        for i in range(n_blocks):  # add ResNet blocks
            model += [
                ResnetBlock(
                    ngf * mult,
                    padding_type=padding_type,
                    norm_layer=norm_layer,
                    use_dropout=use_dropout,
                    use_bias=use_bias,
                )
            ]

        for i in range(n_downsampling):  # add upsampling layers
            mult = 2**(n_downsampling - i)
            model += [
                nn.ConvTranspose2d(
                    ngf * mult,
                    int(ngf * mult / 2),
                    kernel_size=3,
                    stride=2,
                    padding=1,
                    output_padding=1,
                    bias=use_bias,
                ),
                norm_layer(int(ngf * mult / 2)),
                nn.ReLU(True),
            ]
        model += [nn.ReflectionPad2d(3)]
        model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
        model += [nn.Tanh()]

        self.model = nn.Sequential(*model)
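
Because the two stride-2 downsampling convs are mirrored by two stride-2
transposed convs (with output_padding=1), the generator preserves spatial
size, and the final Tanh bounds outputs to [-1, 1]. A minimal usage sketch,
assuming ResnetBlock is available (self.model is a Sequential, so it can be
called directly even though the snippet omits forward):

import torch

net = ResnetGenerator(input_nc=3, output_nc=3, ngf=64, n_blocks=9)
img = torch.randn(1, 3, 256, 256)
out = net.model(img)   # -> (1, 3, 256, 256), values in [-1, 1]
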
Example #7
import torch.nn as nn

class Pooler(nn.Module):  # class name assumed; same pooler pattern as Example #5
    def __init__(self, hidden_size):
        super().__init__()
        self.dense = nn.Linear(hidden_size, hidden_size)
        self.activation = nn.Tanh()