Example #1
File: net.py Project: duyiqi17/PaddleRec
    def __init__(self,
                 feature_vocabulary,
                 embedding_size,
                 tower_dims=[128, 64, 32],
                 drop_prob=[0.1, 0.3, 0.3]):
        super(AITM, self).__init__()
        self.feature_vocabulary = feature_vocabulary
        self.feature_names = sorted(list(feature_vocabulary.keys()))
        self.embedding_size = embedding_size
        self.embedding_dict = nn.LayerList()
        self.__init_weight()

        self.tower_input_size = len(feature_vocabulary) * embedding_size
        self.click_tower = Tower(self.tower_input_size, tower_dims, drop_prob)
        self.conversion_tower = Tower(self.tower_input_size, tower_dims,
                                      drop_prob)
        self.attention_layer = Attention(tower_dims[-1])

        self.info_layer = nn.Sequential(nn.Linear(tower_dims[-1], 32),
                                        nn.ReLU(), nn.Dropout(drop_prob[-1]))

        self.click_layer = nn.Sequential(nn.Linear(tower_dims[-1], 1),
                                         nn.Sigmoid())
        self.conversion_layer = nn.Sequential(nn.Linear(tower_dims[-1], 1),
                                              nn.Sigmoid())
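The constructor above only defines the layers. As a rough sketch of how they compose in AITM (an assumption based on the model's information-transfer design, not the verbatim PaddleRec forward), the click tower's output is projected by info_layer and fused into the conversion tower through attention_layer:

    def forward(self, x):
        # x is assumed to be a dict mapping feature name -> integer id tensor.
        embeddings = [emb(x[name]) for name, emb in
                      zip(self.feature_names, self.embedding_dict)]
        feature = paddle.concat(embeddings, axis=1)

        tower_click = self.click_tower(feature)
        tower_conversion = self.conversion_tower(feature)

        # Transfer click-tower information into the conversion task (AITM).
        info = self.info_layer(tower_click)
        ait = self.attention_layer(paddle.stack([tower_conversion, info], axis=1))

        click = paddle.squeeze(self.click_layer(tower_click), axis=1)
        conversion = paddle.squeeze(self.conversion_layer(ait), axis=1)
        return click, conversion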
Example #2
File: dtcdscd.py Project: geoyee/PdRSCD
 def __init__(self, channel, reduction=16):
     super(SCSEBlock, self).__init__()
     self.avg_pool = nn.AdaptiveAvgPool2D(1)
     self.channel_excitation = nn.Sequential(
         nn.Conv2D(channel, int(channel//reduction), kernel_size=1, stride=1, padding=0),
         nn.ReLU(),
         nn.Conv2D(int(channel//reduction), channel, kernel_size=1, stride=1, padding=0),
         nn.Sigmoid()
     )
     self.spatial_se = nn.Sequential(
         nn.Conv2D(channel, 1, kernel_size=1, stride=1, padding=0),
         nn.Sigmoid()
     )
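Only the constructor is shown; a forward pass for an scSE block typically applies both gates to the input and sums the results. A minimal sketch, assuming the usual concurrent spatial and channel squeeze-and-excitation formulation:

 def forward(self, x):
     # Channel gate: global average pool, then a bottleneck of 1x1 convs.
     chn_se = x * self.channel_excitation(self.avg_pool(x))
     # Spatial gate: a single 1x1 conv yields one weight per pixel.
     spa_se = x * self.spatial_se(x)
     # scSE commonly fuses the two recalibrated maps by addition.
     return chn_se + spa_se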
Example #3
    def __init__(self, in_channel, mid_channel, out_channel, fuse):
        super().__init__()
        self.conv1 = nn.Conv2D(
            in_channel,
            mid_channel,
            kernel_size=1,
            bias_attr=False,
            weight_attr=ParamAttr(initializer=KaimingNormal()))
        self.conv1_bn = nn.BatchNorm2D(mid_channel)

        self.conv2 = nn.Conv2D(
            mid_channel,
            out_channel,
            kernel_size=3,
            stride=1,
            padding=1,
            bias_attr=False,
            weight_attr=ParamAttr(initializer=KaimingNormal()))
        self.conv2_bn = nn.BatchNorm2D(out_channel)
        if fuse:
            self.att_conv = nn.Sequential(
                nn.Conv2D(mid_channel * 2, 2, kernel_size=1),
                nn.Sigmoid(),
            )
        else:
            self.att_conv = None
Example #4
    def __init_weight(self):
        self.num_users = self.dataset.n_users
        self.num_items = self.dataset.m_items
        self.latent_dim = self.config['latent_dim_rec']
        self.n_layers = self.config['lightGCN_n_layers']
        self.lgn = LightGCNonv(self.n_layers)
        self.embedding_user = nn.Embedding(num_embeddings=self.num_users,
                                           embedding_dim=self.latent_dim)
        self.embedding_item = nn.Embedding(num_embeddings=self.num_items,
                                           embedding_dim=self.latent_dim)
        if self.config['pretrain'] == 0:
            emb_item_weight = np.random.normal(
                0, 0.1,
                self.embedding_item.weight.numpy().shape).astype(np.float32)
            emb_user_weight = np.random.normal(
                0, 0.1,
                self.embedding_user.weight.numpy().shape).astype(np.float32)
        else:
            emb_item_weight = np.load('item_embedding.npy').astype(np.float32)
            emb_user_weight = np.load('user_embedding.npy').astype(np.float32)
        self.embedding_item.weight.set_value(emb_item_weight)
        self.embedding_user.weight.set_value(emb_user_weight)

        self.f = nn.Sigmoid()
        num_nodes = self.dataset.n_users + self.dataset.m_items
        edges = paddle.to_tensor(self.dataset.trainEdge, dtype='int64')

        self.Graph = pgl.Graph(num_nodes=num_nodes, edges=edges)
        print(f"lgn is already to go(dropout:{self.config['dropout']})")
        self.lgn.train()
Example #5
 def __init__(self, in_channels, channels, se_ratio=12):
     super(SE, self).__init__()
     self.avg_pool = nn.AdaptiveAvgPool2D(1)
     self.fc = nn.Sequential(
         nn.Conv2D(in_channels, channels // se_ratio, kernel_size=1, padding=0),
         nn.BatchNorm2D(channels // se_ratio), nn.ReLU(),
         nn.Conv2D(channels // se_ratio, channels, kernel_size=1, padding=0), nn.Sigmoid())
Example #6
File: elmo.py Project: wbj0110/models
    def __init__(self, input_dim, num_layers):
        super(Highway, self).__init__()

        self._num_layers = num_layers

        self._highway_layers = []
        for i in range(num_layers):
            paramAttr = paddle.ParamAttr(
                initializer=I.Normal(mean=0.0, std=1.0 / np.sqrt(input_dim)))
            paramAttr_b = paddle.ParamAttr(initializer=I.Constant(value=-2.0))
            carry_linear = nn.Linear(input_dim,
                                     input_dim,
                                     weight_attr=paramAttr,
                                     bias_attr=paramAttr_b)
            self.add_sublayer('carry_linear_{}'.format(i), carry_linear)

            paramAttr = paddle.ParamAttr(
                initializer=I.Normal(mean=0.0, std=1.0 / np.sqrt(input_dim)))
            transform_linear = nn.Linear(input_dim,
                                         input_dim,
                                         weight_attr=paramAttr)
            self.add_sublayer('transform_linear_{}'.format(i),
                              transform_linear)

            self._highway_layers.append([carry_linear, transform_linear])

        self._relu = nn.ReLU()
        self._sigmoid = nn.Sigmoid()
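The constant -2.0 bias pushes the carry gate toward sigmoid(-2) ≈ 0.12 at initialization, so each layer starts out mostly copying its input. A forward pass consistent with this constructor might look like the following sketch (the exact gating convention in elmo.py is an assumption):

    def forward(self, x):
        for carry_linear, transform_linear in self._highway_layers:
            carry = self._sigmoid(carry_linear(x))        # gate in (0, 1)
            transformed = self._relu(transform_linear(x))
            # Highway combination: gate between transformed and raw input.
            x = carry * transformed + (1.0 - carry) * x
        return x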
Example #7
    def __init__(self,
                 num_channels,
                 num_filters,
                 reduction_ratio,
                 name=None,
                 data_format="NCHW"):
        super(SELayer, self).__init__()

        self.data_format = data_format
        self.pool2d_gap = AdaptiveAvgPool2D(1, data_format=self.data_format)

        self._num_channels = num_channels

        med_ch = int(num_channels / reduction_ratio)
        stdv = 1.0 / math.sqrt(num_channels * 1.0)
        self.squeeze = Linear(num_channels,
                              med_ch,
                              weight_attr=ParamAttr(
                                  initializer=Uniform(-stdv, stdv),
                                  name=name + "_sqz_weights"),
                              bias_attr=ParamAttr(name=name + '_sqz_offset'))
        self.relu = nn.ReLU()
        stdv = 1.0 / math.sqrt(med_ch * 1.0)
        self.excitation = Linear(
            med_ch,
            num_filters,
            weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv),
                                  name=name + "_exc_weights"),
            bias_attr=ParamAttr(name=name + '_exc_offset'))
        self.sigmoid = nn.Sigmoid()
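Since squeeze and excitation are Linear layers here, the pooled map must be flattened and the result reshaped back before rescaling. A plausible forward, assuming NCHW data (a sketch, not the verbatim source):

    def forward(self, x):
        pool = self.pool2d_gap(x)                       # N x C x 1 x 1
        pool = paddle.squeeze(pool, axis=[2, 3])        # N x C
        squeeze = self.relu(self.squeeze(pool))
        excitation = self.sigmoid(self.excitation(squeeze))
        excitation = paddle.unsqueeze(excitation, axis=[2, 3])
        return x * excitation                           # channel-wise rescale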
Example #8
    def __init__(self, in_channels, out_channels, size=(8, 8)):
        super(AttentionModule_stage3_cifar, self).__init__()
        self.first_residual_blocks = ResidualBlock(in_channels, out_channels)

        self.trunk_branches = nn.Sequential(
            ResidualBlock(in_channels, out_channels),
            ResidualBlock(in_channels, out_channels))

        self.middle_2r_blocks = nn.Sequential(
            ResidualBlock(in_channels, out_channels),
            ResidualBlock(in_channels, out_channels))

        self.conv1_1_blocks = nn.Sequential(
            nn.BatchNorm2D(out_channels), nn.ReLU(),
            nn.Conv2D(out_channels,
                      out_channels,
                      kernel_size=1,
                      stride=1,
                      bias_attr=False), nn.BatchNorm2D(out_channels),
            nn.ReLU(),
            nn.Conv2D(out_channels,
                      out_channels,
                      kernel_size=1,
                      stride=1,
                      bias_attr=False), nn.Sigmoid())

        self.last_blocks = ResidualBlock(in_channels, out_channels)
Example #9
File: model.py Project: WenjinW/PGL
    def __init__(self, args, dataset):
        super(LightGCN, self).__init__()
        self.args = args
        self.dataset = dataset
        self.num_users = self.dataset.n_users
        self.num_items = self.dataset.m_items
        num_nodes = self.dataset.n_users + self.dataset.m_items
        self.latent_dim = self.args.recdim
        self.n_layers = self.args.n_layers
        self.lightgcn = LightGCN_Layer(self.n_layers)
        #         self.lightgcn = LightGCNonv(self.n_layers)
        self.embedding_user = nn.Embedding(
            num_embeddings=self.num_users, embedding_dim=self.latent_dim)
        self.embedding_item = nn.Embedding(
            num_embeddings=self.num_items, embedding_dim=self.latent_dim)
        emb_item_weight = np.random.normal(
            0, 0.1,
            self.embedding_item.weight.numpy().shape).astype(np.float32)
        emb_user_weight = np.random.normal(
            0, 0.1,
            self.embedding_user.weight.numpy().shape).astype(np.float32)
        self.embedding_item.weight.set_value(emb_item_weight)
        self.embedding_user.weight.set_value(emb_user_weight)

        self.f = nn.Sigmoid()
        edges = paddle.to_tensor(self.dataset.trainEdge, dtype='int64')
        self.Graph = pgl.Graph(num_nodes=num_nodes, edges=edges)
        self.lightgcn.train()
Example #10
 def __init__(self, channel, reduction=8):
     super(SELayer, self).__init__()
     self.avg_pool = nn.AdaptiveAvgPool2D(1)
     self.fc = nn.Sequential(nn.Linear(channel, channel // reduction),
                             nn.ReLU(),
                             nn.Linear(channel // reduction, channel),
                             nn.Sigmoid())
Example #11
 def __init__(self,
              in_channels=3,
              num_classes=2,
              is_batchnorm=True,
              is_deepsup=False,
              is_CGM=False):
     super(UNet3Plus, self).__init__()
     # parameters
     self.is_deepsup = True if is_CGM else is_deepsup
     self.is_CGM = is_CGM
     # internal definition
     self.filters = [64, 128, 256, 512, 1024]
     self.cat_channels = self.filters[0]
     self.cat_blocks = 5
     self.up_channels = self.cat_channels * self.cat_blocks
     # layers
     self.encoder = Encoder(in_channels, self.filters, is_batchnorm)
     self.decoder = Decoder(self.filters, self.cat_channels,
                            self.up_channels)
     if self.is_deepsup:
         self.deepsup = DeepSup(self.up_channels, self.filters, num_classes)
         if self.is_CGM:
             self.cls = nn.Sequential(
                 nn.Dropout(p=0.5), nn.Conv2D(self.filters[4], 2, 1),
                 nn.AdaptiveMaxPool2D(1), nn.Sigmoid())
     else:
         self.outconv1 = nn.Conv2D(
             self.up_channels, num_classes, 3, padding=1)
     # initialise weights
     for sublayer in self.sublayers():
         if isinstance(sublayer, nn.Conv2D):
             kaiming_normal_init(sublayer.weight)
         elif isinstance(sublayer, (nn.BatchNorm, nn.SyncBatchNorm)):
             kaiming_normal_init(sublayer.weight)
Example #12
 def __init__(self,
              num_filters=64,
              kernel_size=3,
              stride=1,
              padding=1,
              dilation=1,
              deformable_groups=8,
              extra_offset_mask=True):
     super(DCNPack, self).__init__()
     self.extra_offset_mask = extra_offset_mask
     self.deformable_groups = deformable_groups
     self.num_filters = num_filters
      if isinstance(kernel_size, int):
          self.kernel_size = [kernel_size, kernel_size]
      else:
          # Accept list/tuple kernels too; otherwise self.kernel_size is unset.
          self.kernel_size = list(kernel_size)
     self.conv_offset_mask = nn.Conv2D(in_channels=self.num_filters,
                                       out_channels=self.deformable_groups *
                                       3 * self.kernel_size[0] *
                                       self.kernel_size[1],
                                       kernel_size=self.kernel_size,
                                       stride=stride,
                                       padding=padding)
     self.total_channels = self.deformable_groups * 3 * self.kernel_size[
         0] * self.kernel_size[1]
     self.split_channels = self.total_channels // 3
     self.dcn = DeformableConv_dygraph(
         num_filters=self.num_filters,
         filter_size=self.kernel_size,
         dilation=dilation,
         stride=stride,
         padding=padding,
         deformable_groups=self.deformable_groups)
     # self.dcn = DeformConv2D(in_channels=self.num_filters,out_channels=self.num_filters,kernel_size=self.kernel_size,stride=stride,padding=padding,dilation=dilation,deformable_groups=self.deformable_groups,groups=1) # to be compiled
     self.sigmoid = nn.Sigmoid()
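conv_offset_mask emits deformable_groups * 3 * kH * kW channels, i.e. two offset maps plus one modulation mask per sampling location. A forward sketch in the EDVR style (the DeformableConv_dygraph call signature is an assumption):

 def forward(self, inputs):
     if self.extra_offset_mask:
         x, feat = inputs                   # deform x, predict offsets from feat
         out = self.conv_offset_mask(feat)
     else:
         x = inputs
         out = self.conv_offset_mask(x)
     o1, o2, mask = paddle.split(out, 3, axis=1)   # split_channels each
     offset = paddle.concat([o1, o2], axis=1)
     mask = self.sigmoid(mask)                     # modulation in (0, 1)
     return self.dcn(x, offset, mask)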
Example #13
 def __init__(self, encoding_model):
     super(UIE, self).__init__()
     self.encoder = encoding_model
     hidden_size = self.encoder.config["hidden_size"]
     self.linear_start = paddle.nn.Linear(hidden_size, 1)
     self.linear_end = paddle.nn.Linear(hidden_size, 1)
     self.sigmoid = nn.Sigmoid()
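For the UIE head, the two Linear layers score each token as a span start or end. A hedged sketch of the matching forward (the encoder's exact return signature is an assumption):

 def forward(self, input_ids, token_type_ids=None, attention_mask=None):
     # sequence_output: [batch, seq_len, hidden_size]
     sequence_output, _ = self.encoder(input_ids,
                                       token_type_ids=token_type_ids,
                                       attention_mask=attention_mask)
     # Independent per-token probabilities for span starts and ends.
     start_prob = self.sigmoid(paddle.squeeze(self.linear_start(sequence_output), -1))
     end_prob = self.sigmoid(paddle.squeeze(self.linear_end(sequence_output), -1))
     return start_prob, end_prob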
Example #14
 def __init__(self, channels, reduction):
     super(SEModule, self).__init__()
     self.avg_pool = nn.AdaptiveAvgPool2D(1)
     self.fc1 = nn.Conv2D(channels, channels // reduction, kernel_size=1, padding=0, bias_attr=False)
     self.relu = nn.ReLU()
     self.fc2 = nn.Conv2D(channels // reduction, channels, kernel_size=1, padding=0, bias_attr=False)
     self.sigmoid = nn.Sigmoid()
Example #15
 def __init__(self,
              dnn_units=[8, 64, 16],
              dnn_activation='sigmoid',
              weight_normalization=False,
              name=None):
     super().__init__()
     self.dnn_units = dnn_units
      self.dnn_activation = dnn_activation
     self.weight_normalization = weight_normalization
     self.name = name
     layer_list = []
     #bn_list = []
     for i in range(len(dnn_units) - 1):
         dnn_layer = nn.Linear(in_features=self.dnn_units[i]
                               if i != 0 else self.dnn_units[i] * 4,
                               out_features=self.dnn_units[i + 1],
                               weight_attr=self._weight_init())
         self.add_sublayer(self.name + f'linear_{i}', dnn_layer)
         layer_list.append(dnn_layer)
         #layer_list.append(copy.deepcopy(dnn_layer))
         #bn_layer = nn.BatchNorm(50)
         #self.add_sublayer(self.name + f'bn_{i}', bn_layer)
         #bn_list.append(bn_layer)
         #bn_list.append(copy.deepcopy(bn_layer))
     #self.bn_layer = nn.LayerList(bn_list)
     self.layers = nn.LayerList(layer_list)
     self.dnn = nn.Linear(self.dnn_units[-1],
                          1,
                          weight_attr=self._weight_init())
     self.activation = nn.Sigmoid()
     self.soft = nn.Softmax()
Example #16
 def __init__(self, planes, r=16):
     super(SEBlock, self).__init__()
     self.squeeze = nn.AdaptiveAvgPool2D(1)
     self.excitation = nn.Sequential(nn.Linear(planes, planes // r),
                                     nn.ReLU(),
                                     nn.Linear(planes // r, planes),
                                     nn.Sigmoid())
Example #17
    def __init__(self, channels_img, features_d):
        super(Discriminator, self).__init__()

        # Input : N x C x 256 x 256
        self.disc = nn.Sequential(
            nn.Conv2D(  # 128 x 128
                channels_img,
                features_d,
                kernel_size=4,
                stride=2,
                padding=1,
                weight_attr=paddle.ParamAttr(initializer=conv_initializer())),
            nn.LeakyReLU(0.2),
            self._block(features_d, features_d * 2, 4, 2, 1),  # 64 x 64 
            self._block(features_d * 2, features_d * 4, 4, 2, 1),  # 32 x 32
            self._block(features_d * 4, features_d * 8, 4, 2, 1),  # 16 x 16
            self._block(features_d * 8, features_d * 16, 4, 2, 1),  # 8 x 8
            self._block(features_d * 16, features_d * 32, 4, 2, 1),  # 4 x 4
            nn.Conv2D(
                features_d * 32,
                1,
                kernel_size=4,
                stride=2,
                padding=0,  # 1 x 1 
                weight_attr=paddle.ParamAttr(initializer=conv_initializer())),
            nn.Sigmoid(),
        )
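The inline shape comments can be verified directly: each stride-2 stage halves the 256 x 256 input until the final 4 x 4 convolution collapses it to a single score. A hypothetical usage (it assumes _block and conv_initializer as defined elsewhere in the class/module):

disc = Discriminator(channels_img=3, features_d=8)
x = paddle.randn([2, 3, 256, 256])
score = disc.disc(x)   # [2, 1, 1, 1], squashed to (0, 1) by the final Sigmoid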
Example #18
    def __init__(self, in_channels, out_channels, size=(8, 8)):
        super(AttentionModule_stage2_cifar, self).__init__()
        self.first_residual_blocks = ResidualBlock(in_channels, out_channels)

        self.trunk_branches = nn.Sequential(
            ResidualBlock(in_channels, out_channels),
            ResidualBlock(in_channels, out_channels))

        self.mpool1 = nn.MaxPool2D(kernel_size=3, stride=2, padding=1)  # 4*4

        self.middle_2r_blocks = nn.Sequential(
            ResidualBlock(in_channels, out_channels),
            ResidualBlock(in_channels, out_channels))

        self.interpolation1 = nn.UpsamplingBilinear2D(size=size)  # 8*8

        self.conv1_1_blocks = nn.Sequential(
            nn.BatchNorm2D(out_channels), nn.ReLU(),
            nn.Conv2D(out_channels,
                      out_channels,
                      kernel_size=1,
                      stride=1,
                      bias_attr=False), nn.BatchNorm2D(out_channels),
            nn.ReLU(),
            nn.Conv2D(out_channels,
                      out_channels,
                      kernel_size=1,
                      stride=1,
                      bias_attr=False), nn.Sigmoid())

        self.last_blocks = ResidualBlock(in_channels, out_channels)
Example #19
    def __init__(self, num_classes=10):
        super(ImperativeLenet, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2D(
                in_channels=1,
                out_channels=6,
                kernel_size=3,
                stride=1,
                padding=1,
                bias_attr=False),
            nn.BatchNorm2D(6),
            nn.ReLU(),
            nn.MaxPool2D(
                kernel_size=2, stride=2),
            nn.Conv2D(
                in_channels=6,
                out_channels=16,
                kernel_size=5,
                stride=1,
                padding=0),
            nn.BatchNorm2D(16),
            nn.PReLU(),
            nn.MaxPool2D(
                kernel_size=2, stride=2))

        self.fc = nn.Sequential(
            nn.Linear(
                in_features=400, out_features=120),
            nn.LeakyReLU(),
            nn.Linear(
                in_features=120, out_features=84),
            nn.Sigmoid(),
            nn.Linear(
                in_features=84, out_features=num_classes),
            nn.Softmax())
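The in_features=400 follows from MNIST-sized input: 28 -> 28 (conv, k3, p1) -> 14 (pool) -> 10 (conv, k5, p0) -> 5 (pool), and 16 * 5 * 5 = 400. A quick shape check, assuming the forward flattens between features and fc:

model = ImperativeLenet()
x = paddle.randn([4, 1, 28, 28])
feat = model.features(x)                                 # [4, 16, 5, 5]
logits = model.fc(paddle.flatten(feat, start_axis=1))    # [4, 10]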
Example #20
    def __init__(self,
                 in_channels,
                 out_channels,
                 size1=(56, 56),
                 size2=(28, 28),
                 size3=(14, 14)):
        super(AttentionModule_stage1, self).__init__()
        self.first_residual_blocks = ResidualBlock(in_channels, out_channels)

        self.trunk_branches = nn.Sequential(
            ResidualBlock(in_channels, out_channels),
            ResidualBlock(in_channels, out_channels))

        self.mpool1 = nn.MaxPool2D(kernel_size=3, stride=2, padding=1)

        self.softmax1_blocks = ResidualBlock(in_channels, out_channels)

        self.skip1_connection_residual_block = ResidualBlock(
            in_channels, out_channels)

        self.mpool2 = nn.MaxPool2D(kernel_size=3, stride=2, padding=1)

        self.softmax2_blocks = ResidualBlock(in_channels, out_channels)

        self.skip2_connection_residual_block = ResidualBlock(
            in_channels, out_channels)

        self.mpool3 = nn.MaxPool2D(kernel_size=3, stride=2, padding=1)

        self.softmax3_blocks = nn.Sequential(
            ResidualBlock(in_channels, out_channels),
            ResidualBlock(in_channels, out_channels))

        self.interpolation3 = nn.UpsamplingBilinear2D(size=size3)

        self.softmax4_blocks = ResidualBlock(in_channels, out_channels)

        self.interpolation2 = nn.UpsamplingBilinear2D(size=size2)

        self.softmax5_blocks = ResidualBlock(in_channels, out_channels)

        self.interpolation1 = nn.UpsamplingBilinear2D(size=size1)

        self.softmax6_blocks = nn.Sequential(
            nn.BatchNorm2D(out_channels), nn.ReLU(),
            nn.Conv2D(out_channels,
                      out_channels,
                      kernel_size=1,
                      stride=1,
                      bias_attr=False), nn.BatchNorm2D(out_channels),
            nn.ReLU(),
            nn.Conv2D(out_channels,
                      out_channels,
                      kernel_size=1,
                      stride=1,
                      bias_attr=False), nn.Sigmoid())

        self.last_blocks = ResidualBlock(in_channels, out_channels)
Example #21
 def __init__(self, in_chan, out_chan, *args, **kwargs):
     super(AttentionRefinementModule, self).__init__()
     self.conv = ConvBNReLU(in_chan, out_chan, ks=3, stride=1, padding=1)
     self.conv_atten = nn.Conv2D(out_chan,
                                 out_chan,
                                 kernel_size=1,
                                 bias_attr=False)
     self.bn_atten = nn.BatchNorm(out_chan)
     self.sigmoid_atten = nn.Sigmoid()
Example #22
 def __init__(self, inplanes, reduction=16):
     super(SELayer, self).__init__()
     self.avg_pool = nn.AdaptiveAvgPool2D(1)
     self.fc = nn.Sequential(
         nn.Linear(inplanes, inplanes // reduction, bias_attr=False),
         nn.ReLU(),
         nn.Linear(inplanes // reduction, inplanes, bias_attr=False),
         nn.Sigmoid(),
     )
Example #23
    def __init__(self, input_nc=6, ndf=64):
        super(NLayerDiscriminator, self).__init__()

        self.layers = nn.Sequential(
            nn.Conv2D(input_nc, ndf, kernel_size=4, stride=2, padding=1),
            nn.LeakyReLU(0.2), ConvBlock(ndf, ndf * 2),
            ConvBlock(ndf * 2, ndf * 4), ConvBlock(ndf * 4, ndf * 8, stride=1),
            nn.Conv2D(ndf * 8, 1, kernel_size=4, stride=1, padding=1),
            nn.Sigmoid())
Example #24
 def __init__(self):
     super(Discriminator, self).__init__()
      self.dis = nn.Sequential(
         nn.Conv2D(1, 64, 4, 2, 1, bias_attr=False), nn.LeakyReLU(0.2),
         nn.Conv2D(64, 64 * 2, 4, 2, 1, bias_attr=False),
         nn.BatchNorm2D(64 * 2), nn.LeakyReLU(0.2),
         nn.Conv2D(64 * 2, 64 * 4, 4, 2, 1, bias_attr=False),
         nn.BatchNorm2D(64 * 4), nn.LeakyReLU(0.2),
         nn.Conv2D(64 * 4, 1, 4, 1, 0, bias_attr=False), nn.Sigmoid())
Example #25
File: dtcdscd.py Project: geoyee/PdRSCD
 def __init__(self, in_channels, reduction=16):
     super(SELayer, self).__init__()
     assert reduction >= 16
     self.avg_pool = nn.AdaptiveAvgPool2D(1)
     self.fc = nn.Sequential(
         nn.Linear(in_channels, in_channels//reduction),
         nn.ReLU(),
         nn.Linear(in_channels // reduction, in_channels),
         nn.Sigmoid()
     )
Example #26
    def __init__(self, in_ch, out_ch):
        super(AttenHead, self).__init__()
        # bottleneck channels for seg and attn heads
        bot_ch = 256

        self.atten_head = nn.Sequential(
            layers.ConvBNReLU(in_ch, bot_ch, 3, padding=1, bias_attr=False),
            layers.ConvBNReLU(bot_ch, bot_ch, 3, padding=1, bias_attr=False),
            nn.Conv2D(bot_ch, out_ch, kernel_size=(1, 1), bias_attr=False),
            nn.Sigmoid())
Example #27
 def __init__(self, act=None, axis=-1):
     super().__init__()
     if act is not None:
         assert act in ["softmax", "sigmoid"]
     if act == "softmax":
         self.act = nn.Softmax(axis=axis)
     elif act == "sigmoid":
         self.act = nn.Sigmoid()
     else:
         self.act = None
Example #28
    def __init__(self, in_planes, ratio=16):
        super(ChannelAttention, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2D(1)
        self.max_pool = nn.AdaptiveMaxPool2D(1)

        self.fc1 = nn.Conv2D(in_planes, in_planes // ratio, 1)
        self.relu1 = nn.ReLU()
        self.fc2 = nn.Conv2D(in_planes // ratio, in_planes, 1)

        self.sigmoid = nn.Sigmoid()
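This is the CBAM-style channel attention, where the same bottleneck MLP scores both the average- and max-pooled descriptors. A sketch of the standard forward (hedged; the source's forward is not shown):

    def forward(self, x):
        avg_out = self.fc2(self.relu1(self.fc1(self.avg_pool(x))))
        max_out = self.fc2(self.relu1(self.fc1(self.max_pool(x))))
        return self.sigmoid(avg_out + max_out)   # per-channel weights in (0, 1)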
Example #29
    def __init__(self):
        super(Discriminator, self).__init__()

        self.model = nn.Sequential(
            nn.Linear(int(np.prod(img_shape)), 512),
            nn.LeakyReLU(0.2),
            nn.Linear(512, 256),
            nn.LeakyReLU(0.2),
            nn.Linear(256, 1),
            nn.Sigmoid(),
        )
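img_shape is a module-level global in the original. A hypothetical usage, assuming img_shape = (1, 28, 28) as in a typical MNIST GAN:

img_shape = (1, 28, 28)
disc = Discriminator()
imgs = paddle.randn([16, 1, 28, 28])
# The MLP scores flattened images; outputs lie in (0, 1) via the final Sigmoid.
validity = disc.model(paddle.flatten(imgs, start_axis=1))   # [16, 1]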
Example #30
    def __init__(self, act='softmax', axis=-1, reduction='mean'):
        super().__init__()

        assert act in ['softmax', 'sigmoid', None]
        self.reduction = reduction

        if act == 'softmax':
            self.act = nn.Softmax(axis=axis)
        elif act == 'sigmoid':
            self.act = nn.Sigmoid()
        else:
            self.act = None
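The loss body itself is not shown. Purely for illustration, the optional activation and the reduction flag could be wired into a BCE-style forward like this (hypothetical; the real class may compute a different loss):

    def forward(self, logits, label):
        # Apply the configured activation (if any) before the loss proper.
        x = self.act(logits) if self.act is not None else logits
        return paddle.nn.functional.binary_cross_entropy(
            x, label, reduction=self.reduction)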