Code example #1
    def __init__(self, image_size=128, conv_dim=64, c_dim=5, repeat_num=6):
        super(Discriminator, self).__init__()
        self._name = 'discriminator_wgan'

        layers = []
        layers.append(
            nn.Conv2d(3, conv_dim, kernel_size=4, stride=2, padding=1))
        layers.append(nn.LeakyReLU(0.01, inplace=True))

        curr_dim = conv_dim
        for i in range(1, repeat_num):
            layers.append(
                nn.Conv2d(curr_dim,
                          curr_dim * 2,
                          kernel_size=4,
                          stride=2,
                          padding=1))
            layers.append(nn.LeakyReLU(0.01, inplace=True))
            curr_dim = curr_dim * 2

        k_size = int(image_size / np.power(2, repeat_num))
        self.main = nn.Sequential(*layers)  # * is to unpack a list
        self.conv1 = nn.Conv2d(curr_dim,
                               1,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               bias=False)
        self.conv2 = nn.Conv2d(curr_dim, c_dim, kernel_size=k_size, bias=False)
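The excerpt ends at __init__; the sketch below is an assumed forward() showing how the shared trunk and the two heads are typically combined (it is not part of the original file).

    def forward(self, x):
        h = self.main(x)         # shared conv features
        out_src = self.conv1(h)  # patch-wise real/fake score map
        out_cls = self.conv2(h)  # attribute logits, shape [N, c_dim, 1, 1]
        return out_src, out_cls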
Code example #2
    def __init__(self, state_dim, dropout_rate):
        super(Propagator, self).__init__()

        self.reset_gate = nn.Sequential(nn.Linear(state_dim * 3, state_dim),
                                        nn.LeakyReLU(),
                                        nn.Dropout(dropout_rate))
        self.update_gate = nn.Sequential(nn.Linear(state_dim * 3, state_dim),
                                         nn.LeakyReLU(),
                                         nn.Dropout(dropout_rate))
        self.transform = nn.Sequential(nn.Linear(state_dim * 3, state_dim),
                                       nn.Tanh())
Code example #3
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size=3,
                 stride=1,
                 padding=1,
                 bias=True,
                 norm="bnorm",
                 relu=0.0):
        super().__init__()

        layers = []

        layers += [
            nn.ConvTranspose2d(in_channels=in_channels,
                               out_channels=out_channels,
                               kernel_size=kernel_size,
                               stride=stride,
                               padding=padding,
                               bias=bias)
        ]
        if norm is not None:
            if norm == "bnorm":
                layers += [nn.BatchNorm2d(num_features=out_channels)]
            elif norm == "inorm":
                layers += [nn.InstanceNorm2d(num_features=out_channels)]

        if relu is not None:
            layers += [nn.ReLU() if relu == 0.0 else nn.LeakyReLU(relu)]

        self.cbr = nn.Sequential(*layers)
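With the defaults (kernel_size=3, stride=1, padding=1) the transposed convolution keeps the spatial size unchanged; with kernel_size=4, stride=2, padding=1 it doubles it. The forward() below is an assumed sketch, since the excerpt stops after building self.cbr.

    def forward(self, x):
        # transposed conv -> optional norm -> optional activation
        return self.cbr(x)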
Code example #4
    def __init__(self, opts):
        super(Generator_cifar, self).__init__()
        self.deconv1 = nn.ConvTranspose2d(in_channels=opts.z_size,
                                          out_channels=256,
                                          kernel_size=4,
                                          stride=2,
                                          padding=0)
        self.deconv2 = nn.ConvTranspose2d(in_channels=256,
                                          out_channels=128,
                                          kernel_size=5,
                                          stride=2,
                                          padding=2,
                                          output_padding=1)
        self.deconv3 = nn.ConvTranspose2d(in_channels=128,
                                          out_channels=64,
                                          kernel_size=5,
                                          stride=2,
                                          padding=2,
                                          output_padding=1)
        self.deconv4 = nn.ConvTranspose2d(in_channels=64,
                                          out_channels=3,
                                          kernel_size=5,
                                          stride=2,
                                          padding=2,
                                          output_padding=1)

        self.bn1 = nn.BatchNorm2d(256)
        self.bn2 = nn.BatchNorm2d(128)
        self.bn3 = nn.BatchNorm2d(64)

        self.leakyrelu = nn.LeakyReLU()
        self.tanh = nn.Tanh()
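A plausible forward() for this generator, inferred from the layer pairing (each deconvN followed by the matching bnN and the LeakyReLU, with Tanh on the output); the original method is not shown, so treat this as a sketch only.

    def forward(self, z):
        # z: [N, z_size, 1, 1] latent code -> [N, 3, 32, 32] image
        h = self.leakyrelu(self.bn1(self.deconv1(z)))
        h = self.leakyrelu(self.bn2(self.deconv2(h)))
        h = self.leakyrelu(self.bn3(self.deconv3(h)))
        return self.tanh(self.deconv4(h))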
Code example #5
    def __init__(self, opts):
        super(Descriptor_cifar, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=3,
                               out_channels=64,
                               kernel_size=5,
                               stride=2,
                               padding=1)
        self.conv2 = nn.Conv2d(in_channels=64,
                               out_channels=128,
                               kernel_size=3,
                               stride=2,
                               padding=1)
        self.conv3 = nn.Conv2d(in_channels=128,
                               out_channels=256,
                               kernel_size=3,
                               stride=2,
                               padding=1)

        self.bn1 = nn.BatchNorm2d(64)
        self.bn2 = nn.BatchNorm2d(128)
        self.bn3 = nn.BatchNorm2d(256)

        self.fc = nn.Linear(8 * 8 * 256, opts.z_size)
        # z_size = size of latent variables

        self.leakyrelu = nn.LeakyReLU()
Code example #6
File: X2NN.py  Project: MLgdg/X2NN
 def __init__(self,inputsize,outputsize):
     super(x2nn,self).__init__()
     self.L1=nn.Linear(inputsize,100)
     self.L2=nn.Linear(100,100)
     self.L3=nn.Linear(100,outputsize)
     #self.sig=nn.Sigmoid()
     self.sig=nn.LeakyReLU(0.2)
     self.drop=nn.Dropout(0.8)
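The excerpt defines only the layers; the forward() below is an assumed wiring of the three linear layers with the LeakyReLU stored in self.sig and the dropout, not code from the original X2NN.py.

 def forward(self, x):
     # two hidden layers with LeakyReLU and heavy dropout, then a linear output
     x = self.drop(self.sig(self.L1(x)))
     x = self.drop(self.sig(self.L2(x)))
     return self.L3(x)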
Code example #7
File: model.py  Project: Ossidy/VarIsing
    def __init__(self, imsize, nc, ndf, n_extra_layers=0):
        super(InforQ, self).__init__()
        assert imsize % 16 == 0, "size has to be multiple of 16"
        self.nc = nc

        main = nn.Sequential()
        # note: add_module() rejects names containing '.', so dashes are used
        main.add_module('initial-conv-{}-{}'.format(nc, ndf),
                        nn.Conv2d(nc, ndf, 4, 2, 1, bias=False))
        main.add_module('initial-relu-{}'.format(ndf),
                        nn.LeakyReLU(0.2, inplace=True))

        csize, cndf = imsize // 2, ndf  # integer division keeps csize an int
        # Extra layers
        for t in range(n_extra_layers):
            main.add_module('extra-layers-{}-{}-conv'.format(t, cndf),
                            nn.Conv2d(cndf, cndf, 3, 1, 1, bias=False))
            # main.add_module('extra-layers-{}-{}-batchnorm'.format(t, cndf),
            # 				nn.BatchNorm2d(cndf))
            main.add_module('extra-layers-{}-{}-relu'.format(t, cndf),
                            nn.LeakyReLU(0.2, inplace=True))

        while csize > 4:
            in_feat = cndf
            out_feat = cndf * 2
            main.add_module('pyramid-{}-{}-conv'.format(in_feat, out_feat),
                            nn.Conv2d(in_feat, out_feat, 4, 2, 1, bias=False))
            # main.add_module('pyramid-{}-batchnorm'.format(out_feat),
            # 				nn.BatchNorm2d(out_feat))
            main.add_module('pyramid-{}-relu'.format(out_feat),
                            nn.LeakyReLU(0.2, inplace=True))
            cndf = cndf * 2
            csize = csize // 2

        # state size. K x 4 x 4
        self.features = main

        Reg_value = nn.Sequential()
        Reg_value.add_module('Q-dense-layer-1',
                             nn.Conv2d(cndf, 500, 4, 1, 0, bias=False))
        Reg_value.add_module('flatten-layer', Flatten())
        # Reg_value.add_module('Q-dense-layer-extra',
        # 					 nn.LeakyReLU(negative_slope=0.2))
        # Reg_value.add_module('Drop1',
        # 					 nn.Dropout())
        Reg_value.add_module('ReLu',
                             nn.LeakyReLU(negative_slope=0.01, inplace=False))
        Reg_value.add_module('Linear', nn.Linear(500, 100, bias=True))
        # Reg_value.add_module('Drop2',
        # 					 nn.Dropout())

        Reg_value.add_module('ReLu-1',
                             nn.LeakyReLU(negative_slope=0.01, inplace=False))
        Reg_value.add_module('Q-dense-layer-2', nn.Linear(100, 50, bias=True))
        Reg_value.add_module('ReLu-2',
                             nn.LeakyReLU(negative_slope=0.01, inplace=False))
        Reg_value.add_module('Q-dense-layer-3', nn.Linear(50, 1, bias=True))
        self.Reg_value = Reg_value
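A minimal sketch of how the two Sequential blocks would typically be chained in forward(); the original method is not part of the excerpt.

    def forward(self, x):
        h = self.features(x)     # conv trunk, ends at cndf x 4 x 4
        return self.Reg_value(h)  # regression head -> one value per sample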
Code example #8
    def __init__(self, n_node, edge_id_dic, type_id_dic, opt):
        super(GGNN_plus, self).__init__()

        self.n_node = n_node
        self.num_edge_types = (
            len(edge_id_dic) +
            1) * 2  # + 1 because there's unk, *2 because it's directed
        print('num of edge embeddings: ', self.num_edge_types)
        self.n_types = len(type_id_dic) + 1
        print('num of type embeddings: ', self.n_types)
        self.state_dim = opt.state_dim
        self.time_steps = opt.n_steps
        self.use_bias = opt.use_bias
        self.annotation_dim = opt.annotation_dim
        self.use_cuda = opt.cuda
        self.dropout_rate = opt.dropout_rate

        # embedding for different type of edges. To use it as matrix, view each vector as [state_dim, state_dim]
        self.edgeEmbed = nn.Embedding(self.num_edge_types,
                                      opt.state_dim * opt.state_dim,
                                      sparse=False)
        if self.use_bias:
            self.edgeBias = nn.Embedding(self.num_edge_types,
                                         opt.state_dim,
                                         sparse=False)
        self.propagator = Propagator(self.state_dim, self.dropout_rate)
        # embedding for different types (classes)
        self.typeEmbed = nn.Embedding(self.n_types,
                                      self.annotation_dim,
                                      sparse=False)

        # output
        self.attention = nn.Sequential(
            nn.Linear(self.state_dim + self.annotation_dim, 1), nn.LeakyReLU())
        self.out = nn.Sequential(
            nn.Linear(self.state_dim + self.annotation_dim, 1), nn.Tanh())
        self.result = nn.Sigmoid()
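The comment on self.edgeEmbed says each embedding vector is used as a [state_dim, state_dim] matrix; the fragment below sketches that reshape as it might appear inside a propagation step. edge_ids and node_states are hypothetical tensors, not taken from the original file.

        # illustrative only: view each edge embedding as a matrix and apply it
        # to the corresponding node state
        W = self.edgeEmbed(edge_ids).view(-1, self.state_dim, self.state_dim)
        messages = torch.bmm(W, node_states.unsqueeze(-1)).squeeze(-1)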
Code example #9
    def __init__(self, opt, a_size, n_inputs, states=True, state_input_size=4, n_channels=3):
        super(encoder, self).__init__()
        self.opt = opt
        self.a_size = a_size
        self.n_inputs = opt.ncond if n_inputs is None else n_inputs
        self.n_channels = n_channels
        # frame encoder
        if opt.layers == 3:
            assert(opt.nfeature % 4 == 0)
            self.feature_maps = (
                opt.nfeature // 4, opt.nfeature // 2, opt.nfeature)
            self.f_encoder = nn.Sequential(
                nn.Conv2d(n_channels * self.n_inputs,
                          self.feature_maps[0], 4, 2, 1),
                nn.Dropout2d(p=opt.dropout, inplace=True),
                nn.LeakyReLU(0.2, inplace=True),
                nn.Conv2d(self.feature_maps[0], self.feature_maps[1], 4, 2, 1),
                nn.Dropout2d(p=opt.dropout, inplace=True),
                nn.LeakyReLU(0.2, inplace=True),
                nn.Conv2d(self.feature_maps[1], self.feature_maps[2], 4, 2, 1),
            )
        elif opt.layers == 4:
            assert(opt.nfeature % 8 == 0)
            self.feature_maps = (
                opt.nfeature // 8, opt.nfeature // 4, opt.nfeature // 2, opt.nfeature)
            self.f_encoder = nn.Sequential(
                nn.Conv2d(n_channels * self.n_inputs,
                          self.feature_maps[0], 4, 2, 1),
                nn.Dropout2d(p=opt.dropout, inplace=True),
                nn.LeakyReLU(0.2, inplace=True),
                nn.Conv2d(self.feature_maps[0], self.feature_maps[1], 4, 2, 1),
                nn.Dropout2d(p=opt.dropout, inplace=True),
                nn.LeakyReLU(0.2, inplace=True),
                nn.Conv2d(self.feature_maps[1], self.feature_maps[2], 4, 2, 1),
                nn.Dropout2d(p=opt.dropout, inplace=True),
                nn.LeakyReLU(0.2, inplace=True),
                nn.Conv2d(self.feature_maps[2], self.feature_maps[3], 4, 2, 1)
            )
        if states:
            n_hidden = self.feature_maps[-1]
            # state_encoder
            self.s_encoder = nn.Sequential(
                nn.Linear(state_input_size * self.n_inputs, n_hidden),
                nn.Dropout(p=opt.dropout, inplace=True),
                nn.LeakyReLU(0.2, inplace=True),
                nn.Linear(n_hidden, n_hidden),
                nn.Dropout(p=opt.dropout, inplace=True),
                nn.LeakyReLU(0.2, inplace=True),
                nn.Linear(n_hidden, opt.hidden_size)
            )

            if a_size > 0:
                # action or cost encoder
                n_hidden = self.feature_maps[-1]
                self.a_encoder = nn.Sequential(
                    nn.Linear(a_size, n_hidden),
                    nn.Dropout(p=opt.dropout, inplace=True),
                    nn.LeakyReLU(0.2, inplace=True),
                    nn.Linear(n_hidden, n_hidden),
                    nn.Dropout(p=opt.dropout, inplace=True),
                    nn.LeakyReLU(0.2, inplace=True),
                    nn.Linear(n_hidden, opt.hidden_size)
                )
Code example #10
File: net.py  Project: garroud/LSTM_correlation
 def __init__(self, input_shape):
     super(Net, self).__init__()
     self._input_shape = input_shape
     self.layer1 = nn.Sequential(
         nn.Conv2d(3, 64, 7, 2, 3),
         nn.LeakyReLU(0.1)
     )
     self.layer1_2 = nn.Sequential(
         nn.Conv2d(64, 114, 5, 2, 3),
         nn.LeakyReLU(0.1)
     )
     # assumed: BatchNorm1d over the H/4 * W/4 flattened spatial features
     self.norm1 = nn.BatchNorm1d(input_shape[2] * input_shape[3] // 16,
                                 eps=1e-9, affine=False)
     self.corr = CorrelationLayer1D()
     self.layer2 = nn.Sequential(
         nn.Conv2d(114, 60, 1, 1, 0),
         nn.LeakyReLU(0.1)
     )
     #input_size = shift_size + 60
     self.layer3 = nn.Sequential(
         nn.Conv2d(100, 239, 5, 2, 2),
         nn.LeakyReLU(0.1)
     )
     self.layer4 = nn.Sequential(
         nn.Conv2d(239, 244, 3, 1, 1),
         nn.LeakyReLU(0.1)
     )
     self.layer5 = nn.Sequential(
         nn.Conv2d(244, 324, 3, 2, 1),
         nn.LeakyReLU(0.1)
     )
     self.layer6 = nn.Sequential(
         nn.Conv2d(324, 397, 3, 1, 1),
         nn.LeakyReLU(0.1)
     )
     self.layer7 = nn.Sequential(
         nn.Conv2d(397, 250, 3, 2, 1),
         nn.LeakyReLU(0.1)
     )
     self.layer8 = nn.Sequential(
         nn.Conv2d(250, 230, 3, 1, 1),
         nn.LeakyReLU(0.1)
     )
     self.layer9 = nn.Sequential(
         nn.Conv2d(230, 35, 3, 2, 1),
         nn.LeakyReLU(0.1)
     )
     self.layer10 = nn.Sequential(
         nn.Conv2d(35, 15, 3, 1, 1),
         nn.LeakyReLU(0.1)
     )
     self.prdflow6 = nn.Sequential(
         nn.Conv2d(15, 1, 3, 1, 1),
         nn.ReLU()
     )
     self.layer11 = nn.Sequential(
         nn.UpsamplingNearest2d(scale_factor=2),
         nn.Conv2d(15, 512, 3, 1, 1),
         nn.LeakyReLU(0.1)
     )
     self.layer12 = nn.Sequential(
         nn.UpsamplingNearest2d(scale_factor=2),
         nn.Conv2d(1, 1, 3, 1, 1)
     )
     #input_size = 230 + 512 + 1
     self.layer13 = nn.Conv2d(743, 512,3,1,1)
     self.layer14 = nn.Conv2d(512,1, 3,1,1)
Code example #11
 def __init__(self, in_channels: int, out_channels: int, use_batchnorm_activation: bool = True, activation=nn.LeakyReLU(0.1), **kwargs):
     super().__init__()
     self.use_batchnorm_activation = use_batchnorm_activation
     self.conv = nn.Conv2d(in_channels, out_channels, bias=not use_batchnorm_activation, **kwargs)  # no bias when BatchNorm + activation follow
     self.batchnorm = nn.BatchNorm2d(out_channels)
     self.activation = activation
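Again the excerpt stops at __init__; an assumed forward() consistent with the bias comment above (batch norm and activation are applied only when the flag is set):

 def forward(self, x):
     x = self.conv(x)
     if self.use_batchnorm_activation:
         x = self.activation(self.batchnorm(x))
     return x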