Example #1
    def add_pseudoinputs(self):

        nonlinearity = nn.Hardtanh(min_val=0.0, max_val=1.0)

        self.means = NonLinear(self.args.number_components,
                               np.prod(self.args.input_size),
                               bias=False,
                               activation=nonlinearity)

        # init pseudo-inputs
        if self.args.use_training_data_init:
            self.means.linear.weight.data = self.args.pseudoinputs_mean
        else:
            normal_init(self.means.linear, self.args.pseudoinputs_mean,
                        self.args.pseudoinputs_std)

        # create an idle input for calling pseudo-inputs
        self.idle_input = Variable(torch.eye(self.args.number_components,
                                             self.args.number_components),
                                   requires_grad=False)
        if self.args.cuda:
            self.idle_input = self.idle_input.cuda()
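
For context, a minimal sketch of the same idea in isolation, assuming NonLinear is essentially an nn.Linear followed by the given activation (K and D below are placeholder sizes, not values from the snippet):

import torch
import torch.nn as nn

K, D = 500, 784                                  # placeholders: number_components, prod(input_size)
pseudo_generator = nn.Linear(K, D, bias=False)   # stand-in for the NonLinear layer
clamp = nn.Hardtanh(min_val=0.0, max_val=1.0)    # keeps pseudo-inputs in the valid pixel range
idle_input = torch.eye(K)                        # one one-hot row per mixture component
pseudo_inputs = clamp(pseudo_generator(idle_input))   # shape (K, D), values in [0, 1]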
Example #2
 def create_recursive_unit(self, word, index,
                           parent_h):  #gated recurrent unit
     #calculate hidden state of a node
     #transformed input vector of current node: x_hat = x * E
     child_xe = torch.matmul(self.E[:, index], word)
     #update gate vector: help model determine how much past information
     #from previous time steps needs to be passed along to the future.
     hard_sigmoid = nn.Hardtanh(0, 1)
     z = hard_sigmoid(
         torch.matmul(self.W_z, child_xe) +
         torch.matmul(self.U_z, parent_h) + self.b_z)
     #reset gate vector: decide how much of the past information to forget
     r = hard_sigmoid(
         torch.matmul(self.W_r, child_xe) +
         torch.matmul(self.U_r, parent_h) + self.b_r)
     #hidden state of current node
     c = torch.tanh(
         torch.matmul(self.W_h, child_xe) +
         torch.matmul(self.U_h, (parent_h * r)) + self.b_h)
     #vector which holds information for current unit and passes it down to the network
     h = (1 - z) * parent_h + z * c
     return h  #hidden state computed for current node
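
A quick standalone illustration (not from the snippet) of why nn.Hardtanh(0, 1) serves as a "hard sigmoid" for the gates: it clamps pre-activations to [0, 1] piecewise-linearly instead of squashing them smoothly:

import torch
import torch.nn as nn

hard_sigmoid = nn.Hardtanh(0, 1)
x = torch.tensor([-2.0, -0.3, 0.4, 1.5])
print(hard_sigmoid(x))   # tensor([0.0000, 0.0000, 0.4000, 1.0000])
print(torch.sigmoid(x))  # smooth counterpart, for comparison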
Example #3
    def __init__(self, proj_dim, proj_act, proj_norm, input_dim):
        super(R2H, self).__init__()

        # Reading options:
        self.proj_dim    = proj_dim
        self.proj_act    = proj_act
        self.proj_norm   = proj_norm
        self.input_dim   = input_dim

        # Projection layer initialization:
        self.proj = nn.Linear(self.input_dim, self.proj_dim)
        if self.proj_act == "tanh":
            self.p_activation = nn.Tanh()
        elif self.proj_act == "hardtanh":
            self.p_activation = nn.Hardtanh()
        else:
            self.p_activation = nn.ReLU()

        self.out_act = nn.Tanh()

        # Decoding layer initialization
        self.d  = QuaternionLinearAutograd(self.proj_dim, self.input_dim)
Example #4
 def __init__(self,
              input_channels,
              output_channels,
              stride=1,
              drop=0,
              batch_norm=True,
              nl="relu",
              bias=True,
              gated=False,
              smaller_kernel=False):
     super().__init__()
     if drop > 0:
         self.dropout = nn.Dropout2d(drop)
     self.deconv = nn.ConvTranspose2d(
         input_channels,
         output_channels,
         bias=bias,
         stride=stride,
         kernel_size=(2 if smaller_kernel else 4) if stride == 2 else 3,
         padding=0 if (stride == 2 and smaller_kernel) else 1)
     if batch_norm:
         self.bn = nn.BatchNorm2d(output_channels)
     if gated:
         self.gate = nn.ConvTranspose2d(
             input_channels,
             output_channels,
             bias=False,
             stride=stride,
             kernel_size=(2 if smaller_kernel else 4) if stride == 2 else 3,
             padding=0 if (stride == 2 and smaller_kernel) else 1)
         self.sigmoid = nn.Sigmoid()
     if isinstance(nl, nn.Module):
         self.nl = nl
     elif nl in ("sigmoid", "hardtanh"):
         self.nl = nn.Sigmoid() if nl == "sigmoid" else nn.Hardtanh(
             min_val=-4.5, max_val=0)
     elif not nl == "none":
         self.nl = nn.ReLU() if nl == "relu" else (
             nn.LeakyReLU() if nl == "leakyrelu" else modules.Identity())
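
The snippet only shows __init__; below is a hedged sketch of how such a gated transposed convolution is typically applied (this forward pass is an assumption, not taken from the source):

import torch
import torch.nn as nn

x = torch.randn(2, 16, 8, 8)
deconv = nn.ConvTranspose2d(16, 8, kernel_size=4, stride=2, padding=1)
gate = nn.ConvTranspose2d(16, 8, kernel_size=4, stride=2, padding=1, bias=False)
out = deconv(x) * torch.sigmoid(gate(x))   # the gate modulates each output element
print(out.shape)                           # torch.Size([2, 8, 16, 16])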
Example #5
    def __init__(self, in_dim, latent_dim, num_centers, enc_layers, dec_layers,
                 act, out_fn, rbf_beta, rec_b):
        """Constructor specs.
        
        Params:
            in_dim:         int - input space dimensions
            latent_dim:     int - latent space dimensions
            num_centers:    int - number of centers for the RBF kernel
            enc_layers:     list[int] - number of units per encoder layer
            dec_layers:     list[int] - number of units per decoder layer
            act:            torch.nn.Module - network activation functions
            out_fn:         torch.nn.Module - network output function
            rbf_beta:       float - the bandwidth of the RBF kernel
            rec_b:          float - a small constant added to the reciprocal
                            for numerically stable precision 
                            computations
        """

        super(RVAE, self).__init__()
        self.num_centers = num_centers
        self.in_dim = in_dim
        self.latent_dim = latent_dim
        self._mean_warmup = True
        self.switch = True
        self.encoder = MLP(in_dim, enc_layers, act, None)
        self.q_mu = nn.Sequential(nn.Linear(enc_layers[-1], latent_dim))
        self.q_t = nn.Sequential(nn.Linear(enc_layers[-1], 1), nn.Softplus(),
                                 nn.Hardtanh(min_val=1e-4, max_val=5.))
        self.dummy_pmu = MLP(latent_dim, dec_layers, act, in_dim, out_fn)
        self.p_mu = MLP(latent_dim, dec_layers, act, in_dim, out_fn)
        self.p_sigma = nnj.Sequential(
            nnj.RBF(self.latent_dim, num_points=num_centers, beta=rbf_beta),
            nnj.PosLinear(num_centers, self.in_dim, bias=False),
            nnj.Reciprocal(b=rec_b), nnj.Sqrt())
        self._latent_codes = None
        self.pr_means = torch.nn.Parameter(torch.zeros(latent_dim),
                                           requires_grad=True)
        self.pr_t = torch.nn.Parameter(torch.ones(1), requires_grad=True)
Example #6
    def __init__(self, args, output_size, cuda):
        super(Decoder, self).__init__()
        self.hidden_size = 2 * args.hidden_dim  #32
        self.embedding_dim = args.embed_dim  #100
        self.is_stochastic = args.is_stochastic  #True
        self.max_decoding_length = args.max_decoding_length  #75
        self.cuda = cuda

        # Embedding layer
        self.embed = nn.Embedding(num_embeddings=output_size,
                                  embedding_dim=self.hidden_size)
        # LSTM cells
        self.lstm_cells = nn.ModuleList([
            CustomLSTMCell(input_size=2 * self.hidden_size,
                           hidden_size=2 * self.hidden_size),
            CustomLSTMCell(input_size=2 * self.hidden_size,
                           hidden_size=2 * self.hidden_size),
            CustomLSTMCell(input_size=2 * self.hidden_size,
                           hidden_size=self.hidden_size)
        ])
        self.locked_dropouts = nn.ModuleList([
            LockedDropout(dropout=args.locked_dropout),
            LockedDropout(dropout=args.locked_dropout),
            LockedDropout(dropout=args.locked_dropout)
        ])
        # Attention
        self.linear = nn.Linear(in_features=self.hidden_size,
                                out_features=self.hidden_size)

        # For character projection
        self.pl1 = nn.Linear(in_features=2 * self.hidden_size,
                             out_features=self.hidden_size)
        # self.activation = nn.LeakyReLU()
        self.activation = nn.Hardtanh(inplace=True)
        self.pl2 = nn.Linear(in_features=self.hidden_size,
                             out_features=output_size)
        # self.logsm = nn.LogSoftmax(dim=1)
        self.pl2.weight = self.embed.weight
Example #7
 def __init__(self, num_classes=10):
     super(VGG_SMALL_1W1A, self).__init__()
     self.conv0 = nn.Conv2d(3, 128, kernel_size=3, padding=1, bias=False)
     self.bn0 = nn.BatchNorm2d(128)
     self.conv1 = BinarizeConv2d(128,
                                 128,
                                 kernel_size=3,
                                 padding=1,
                                 bias=False)
     self.pooling = nn.MaxPool2d(kernel_size=2, stride=2)
     self.bn1 = nn.BatchNorm2d(128)
     self.nonlinear = nn.Hardtanh(inplace=True)
     self.conv2 = BinarizeConv2d(128,
                                 256,
                                 kernel_size=3,
                                 padding=1,
                                 bias=False)
     self.bn2 = nn.BatchNorm2d(256)
     self.conv3 = BinarizeConv2d(256,
                                 256,
                                 kernel_size=3,
                                 padding=1,
                                 bias=False)
     self.bn3 = nn.BatchNorm2d(256)
     self.conv4 = BinarizeConv2d(256,
                                 512,
                                 kernel_size=3,
                                 padding=1,
                                 bias=False)
     self.bn4 = nn.BatchNorm2d(512)
     self.conv5 = BinarizeConv2d(512,
                                 512,
                                 kernel_size=3,
                                 padding=1,
                                 bias=False)
     self.bn5 = nn.BatchNorm2d(512)
     self.fc = nn.Linear(512 * 4 * 4, num_classes)
     self._initialize_weights()
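
Hardtanh(inplace=True) is the usual activation in binarized networks: it keeps pre-binarization activations in [-1, 1], the range that a sign-based binarization (and its straight-through gradient) works over. A small sketch, independent of BinarizeConv2d:

import torch
import torch.nn as nn

x = torch.randn(4, 128, 8, 8) * 3
clamped = nn.Hardtanh()(x)      # values in [-1, 1]
binary = torch.sign(clamped)    # roughly what a binarized conv layer would consume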
Example #8
    def __init__(self):
        super(VAE, self).__init__()

        # Encoder
        self.conv1 = nn.Conv2d(1, 8, kernel_size=3, stride=1, padding=1)
        self.conv2 = nn.Conv2d(8, 32, kernel_size=2, stride=2, padding=0)
        self.conv3 = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1)
        self.conv4 = nn.Conv2d(32, 8, kernel_size=3, stride=2, padding=1)
        self.fc1 = nn.Linear(8 * 64 * 64, 512)

        # Latent space
        self.fc21 = nn.Linear(512, 32)
        self.fc22 = nn.Linear(512, 32)

        # Decoder
        self.fc3 = nn.Linear(32, 512)
        self.fc4 = nn.Linear(512, 8 * 64 * 64)
        self.deconv1 = nn.ConvTranspose2d(8,
                                          32,
                                          kernel_size=3,
                                          stride=2,
                                          padding=1,
                                          output_padding=1)
        self.deconv2 = nn.ConvTranspose2d(32,
                                          32,
                                          kernel_size=3,
                                          stride=1,
                                          padding=1)
        self.deconv3 = nn.ConvTranspose2d(32,
                                          8,
                                          kernel_size=2,
                                          stride=2,
                                          padding=0)
        self.conv5 = nn.Conv2d(8, 1, kernel_size=3, stride=1, padding=1)

        self.relu = nn.ReLU()
        #self.sigmoid = nn.Sigmoid()
        self.ht = nn.Hardtanh()
Example #9
    def __init__(self, window=200, subwindow=40, dropout=0):
        super().__init__()

        locn_start = float(window / 2) - float(subwindow / 2)
        locn_end = float(window / 2) + float(subwindow / 2)

        c1 = nn.Sequential(nn.Conv1d(1, 10, 5, padding=4), nn.MaxPool1d(2),
                           nn.SELU())

        c2 = nn.Sequential(nn.Conv1d(10, 15, 3, padding=2, dilation=2),
                           nn.MaxPool1d(2), nn.SELU())

        c3 = nn.Sequential(nn.Conv1d(15, 15, 3, padding=4, dilation=4),
                           nn.MaxPool1d(2), nn.SELU())

        self.feature_extractor = nn.Sequential(c1, c2, c3)

        self.c4 = nn.Sequential(
            nn.Conv1d(15 * np.int32(window / subwindow),
                      30,
                      3,
                      padding=4,
                      dilation=4), nn.SELU())

        self.classifier = nn.Sequential(nn.Linear(150, 50), nn.SELU(),
                                        nn.AlphaDropout(dropout),
                                        nn.Linear(50, 10), nn.SELU(),
                                        nn.AlphaDropout(dropout),
                                        nn.Linear(10, 1))

        self.regressor = nn.Sequential(nn.Linear(150, 50), nn.SELU(),
                                       nn.AlphaDropout(dropout),
                                       nn.Linear(50, 10), nn.SELU(),
                                       nn.AlphaDropout(dropout),
                                       nn.Linear(10, 1),
                                       nn.Hardtanh(locn_start, locn_end))

        self._initialize_submodules()
Example #10
    def __init__(self, nClasses):
        super(Net, self).__init__()

        self.nClasses = nClasses
        self.relu = nn.ReLU()
        self.hardtanh = nn.Hardtanh()
        self.drop = nn.Dropout2d(0.4)
        self.bn1 = nn.BatchNorm2d(128)
        self.conv1 = nn.Conv2d(3, 128, kernel_size=3, stride=1, padding=1)
        self.bn2 = nn.BatchNorm2d(128)
        self.conv2 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1)

        self.maxpool1 = nn.MaxPool2d(kernel_size=2)

        self.bn3 = nn.BatchNorm2d(128)
        self.conv3 = nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1)
        self.bn4 = nn.BatchNorm2d(256)
        self.conv4 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)

        self.maxpool2 = nn.MaxPool2d(kernel_size=2)

        self.bn5 = nn.BatchNorm2d(256)
        self.conv5 = nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1)
        self.bn6 = nn.BatchNorm2d(512)
        self.conv6 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)

        self.maxpool3 = nn.MaxPool2d(kernel_size=2)

        self.bn7 = nn.BatchNorm2d(512)
        self.conv7 = nn.Conv2d(512, 1024, kernel_size=4, stride=1)
        #self.bn8 = nn.BatchNorm2d(1024,affine=False)
        #self.conv8 = BinConv2d(1024, 1024, kernel_size = 1, stride = 1)
        self.bn8 = nn.BatchNorm2d(1024)
        self.conv8 = nn.Conv2d(1024,
                               nClasses,
                               kernel_size=1,
                               stride=1,
                               padding=0)
Example #11
    def makeConv(self,
                 num_channels,
                 conv_strides,
                 conv_kernel_sizes,
                 conv_num_features,
                 normalization,
                 nonlinearity='hardtanh'):
        assert (len(conv_strides) == len(conv_kernel_sizes) ==
                len(conv_num_features))

        conv_cum_stride = 1.0
        for i in range(len(conv_strides)):
            conv_cum_stride *= conv_strides[i][0]

        self.conv_cumative_stride = conv_cum_stride
        self.conv_activation = {
            'tanh': nn.Tanh(),
            'relu': nn.ReLU(inplace=True),
            'hardtanh': nn.Hardtanh(0, 20, inplace=True),
            'leakyrelu': nn.LeakyReLU(0.1, inplace=True)
        }[nonlinearity]

        layers = []
        for nf, ks, cs, in zip(conv_num_features, conv_kernel_sizes,
                               conv_strides):
            layers.extend([
                nn.Conv2d(num_channels,
                          nf,
                          kernel_size=ks,
                          stride=cs,
                          padding=((ks[0] - 1) // 2, (ks[1] - 1) // 2)),
                Normalization(normalization, 2, nf), self.conv_activation
            ])
            num_channels = nf
        self.conv = nn.Sequential(*layers)

        # Post convolution layer with the convention data layout (allows hooking probes and heads)
        self.post_conv = Identity()
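
The same name-to-activation lookup, extracted as a standalone helper for clarity (a sketch; the function name is illustrative). Here 'hardtanh' maps to nn.Hardtanh(0, 20), i.e. a ReLU clipped at 20:

import torch.nn as nn

def make_activation(name: str) -> nn.Module:
    return {
        'tanh': nn.Tanh(),
        'relu': nn.ReLU(inplace=True),
        'hardtanh': nn.Hardtanh(0, 20, inplace=True),   # clipped ReLU: min(max(x, 0), 20)
        'leakyrelu': nn.LeakyReLU(0.1, inplace=True),
    }[name]

act = make_activation('hardtanh')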
Example #12
    def __init__(self):
        super(BinAlexNet, self).__init__()
        self.features = nn.Sequential(
            BinarizeConv2d(3, 64, kernel_size=5, padding=1, bias=False),  # out dim = 64*30*30
            nn.MaxPool2d(kernel_size=2, stride=2),  # out dim = 64 * 15* 15
            nn.BatchNorm2d(64),
            nn.Hardtanh(),

            BinarizeConv2d(64, 192, kernel_size=5, padding=1, bias=False),  # out dim = 192 * 13 * 13
            nn.MaxPool2d(kernel_size=2, stride=2),  # out dim = 192 * 6 * 6
            nn.BatchNorm2d(192),
            nn.Hardtanh(inplace=True),

            BinarizeConv2d(192, 384, kernel_size=3, padding=1, bias=False),  # out dim = 384 * 6 * 6
            nn.BatchNorm2d(384),
            nn.Hardtanh(),

            BinarizeConv2d(384, 256, kernel_size=3, padding=1, bias=False),  # out dim = 256 * 6 * 6
            nn.BatchNorm2d(256),
            nn.Hardtanh(),

            BinarizeConv2d(256, 256, kernel_size=3, padding=1, bias=False),  # out dim = 256 * 6 * 6
            nn.MaxPool2d(kernel_size=3, stride=2),  # out dim = 256 * 4 * 4
            nn.BatchNorm2d(256),
            nn.Hardtanh()
        )

        self.classifier = nn.Sequential(
            BinarizeLinear(256 * 2 * 2, 4096, bias=False),
            nn.BatchNorm1d(4096),
            nn.Hardtanh(),
            # nn.Dropout(0.5),

            BinarizeLinear(4096, 2048, bias=False),
            nn.BatchNorm1d(2048),
            nn.Hardtanh(),
            # nn.Dropout(0.5),

            BinarizeLinear(2048, 10)
        )
Example #13
    def __init__(self,
                 h_size,
                 max_output_degree,
                 num_classes,
                 emb_module=nn.Identity,
                 output_module=None):
        super(CountStructCell, self).__init__()

        self.h_size = h_size
        self.max_output_degree = max_output_degree
        self.output_module = output_module
        self.emb_module = emb_module
        self.num_classes = num_classes

        self.bottom_parent_h = nn.Parameter(th.zeros(h_size),
                                            requires_grad=False)
        self.bottom_parent_out = nn.Parameter(th.zeros(num_classes),
                                              requires_grad=False)

        self.rec = nn.RNNCell(h_size, h_size, nonlinearity='relu')
        self.linear = nn.Linear(h_size, 1)
        self.linear2 = nn.Linear(h_size, h_size)
        self.count = nn.Hardtanh(0, max_output_degree)
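
Here nn.Hardtanh(0, max_output_degree) acts as a bounded output, presumably clamping the scalar produced by self.linear into the valid child-count range. A tiny sketch with a placeholder bound:

import torch
import torch.nn as nn

max_output_degree = 5                     # placeholder value
count = nn.Hardtanh(0, max_output_degree)
h = torch.tensor([[-3.0], [2.4], [9.0]])  # e.g. raw outputs of a Linear(h_size, 1)
print(count(h))                           # clamps to 0.0, 2.4 and 5.0 respectively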
Example #14
    def __init__(self, n_classes, deno):
        super(Graph_Sim_Mill, self).__init__()
        
        self.num_classes = n_classes
        self.deno = deno

        num_layers = 3
        lin_size = 512
        self.linear_layers = nn.ModuleList()
        for num_layer in range(num_layers): 
            self.linear_layers.append(nn.Linear(2048, lin_size))

        self.graph_layers = nn.ModuleList()
        in_size = 2048
        for num_layer in range(num_layers): 
            self.graph_layers.append(Graph_Layer_Wrapper(in_size,lin_size))
            in_size = lin_size        

        self.last_layer = []
        self.last_layer.append(nn.Hardtanh())
        self.last_layer.append(nn.Dropout(0.5))
        self.last_layer.append(nn.Linear(2048,n_classes))
        self.last_layer = nn.Sequential(*self.last_layer)
Example #15
 def __init__(self, inplanes, planes, stride=1, downsample=None):
     super(Bottleneck, self).__init__()
     self.conv1 = BinarizeConv2d(inplanes,
                                 planes,
                                 kernel_size=1,
                                 bias=False)
     self.bn1 = nn.BatchNorm2d(planes)
     self.conv2 = BinarizeConv2d(planes,
                                 planes,
                                 kernel_size=3,
                                 stride=stride,
                                 padding=1,
                                 bias=False)
     self.bn2 = nn.BatchNorm2d(planes)
     self.conv3 = BinarizeConv2d(planes,
                                 planes * 4,
                                 kernel_size=1,
                                 bias=False)
     self.bn3 = nn.BatchNorm2d(planes * 4)
     # self.tanh = nn.Hardtanh(inplace=True)
     self.tanh = nn.Hardtanh()
     self.downsample = downsample
     self.stride = stride
Example #16
 def __init__(self, opt):
     super(DecodersIntegralWarper2, self).__init__()
     self.imagedimension = opt.imgSize
     self.ngpu = opt.ngpu
     self.idim = opt.idim
     self.wdim = opt.wdim
     self.decoderI = waspDecoder(opt,
                                 ngpu=self.ngpu,
                                 nz=opt.idim,
                                 nc=opt.nc,
                                 ngf=opt.ngf,
                                 lb=0,
                                 ub=1)
     self.decoderW = waspDecoderTanh(opt,
                                     ngpu=self.ngpu,
                                     nz=opt.wdim,
                                     nc=2,
                                     ngf=opt.ngf,
                                     lb=0,
                                     ub=0.1)
     self.warper = waspWarper(opt)
     self.integrator = waspGridSpatialIntegral(opt)
     self.cutter = nn.Hardtanh(-1, 1)
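
self.cutter = nn.Hardtanh(-1, 1) presumably clips the integrated warping grid to the coordinate range that F.grid_sample expects. A hedged sketch of that clamping step in isolation:

import torch
import torch.nn.functional as F

image = torch.randn(1, 3, 64, 64)
grid = torch.randn(1, 64, 64, 2) * 2       # warp coordinates, possibly out of range
grid = torch.nn.Hardtanh(-1, 1)(grid)      # clamp to the valid sampling range [-1, 1]
warped = F.grid_sample(image, grid, align_corners=True)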
Example #17
    def __init__(self, z_dim, **kwargs):
        super(EncoderMnistConv, self).__init__()

        self.q_z_layers = nn.Sequential(
            nn.Conv2d(1, 32, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            # 12
            nn.Conv2d(32, 64, kernel_size=5, stride=2, padding=0),
            nn.ReLU(inplace=True),
            # 4
            nn.Conv2d(64, 128, kernel_size=5, stride=2, padding=0),
            nn.ReLU(inplace=True),
            # 2
            nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1),
            nn.ReLU(inplace=True),
        )

        self.q_z_mean = nn.Sequential(
            nn.Conv2d(256, z_dim, kernel_size=3, stride=2, padding=1),
            nn.Flatten())
        self.q_z_logvar = nn.Sequential(
            nn.Conv2d(256, z_dim, kernel_size=3, stride=2, padding=1),
            nn.Hardtanh(min_val=-10., max_val=4.), nn.Flatten())
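
Clamping the encoder's log-variance with nn.Hardtanh(min_val=-10., max_val=4.) bounds the posterior standard deviation to roughly [exp(-5), exp(2)], which keeps the reparameterization numerically stable. A minimal sketch:

import torch
import torch.nn as nn

raw_logvar = torch.randn(8, 32) * 10
logvar = nn.Hardtanh(min_val=-10., max_val=4.)(raw_logvar)
mean = torch.randn(8, 32)
z = mean + torch.exp(0.5 * logvar) * torch.randn_like(mean)   # reparameterization trick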
Example #18
 def __init__(self, opt):
     super(Dense_DecodersIntegralWarper2_Intrinsic, self).__init__()
     self.imagedimension = opt.imgSize
     self.ngpu = opt.ngpu
     self.idim = opt.idim
     self.sdim = opt.sdim
     self.tdim = opt.tdim
     self.wdim = opt.wdim
     # Lighting Net
     self.lightNet = LightingTransfer(opt)
     # shading decoder
     self.decoderS = waspDenseDecoder(opt, ngpu=self.ngpu, nz=self.sdim, nc=1, ngf=opt.ngf, lb=0, ub=1)
     # albedo decoder
     self.decoderT = waspDenseDecoder(opt, ngpu=self.ngpu, nz=opt.tdim, nc=opt.nc, ngf=opt.ngf, lb=0, ub=1)
     # deformation decoder
     self.decoderW = waspDenseDecoder(opt, ngpu=self.ngpu, nz=opt.wdim, nc=2, ngf=opt.ngf, lb=0, ub=1, activation=nn.Tanh, args=[], f_activation=nn.Sigmoid, f_args=[])
     # shading*albedo=texture
     self.intrinsicComposer = waspIntrinsicComposer(opt)
     # deformation offset decoder
     self.warper   = waspWarper(opt)
      # spatial integrator for the deformation field
     self.integrator = waspGridSpatialIntegral(opt)
     self.cutter = nn.Hardtanh(-1,1)
Example #19
 def __init__(self, with_bn=False, threshold=3):
     super(YeNet, self).__init__()
     self.with_bn = with_bn
     self.preprocessing = SRM_conv2d(1, 0)
     self.TLU = nn.Hardtanh(-threshold, threshold, True)
     if with_bn:
         self.norm1 = nn.BatchNorm2d(30)
     else:
         self.norm1 = lambda x: x
     self.block2 = ConvBlock(30, 30, 3, with_bn=self.with_bn)
     self.block3 = ConvBlock(30, 30, 3, with_bn=self.with_bn)
     self.block4 = ConvBlock(30, 30, 3, with_bn=self.with_bn)
     self.pool1 = nn.AvgPool2d(2, 2)
     self.block5 = ConvBlock(30, 32, 5, with_bn=self.with_bn)
     self.pool2 = nn.AvgPool2d(3, 2)
     self.block6 = ConvBlock(32, 32, 5, with_bn=self.with_bn)
     self.pool3 = nn.AvgPool2d(3, 2)
     self.block7 = ConvBlock(32, 32, 5, with_bn=self.with_bn)
     self.pool4 = nn.AvgPool2d(3, 2)
     self.block8 = ConvBlock(32, 16, 3, with_bn=self.with_bn)
     self.block9 = ConvBlock(16, 16, 3, 3, with_bn=self.with_bn)
     self.ip1 = nn.Linear(3 * 3 * 16, 2)
     self.reset_parameters()
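
The TLU here is a symmetric truncation, x -> clip(x, -threshold, threshold), applied to the SRM filter responses; the third positional argument enables in-place operation. A quick check in isolation:

import torch
import torch.nn as nn

threshold = 3
tlu = nn.Hardtanh(-threshold, threshold)
residuals = torch.randn(1, 30, 32, 32) * 5
print(tlu(residuals).abs().max() <= threshold)   # tensor(True)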
Example #20
    def __init__(self,
                 scale=32,
                 n_channels=3,
                 n_classes=3,
                 drop_rate=0.5,
                 threshold=0.07,
                 max_n=50):
        super(UNet, self).__init__()

        # Unet part for heatmap
        self.inc = inconv(n_channels, scale)
        self.down1 = down(scale, scale * 2)
        self.down2 = down(scale * 2, scale * 4)
        self.down3 = down(scale * 4, scale * 8)
        self.down4 = down(scale * 8, scale * 8)
        self.up1 = up(scale * 16, scale * 4, drop_rate)
        self.up2 = up(scale * 8, scale * 2, drop_rate)
        self.up3 = up(scale * 4, scale, drop_rate)
        self.up4 = up(scale * 2, scale, drop_rate)
        self.outc = outconv(scale, n_classes)
        self.sigmoid = nn.Sigmoid()
        self.thresh = threshold
        self.to_max = nn.Hardtanh(max_val=50)
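
Note that nn.Hardtanh(max_val=50) keeps the default min_val=-1.0, so self.to_max clamps to [-1, 50] (the snippet hardcodes 50 rather than using max_n). A quick demonstration of that default:

import torch
import torch.nn as nn

to_max = nn.Hardtanh(max_val=50)
print(to_max(torch.tensor([-5.0, 10.0, 80.0])))   # tensor([-1., 10., 50.])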
Example #21
    def init_pseudoinputs(self, pseudo_inputs):
        """
        Adds and initialises additional layer for pseudoinput generation
        pseudo_inputs: either random training data or None
        """

        self.pseudo_inputs = pseudo_inputs
        # requires_grad can only be set when pseudo-inputs are given as data
        if self.pseudo_inputs is not None:
            self.pseudo_inputs.requires_grad = False

        if pseudo_inputs is None:
            # initialise dummy inputs
            if self.is_cuda:
                self.dummy_inputs = torch.eye(self.num_pseudos).cuda()
            else:
                self.dummy_inputs = torch.eye(self.num_pseudos)
            self.dummy_inputs.requires_grad = False
            # initialise layers for learning pseudoinputs
            self.pseudo_layer = nn.Linear(self.num_pseudos, 784, bias=False)
            self.pseudo_layer.weight.data.normal_(
                -0.05, 0.01)  #default in experiment parser
            self.pseudo_nonlin = nn.Hardtanh(min_val=0.0, max_val=1.0)
        elif self.is_cuda:
            self.pseudo_inputs = self.pseudo_inputs.cuda()
Example #22
	def forward(self, source,target,face,eyes):
		saliency_256 = self.saliency_pathway(target)
		saliency_output = self.last_conv(saliency_256)
		saliency_output = self.relu_saliency(saliency_output)
		saliency_output = saliency_output.view(-1,169)
		cone_parameters = self.cone_pathway(face)
		head_v = cone_parameters[:,0:3]
		variance = nn.Hardtanh(0.5, 0.99)(cone_parameters[:,3])
		R,t,sigmoid = self.transformation_path(source,target)
		projection = self.projection(eyes,head_v,R,t,variance)
		projection_sigmoid = torch.mul(projection,sigmoid.view(-1,1).expand_as(projection))
		
		input_sigmoid = torch.cat((saliency_output,projection_sigmoid),1)
		output_sigmoid_l1 = nn.ReLU()(self.sigmoid1(input_sigmoid))
		output_sigmoid_l2 = self.sigmoid2(output_sigmoid_l1)
		output_sigmoid_l2 = nn.Sigmoid()(output_sigmoid_l2)

		output = torch.mul(projection_sigmoid,saliency_output)
		output = self.linear_final(output)
		output = nn.Softmax(dim=1)(output)
		output = output.view(-1,1,self.side,self.side)

		return output,output_sigmoid_l2
Example #23
 def __init__(self, gan, samples_per_row=4, session=None):
     BaseSampler.__init__(self, gan, samples_per_row)
     self.latent1 = self.gan.latent.next()
     self.latent2 = self.gan.latent.next()
     self.velocity = 2/30.0
     direction = self.gan.latent.next()
     self.origin = direction
     self.pos = self.latent1
     self.hardtanh = nn.Hardtanh()
     g_params = self.gan.latent_parameters()
     if self.latent1.shape[1] // 2 == g_params[0].shape[1]:
         #recombine a split
         g_params = [torch.cat([p1, p2], 1) for p1, p2 in zip(g_params[:len(g_params)//2], g_params[len(g_params)//2:])]
         
     self.eigvec = torch.svd(torch.cat(g_params, 0)).V
     #self.eigvec = torch.svd(list(self.gan.g_parameters())[0]).V
     self.index = 0
     self.direction = self.eigvec[:, self.index].unsqueeze(0)
     self.direction = self.direction / torch.norm(self.direction)
     self.ones = torch.ones_like(self.direction, device="cuda:0")
     self.mask = torch.cat([torch.zeros([1, direction.shape[1]//2]), torch.ones([1, direction.shape[1]//2])], dim=1).cuda()
     self.mask = torch.ones_like(self.mask).cuda()
     self.steps = 30
Example #24
def test(img_path, model_path, show=False):
    imgorg = cv2.imread(img_path)
    imgorg = cv2.cvtColor(imgorg, cv2.COLOR_BGR2RGB)
    w, h = imgorg.shape[0], imgorg.shape[1]
    img = cv2.resize(imgorg, (256, 256))
    img = img[:, :, ::-1]
    img = img.astype(float) / 255
    img = img.transpose(2, 0, 1)
    img = np.expand_dims(img, 0)
    img = torch.from_numpy(img).float()

    htan = nn.Hardtanh(0.0, 1.0)
    model = get_model('unetnc', n_classes=3, in_channels=3)
    state = convert_state_dict(torch.load(model_path)['model_state'])

    model.load_state_dict(state)
    model.eval()
    model.cuda()
    images = Variable(img.cuda())

    with torch.no_grad():
        output = model(images)
        pred = htan(output)

    pred = pred.cpu().detach().numpy()[0]

    if show:
        pred = pred.transpose((1, 2, 0))
        pred = cv2.resize(pred, (h, w), interpolation=cv2.INTER_NEAREST)
        norm = cv2.cvtColor(pred, cv2.COLOR_RGB2BGR)
        shade = cv2.cvtColor(pred, cv2.COLOR_BGR2GRAY)
        print(pred)
        _, axis = plt.subplots(1, 3)
        axis[0].imshow(imgorg)
        axis[1].imshow(norm)
        axis[2].imshow(shade, cmap='gray')
        plt.show()
Example #25
    def __init__(self, args, p_samp=False):
        super().__init__(args)

        self.p_samp = p_samp

        if self.args.q_x_prior == "marginal":
            pass
        elif self.args.q_x_prior == "vampprior":
            # add z pseudo-inputs
            nonlinearity = nn.Hardtanh(min_val=0.0, max_val=1.0)

            self.means_z1 = NonLinear(self.args.number_components,
                                      self.args.z1_size,
                                      bias=False,
                                      activation=nonlinearity)
            self.means_z2 = NonLinear(self.args.number_components,
                                      self.args.z2_size,
                                      bias=False,
                                      activation=nonlinearity)

            # init pseudo-inputs
            normal_init(self.means_z1.linear, 0.05, 0.01)
            normal_init(self.means_z2.linear, 0.05, 0.01)

            # create an idle input for calling pseudo-inputs
            self.idle_input_z = Variable(torch.eye(
                self.args.number_components, self.args.number_components),
                                         requires_grad=False)
            if self.args.cuda:
                self.idle_input_z = self.idle_input_z.cuda()

        else:
            raise ValueError(
                "Unknown q_x_prior = {q_x_prior}. Expected: marginal, vampprior."
                .format(q_x_prior=self.args.q_x_prior))

Example #26
    def __init__(self, args):
        super(Encoder_z_prior, self).__init__()

        self.input_size = args.input_size

        self.n_z = args.z_size

        if args.dataset_name == 'freyfaces':
            h_size = 210
        elif args.dataset_name.startswith(
                'cifar10'
        ) or args.dataset_name == 'coil20' or args.dataset_name == 'svhn':
            h_size = 384
        elif args.dataset_name == 'usps':
            h_size = 96
        elif args.dataset_name == 'celeba':
            h_size = 1536
        else:
            h_size = 294

        self.main = nn.Sequential(GatedConv2d(self.input_size[0], 32, 7, 1, 3),
                                  GatedConv2d(32, 32, 3, 2, 1),
                                  GatedConv2d(32, 64, 5, 1, 2),
                                  GatedConv2d(64, 64, 3, 2, 1),
                                  GatedConv2d(64, 6, 3, 1, 1))

        # linear layers
        self.p_z_mean = NonLinear(h_size, self.n_z, activation=None)
        self.p_z_logvar = NonLinear(h_size,
                                    self.n_z,
                                    activation=nn.Hardtanh(min_val=-6.,
                                                           max_val=2.))

        # weights initialization
        for m in self.modules():
            if isinstance(m, nn.Linear):
                he_init(m)
Example #27
def make_tr_dec_layers(cfg, in_channels=0, use_bn='b', use_sgm='sigmoid'):   #trainable decoder
    if in_channels < 1:
        in_channels = cfg[0]
    layers = [ nn.ReflectionPad2d((1,1,1,1)),
            nn.Conv2d(in_channels, cfg[0], kernel_size=3, padding=0),
            nn.LeakyReLU(0.2, True)]  #first layer without BN
    in_channels = cfg[0]
    i = 1
    while i < len(cfg):
        v = cfg[i]
        if use_bn == 'in':
            layers += [nn.InstanceNorm2d(in_channels, affine=True)]
        elif use_bn == 'b':
            layers += [nn.BatchNorm2d(in_channels)]
        else:
            print('make_tr_dec: unknown bn')
        if v == 'M':
            i += 1
            v = cfg[i]
            conv2d = nn.ConvTranspose2d(in_channels, v, kernel_size=4, stride=2, padding=1, bias=(not use_bn))
            layers += [conv2d, nn.LeakyReLU(0.2, True)]
        else:
            conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=0, bias=(not use_bn))
            layers += [nn.ReflectionPad2d((1,1,1,1)), conv2d, nn.LeakyReLU(0.2, True)]
        in_channels = v
        i += 1

    layers += [nn.Conv2d(in_channels, 3, kernel_size=1, padding=0)]  #last layer, create image
    if use_sgm == 'sigmoid':  #constrained the pixel value to be 0~1
        layers += [nn.Sigmoid()]
    elif use_sgm == 'tanh':
        layers += [nn.Tanh()]
    elif use_sgm == 'hard':
        layers += [nn.Hardtanh(min_val=0)]
    elif use_sgm.lower() != 'none':
        print('unknown last decoder layer flag:', use_sgm)
    return nn.Sequential(*layers)
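
A hedged usage sketch for the function above; the cfg format is inferred from the loop ('M' marks an upsampling ConvTranspose2d step followed by the next channel count), and use_sgm='hard' ends the decoder with nn.Hardtanh(min_val=0), i.e. outputs clamped to [0, 1]:

decoder = make_tr_dec_layers([256, 'M', 128, 'M', 64], use_bn='b', use_sgm='hard')
print(decoder)   # ends with Conv2d(64, 3, kernel_size=1) followed by Hardtanh(min_val=0, max_val=1.0)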
Example #28
    def __init__(self,
                 input_size=24,
                 lin_layer_sizes=tuple([100]),
                 output_size=500,
                 lin_layer_dropouts=tuple([0.])):

        super().__init__()
        self.droput_layers = nn.ModuleList(
            [nn.Dropout(prob) for prob in lin_layer_dropouts])

        # helper to get cosine distance
        self.cosine = nn.modules.distance.CosineSimilarity()

        # input size
        self.input_size = input_size
        self.relu = nn.Hardtanh(0., 1.)

        # Linear Layers
        first_lin_layer = nn.Linear(self.input_size, lin_layer_sizes[0])

        self.lin_layers = nn.ModuleList([first_lin_layer] + [
            nn.Linear(lin_layer_sizes[i], lin_layer_sizes[i + 1])
            for i in range(len(lin_layer_sizes) - 1)
        ])

        for lin_layer in self.lin_layers:
            nn.init.kaiming_normal_(lin_layer.weight.data)

        # Output Layer
        self.output_layer = nn.Linear(lin_layer_sizes[-1], output_size)
        nn.init.kaiming_normal_(self.output_layer.weight.data)

        # Batch Norm Layers
        self.first_bn_layer = nn.BatchNorm1d(self.input_size)
        self.bn_layers = nn.ModuleList(
            [nn.BatchNorm1d(size) for size in lin_layer_sizes])
Example #29
    def __init__(self, n_classes, deno):
        super(Graph_Sim_Mill, self).__init__()
        
        self.num_classes = n_classes
        self.deno = deno

        # num_layers = 2
        # in_out = [2048,512,1024]
        # print 'NUM LAYERS', num_layers, in_out

        num_layers = 1
        in_out = [512,512]
        non_lin = 'HT'
        # softmax = True

        print('NUM LAYERS', num_layers, in_out)

        self.linear_layers = nn.ModuleList()
        for num_layer in range(2*num_layers): 
            self.linear_layers.append(nn.Linear(2048, in_out[num_layer], bias = False))
            nn.init.orthogonal_(self.linear_layers[num_layer].weight)

        
        # self.graph_layers = nn.ModuleList()
        for num_layer in range(num_layers): 
            self.graph_layer = Graph_Layer_Wrapper(2048,1024, non_lin)
            

        self.last_layer = []
        if non_lin=='rl':
            self.last_layer.append(nn.ReLU())
        else:
            self.last_layer.append(nn.Hardtanh())
        self.last_layer.append(nn.Dropout(0.5))
        self.last_layer.append(nn.Linear(2048,n_classes))
        self.last_layer = nn.Sequential(*self.last_layer)
Example #30
    def __init__(self,
                 in_planes,
                 planes,
                 kernel_size,
                 stride,
                 padding,
                 binarynum=1,
                 pooling=False,
                 act=None,
                 dilation=1,
                 groups=1,
                 bias=False):
        super(baseBlock, self).__init__()

        self.conv = nn.Conv2d(in_planes,
                              planes,
                              kernel_size=kernel_size,
                              stride=stride,
                              padding=padding,
                              dilation=dilation,
                              groups=groups,
                              bias=bias)

        self.bn = nn.BatchNorm2d(planes)

        if act == "relu":
            self.act = nn.ReLU(inplace=True)
        elif act == "htanh":
            self.act = nn.Hardtanh(inplace=True)
        else:
            self.act = None

        if pooling:
            self.pooling = nn.MaxPool2d(kernel_size=2, stride=2)
        else:
            self.pooling = None