Code Example #1
File: network.py Project: jmnie/Video-Caption-CV-NLP
    def hybrid_forward(self, F, x):
        residual = x
        x = self.bn1(x)
        x = F.Activation(x, act_type='relu')
        if self.downsample:
            residual = self.downsample(x)
        x = self.conv1(x)

        x = self.bn2(x)
        x = F.Activation(x, act_type='relu')
        x = self.conv2(x)

        return x + residual
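The snippet above shows only hybrid_forward; the layers it references (bn1, conv1, bn2, conv2, downsample) are declared in the block's constructor. A minimal sketch of such a constructor, assuming Gluon nn layers in the style of a pre-activation ResNet block (class name and channel arguments are placeholders, not the project's exact code):

from mxnet.gluon import nn

class PreActBasicBlock(nn.HybridBlock):
    def __init__(self, channels, stride, downsample=False, in_channels=0, **kwargs):
        super(PreActBasicBlock, self).__init__(**kwargs)
        self.bn1 = nn.BatchNorm()
        self.conv1 = nn.Conv2D(channels, kernel_size=3, strides=stride, padding=1,
                               use_bias=False, in_channels=in_channels)
        self.bn2 = nn.BatchNorm()
        self.conv2 = nn.Conv2D(channels, kernel_size=3, strides=1, padding=1,
                               use_bias=False, in_channels=channels)
        # 1x1 convolution applied to the pre-activated input when the shape changes
        self.downsample = (nn.Conv2D(channels, kernel_size=1, strides=stride,
                                     use_bias=False, in_channels=in_channels)
                           if downsample else None)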
Code Example #2
File: GRU.py Project: kyocen/Mxnet_Tutorial
    def GRU_Cell(input, state):
        for x in input:
            z_t = nd.Activation(nd.FullyConnected(data=x,weight=wxz,no_bias=True,num_hidden=num_hidden)+
                                nd.FullyConnected(data=state,weight=whz,no_bias=True,num_hidden=num_hidden)+bz,act_type="sigmoid")
            r_t = nd.Activation(nd.FullyConnected(data=x,weight=wxr,no_bias=True,num_hidden=num_hidden)+
                                nd.FullyConnected(data=state,weight=whr,no_bias=True,num_hidden=num_hidden)+br,act_type="sigmoid")
            g_t = nd.Activation(nd.FullyConnected(data=x,weight=wxh,no_bias=True,num_hidden=num_hidden)+
                                nd.FullyConnected(data=r_t*state,weight=whh,no_bias=True,num_hidden=num_hidden)+bh,act_type="tanh")

            state = nd.multiply(z_t,state) + nd.multiply(1-z_t,g_t)

        output = nd.FullyConnected(data=state, weight=why, bias=by, num_hidden=num_outputs)
        output = nd.softmax(data=output)
        return output, state
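The weights and sizes referenced above (wxz, whz, bz, ..., why, by, num_hidden, num_outputs) live outside the function. A minimal sketch of plausible shapes and a call on dummy data, assuming GRU_Cell and these names are visible at the same scope (all sizes below are illustrative, not the tutorial's actual values):

from mxnet import nd

num_inputs, num_hidden, num_outputs = 28, 200, 10
batch_size, seq_len = 4, 7

def param(*shape):
    return nd.random.normal(scale=0.01, shape=shape)

# FullyConnected expects weight of shape (num_hidden, input_dim) and bias of shape (num_hidden,)
wxz, whz, bz = param(num_hidden, num_inputs), param(num_hidden, num_hidden), nd.zeros(num_hidden)
wxr, whr, br = param(num_hidden, num_inputs), param(num_hidden, num_hidden), nd.zeros(num_hidden)
wxh, whh, bh = param(num_hidden, num_inputs), param(num_hidden, num_hidden), nd.zeros(num_hidden)
why, by = param(num_outputs, num_hidden), nd.zeros(num_outputs)

inputs = [nd.random.uniform(shape=(batch_size, num_inputs)) for _ in range(seq_len)]
state = nd.zeros((batch_size, num_hidden))
output, state = GRU_Cell(inputs, state)  # output: (batch_size, num_outputs)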
Code Example #3
    def network(X,dropout=0.0):
        
        #encoder
        H1 = nd.Activation(nd.FullyConnected(data=X , weight=W1 , bias=B1 , num_hidden=num_hidden1), act_type="sigmoid")
        H1 = nd.Dropout(data=H1 , p=dropout) # apply dropout layer!!!
        H2 = nd.Activation(nd.FullyConnected(data=H1 , weight=W2 , bias=B2 , num_hidden=num_hidden2), act_type="sigmoid")
        H2 = nd.Dropout(data=H2 , p=dropout) # apply dropout layer!!!

        #decoder
        H3 = nd.Activation(nd.FullyConnected(data=H2 , weight=W3 , bias=B3 , num_hidden=num_hidden1_), act_type="sigmoid")
        H3 = nd.Dropout(data=H3 , p=dropout) # apply dropout layer!!!
        H4 = nd.Activation(nd.FullyConnected(data=H3 , weight=W4 , bias=B4 , num_hidden=num_hidden2_), act_type="sigmoid")
        H4 = nd.Dropout(data=H4 , p=dropout) # apply dropout layer!!!
        H5 = nd.Activation(nd.FullyConnected(data=H4 , weight=W5 , bias=B5 , num_hidden=num_outputs), act_type="sigmoid")
        out = H5
        return out
Code Example #4
    def forward(self, x):
        inp = x.shape[1]
        oup = self.width_opt[self.idx]

        x = nd.Convolution(x, weight=self.conv_weight.data()[:oup,:inp,:,:], kernel=(3, 3), stride=self.stride, pad=(1, 1), num_filter=oup, no_bias=True)
        x = nd.BatchNorm(x, self.gamma[self.idx].data(), self.beta[self.idx].data(), self.moving_mean[self.idx].data(), self.moving_var[self.idx].data())
        x = nd.Activation(x, act_type='relu')

        return x
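The slice [:oup, :inp, :, :] is what makes this layer "slimmable": one full-sized weight is stored, and only the first oup output filters and inp input channels are used for the currently selected width. A small standalone illustration of the same idea (all shapes below are made up for the sketch):

from mxnet import nd

full_w = nd.random.normal(shape=(64, 32, 3, 3))  # assumed maximum (out_channels, in_channels, kH, kW)
x = nd.random.uniform(shape=(2, 16, 8, 8))       # current input has only 16 channels
oup = 24                                         # currently selected output width
y = nd.Convolution(x, weight=full_w[:oup, :16, :, :], kernel=(3, 3), stride=(1, 1),
                   pad=(1, 1), num_filter=oup, no_bias=True)
print(y.shape)  # (2, 24, 8, 8)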
Code Example #5
File: network.py Project: jmnie/Video-Caption-CV-NLP
    def hybrid_forward(self, F, x):
        residual = x

        x = self.body(x)

        if self.downsample:
            residual = self.downsample(residual)

        x = F.Activation(x + residual, act_type='relu')
        return x
Code Example #6
    def LSTM_Cell(input, h_state, c_state):
        for x in input:
            f_t = nd.Activation(nd.FullyConnected(
                data=x, weight=wxhf, no_bias=True, num_hidden=num_hidden) +
                                nd.FullyConnected(data=h_state,
                                                  weight=whhf,
                                                  no_bias=True,
                                                  num_hidden=num_hidden) + bhf,
                                act_type="sigmoid")
            i_t = nd.Activation(nd.FullyConnected(
                data=x, weight=wxhi, no_bias=True, num_hidden=num_hidden) +
                                nd.FullyConnected(data=h_state,
                                                  weight=whhi,
                                                  no_bias=True,
                                                  num_hidden=num_hidden) + bhi,
                                act_type="sigmoid")
            o_t = nd.Activation(nd.FullyConnected(
                data=x, weight=wxho, no_bias=True, num_hidden=num_hidden) +
                                nd.FullyConnected(data=h_state,
                                                  weight=whho,
                                                  no_bias=True,
                                                  num_hidden=num_hidden) + bho,
                                act_type="sigmoid")
            g_t = nd.Activation(nd.FullyConnected(
                data=x, weight=wxhg, no_bias=True, num_hidden=num_hidden) +
                                nd.FullyConnected(data=h_state,
                                                  weight=whhg,
                                                  no_bias=True,
                                                  num_hidden=num_hidden) + bhg,
                                act_type="tanh")
            c_state = nd.multiply(f_t, c_state) + nd.multiply(i_t, g_t)
            h_state = nd.multiply(o_t, nd.tanh(c_state))

        output = nd.FullyConnected(data=h_state,
                                   weight=why,
                                   bias=by,
                                   num_hidden=num_outputs)
        output = nd.softmax(data=output)
        return output, h_state, c_state
Code Example #7
    def network(X, drop_rate=0.0):  # formula: output_size = ((input - kernel + 2*padding) / stride) + 1
        #data size 
        # MNIST,FashionMNIST = (batch size , 1 , 28 ,  28)
        # CIFAR = (batch size , 3 , 32 ,  32)

        C_H1=nd.Activation(data= nd.Convolution(data=X , weight = W1 , bias = B1 , kernel=(3,3) , stride=(1,1)  , num_filter=60) , act_type="relu") # MNIST : result = ( batch size , 60 , 26 , 26) , CIFAR10 : result = ( batch size , 60 , 30 , 30)
        P_H1=nd.Pooling(data = C_H1 , pool_type = "max" , kernel=(2,2), stride = (2,2)) # MNIST : result = (batch size , 60 , 13 , 13) , CIFAR10 : result = (batch size , 60 , 15 , 15)
        C_H2=nd.Activation(data= nd.Convolution(data=P_H1 , weight = W2 , bias = B2 , kernel=(6,6) , stride=(1,1) , num_filter=30), act_type="relu") # MNIST :  result = ( batch size , 30 , 8 , 8), CIFAR10 :  result = ( batch size , 30 , 10 , 10)
        P_H2=nd.Pooling(data = C_H2 , pool_type = "max" , kernel=(2,2), stride = (2,2)) # MNIST : result = (batch size , 30 , 4 , 4) , CIFAR10 : result = (batch size , 30 , 5 , 5)
        P_H2 = nd.flatten(data=P_H2)

        '''FullyConnected parameter
        • data: (batch_size, input_dim)
        • weight: (num_hidden, input_dim)
        • bias: (num_hidden,)
        • out: (batch_size, num_hidden)
        '''
        F_H1 =nd.Activation(nd.FullyConnected(data=P_H2 , weight=W3 , bias=B3 , num_hidden=120),act_type="sigmoid")
        F_H1 =nd.Dropout(data=F_H1, p=drop_rate)
        F_H2 =nd.Activation(nd.FullyConnected(data=F_H1 , weight=W4 , bias=B4 , num_hidden=64),act_type="sigmoid")
        F_H2 =nd.Dropout(data=F_H2, p=drop_rate)
        softmax_Y = nd.softmax(nd.FullyConnected(data=F_H2 ,weight=W5 , bias=B5 , num_hidden=10))
        return softmax_Y
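A quick runnable check of the output-size formula in the comment above, on an MNIST-sized batch with random parameters (the batch size 8 is arbitrary):

from mxnet import nd

X = nd.random.uniform(shape=(8, 1, 28, 28))  # MNIST-sized input
W = nd.random.normal(shape=(60, 1, 3, 3))    # (num_filter, in_channels, kH, kW)
B = nd.zeros(60)

conv = nd.Convolution(data=X, weight=W, bias=B, kernel=(3, 3), stride=(1, 1), num_filter=60)
print(conv.shape)  # (8, 60, 26, 26): ((28 - 3 + 2*0) / 1) + 1 = 26

pooled = nd.Pooling(data=conv, pool_type="max", kernel=(2, 2), stride=(2, 2))
print(pooled.shape)  # (8, 60, 13, 13)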
Code Example #8
File: net.py Project: WalterMa/StyleTransfer
    def forward(self, X):
        h = F.Activation(self.conv1_1(X), act_type='relu')
        h = F.Activation(self.conv1_2(h), act_type='relu')
        relu1_2 = h
        h = F.Pooling(h, pool_type='max', kernel=(2, 2), stride=(2, 2))

        h = F.Activation(self.conv2_1(h), act_type='relu')
        h = F.Activation(self.conv2_2(h), act_type='relu')
        relu2_2 = h
        h = F.Pooling(h, pool_type='max', kernel=(2, 2), stride=(2, 2))

        h = F.Activation(self.conv3_1(h), act_type='relu')
        h = F.Activation(self.conv3_2(h), act_type='relu')
        h = F.Activation(self.conv3_3(h), act_type='relu')
        relu3_3 = h
        h = F.Pooling(h, pool_type='max', kernel=(2, 2), stride=(2, 2))

        h = F.Activation(self.conv4_1(h), act_type='relu')
        h = F.Activation(self.conv4_2(h), act_type='relu')
        h = F.Activation(self.conv4_3(h), act_type='relu')
        relu4_3 = h

        return [relu1_2, relu2_2, relu3_3, relu4_3]
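A hedged usage sketch: assuming vgg is an instance of the block above, a single forward pass returns the four ReLU feature maps typically used for style/content losses (the input size is a placeholder):

from mxnet import nd

x = nd.random.uniform(shape=(1, 3, 256, 256))  # placeholder image batch
relu1_2, relu2_2, relu3_3, relu4_3 = vgg(x)    # assumes `vgg` is an instance of the network above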
Code Example #9
    def margin_loss(self, pick_fc):
        args = self.args
        import math

        m = args.margin_m
        s = args.margin_s
        assert s > 0.0
        #assert m >= 0.1
        assert m < (math.pi / 2)

        # pick_fc holds s * cos(t); divide by s to recover cos(t)
        cos_t = pick_fc / s
        cos_m = math.cos(m)
        sin_m = math.sin(m)
        mm = math.sin(math.pi - m) * m  #sin(pi-m)*m=sin(m)*m
        # threshold = 0.0
        threshold = math.cos(math.pi - m)  # threshold = cos(pi - m) = -cos(m)
        if args.easy_margin:
            cond = nd.Activation(data=cos_t, act_type='relu')
        else:
            cond_v = cos_t - threshold
            cond = nd.Activation(data=cond_v, act_type='relu')
        body = cos_t * cos_t
        body = 1.0 - body
        sin_t = nd.sqrt(body)  # sin(t) = sqrt(1 - cos(t)^2)
        new_zy = cos_t * cos_m  # cos(t+m) = cos(t)*cos(m) - sin(t)*sin(m)
        b = sin_t * sin_m
        new_zy = new_zy - b
        new_zy = new_zy * s
        if args.easy_margin:
            zy_keep = pick_fc
        else:
            zy_keep = pick_fc - s * mm  # zy-s*sin(m)*m = s*cos(t) - s*m*sin(m)
        new_zy = nd.where(
            cond, new_zy, zy_keep
        )  # where cond > 0, apply the margin term s*cos(t+m); otherwise keep zy_keep
        return new_zy
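A small numeric check (pure Python) of the angle-addition identity the code expands, using illustrative ArcFace-style settings s=64.0, m=0.5 (these values are assumptions, not read from args):

import math

s, m = 64.0, 0.5
t = 0.9                                         # some angle theta in (0, pi - m)
cos_t, sin_t = math.cos(t), math.sin(t)
cos_m, sin_m = math.cos(m), math.sin(m)

expanded = (cos_t * cos_m - sin_t * sin_m) * s  # what margin_loss computes step by step
direct = math.cos(t + m) * s                    # s * cos(t + m) computed directly

print(abs(expanded - direct) < 1e-9)  # True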
Code Example #10
File: super_resolution.py Project: mview/mxnet
 def forward(self, x):
     x = F.Activation(self.conv1(x), act_type='relu')
     x = F.Activation(self.conv2(x), act_type='relu')
     x = F.Activation(self.conv3(x), act_type='relu')
     return _rearrange(self.conv4(x), F, self.upscale_factor)
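_rearrange is defined elsewhere in the file; in the upstream MXNet Gluon super-resolution example it is a pixel-shuffle step that turns (N, C*r^2, H, W) into (N, C, H*r, W*r). A sketch along those lines (treat it as an approximation, not the project's exact code):

def _rearrange(raw, F, upscale_factor):
    # (N, C * r^2, H, W) -> (N, C, r^2, H, W)
    splitted = F.reshape(raw, shape=(0, -4, -1, upscale_factor ** 2, 0, 0))
    # (N, C, r^2, H, W) -> (N, C, r, r, H, W)
    unflatten = F.reshape(splitted, shape=(0, 0, -4, upscale_factor, upscale_factor, 0, 0))
    # (N, C, r, r, H, W) -> (N, C, H, r, W, r)
    swapped = F.transpose(unflatten, axes=(0, 1, 4, 2, 5, 3))
    # (N, C, H, r, W, r) -> (N, C, H*r, W*r)
    return F.reshape(swapped, shape=(0, 0, -3, -3))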
Code Example #11
 def forward(self, input, training=True):
     if self.activation != 'linear':
         if training:
             if self.use_bias:
                 return F.Activation(
                     F.FullyConnected(input,
                                      self.w_mu.data() +
                                      self.w_sigma.data() * self.w_epsilon,
                                      self.b_mu.data() +
                                      self.b_sigma.data() * self.b_epsilon,
                                      num_hidden=self.units),
                     self.activation)
             else:
                 return F.Activation(
                     F.FullyConnected(input,
                                      self.w_mu.data() +
                                      self.w_sigma.data() * self.w_epsilon,
                                      no_bias=True,
                                      num_hidden=self.units),
                     self.activation)
         else:
             if self.use_bias:
                 return F.Activation(
                     F.FullyConnected(input,
                                      self.w_mu.data(),
                                      self.b_mu.data(),
                                      num_hidden=self.units),
                     self.activation)
             else:
                 return F.Activation(
                     F.FullyConnected(input,
                                      self.w_mu.data(),
                                      no_bias=True,
                                      num_hidden=self.units),
                     self.activation)
     else:
         if training:
             if self.use_bias:
                 return F.FullyConnected(
                     input,
                     self.w_mu.data() +
                     self.w_sigma.data() * self.w_epsilon,
                     self.b_mu.data() +
                     self.b_sigma.data() * self.b_epsilon,
                     num_hidden=self.units)
             else:
                 return F.FullyConnected(
                     input,
                     self.w_mu.data() +
                     self.w_sigma.data() * self.w_epsilon,
                     no_bias=True,
                     num_hidden=self.units)
         else:
             if self.use_bias:
                 return F.FullyConnected(input,
                                         self.w_mu.data(),
                                         self.b_mu.data(),
                                         num_hidden=self.units)
             else:
                 return F.FullyConnected(input,
                                         self.w_mu.data(),
                                         no_bias=True,
                                         num_hidden=self.units)
Code Example #12
File: model.py Project: kyocen/Mxnet_Tutorial
    def network(
        X,
        drop_rate=0.0
    ):  # formula: output_size = ((input - kernel + 2*padding) / stride) + 1
        #data size
        # MNIST,FashionMNIST = (batch size , 1 , 28 ,  28)
        # CIFAR = (batch size , 3 , 32 ,  32)

        # Note: the built-in BatchNorm function does not update moving_mean / moving_var here.
        C_H1 = nd.Activation(
            data=nd.BatchNorm(data=nd.Convolution(data=X,
                                                  weight=W1,
                                                  bias=B1,
                                                  kernel=(3, 3),
                                                  stride=(1, 1),
                                                  num_filter=60),
                              gamma=gamma1,
                              beta=beta1,
                              moving_mean=ma1,
                              moving_var=mv1,
                              momentum=0.9,
                              fix_gamma=False,
                              use_global_stats=True),
            act_type="relu"
        )  # MNIST : result = ( batch size , 60 , 26 , 26) , CIFAR10 : result = ( batch size , 60 , 30 , 30)
        P_H1 = nd.Pooling(
            data=C_H1, pool_type="avg", kernel=(2, 2), stride=(2, 2)
        )  # MNIST : result = (batch size , 60 , 13 , 13) , CIFAR10 : result = (batch size , 60 , 15 , 15)
        C_H2 = nd.Activation(
            data=nd.BatchNorm(data=nd.Convolution(data=P_H1,
                                                  weight=W2,
                                                  bias=B2,
                                                  kernel=(6, 6),
                                                  stride=(1, 1),
                                                  num_filter=30),
                              gamma=gamma2,
                              beta=beta2,
                              moving_mean=ma2,
                              moving_var=mv2,
                              momentum=0.9,
                              fix_gamma=False,
                              use_global_stats=True),
            act_type="relu"
        )  # MNIST :  result = ( batch size , 30 , 8 , 8), CIFAR10 :  result = ( batch size , 30 , 10 , 10)
        P_H2 = nd.Pooling(
            data=C_H2, pool_type="avg", kernel=(2, 2), stride=(2, 2)
        )  # MNIST : result = (batch size , 30 , 4 , 4) , CIFAR10 : result = (batch size , 30 , 5 , 5)
        P_H2 = nd.flatten(data=P_H2)
        '''FullyConnected parameter
        • data: (batch_size, input_dim)
        • weight: (num_hidden, input_dim)
        • bias: (num_hidden,)
        • out: (batch_size, num_hidden)
        '''
        F_H1 = nd.Activation(nd.BatchNorm(data=nd.FullyConnected(
            data=P_H2, weight=W3, bias=B3, num_hidden=120),
                                          gamma=gamma3,
                                          beta=beta3,
                                          moving_mean=ma3,
                                          moving_var=mv3,
                                          momentum=0.9,
                                          fix_gamma=False,
                                          use_global_stats=True),
                             act_type="relu")
        F_H1 = nd.Dropout(data=F_H1, p=drop_rate)
        F_H2 = nd.Activation(nd.BatchNorm(data=nd.FullyConnected(
            data=F_H1, weight=W4, bias=B4, num_hidden=64),
                                          gamma=gamma4,
                                          beta=beta4,
                                          moving_mean=ma4,
                                          moving_var=mv4,
                                          momentum=0.9,
                                          fix_gamma=False,
                                          use_global_stats=True),
                             act_type="relu")
        F_H2 = nd.Dropout(data=F_H2, p=drop_rate)
        #softmax_Y = nd.softmax(nd.FullyConnected(data=F_H2 ,weight=W5 , bias=B5 , num_hidden=10))
        out = nd.FullyConnected(data=F_H2, weight=W5, bias=B5, num_hidden=10)
        return out
Code Example #13
 def decode(self, z):
     h3 = nd.Activation(self.fc3(z), 'relu')
     return nd.Activation(self.fc4(h3), 'sigmoid')
Code Example #14
 def encode(self, x):
     h1 = nd.Activation(self.fc1(x), 'relu')
     return self.fc21(h1), self.fc22(h1)
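encode returns two heads, conventionally the mean and log-variance of the latent code in a VAE; the sampling step that usually sits between this encode and the decode in Code Example #13 is the reparameterization trick. A sketch under that assumption (function and variable names are placeholders):

from mxnet import nd

def reparameterize(mu, logvar):
    # z = mu + sigma * eps with eps ~ N(0, 1), keeping the sample differentiable w.r.t. mu and sigma
    std = nd.exp(0.5 * logvar)
    eps = nd.random.normal(shape=std.shape)
    return mu + std * eps

# mu, logvar = model.encode(x)
# z = reparameterize(mu, logvar)
# x_recon = model.decode(z)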
Code Example #15
import numpy as np
Code Example #16
File: model.py Project: kyocen/Mxnet_Tutorial
import mxnet as mx