Example #1
 def forward(self, x):
     x, trans = self.feat(x)
     x = nd.relu(self.bn1(self.fc1(x)))
     x = nd.relu(self.bn2(self.fc2(x)))
     x = self.dp(x)
     x = self.fc3(x)
     return x, trans
Example #2
    def forward(self, X):
        Y = nd.relu(self.bn1(self.conv1(X)))
        Y = self.bn2(self.conv2(Y))
        if self.conv3:
            X = self.conv3(X)

        return nd.relu(Y + X)
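The residual blocks in Examples #2, #6, #10, #19, and #37 only show the forward pass. A minimal constructor consistent with this forward, in the style of the d2l ResNet block, might look like the sketch below; the kernel sizes, padding, and the use_1x1conv/strides arguments are assumptions, not taken from these projects.

from mxnet.gluon import nn

class Residual(nn.Block):
    def __init__(self, num_channels, use_1x1conv=False, strides=1, **kwargs):
        super(Residual, self).__init__(**kwargs)
        self.conv1 = nn.Conv2D(num_channels, kernel_size=3, padding=1, strides=strides)
        self.conv2 = nn.Conv2D(num_channels, kernel_size=3, padding=1)
        # optional 1x1 convolution so the skip connection matches Y's shape
        self.conv3 = nn.Conv2D(num_channels, kernel_size=1, strides=strides) if use_1x1conv else None
        self.bn1 = nn.BatchNorm()
        self.bn2 = nn.BatchNorm()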
Example #3
File: resnet.py Project: z01nl1o02/tests
 def forward(self,x):
     out = self.bn1( self.conv1(x) )
     out = nd.relu(out)
     out = self.bn2( self.conv2(out) )
     if not self.same_shape:
         x = self.conv3(x)
     return nd.relu(out + x)
Example #4
 def forward(self, x, use_branch1):
     root = nd.relu(self.dense1(nd.relu(self.dense0(x))))
     if use_branch1:
         out = self.dense3_1(root)
     else:
         out = self.dense3_2(root)
     return out
Example #5
def net(X, verbose=False):
    X = X.reshape((batch_size, 1, 28, 28))
    out1 = nd.Convolution(data=X,
                          weight=w1,
                          bias=b1,
                          kernel=w1.shape[2:],
                          num_filter=w1.shape[0])
    out2 = nd.relu(out1)
    out3 = nd.Pooling(data=out2, pool_type="max", kernel=(2, 2), stride=(2, 2))
    out4 = nd.Convolution(data=out3,
                          weight=w2,
                          bias=b2,
                          kernel=w2.shape[2:],
                          num_filter=w2.shape[0])
    out5 = nd.relu(out4)
    out6 = nd.Pooling(data=out5, pool_type="max", kernel=(2, 2), stride=(2, 2))
    out7 = nd.dot(nd.flatten(out6), w3) + b3
    out8 = nd.relu(out7)
    out9 = nd.dot(out8, w4) + b4
    if verbose:
        print('1st conv block:', out3.shape)
        print('2nd conv block:', out5.shape)
        print('2nd pooling:', out6.shape)
        print('1st dense (linear):', out7.shape)
        print('1st dense (relu):', out8.shape)
        print('2nd dense:', out9.shape)
        # print('output:', out9)
    return out9
Example #6
File: model.py Project: tsintian/d2l-en
 def forward(self, X):
     """Forward function"""
     Y = nd.relu(self.bn1(self.conv1(X)))
     Y = self.bn2(self.conv2(Y))
     if self.conv3:
         X = self.conv3(X)
     return nd.relu(Y + X)
Example #7
	def forward(self, x):
		x = nd.relu(self.dense0(x))
		x = nd.relu(self.dense1(x))
		x = nd.relu(self.dense2(x))
		
		x = self.dense3(x)
		return x
Example #8
def net(X):
    X = X.reshape((-1, num_inputs))
    h1 = nd.relu(nd.dot(X, W1) + b1)
    h1 = dropout(h1, drop_prob1)
    h2 = nd.relu(nd.dot(h1, W2) + b2)
    h2 = dropout(h2, drop_prob2)
    return nd.dot(h2, W3) + b3
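Several of these snippets (Examples #8, #28, #30, #32, #33) call a hand-written dropout helper that is not shown. A minimal sketch of such a helper, using inverted dropout on MXNet NDArrays as in the d2l scratch implementation, could be:

from mxnet import nd

def dropout(X, drop_prob):
    assert 0 <= drop_prob <= 1
    keep_prob = 1 - drop_prob
    if keep_prob == 0:
        # drop everything
        return X.zeros_like()
    # random 0/1 mask; keep each unit with probability keep_prob
    mask = nd.random.uniform(0, 1, X.shape, ctx=X.context) < keep_prob
    # scale the kept activations so the expected value is unchanged
    return mask * X / keep_prob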
Example #9
def net(X, verbose=False):
    h1_conv = nd.Convolution(data=X,
                             weight=w1,
                             bias=b1,
                             kernel=w1.shape[2:],
                             num_filter=w1.shape[0])
    h1_activation = nd.relu(h1_conv)
    h1 = nd.Pooling(data=h1_activation,
                    pool_type="max",
                    kernel=(2, 2),
                    stride=(2, 2))

    h2_conv = nd.Convolution(data=h1,
                             weight=w2,
                             bias=b2,
                             kernel=w2.shape[2:],
                             num_filter=w2.shape[0])
    h2_activation = nd.relu(h2_conv)
    h2 = nd.Pooling(data=h2_activation,
                    pool_type="max",
                    kernel=(2, 2),
                    stride=(2, 2))
    h2 = nd.flatten(h2)

    h3_linear = nd.dot(h2, w3) + b3
    h3 = nd.relu(h3_linear)

    h4_linear = nd.dot(h3, w4) + b4
    return h4_linear
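Scratch LeNet snippets like Example #9 (and Example #20 below) rely on globally defined parameters w1..b4. A plausible initialization is sketched here, with shapes taken from the layer-size comments in Example #20; the weight scale and context handling are assumptions.

import mxnet as mx
from mxnet import nd

ctx = mx.gpu() if mx.context.num_gpus() > 0 else mx.cpu()
weight_scale = .01

# conv layer 1: 20 output channels, 5x5 kernel
w1 = nd.random.normal(shape=(20, 1, 5, 5), scale=weight_scale, ctx=ctx)
b1 = nd.zeros(w1.shape[0], ctx=ctx)
# conv layer 2: 50 output channels, 3x3 kernel
w2 = nd.random.normal(shape=(50, 20, 3, 3), scale=weight_scale, ctx=ctx)
b2 = nd.zeros(w2.shape[0], ctx=ctx)
# dense layers: 50*5*5 = 1250 flattened features -> 128 -> 10
w3 = nd.random.normal(shape=(1250, 128), scale=weight_scale, ctx=ctx)
b3 = nd.zeros(w3.shape[1], ctx=ctx)
w4 = nd.random.normal(shape=(128, 10), scale=weight_scale, ctx=ctx)
b4 = nd.zeros(w4.shape[1], ctx=ctx)

params = [w1, b1, w2, b2, w3, b3, w4, b4]
for param in params:
    param.attach_grad()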
Example #10
 def forward(self, X):
     """Forward function"""
     Y = nd.relu(self.bn1(self.conv1(X)))
     Y = self.bn2(self.conv2(Y))
     if self.conv3:
         X = self.conv3(X)
     return nd.relu(Y + X)
Example #11
    def _net(self, in_data, params, training_mode=True):
        if training_mode:
            dot = nd.batch_dot
        else:
            dot = nd.dot

        # if there are no hidden layers, just compute the output as a linear combination
        if len(self._hidden_layers) == 0:
            return (dot(in_data, params['W0']) + params['b0']).reshape(
                (in_data.shape[0], self._output_layer))

        #  Compute the first hidden layer
        h0_linear = dot(in_data, params['W0']) + params['b0']
        h0 = nd.relu(h0_linear)

        #  Compute the (i+1)^th hidden layer
        hprevious = h0
        for i in range(1, len(self._hidden_layers)):
            str_index = str(i)
            hcurrent_linear = dot(
                hprevious, params['W' + str_index]) + params['b' + str_index]
            hcurrent = nd.relu(hcurrent_linear)
            hprevious = hcurrent

        str_index = str(len(self._hidden_layers))
        yhat_linear = dot(hprevious,
                          params['W' + str_index]) + params['b' + str_index]

        return yhat_linear.reshape((in_data.shape[0], self._output_layer))
Example #12
    def forward(self, x):

        if self.routing is not None:
            routing_weight = nd.softmax(nd.zeros(shape=(1, 1, self.num_points),
                                                 ctx=x.context),
                                        axis=2)
        trans = self.stn(x)
        x = nd.transpose(x, (0, 2, 1))
        x = nd.batch_dot(x, trans)
        x = nd.transpose(x, (0, 2, 1))
        x = nd.relu(self.bn1(self.conv1(x)))
        pointfeat = x
        x = nd.relu(self.bn2(self.conv2(x)))
        x = self.bn3(self.conv3(x))
        if self.routing is not None:
            s = nd.sum(x * routing_weight, axis=2, keepdims=True)
            # v = Squash(s, axis=1)
            for _ in range(self.routing):
                routing_weight = routing_weight + nd.sum(
                    x * s, axis=1, keepdims=True)
                c = nd.softmax(routing_weight, axis=2)
                s = nd.sum(x * c, axis=2, keepdims=True)
                # v = Squash(s, axis=1)
            x = s
        else:
            x = self.mp1(x)
        if self.global_feat:
            return x, trans
        else:
            x = x.repeat(self.num_points, axis=2)
            return nd.concat(x, pointfeat, dim=1), trans
Example #13
 def forward(self, x):
     x, trans = self.feat(x)
     x = nd.relu(self.bn1(self.fc1(x)))
     x = nd.relu(self.bn2(self.fc2(x)))
     x = self.fc3(x)
     # return nd.log_softmax(x, axis=-1), trans
     return x, trans
Example #14
 def fc_architecture(inputs, state={}):
     dense1 = nd.relu(nd.dot(inputs, self.Wdense1) + self.bdense1)
     dense2 = nd.relu(nd.dot(dense1, self.Wdense2) + self.bdense2)
     dense3 = nd.relu(nd.dot(dense2, self.Wdense3) + self.bdense3)
     dense4 = nd.relu(nd.dot(dense3, self.Wdense4) + self.bdense4)
     qvalues = nd.dot(dense4, self.Wdense5) + self.bdense5
     return (qvalues, {})
Example #15
def lenet(X, params):
    h1_conv = nd.Convolution(data=X,
                             weight=params[0],
                             bias=params[1],
                             kernel=(3, 3),
                             num_filter=20)
    h1_act = nd.relu(data=h1_conv)
    h1_pool = nd.Pooling(data=h1_act,
                         pool_type="avg",
                         kernel=(2, 2),
                         stride=(2, 2))
    h2_conv = nd.Convolution(data=h1_pool,
                             weight=params[2],
                             bias=params[3],
                             kernel=(5, 5),
                             num_filter=50)
    h2_act = nd.relu(data=h2_conv)
    h2_pool = nd.Pooling(data=h2_act,
                         pool_type="avg",
                         kernel=(2, 2),
                         stride=(2, 2))
    h2_flatten = nd.Flatten(data=h2_pool)
    h3_fc = nd.dot(h2_flatten, params[4]) + params[5]
    h3_act = nd.relu(data=h3_fc)
    h4_fc = nd.dot(h3_act, params[6]) + params[7]
    return h4_fc
Example #16
    def compute_LSTM_feat(self, program, parameters, index):
        # (batch, seq, feat) -> (seq, batch, feat)
        program = program.transpose((1, 0, 2))
        parameters = parameters.transpose((1, 0, 2))
        bsz = program.shape[1]
        init = self.init_hidden(bsz)

        # program linear transform
        dim1 = program.shape
        program = program.reshape(-1, self.vocab_size + 1)
        x1 = nd.relu(self.pgm_embed(program))
        x1 = x1.reshape(dim1[0], dim1[1], -1)

        # parameter linear transform
        dim2 = parameters.shape
        parameters = parameters.reshape(-1, self.max_param)
        x2 = nd.relu(self.param_embed(parameters))
        x2 = x2.reshape(dim2[0], dim2[1], -1)

        # LSTM to aggregate programs and parameters
        x = nd.concat(x1, x2, dim=2)
        out, hidden = self.lstm(x, init)

        # select the aggregated feature at the desired step of each sequence
        # (out has shape (seq_len, batch, hidden); index holds one step per sequence)
        pgm_param_feat = nd.SequenceLast(out,
                                         sequence_length=(index + 1).astype('float32'),
                                         use_sequence_length=True)
        #pgm_param_feat = nd.relu(self.pgm_param_feat(pgm_param_feat))

        return pgm_param_feat
Example #17
def lenet(X, params):
    h1_conv = nd.Convolution(data=X,
                             weight=params[0],
                             bias=params[1],
                             kernel=(3, 3),
                             num_filter=20)
    h1_activation = nd.relu(h1_conv)
    h1 = nd.Pooling(data=h1_activation,
                    pool_type='avg',
                    kernel=(2, 2),
                    stride=(2, 2))

    h2_conv = nd.Convolution(data=h1,
                             weight=params[2],
                             bias=params[3],
                             kernel=(5, 5),
                             num_filter=50)
    h2_activation = nd.relu(h2_conv)
    h2 = nd.Pooling(data=h2_activation,
                    pool_type='avg',
                    kernel=(2, 2),
                    stride=(2, 2))
    h2 = nd.flatten(h2)

    h3_linear = nd.dot(h2, params[4]) + params[5]
    h3 = nd.relu(h3_linear)
    y_hat = nd.dot(h3, params[6]) + params[7]
    return y_hat
Example #18
def LetNet_Direct(X, verbose=False):
    # Initialize the parameters
    params = initialize_params()
    W1, b1, W2, b2, W3, b3, W4, b4 = params
    # 1. Copy the data to the GPU
    X = X.as_in_context(ctx)

    # 2. Convolutional layers
    # 2.1 First convolutional layer
    h1_conv = nd.Convolution(data=X, weight=W1, bias=b1, kernel=W1.shape[2:], num_filter=W1.shape[0])
    h1_activation = nd.relu(h1_conv)
    h1 = nd.Pooling(data=h1_activation, pool_type='max', kernel=(2, 2), stride=(2, 2))
    # 2.2 Second convolutional layer
    h2_conv = nd.Convolution(data=h1, weight=W2, bias=b2, kernel=W2.shape[2:], num_filter=W2.shape[0])
    h2_activation = nd.relu(h2_conv)
    h2 = nd.Pooling(data=h2_activation, pool_type='max', kernel=(2, 2), stride=(2, 2))  # the pooling kernel determines the output size
    # Flatten to a 2-D matrix to feed the dense layers
    h2 = nd.flatten(h2)

    # 3. Fully connected layers
    # 3.1 First dense layer, with a nonlinear activation
    h3_linear = nd.dot(h2, W3) + b3
    h3 = nd.relu(h3_linear)
    # 3.2 Second dense layer
    h4 = nd.dot(h3, W4) + b4

    # Optionally print the output shape of each block
    if verbose:
        print('1st conv block: ', h1.shape)
        print('2nd conv block: ', h2.shape)
        print('3rd dense: ', h3.shape)
        print('4th dense: ', h4.shape)
    return h4
Example #19
    def forward(self, X):
        Y = nd.relu(self.batch_norm1(self.cov1(X)))
        Y = self.batch_norm2(self.cov2(Y))
        if self.cov3:
            X = self.cov3(X)

        return nd.relu(Y + X)
Example #20
def net(x):
    # x = x.as_in_context(w1.context)
    # first conv layer
    # (bs, 1, 28, 28) ==> (bs, 20, 12, 12)
    h1_conv = nd.Convolution(data=x,
                             weight=w1,
                             bias=b1,
                             kernel=w1.shape[2:],
                             num_filter=w1.shape[0])
    h1_activation = nd.relu(h1_conv)
    h1 = nd.Pooling(data=h1_activation,
                    pool_type='max',
                    kernel=(2, 2),
                    stride=(2, 2))
    # second conv layer
    # (bs, 20, 12, 12) ==> (bs, 50, 5, 5) ==> (bs, 50*5*5)
    h2_conv = nd.Convolution(data=h1,
                             weight=w2,
                             bias=b2,
                             kernel=w2.shape[2:],
                             num_filter=w2.shape[0])
    h2_activation = nd.relu(h2_conv)
    h2 = nd.Pooling(data=h2_activation,
                    pool_type='max',
                    kernel=(2, 2),
                    stride=(2, 2))
    h2 = h2.flatten()
    # first fc layer
    # (bs, 1250) ==> (bs, 128)
    h3 = nd.relu(nd.dot(h2, w3) + b3)
    # second fc layer
    # (bs, 128) ==> (bs, 10)
    h4 = nd.dot(h3, w4) + b4
    return h4
Example #21
    def forward(self, x, *args):
        out = nd.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        if not self.same_shape:
            x = self.conv3(x)

        return nd.relu(out + x)
Example #22
 def forward(self, x):
     x, trans = self.feat(x)
     x = nd.relu(self.bn1(self.conv1(x)))
     x = nd.relu(self.bn2(self.conv2(x)))
     x = nd.relu(self.bn3(self.conv3(x)))
     x = self.conv4(x)
     x = nd.transpose(x, (0, 2, 1))
     return x, trans
Example #23
 def forward(self, x):
     x = nd.relu(self.dense0(x))
     print("Hidden Representation 1: %s" % x)
     x = nd.relu(self.dense1(x))
     print("Hidden Representation 1: %s" % x)
     x = self.dense2(x)
     print("Network output: %s" % x)
     return x
Example #24
    def forward(self, x):
        out = nd.relu(self.bn_1(self.conv_1(x)))
        out = self.bn_2(self.conv_2(out))

        if not self.is_same_shape:
            x = self.conv_3(x)

        return nd.relu(out + x)
Example #25
def contrastive_loss(net, data, label):
    label = label.reshape(-1, 1)
    # 1 where the two samples share a label, 0 otherwise
    label_mat = nd.relu(-nd.abs(label - label.T) + 1).astype('float32')
    vec = net(data)
    vec = nd.Flatten(vec)
    # pairwise squared Euclidean distances between embeddings
    dist_self = nd.sum(nd.square(vec), axis=1, keepdims=True)
    dist_mat = nd.broadcast_add(dist_self, dist_self.T) - 2 * nd.dot(vec, vec.T)
    # pull similar pairs together, push dissimilar pairs apart up to margin 1.0
    loss = label_mat * dist_mat + (1 - label_mat) * nd.relu(1.0 - dist_mat)
    return loss
Example #26
    def forward(self, X):
        conv1 = self.conv1(X)
        bn1 = self.bn1(conv1)
        relu1 = nd.relu(bn1)

        conv2 = self.conv2(relu1)
        bn2 = self.bn2(conv2)

        skip = self.skip(X)
        return nd.relu(bn2 + skip)
Example #27
 def postprocess(self, x, embed_test):
     output = nd.relu(x)
     output = self.conv_post_1(output)
     output = nd.relu(output)
     output = self.conv_post_2(output)
     output = nd.broadcast_axis(output, axis=1, size=24)
     embed_result = nd.concat(output, embed_test, dim=2)
     output = self.outputLayer(self.net(embed_result))
     output = output.reshape(output.shape[0], -1)
     return output
Example #28
def net(X, is_training=False):
    X = X.reshape((-1, num_inputs))  # -1 means this dimension is inferred from the remaining ones
    # First fully connected layer
    h1 = nd.relu(nd.dot(X, w1) + b1)
    # Apply dropout after the first fully connected layer
    if is_training: h1 = dropout(h1, drop_prob1)
    # Second fully connected layer
    h2 = nd.relu(nd.dot(h1, w2) + b2)
    # Apply dropout after the second fully connected layer
    if is_training: h2 = dropout(h2, drop_prob2)
    return nd.dot(h2, w3) + b3
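A sketch of how the is_training flag is typically driven from a training loop follows; train_iter and loss are hypothetical placeholders, not part of the original snippet.

from mxnet import autograd

for X, y in train_iter:                    # train_iter: hypothetical data iterator
    with autograd.record():
        y_hat = net(X, is_training=True)   # enable dropout only while training
        l = loss(y_hat, y)                 # loss: hypothetical, e.g. softmax cross-entropy
    l.backward()
# at evaluation time, call net(X) so both dropout layers are skipped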
Example #29
 def forward(self, x):
     x = nd.relu(self.bn1(self.conv1(x)))
     x = nd.relu(self.bn2(self.conv2(x)))
     x = nd.relu(self.bn3(self.conv3(x)))
     x = nd.flatten(x).expand_dims(0)
     #x, self.states = self.lstm(x, self.states)
     x = self.dense1(x)
     x = self.dense2(x)
     probs = self.action_pred(x)
     values = self.value_pred(x)
     return mx.ndarray.softmax(probs), values
Example #30
def train(X):
    drop_prob1 = 0.2
    drop_prob2 = 0.5
    X = X.reshape((-1, num_inputs))

    # First fully connected layer
    h1 = dropout(nd.relu(nd.dot(X, W1) + b1), drop_prob1)

    h2 = dropout(nd.relu(nd.dot(h1, W2) + b2), drop_prob2)

    return nd.dot(h2, W3) + b3
Example #31
 def forward(self, x):
     # batchsize = x.shape[0]
     x, trans = self.feat(x)
     x = nd.relu(self.bn1(self.conv1(x)))
     x = nd.relu(self.bn2(self.conv2(x)))
     x = nd.relu(self.bn3(self.conv3(x)))
     x = self.conv4(x)
     x = x.transpose((0, 2, 1))
     # x = x.log_softmax(axis=-1)
     # x = x.reshape(batchsize, self.num_points, self.k)
     return x, trans
Example #32
def net(X):
    X = X.reshape((-1, num_inputs))
    # First fully connected layer.
    h1 = nd.relu(nd.dot(X, W1) + b1)
    # Apply dropout after the first fully connected layer.
    h1 = dropout(h1, drop_prob1)
    # Second fully connected layer.
    h2 = nd.relu(nd.dot(h1, W2) + b2)
    # Apply dropout after the second fully connected layer.
    h2 = dropout(h2, drop_prob2)
    return nd.dot(h2, W3) + b3
Example #33
def net(X):
	X = X.reshape((-1,num_inputs))
	# first layer
	h1 = nd.relu(nd.dot(X,W1)+b1)
	# drop out 
	h1 = dropout(h1,drop_prob1)
	# second layer
	h2 = nd.relu(nd.dot(h1,W2)+b2)
	# drop out
	h2 = dropout(h2,drop_prob2)

	return nd.dot(h2,W3)+b3
Example #34
 def forward(self,x):
     out = self.bn1( self.conv1(x) )
     return nd.relu(out)
Example #35
 def forward(self, x):
     linear = nd.dot(x, self.weight.data()) + self.bias.data()
     return nd.relu(linear)
Example #36
 def forward(self, x):
     return self.dense1(nd.relu(self.dense0(x)))
Example #37
File: utils.py Project: tsintian/d2l-zh
 def forward(self, X):
     Y = nd.relu(self.bn1(self.conv1(X)))
     Y = self.bn2(self.conv2(Y))
     if self.conv3:
         X = self.conv3(X)
     return nd.relu(Y + X)
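As a closing usage sketch (assuming the hypothetical Residual constructor outlined after Example #2), a block like the one in Example #37 can be smoke-tested as follows:

from mxnet import nd

blk = Residual(3)
blk.initialize()
X = nd.random.uniform(shape=(4, 3, 6, 6))
print(blk(X).shape)   # (4, 3, 6, 6): identity-shaped skip connection

blk2 = Residual(6, use_1x1conv=True, strides=2)
blk2.initialize()
print(blk2(X).shape)  # (4, 6, 3, 3): the 1x1 conv adjusts channels and stride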