Example #1
def net(X):
    X = X.reshape(-1, num_inputs)
    H1 = npx.relu(np.dot(X, W1) + b1)
    if autograd.is_training():
        H1 = dropout(H1, drop_prob1)
    H2 = npx.relu(np.dot(H1, W2) + b2)
    if autograd.is_training():
        H2 = dropout(H2, drop_prob2)
    return np.dot(H2, W3) + b3
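Example #1 calls a dropout helper that is not defined in the snippet. A minimal sketch of an inverted-dropout function with that signature, modeled on the d2l-style implementation (the exact helper used by the original code may differ):

def dropout(X, drop_prob):
    # Zero each element with probability drop_prob, then rescale the survivors
    # so the expected value of the output matches the input.
    assert 0 <= drop_prob <= 1
    if drop_prob == 1:
        return np.zeros_like(X)
    mask = np.random.uniform(0, 1, X.shape) > drop_prob
    return mask.astype(np.float32) * X / (1.0 - drop_prob)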
Example #2
    def forward(self, _input_layer):

        x = self.BN1(_input_layer)
        x = FFx.relu(x)
        x = self.conv1(x)

        x = self.BN2(x)
        x = FFx.relu(x)
        x = self.conv2(x)

        return x
Example #3
    def forward(self, input):

        out = mx.contrib.nd.BilinearResize2D(input.as_nd_ndarray(),
                                             scale_height=2.,
                                             scale_width=2.)
        out = out.as_np_ndarray()
        out = self.conv1(out)
        out = FFx.relu(out)
        out = self.conv2(out)
        out = FFx.relu(out)

        return out
Example #4
    def forward(self, input1, input2):

        out = mx.contrib.nd.BilinearResize2D(input1.as_nd_ndarray(),
                                             scale_height=2.,
                                             scale_width=2.)
        out = out.as_np_ndarray()
        out = self.conv1(out)
        out = FFx.relu(out)
        out2 = self.conv2(FF.concatenate([out, input2], axis=1))
        out2 = FFx.relu(out2)

        return out2
Example #5
    def forward(self, x):
        y = self.conv1(x)
        y = self.bn1(y)
        y = npx.relu(y)
        y = self.conv2(y)
        y = self.bn2(y)
        y = npx.relu(y)
        y = self.conv3(y)
        y = self.bn3(y)

        if self.conv_skip:
            x = self.conv_skip(x)
            x = self.bn_skip(x)

        return npx.relu(x + y)
Example #6
    def forward(self, user_id, seq, item_id):
        item_embs = np.expand_dims(self.Q(seq), 1)
        user_emb = self.P(user_id)  # (4096, 10)
        out, out_h, out_v, out_hs = None, None, None, []
        # Vertical convolution over the item embeddings
        if self.d_prime:
            out_v = self.conv_v(item_embs)
            out_v = out_v.reshape(
                out_v.shape[0], self.fc1_dim_v)  # (4096, 4*10)
        # Horizontal convolutions over the time dimension
        if self.d:
            for conv, maxp in zip(self.conv_h, self.max_pool):  # slide over the sequence
                conv_out = np.squeeze(npx.relu(conv(item_embs)), axis=3)
                t = maxp(conv_out)
                pool_out = np.squeeze(t, axis=2)
                out_hs.append(pool_out)
            out_h = np.concatenate(out_hs, axis=1)  # (4096, 16*3)
        out = np.concatenate([out_v, out_h], axis=1)  # (4096, 4*10+16*3)
        z = self.fc(self.dropout(out))  # (4096, 10)

        # Concatenate with the user embedding
        x = np.concatenate([z, user_emb], axis=1)  # (4096, 20)

        # Compute the score against the item embedding
        q_prime_i = np.squeeze(self.Q_prime(item_id))  # (4096, 20)
        b = np.squeeze(self.b(item_id))
        res = (x * q_prime_i).sum(1) + b  # (4096,)
        return res
Example #7
 def forward(self, X):
     X = self.dense(X)
     X = npx.relu(np.dot(X, self.rand_weight.data()) + 1)
     X = self.dense(X)
     while np.abs(X).sum() > 1:
         X /= 2
     return X.sum()
Example #8
 def forward(self, x):
     ctx = x.ctx
     h = x.dot(self.w1.data(ctx))  # equivalent to np.dot(x, w1)
     h_relu = npx.relu(h)  # ReLU activation, equivalent to np.maximum(h, 0)
     y_pred = h_relu.dot(self.w2.data(ctx))  # equivalent to np.dot(h_relu, w2)
     return y_pred
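Example #8 reads self.w1 and self.w2, which must be created by the enclosing Block. A minimal sketch of such a container in MXNet 1.x Gluon style (the class name TwoLayerNet and the sizes D_in, H, D_out are assumptions, not part of the original code):

from mxnet import gluon, npx
from mxnet.gluon import nn
npx.set_np()

class TwoLayerNet(nn.Block):
    # Hypothetical container for the forward() shown in Example #8
    def __init__(self, D_in, H, D_out, **kwargs):
        super().__init__(**kwargs)
        # Raw weight matrices, matching the explicit dot products in forward()
        self.w1 = self.params.get('w1', shape=(D_in, H))
        self.w2 = self.params.get('w2', shape=(H, D_out))

After net = TwoLayerNet(D_in, H, D_out) and net.initialize(ctx=ctx), calling net(x) runs the forward pass above.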
Example #9
    def forward(self, _layer_lo, _layer_hi):

        up = self.up(_layer_lo)
        up = FFx.relu(up)
        x = FF.concatenate([up, _layer_hi], axis=1)
        x = self.conv_normed(x)

        return x
Example #10
    def forward(self, _layer_lo, _layer_hi):

        up = mx.contrib.nd.BilinearResize2D(_layer_lo.as_nd_ndarray(),
                                            scale_height=2.,
                                            scale_width=2.)
        up = up.as_np_ndarray()
        up = self.conv1(up)
        up = FFx.relu(up)
        x = self.conv3(up, _layer_hi)

        return x
Example #11
    def forward(self, inputs: np.ndarray, previous_states: np.ndarray,
                *args) -> Tuple[np.ndarray, np.ndarray]:
        """
        :param inputs: input data. Shape: (max_length, batch, input_depth).
        :param previous_states: previous cell states. Shape: (max_length, batch, input_depth)
        :return: cell output and new cell states.  Both with shape (max_length, batch, input_depth).
        """
        forget_rates = self.forget_gate(inputs)
        weighted_inputs = (1 - forget_rates) * self.linear(inputs)

        cell_state, last_step_state = self.cell_state_transform(
            previous_states, weighted_inputs, forget_rates)

        return npx.relu(cell_state), last_step_state
Example #12
    def forward(self, input):

        # =========== UNet branch ===========
        out10 = self.conv_init_1(input)
        out1 = self.compr11(out10)
        out1 = FFx.relu(out1)
        #print (out1.shape)
        out1 = self.compr12(out1)
        out1 = FFx.relu(out1)
        #print (out1.shape)
        out1 = self.expand1(out1, out10)
        out1 = FFx.relu(out1)

        # =========== \capNet branch ===========
        out20 = self.conv_init_2(input)
        out2 = self.expand2(out20)
        out2 = FFx.relu(out2)
        out2 = self.compr21(out2)
        out2 = FFx.relu(out2)
        out2 = self.compr22(out2, out20)

        att = self.gamma1.data() * self.att(input)
        ratt122 = self.gamma2.data() * self.ratt122(out1, out2, out2)
        ratt211 = self.gamma3.data() * self.ratt211(out2, out1, out1)

        ones1 = FF.ones_like(out10)
        ones2 = FF.ones_like(input)

        # Enhanced output of 1, based on memory of 2
        out122 = out1 * (ones1 + ratt122)
        # Enhanced output of 2, based on memory of 1
        out211 = out2 * (ones1 + ratt211)

        out12 = self.collect(out122, out211)  # includes relu, it's for fusion

        out_res = (input + out12) * (ones2 + att)
        return out_res
Example #13
    def forward(self, input):

        conv1_first = self.conv_first(input)
 
        # ******** Going down ***************
        pools = conv1_first
        for idx in range(self.depth):
            conv1 = self.convs_dn[idx](pools)
            if idx < self.depth - 1:
                # Evaluate pools
                pools = self.pools[idx](conv1)
        # Middle PSP pooling
        middle = self.middle(conv1)
        # Activation of middle layer
        middle = FFx.relu(middle)
        #print (middle.shape)
        return middle
Example #14
    def forward(self, input_t1, input_t2):
        # These inputs must have the same dimensionality (t1 and t2)
        relatt12 = self.gamma1.data() * self.relatt12(input_t1, input_t2,
                                                      input_t2)
        relatt21 = self.gamma2.data() * self.relatt21(input_t2, input_t1,
                                                      input_t1)

        ones = FF.ones_like(input_t1)

        # Enhanced output of 1, based on memory of 2
        out12 = input_t1 * (ones + relatt12)
        # Enhanced output of 2, based on memory of 1
        out21 = input_t2 * (ones + relatt21)

        fuse = self.fuse(FF.concatenate([out12, out21], axis=1))
        fuse = FFx.relu(fuse)

        return fuse
Example #15
 def forward(self, user_id, seq, item_id):
     item_embs = np.expand_dims(self.Q(seq), 1)
     user_emb = self.P(user_id)
     out, out_h, out_v, out_hs = None, None, None, []
     if self.d_prime:
         out_v = self.conv_v(item_embs)
         out_v = out_v.reshape(out_v.shape[0], self.fc1_dim_v)
     if self.d:
         for conv, maxp in zip(self.conv_h, self.max_pool):
             conv_out = np.squeeze(npx.relu(conv(item_embs)), axis=3)
             t = maxp(conv_out)
             pool_out = np.squeeze(t, axis=2)
             out_hs.append(pool_out)
         out_h = np.concatenate(out_hs, axis=1)
     out = np.concatenate([out_v, out_h], axis=1)
     z = self.fc(self.dropout(out))
     x = np.concatenate([z, user_emb], axis=1)
     q_prime_i = np.squeeze(self.Q_prime(item_id))
     b = np.squeeze(self.b(item_id))
     res = (x * q_prime_i).sum(1) + b
     return res
Example #16
    def forward(self, input):

        # =========== UNet branch ===========
        out10 = self.conv_init_1(input)
        out1 = self.compr11(out10)
        out1 = FFx.relu(out1)
        out1 = self.compr12(out1)
        out1 = FFx.relu(out1)
        out1 = self.expand1(out1, out10)
        out1 = FFx.relu(out1)

        # =========== \capNet branch ===========

        out20 = self.conv_init_2(input)
        out2 = self.expand2(out20)
        out2 = FFx.relu(out2)
        out2 = self.compr21(out2)
        out2 = FFx.relu(out2)
        out2 = self.compr22(FF.concatenate([out2, out20], axis=1))
        out2 = FFx.relu(out2)

        att = self.gamma1.data() * self.att(input)
        ratt122 = self.gamma2.data() * self.ratt122(out1, out2, out2)
        ratt211 = self.gamma3.data() * self.ratt211(out2, out1, out1)

        ones1 = FF.ones_like(out10)
        ones2 = FF.ones_like(input)

        # Enhanced output of 1, based on memory of 2
        out122 = out1 * (ones1 + ratt122)
        # Enhanced output of 2, based on memory of 1
        out211 = out2 * (ones1 + ratt211)

        out12 = FFx.relu(self.collect(FF.concatenate([out122, out211],
                                                     axis=1)))

        # Emphasize residual output from memory on input
        out_res = (input + out12) * (ones2 + att)
        return out_res
Example #17
 def forward(self, X):
     Y = npx.relu(self.bn1(self.conv1(X)))
     Y = self.bn2(self.conv2(Y))
     if self.conv3:
         X = self.conv3(X)
     return npx.relu(Y + X)
Example #18
 def forward(self, x):
     return -self._alpha * npx.relu(1.0 - np.exp(x)) + npx.relu(x)
Example #19
 def forward(self, x):
     x = npx.relu(self.fc1(x))
     x = npx.relu(self.fc2(x))
     return self.fc3(x)
Example #20
# 4.1.2 Activation Functions
from mxnet import autograd, np, npx
from d2l import mxnet as d2l

npx.set_np()

# Plot the ReLU function
x = np.arange(-8.0, 8.0, 0.1)
x.attach_grad()
with autograd.record():
    y = npx.relu(x)
d2l.plot(x, y, 'x', 'relu(x)', figsize = (5, 2.5))
y.backward()
d2l.plot(x, x.grad, 'x', 'grad of relu', figsize = (5, 2.5))

# Plot the sigmoid function
with autograd.record():
    y = npx.sigmoid(x)
d2l.plot(x, y, 'x', 'sigmoid(x)', figsize = (5, 2.5))
y.backward()
d2l.plot(x, x.grad, 'x', 'grad of sigmoid', figsize = (5, 2.5))

# Plot the tanh function
with autograd.record():
    y = np.tanh(x)  # npx doesn't have a tanh function
d2l.plot(x, y, 'x', 'tanh(x)', figsize = (5, 2.5))
y.backward()
d2l.plot(x, x.grad, 'x', 'grad of tanh', figsize = (5, 2.5))

# Calculate the derivative of the pReLU activation function
#with autograd.record():
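A possible completion of the commented-out pReLU block above, assuming a fixed slope alpha = 0.25 purely for illustration (in a real pReLU the slope is a learned parameter); pReLU(x) = max(0, x) + alpha * min(0, x):

alpha = 0.25  # assumed fixed negative slope for this sketch
with autograd.record():
    y = npx.relu(x) + alpha * (x - npx.relu(x))  # x for x > 0, alpha * x otherwise
d2l.plot(x, y, 'x', 'prelu(x)', figsize = (5, 2.5))
y.backward()
d2l.plot(x, x.grad, 'x', 'grad of prelu', figsize = (5, 2.5))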