def test_np_reshape():
    # TODO(junwu): Add more test cases
    data = mx.sym.var('a').as_np_ndarray()
    ret = data.reshape(shape=())
    assert type(ret) == mx.sym.np._Symbol

    data = np.ones((1, 1, 1))
    ret = np.reshape(data, ())
    assert ret.shape == ()
    ret = np.reshape(ret, (1, 1, 1, 1))
    assert ret.shape == (1, 1, 1, 1)
    assert type(ret) == np.ndarray
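
# The assertions above mirror classic NumPy semantics. A minimal stand-alone
# check against the reference implementation (assuming only stock NumPy,
# imported as _np to match the convention used elsewhere in these snippets):
import numpy as _np

data = _np.ones((1, 1, 1))
ret = _np.reshape(data, ())            # unit dims collapse to a 0-d scalar
assert ret.shape == ()
ret = _np.reshape(ret, (1, 1, 1, 1))   # and the scalar expands back to unit dims
assert ret.shape == (1, 1, 1, 1)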
def forward(self, x):
    embed_x = self.embedding(x)
    square_of_sum = np.sum(embed_x, axis=1) ** 2
    sum_of_square = np.sum(embed_x ** 2, axis=1)
    inputs = np.reshape(embed_x, (-1, self.embed_output_dim))
    x = self.linear_layer(self.fc(x).sum(1)) \
        + 0.5 * (square_of_sum - sum_of_square).sum(1, keepdims=True) \
        + self.mlp(inputs)
    x = npx.sigmoid(x)
    return x
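
# For context, a minimal sketch of a constructor that would supply the
# attributes this DeepFM-style forward pass relies on (embedding, fc,
# linear_layer, mlp, embed_output_dim). The class name, layer sizes, and
# argument names here are assumptions, not the original model definition:
from mxnet.gluon import nn

class DeepFM(nn.Block):
    def __init__(self, field_dims, num_factors, mlp_dims, drop_rate=0.1):
        super().__init__()
        num_inputs = int(sum(field_dims))
        self.embedding = nn.Embedding(num_inputs, num_factors)  # FM second-order factors
        self.fc = nn.Embedding(num_inputs, 1)                   # FM first-order weights
        self.linear_layer = nn.Dense(1, use_bias=True)
        self.embed_output_dim = len(field_dims) * num_factors   # width of the flattened MLP input
        self.mlp = nn.Sequential()
        for dim in mlp_dims:
            self.mlp.add(nn.Dense(dim, activation='relu'))
            self.mlp.add(nn.Dropout(rate=drop_rate))
        self.mlp.add(nn.Dense(1))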
def __call__(self, scores, offset):
    """
    Get the lowest k elements per sentence from a `scores` matrix.

    :param scores: Vocabulary scores for the next beam step.
        Shape: (batch_size * beam_size, target_vocabulary_size).
    :param offset: Array to add to the hypothesis indices for offsetting in
        batch decoding.
    :return: The row indices, column indices, and values of the k smallest
        items in the matrix.
    """
    batch_times_beam, vocab_size = scores.shape
    batch_size = int(batch_times_beam / self.k)
    # Shape: (batch_size, beam_size * vocab_size)
    batchwise_scores = np.reshape(scores, (batch_size, self.k * vocab_size))
    indices, values = super().__call__(batchwise_scores)
    best_hyp_indices, best_word_indices = np.unravel_index(
        indices, shape=(batch_size * self.k, vocab_size))
    if batch_size > 1:
        # Offset the hypothesis indices to match the shape of the scores matrix.
        best_hyp_indices = best_hyp_indices + offset
    return best_hyp_indices, best_word_indices, values
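
# A plain-NumPy walk-through of the index arithmetic above, using argsort as a
# stand-in for the parent class's top-k (the parent call itself is not shown
# here). Toy sizes are assumptions: batch_size=2, k(beam_size)=2, vocab_size=3.
import numpy as _np

k, vocab_size = 2, 3
scores = _np.array([[9., 1., 8.],
                    [7., 6., 0.],
                    [2., 5., 4.],
                    [3., 9., 1.]])          # (batch_size * k, vocab_size)
batch_size = scores.shape[0] // k

# Fold each sentence's k beams into one row so top-k runs per sentence.
batchwise = scores.reshape(batch_size, k * vocab_size)
flat = _np.argsort(batchwise, axis=1)[:, :k].reshape(-1)   # k smallest per row

# Unravel flat positions into (beam-within-sentence, word id), then offset the
# beam index by each sentence's base row to index into the original matrix.
hyp, word = _np.unravel_index(flat, (batch_size * k, vocab_size))
offset = _np.repeat(_np.arange(0, batch_size * k, k), k)   # [0, 0, 2, 2]
hyp = hyp + offset
assert hyp.tolist() == [1, 0, 3, 2] and word.tolist() == [2, 1, 2, 0]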
def test_np_reshape():
    class TestReshape(HybridBlock):
        def __init__(self, newshape):
            super(TestReshape, self).__init__()
            self._newshape = newshape

        def hybrid_forward(self, F, a):
            return F.np.reshape(a, self._newshape)

    shape_pairs = [((2, 6), (6, 2)),
                   ((2, 6), (3, 4)),
                   ((1, 0), (0,)),
                   ((0, 0), (0,)),
                   ((), (1, 1, 1))]
    for hybridize in [True, False]:
        for shape_pair in shape_pairs:
            shape1, shape2 = shape_pair
            print(shape1, shape2)
            test_reshape = TestReshape(shape2)
            if hybridize:
                test_reshape.hybridize()
            x = rand_ndarray(shape1).as_np_ndarray()
            x.attach_grad()
            np_out = _np.reshape(x.asnumpy(), shape2)
            with mx.autograd.record():
                mx_out = test_reshape(x)
            assert mx_out.shape == np_out.shape
            assert_almost_equal(mx_out.asnumpy(), np_out, rtol=1e-3, atol=1e-5,
                                use_broadcast=False)
            mx_out.backward()
            np_backward = _np.ones(shape1)
            assert_almost_equal(x.grad.asnumpy(), np_backward, rtol=1e-3,
                                atol=1e-5, use_broadcast=False)

            mx_out = np.reshape(x, shape2)
            np_out = _np.reshape(x.asnumpy(), shape2)
            assert_almost_equal(mx_out.asnumpy(), np_out, rtol=1e-3, atol=1e-5,
                                use_broadcast=False)
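
# Note: running every shape pair both with and without hybridize() exercises
# the imperative and the deferred (symbolic) execution paths. The zero-size
# pairs ((1, 0) -> (0,), (0, 0) -> (0,)) and the scalar round trip
# (() -> (1, 1, 1)) are presumably included because such shapes are only
# representable under MXNet's NumPy-shape semantics.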
def forward(self, source_encoded: np.ndarray,
            source_encoded_length: np.ndarray) -> np.ndarray:
    """
    Transformation to the length ratio. Returns a vector.

    :param source_encoded: Encoder representation for n elements.
        Shape: (n, source_encoded_length, hidden_size).
    :param source_encoded_length: A vector of encoded sequence lengths.
        Shape: (n,).
    :return: Predictions of the ratio length(hypothesis)/length(reference).
        Shape: (n,).
    """
    # source_masked: (n, source_encoded_length, hidden_size)
    source_masked = npx.sequence_mask(source_encoded,
                                      axis=1,
                                      sequence_length=source_encoded_length,
                                      use_sequence_length=True,
                                      value=0.)
    # Calculate the proper means of encoded sources.
    # data: (n, hidden_size)
    data = np.sum(source_masked, axis=1, keepdims=False) / np.reshape(
        source_encoded_length, (-1, 1))
    # MLP. Shape: (n, 1)
    data = self.layers(data)
    # Shape: (n,)
    return np.squeeze(data)
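
# A quick plain-NumPy check of the masked mean computed above (toy sizes are
# assumptions: n=2 sequences, max length 3, hidden_size 2; multiplying by a
# boolean mask stands in for npx.sequence_mask):
import numpy as _np

source_encoded = _np.arange(12, dtype=_np.float64).reshape(2, 3, 2)
lengths = _np.array([2, 3])

time_idx = _np.arange(source_encoded.shape[1])[None, :]             # (1, 3)
masked = source_encoded * (time_idx < lengths[:, None])[..., None]  # zero the padding

# Sum over time, then divide by the true lengths broadcast via (-1, 1).
mean = masked.sum(axis=1) / lengths.reshape(-1, 1)                  # (2, 2)
assert mean.shape == (2, 2)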
def forward(self, data, indices):
    mask = indices < 3
    data = npx.reshape(data, (-1, -2), reverse=True)
    mask = np.reshape(mask, (-1,))
    sel = nd.np._internal.boolean_mask(data, mask)
    return sel
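
# The boolean_mask call above selects the rows of `data` where `mask` is True.
# A plain-NumPy equivalent via boolean fancy indexing (toy values assumed):
import numpy as _np

data = _np.arange(12.).reshape(4, 3)
indices = _np.array([0, 2, 3, 5])
mask = (indices < 3).reshape(-1)   # one boolean per row of data
sel = data[mask]                   # keeps rows 0 and 1
assert sel.shape == (2, 3)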
def forward(self, scores):
    values, indices = npx.topk(scores, axis=1, k=self.k, ret_typ='both',
                               is_ascend=True, dtype='int32')
    # Project indices back into original shape (which is different for t==1 and t>1)
    values, indices = np.reshape(values, (-1, 1)), np.reshape(indices, (-1,))
    return indices, values
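
# Shape bookkeeping for the two reshapes above, checked in plain NumPy (the
# batch_size * beam_size = 4, k = 2 sizes are assumptions):
import numpy as _np

values = _np.zeros((4, 2))                     # top-k output: one row per beam
indices = _np.zeros((4, 2), dtype=_np.int32)
assert values.reshape(-1, 1).shape == (8, 1)   # one score per flattened hypothesis
assert indices.reshape(-1).shape == (8,)       # flat index stream consumed downstream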