Example 1
 def get_loss_info(prefix, e, batch_index, loss_value, cls_pack):
     # 1. Prefix
     info = prefix
     if e is not None:
         if batch_index is not None:
             info += 'Epoch[%-3d], batch[%-3d]. ' % (e, batch_index)
         else:
             info += 'Epoch[%-3d]. ' % e
     else:
         if batch_index is not None:
             info += 'Batch[%-3d]. ' % batch_index
     # 2. Losses
     info += utils.io.get_logger_info_loss(['L_value'], [loss_value])
     # 3. Metric
     out, label = cls_pack
     n, c, h, w = out.shape
     out = nd.swapaxes(
         nd.reshape(nd.swapaxes(out, 0, 1), shape=(c, n * h * w)), 0, 1)
     label = nd.swapaxes(
         nd.reshape(nd.swapaxes(label, 0, 1), shape=(c, n * h * w)), 0, 1)
     info += 'Acc: %.5f. ' % utils.io.get_cls_ap(out, label)
     # Return
     return info
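The nested swapaxes/reshape in step 3 flattens an (n, c, h, w) prediction map into an (n*h*w, c) matrix, so each row holds the class scores of a single pixel. A minimal, self-contained check of that trick (assumes only that mxnet is installed; the shapes are illustrative):

from mxnet import nd

n, c, h, w = 2, 3, 4, 5
x = nd.arange(n * c * h * w).reshape((n, c, h, w))
flat = nd.swapaxes(nd.reshape(nd.swapaxes(x, 0, 1), shape=(c, n * h * w)), 0, 1)
# Each row of `flat` now holds the c channel values of one spatial location.
assert flat.shape == (n * h * w, c)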
Example 2
 def sample(self, n_samples: int = 1) -> nd.NDArray:
     mean = self.get_param_not_repeated('mean')
     if n_samples == 1:
         res = nd.sample_poisson(mean)
     else:
         res = nd.sample_poisson(mean, n_samples)
         res = nd.transpose(res)
     if res.ndim == 3:
         return nd.swapaxes(res, 1, 2)
     elif res.ndim == 2:
         return res
     else:
         raise ValueError('Ambiguous sample shape.')
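sample_poisson appends the sample axis last (mean.shape + (n_samples,)), which is why the transpose and swapaxes are needed to put the samples axis first. A sketch of the axis bookkeeping, with hypothetical shapes:

from mxnet import nd

mean = nd.ones((4, 2))          # e.g. (batch, dim) Poisson rates
s = nd.sample_poisson(mean, 3)  # (4, 2, 3): samples land on the last axis
s = nd.transpose(s)             # (3, 2, 4): transpose reverses all axes
s = nd.swapaxes(s, 1, 2)        # (3, 4, 2): (n_samples, batch, dim)
assert s.shape == (3, 4, 2)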
Example 3
 def forward(self, x, *args):
     if isinstance(x, (list, tuple)):
         x = [self.forward(_x) for _x in x]
         x = utils.io.gather_to_the_first_context(x)
         return x
     # (n, 3, h, w) -> (n, c, h', w')
     x = self._conv1(x)
     x = self._conv2(x)
     x = self._conv3(x)
     x = self._conv4(x)
     x = self._conv5(x)
     # (n, c, h', w') -> (n*h'*w', c)
     n, c, h, w = x.shape
     x = nd.swapaxes(nd.reshape(nd.swapaxes(x, 0, 1), shape=(c, n * h * w)),
                     0, 1)
     # (n*h'*w', c) -> (n*h'*w', 40)
     x = self._fc1(x)
     x = self._fc2(x)
     x = self._fc3(x)
     # (n*h'*w', 40) -> (n, 40, h', w')
     x = nd.swapaxes(nd.reshape(nd.swapaxes(x, 0, 1), shape=(40, n, h, w)),
                     0, 1)
     # Return
     return x
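The reshape round trip applies the fully connected stack independently at every spatial position, which is exactly what a 1x1 convolution does. A sketch of that equivalent formulation (the layer below is an illustration, not the author's code):

from mxnet.gluon import nn

fc_as_conv = nn.Conv2D(channels=40, kernel_size=1)
# fc_as_conv(x) maps (n, c, h', w') -> (n, 40, h', w') with no swapaxes at all.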
Example 4
def get_data():
    batch_size = 30
    seq_length = 29
    # -1 here so we have enough characters for labels later
    num_samples = (len(time_numerical) - 1) // seq_length
    print(time_numerical[:seq_length * num_samples])
    dataset = one_hots(time_numerical[:seq_length * num_samples]).reshape(
        (num_samples, seq_length, vocab_size))
    num_batches = len(dataset) // batch_size
    print("num_batches : {0} {1}".format(num_batches, len(dataset)))
    train_data = dataset[:num_batches * batch_size].reshape(
        (num_batches, batch_size, seq_length, vocab_size))
    # swap batch_size and seq_length axis to make later access easier
    train_data = nd.swapaxes(train_data, 1, 2)

    labels = one_hots(time_numerical[1:seq_length * num_samples + 1])
    train_label = labels.reshape(
        (num_batches, batch_size, seq_length, vocab_size))
    train_label = nd.swapaxes(train_label, 1, 2)

    return train_data, train_label
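After the swap the layout is (num_batches, seq_length, batch_size, vocab_size), so one time step of a whole batch is a single slice. A toy check (assumes only mxnet; the sizes are illustrative):

from mxnet import nd

toy = nd.zeros((2, 3, 4, 5))   # (num_batches, batch_size, seq_length, vocab)
toy = nd.swapaxes(toy, 1, 2)   # (num_batches, seq_length, batch_size, vocab)
step = toy[0, 1]               # batch 0, time step 1, for every sequence
assert step.shape == (3, 5)    # (batch_size, vocab_size)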
Example 5
def gather(self, dim, index):
    """
    Gathers values along an axis specified by ``dim``.

    For a 3-D tensor the output is specified by:
        out[i][j][k] = input[index[i][j][k]][j][k]  # if dim == 0
        out[i][j][k] = input[i][index[i][j][k]][k]  # if dim == 1
        out[i][j][k] = input[i][j][index[i][j][k]]  # if dim == 2

    Parameters
    ----------
    dim:
        The axis along which to index
    index:
        A tensor of indices of elements to gather

    Returns
    -------
    Output Tensor
    """
    idx_xsection_shape = index.shape[:dim] + index.shape[dim + 1:]
    self_xsection_shape = self.shape[:dim] + self.shape[dim + 1:]
    if idx_xsection_shape != self_xsection_shape:
        raise ValueError(
            "Except for dimension " + str(dim) +
            ", all dimensions of index and self should be the same size")
    if not np.issubdtype(index.dtype, np.integer):
        raise TypeError("The values of index must be integers")
    data_swapped = nd.swapaxes(self, 0, dim).asnumpy()
    index_swapped = nd.swapaxes(index, 0, dim).asnumpy()
    gathered = nd.from_numpy(np.choose(
        index_swapped, data_swapped)).as_in_context(d2l.try_gpu())
    return nd.swapaxes(gathered, 0, dim)
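Two notes worth keeping in mind. First, np.choose treats the first axis of data_swapped as the list of choices, and NumPy caps that list (historically at 32), so this gather only works while self.shape[dim] stays small. Second, the semantics match numpy's take_along_axis, which makes for a compact sanity check of the docstring formulas (a sketch, not the class's own API):

import numpy as np

inp = np.array([[1, 2], [3, 4]])
idx = np.array([[0, 0], [1, 0]])
out = np.take_along_axis(inp, idx, axis=1)  # out[i][j] = inp[i][idx[i][j]]
assert (out == np.array([[1, 1], [4, 3]])).all()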
Example 6
def test_swapaxes():
    b = create_2d_tensor(rows=LARGE_X, columns=SMALL_Y)
    t = nd.swapaxes(b, dim1=0, dim2=1)
    assert t.shape == (SMALL_Y, LARGE_X)
    assert np.sum(t[:, -1].asnumpy() == (LARGE_X - 1)) == b.shape[1]
Example 7
 def forward(self, cov_x):
     output, self.h = self.lstm(cov_x, self.h)
     # TODO: Can we avoid swapping?
     output = self.linear(nd.swapaxes(output, 0, 1))
     return output
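One answer to the TODO: gluon's LSTM defaults to layout='TNC' (time, batch, feature), while Dense treats the first axis as the batch, hence the swap. Constructing the LSTM batch-first would remove it; a sketch under that assumption (hidden_size is illustrative):

from mxnet.gluon import rnn

lstm = rnn.LSTM(hidden_size=128, layout='NTC')  # output is (batch, time, feat)
# With this layout, self.linear(output) needs no nd.swapaxes.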
Example 8
def decode_centernet_pose(heat, wh, kps, reg=None, hm_hp=None, hp_offset=None, K=100):
    batch, cat, height, width = heat.shape
    num_joints = kps.shape[1] // 2
    # perform nms on heatmaps
    heat = _nms(heat)
    scores, inds, clses, ys, xs = _topk(heat, K=K)

    kps = _tranpose_and_gather_feat(kps, inds)
    kps = nd.reshape(kps, (batch, K, num_joints * 2))

    kps[:, :, ::2] += nd.reshape(xs, (batch, K, 1)).broadcast_to((batch, K, num_joints))
    kps[:, :, 1::2] += nd.reshape(ys, (batch, K, 1)).broadcast_to((batch, K, num_joints))

    if reg is not None:
        reg = _tranpose_and_gather_feat(reg, inds)
        reg = nd.reshape(reg, (batch, K, 2))
        xs = xs.reshape((batch, K, 1)) + reg[:, :, 0:1]
        ys = ys.reshape((batch, K, 1)) + reg[:, :, 1:2]
    else:
        xs = xs.reshape((batch, K, 1)) + 0.5
        ys = ys.reshape((batch, K, 1)) + 0.5

    wh = _tranpose_and_gather_feat(wh, inds)
    wh = wh.reshape((batch, K, 2))
    clses = clses.reshape((batch, K, 1)).astype('float32')
    scores = scores.reshape((batch, K, 1))

    bboxes = nd.concat(xs - wh[:, :, 0:1] / 2,
                       ys - wh[:, :, 1:2] / 2,
                       xs + wh[:, :, 0:1] / 2,
                       ys + wh[:, :, 1:2] / 2,
                       dim=2)
    if hm_hp is not None:
        hm_hp = _nms(hm_hp)
        thresh = 0.1
        kps = kps.reshape((batch, K, num_joints, 2))
        kps = nd.swapaxes(kps, 1, 2) # b x J x K x 2

        reg_kps = nd.expand_dims(kps, axis=3).broadcast_to((batch, num_joints, K, K, 2))
        hm_score, hm_inds, hm_ys, hm_xs = _topk_channel(hm_hp, K=K) # b x J x K

        if hp_offset is not None:
            hp_offset = _tranpose_and_gather_feat(hp_offset, hm_inds.reshape((batch, -1)))
            hp_offset = hp_offset.reshape((batch, num_joints, K, 2))
            hm_xs = hm_xs + hp_offset[:, :, :, 0]
            hm_ys = hm_ys + hp_offset[:, :, :, 1]
        else:
            hm_xs = hm_xs + 0.5
            hm_ys = hm_ys + 0.5

        mask = (hm_score > thresh).astype('float32')
        hm_score = (1 - mask) * -1 + mask * hm_score
        hm_ys = (1 - mask) * (-10000) + mask * hm_ys
        hm_xs = (1 - mask) * (-10000) + mask * hm_xs

        hm_kps = nd.stack(hm_xs, hm_ys, axis=-1).expand_dims(axis=2).broadcast_to((batch, num_joints, K, K, 2))
        dist = (((reg_kps - hm_kps) ** 2).sum(axis=4) ** 0.5)
        min_dist = dist.min(axis=3) # b x J x K
        min_ind = nd.argmin(dist, axis=3) # b x J x K

        M, N, K = hm_score.shape[0:3]
        for i in range(M):
            for j in range(N):
                for k in range(K):
                    hm_score[i, j, k] = hm_score[i, j, min_ind[i, j, k]]
        hm_score = hm_score.expand_dims(axis=-1)
        min_dist = min_dist.expand_dims(-1)

        hm_kps = hm_kps.reshape((batch, num_joints, K, 2))
        for i in range(M):
            for j in range(N):
                for k in range(K):
                    hm_kps[i, j, k, 0] = hm_kps[i, j, min_ind[i, j, k], 0]
                    hm_kps[i, j, k, 1] = hm_kps[i, j, min_ind[i, j, k], 1]

        l = bboxes[:, :, 0].reshape((batch, 1, K, 1)).broadcast_to((batch, num_joints, K, 1))
        t = bboxes[:, :, 1].reshape((batch, 1, K, 1)).broadcast_to((batch, num_joints, K, 1))
        r = bboxes[:, :, 2].reshape((batch, 1, K, 1)).broadcast_to((batch, num_joints, K, 1))
        b = bboxes[:, :, 3].reshape((batch, 1, K, 1)).broadcast_to((batch, num_joints, K, 1))

        mask = (hm_kps[:, :, 0:1] < l) + (hm_kps[:, :, 0:1] > r)
        mask += (hm_kps[:, :, 1:2] < t) + (hm_kps[:, :, 1:2] > b)
        mask += (hm_score < thresh)
        mask += (min_dist > (nd.maximum(b - t, r - l) * 0.3))
        mask = (mask > 0).astype('float32').broadcast_to((batch, num_joints, K, 2))

        kps = (1 - mask) * hm_kps + mask * kps
        kps = nd.swapaxes(kps, 1, 2).reshape((batch, K, num_joints * 2))

    detections = nd.concat(bboxes, scores, kps, clses, dim=2)
    return detections
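Both sets of nested Python loops above implement a gather along the last axis (out[i, j, k] = in[i, j, min_ind[i, j, k]]). A vectorized sketch of the hm_score case via numpy, assuming the tensors fit on the CPU:

import numpy as np
from mxnet import nd

idx = min_ind.asnumpy().astype('int64')    # (batch, num_joints, K)
picked = np.take_along_axis(hm_score.asnumpy(), idx, axis=2)
hm_score = nd.array(picked)                # same result as the loops above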
Example 9
seq_length = 64
# -1 here so we have enough characters for labels later
num_samples = (len(time_numerical) - 1) // seq_length
dataset = one_hots(time_numerical[:seq_length * num_samples]).reshape(
    (num_samples, seq_length, vocab_size))
textify(dataset[0])

batch_size = 32

print('# of sequences in dataset: ', len(dataset))
num_batches = len(dataset) // batch_size
print('# of batches: ', num_batches)
train_data = dataset[:num_batches * batch_size].reshape(
    (batch_size, num_batches, seq_length, vocab_size))
# swap batch_size and seq_length axis to make later access easier
train_data = nd.swapaxes(train_data, 0, 1)
train_data = nd.swapaxes(train_data, 1, 2)
print('Shape of data set: ', train_data.shape)

for i in range(3):
    print("***Batch %s:***\n %s \n %s \n\n" %
          (i, textify(train_data[i, :, 0]), textify(train_data[i, :, 1])))

labels = one_hots(time_numerical[1:seq_length * num_samples + 1])
train_label = labels.reshape((batch_size, num_batches, seq_length, vocab_size))
train_label = nd.swapaxes(train_label, 0, 1)
train_label = nd.swapaxes(train_label, 1, 2)
print(train_label.shape)

print(textify(train_data[10, :, 3]))
print(textify(train_label[10, :, 3]))
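Note the difference from Example 4: here the reshape puts batch_size first and then swaps it away, so row i of batch t+1 is the direct continuation of row i of batch t, which is what lets hidden state carry across batches. A toy check of that index bookkeeping (assumes only mxnet):

from mxnet import nd

seqs = nd.arange(6).reshape((6, 1))   # six consecutive "sequences", batch_size=2
x = seqs.reshape((2, 3, 1))           # (batch_size, num_batches, ...)
x = nd.swapaxes(x, 0, 1)              # (num_batches, batch_size, ...)
print(x[:, 0, 0])                     # [0. 1. 2.]: row 0 chains across batches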
Example 10
def textify(embedding):
    result = ""
    indices = nd.argmax(embedding, axis=1).asnumpy()
    for idx in indices:
        result += character_list[int(idx)]
    return result

batch_size = 32
seq_length = 64
# -1 here so we have enough characters for labels later
num_samples = (len(time_numerical) - 1) // seq_length
dataset = one_hots(time_numerical[:seq_length*num_samples]).reshape((num_samples, seq_length, vocab_size))
num_batches = len(dataset) // batch_size
train_data = dataset[:num_batches*batch_size].reshape((num_batches, batch_size, seq_length, vocab_size))
# swap batch_size and seq_length axis to make later access easier
train_data = nd.swapaxes(train_data, 1, 2)

labels = one_hots(time_numerical[1:seq_length*num_samples+1])
train_label = labels.reshape((num_batches, batch_size, seq_length, vocab_size))
train_label = nd.swapaxes(train_label, 1, 2)

########################
#  allocate parameter
########################

num_inputs = vocab_size
num_hidden = 256
num_outputs = vocab_size

########################
#  Weights connecting the inputs to the hidden layer
Example 11
 #data = packer.readcmp(path)
 data = np.fromfile(path, dtype='<f', count=-1, sep='').reshape(-1, 25)
 data += source_mean
 data *= source_std
 data = data[:, 1:25]
 # Segment count, rounded up so a partial tail segment is included
 num_data_seg = (data.shape[0] + segment_length - 1) // segment_length
 for i in range(1, num_data_seg + 1):
     if data.shape[0] >= i * segment_length:
         data_seg = data[(i - 1) * segment_length:i * segment_length, :]
     else:
         data_seg = np.concatenate(
             (data[(i - 1) * segment_length:, :],
              np.zeros((i * segment_length - data.shape[0], feat_dim))),
             axis=0)
     assert (data_seg.shape == (segment_length, feat_dim))
     data_seg = nd.swapaxes(nd.array(data_seg), 1, 0)
     data_seg = nd.reshape(data_seg, (1, feat_dim, 1, -1))
     pred_seg = G_A(data_seg)
     pred_seg = nd.reshape(pred_seg, (feat_dim, -1))
     pred_seg = nd.swapaxes(pred_seg, 1, 0)
     pred_seg = pred_seg.asnumpy()
     pred_seg /= target_std[1:25]
     pred_seg -= target_mean[1:25]
     pred = pred_seg if i == 1 else np.concatenate(
         (pred, pred_seg), axis=0)
 pred = pred[:data.shape[0], :]
 assert (data.shape == pred.shape)
 mgc_feat = pred.tolist()  # list of per-frame feature rows
 writer.writecmp(savepath, mgc_feat)
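The normalization bookkeeping above implies the files store z = x / std - mean: reading applies x = (z + mean) * std, and writing applies the exact inverse in reverse order. A one-line check of that round trip (plain numpy, illustrative values):

import numpy as np

std, mean = 2.0, 0.5
x = np.array([1.0, 3.0])
z = x / std - mean                       # what the files store
assert np.allclose((z + mean) * std, x)  # read path inverts the write path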
Example 12
 def forward(self, x):
     return nd.swapaxes(x, self.dim1, self.dim2)
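This two-line forward is evidently the body of a tiny axis-swapping block; a sketch of a plausible enclosing class (the name and constructor are assumptions, not the author's code):

from mxnet import nd
from mxnet.gluon import nn

class SwapAxes(nn.Block):
    def __init__(self, dim1, dim2, **kwargs):
        super().__init__(**kwargs)
        self.dim1, self.dim2 = dim1, dim2

    def forward(self, x):
        return nd.swapaxes(x, self.dim1, self.dim2)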
Example 13
    def forward(self,
                data,
                features,
                features_in_offset,
                features_in_size,
                features_out_offset,
                features_out_size,
                weight_factor=None):
        """Returns convolved data at locations defined by features.

        Args:
            features: Input and output features concatenated along dimension=2.
            data: Input data corresponding to input pixels.
            features_in_offset: Offset of input feature positions in features.
            features_in_size: Number of input pixels.
            features_out_offset: Offset of output feature positions in features.
            features_out_size: Number of output pixels.
            weight_factor: Optional parameter that scales convolution weights.
        """

        if self.lattice_size is None:
            self.lattice_size = (features_in_size + features_out_size) * 10

        current_context = data.context
        if current_context != mx.cpu():
            # Push features and data to cpu as there is no gpu implementation
            # for lattice and splat.
            features = features.copyto(mx.cpu())
            data = data.copyto(mx.cpu())

        barycentric, offset, blur_neighbors = nd.permutohedral_lattice(
            nd.swapaxes(features, 1, 2),
            neighborhood_size=self.neighborhood_size,
            lattice_size=self.lattice_size)
        data_lattice = nd.permutohedral_splat(
            data,
            barycentric,
            offset,
            features_in_offset=features_in_offset,
            features_in_size=features_in_size,
            lattice_size=self.lattice_size)

        if self.convolution_block is None:
            data_blurred = data_lattice
        else:
            if current_context != mx.cpu():
                # Push data and blur_neighbors to gpu to perform convolution.
                data_lattice = data_lattice.copyto(current_context)
                blur_neighbors = blur_neighbors.copyto(current_context)

            data_blurred = self.convolution_block(data_lattice, blur_neighbors,
                                                  weight_factor)

        if current_context != mx.cpu():
            # Push data to cpu as there is no gpu implementation of slice.
            data_blurred = data_blurred.copyto(mx.cpu())

        data_out = nd.permutohedral_slice(
            data_blurred,
            barycentric,
            offset,
            features_out_offset=features_out_offset,
            features_out_size=features_out_size)

        if self.normalization_type is not None:
            normalization = get_normalization(
                barycentric, offset, blur_neighbors, self.normalization_block,
                weight_factor, features_in_offset, features_in_size,
                features_out_offset, features_out_size, self.lattice_size,
                data_lattice.shape[1])
            # Set normalization value to 1 for zero entries.
            normalization = normalization + (normalization == 0)
            data_out = data_out / normalization

        if current_context != mx.cpu():
            # Push output data back to original context.
            data_out = data_out.copyto(current_context)

        return data_out
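The repeated copyto round trips above are the standard pattern when some operators only have a CPU implementation; a minimal sketch of that pattern in isolation (the helper name is hypothetical):

import mxnet as mx

def run_on_cpu(op, x):
    """Run `op` on the CPU and return the result on x's original context."""
    ctx = x.context
    if ctx == mx.cpu():
        return op(x)
    return op(x.copyto(mx.cpu())).copyto(ctx)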