Example #1
    def symeig_svd(self, matrix, n_eigenvecs=None):
        """Computes a truncated SVD on `matrix` using symeig

            Uses symeig on matrix.T.dot(matrix) or its transpose

        Parameters
        ----------
        matrix : 2D-array
        n_eigenvecs : int, optional, default is None
            if specified, number of eigenvectors/eigenvalues to return

        Returns
        -------
        U : 2D-array
            of shape (matrix.shape[0], n_eigenvecs)
            contains the left singular vectors of `matrix`
        S : 1D-array
            of shape (n_eigenvecs, )
            contains the singular values of `matrix`
        V : 2D-array
            of shape (n_eigenvecs, matrix.shape[1])
            contains the right singular vectors of `matrix`
        """
        # Check that matrix is... a matrix!
        if self.ndim(matrix) != 2:
            raise ValueError('matrix should be a matrix. matrix.ndim is %d != 2' %
                             self.ndim(matrix))

        dim_1, dim_2 = self.shape(matrix)
        if dim_1 <= dim_2:
            min_dim = dim_1
            max_dim = dim_2
        else:
            min_dim = dim_2
            max_dim = dim_1

        if n_eigenvecs is None:
            n_eigenvecs = max_dim

        if min_dim <= n_eigenvecs:
            if n_eigenvecs > max_dim:
                warnings.warn(
                    'Trying to compute SVD with n_eigenvecs={0}, which '
                    'is larger than max(matrix.shape)={1}. Setting '
                    'n_eigenvecs to {1}'.format(n_eigenvecs, max_dim))
                n_eigenvecs = max_dim
            # we compute decomposition on the largest of the two to keep more eigenvecs
            dim_1, dim_2 = dim_2, dim_1

        if dim_1 < dim_2:
            U, S = nd.linalg.syevd(dot(matrix, transpose(matrix)))
            S = self.sqrt(S)
            V = dot(transpose(matrix), U / reshape(S, (1, -1)))
        else:
            V, S = nd.linalg.syevd(dot(transpose(matrix), matrix))
            S = self.sqrt(S)
            U = dot(matrix, V) / reshape(S, (1, -1))

        U, S, V = U[:, ::-1], S[::-1], transpose(V)[::-1, :]
        return U[:, :n_eigenvecs], S[:n_eigenvecs], V[:n_eigenvecs, :]
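
A sanity check in plain NumPy (my sketch, not part of the backend above): the singular values obtained via the gram matrix A^T A agree with a direct SVD.

import numpy as np

A = np.random.rand(5, 3)
# eigenvalues of A^T A are the squared singular values (ascending -> reverse to descending)
S_gram = np.sqrt(np.maximum(np.linalg.eigvalsh(A.T @ A), 0.0))[::-1]
S_svd = np.linalg.svd(A, compute_uv=False)
assert np.allclose(S_gram, S_svd)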
Example #2
    def forward(self, data, neighbor_data, neighbor_indices, neighbor_indptr,
                       node_type_mask=None, neighbor_type_mask=None, edge_type_mask=None, seg_indices=None):
        """Map the input features to hidden states + apply pooling + apply FC

        Parameters
        ----------
        F
        data : Symbol or NDArray
            Shape (batch_size, node_num, feat_dim)
        neighbor_data : Symbol or NDArray
            Shape (batch_size, neighbor_node_num, feat_dim)
        data_mask :  Symbol or NDArray
            Shape (batch_size, node_num, num_set, 1)
        neighbor_mask : Symbol or NDArray
            Shape (batch_size, neighbor_node_num, num_set, 1)
        neighbor_indices : Symbol or NDArray
            Shape (nnz, )
        neighbor_indptr : Symbol or NDArray
            Shape (node_num + 1, )
        edge_data : Symbol or NDArray or None
            Shape (batch_size, nnz, num_edge_num, 1)

        Returns
        -------

        """
        ## TODO: does not consider node type
        if self._num_node_set is not None:
            data = self.data_map(data, node_type_mask)
            neighbor_data = self.neighbor_mid_map(neighbor_data, neighbor_type_mask)
        if self._num_edge_set is not None:
            neighbor_data = self.relation_W(neighbor_data)  ### (batch_size, neighbor_node_num, mid_units*num_edge_set)
            neighbor_data = nd.take(neighbor_data, indices=neighbor_indices, axis=-2)  ## (batch_size, nnz, mid_units*num_edge_set)
            neighbor_data = nd.reshape(neighbor_data,
                                       shape=(0, 0, self._num_edge_set, self._mid_units))  ## (batch_size, nnz, num_edge_set, mid_units)
            neighbor_data = nd.reshape(nd.broadcast_mul(neighbor_data, edge_type_mask),
                                       shape=(0, 0, -1))  ## (batch_size, nnz, mid_units*num_edge_set)

        pool_data = nd.contrib.seg_pool(data=neighbor_data,
                                       indices=seg_indices,
                                       indptr=neighbor_indptr,
                                       pool_type=self._pool_type)  # Shape(batch_size, node_num, mid_units*num_edge_set)
        if self._num_edge_set is not None:
            if self._accum_type == "stack":
                pool_data = self._out_act(pool_data)
            elif self._accum_type == "sum":
                pool_data = self._out_act(nd.sum(nd.reshape(pool_data, shape=(0, 0, self._num_edge_set, self._mid_units )), axis=2))

        #out = self.out_layer(nd.concat(pool_data, data, dim=-1))
        #out = self.out_layer(pool_data)
        return pool_data
Example #3
def _rearrange(raw, F, upscale_factor):
    # (N, C * r^2, H, W) -> (N, C, r^2, H, W)
    splitted = F.reshape(raw, shape=(0, -4, -1, upscale_factor**2, 0, 0))
    # (N, C, r^2, H, W) -> (N, C, r, r, H, W)
    unflatten = F.reshape(splitted, shape=(0, 0, -4, upscale_factor, upscale_factor, 0, 0))
    # (N, C, r, r, H, W) -> (N, C, H, r, W, r)
    swapped = F.transpose(unflatten, axes=(0, 1, 4, 2, 5, 3))
    # (N, C, H, r, W, r) -> (N, C, H*r, W*r)
    return F.reshape(swapped, shape=(0, 0, -3, -3))
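
A quick usage sketch (mine, not from the source): _rearrange is a pixel shuffle, mapping (N, C*r^2, H, W) to (N, C, H*r, W*r); passing mx.nd as F runs it imperatively.

import mxnet as mx

x = mx.nd.arange(16).reshape((1, 4, 2, 2))   # N=1, C=1, r=2, so C*r^2=4
y = _rearrange(x, mx.nd, upscale_factor=2)
print(y.shape)                               # (1, 1, 4, 4)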
Example #5
File: core.py Project: gokererdogan/Papers
    def generate(self, x: nd.NDArray = None, include_intermediate: bool = False, **kwargs) -> \
            Union[nd.NDArray, Tuple[nd.NDArray, nd.NDArray]]:
        """
        Generate a batch of samples from model. See Section 2.3 in paper.

        If x is None, this method generates unconditional samples from the model (as explained in Section 2.3 in the
        paper).

        If x is provided, this method reconstructs the input to generate the sample. This is not really a true sample
        from the model because the model looks at the image it is trying to generate. However, this is useful for seeing
        how the model generates a particular image.

        :param x: Input to generate images from.
        :param include_intermediate: If True, samples from all timesteps (not only the last timestep) are returned.
        :return: n x *image_shape array of generated samples. If include_intermediate is True,
            then steps x n x *image_shape.
        """
        r = nd.zeros((self._batch_size, *self._input_shape), ctx=self._ctx)  # reconstruction
        h_dec = nd.zeros((self._batch_size, *self._rnn_hidden_shape), ctx=self._ctx)
        c_dec = nd.zeros((self._batch_size, *self._rnn_hidden_shape), ctx=self._ctx)

        if x is not None:
            h_enc = nd.zeros((self._batch_size, *self._rnn_hidden_shape), ctx=self._ctx)
            c_enc = nd.zeros((self._batch_size, *self._rnn_hidden_shape), ctx=self._ctx)
            encoded_x = self._enc_nn(x)

        rs = []  # sample(s) over time

        for i in range(self._num_steps):
            rs.append(nd.sigmoid(r))
            encoded_r = self._enc_nn(rs[-1])
            if x is not None:
                err = encoded_x - encoded_r
                _, (h_enc, c_enc) = self._enc_rnn(nd.concat(encoded_x, err, h_dec, c_dec, dim=1), [h_enc, c_enc])

                q = self._q_layer(h_enc)
                # convert NxCxHxW to NxF
                q = nd.reshape(q, (self._batch_size, -1))
                z = self._latent_layer(q)
            else:
                # sample from prior
                p = self._p_layer(h_dec)
                p = nd.reshape(p, (self._batch_size, -1))
                z = self._latent_layer(p)

            dec_z = nd.reshape(z, (self._batch_size, self._num_latent_maps, *self._encoder_output_shape[1:]))
            _, (h_dec, c_dec) = self._dec_rnn(nd.concat(dec_z, encoded_r, dim=1), [h_dec, c_dec])
            r = r + self._dec_nn(h_dec)

        rs.append(nd.sigmoid(r))

        if include_intermediate:
            samples = nd.stack(*rs, axis=0)
        else:
            samples = rs[-1]

        return samples
Example #6
    def generate(self,
                 v_q: nd.NDArray,
                 x_context: nd.NDArray,
                 v_context: nd.NDArray,
                 include_intermediate: bool = False,
                 **kwargs) -> Union[nd.NDArray, Tuple[nd.NDArray, nd.NDArray]]:
        """
        Generate a batch of samples from model. See Algorithm S3 in paper.

        :param v_q: Query view camera info.
        :param x_context: Context frames.
        :param v_context: Context camera info.
        :param include_intermediate: If True, samples from all timesteps (not only the last timestep) are returned.
        :return: n x *image_shape array of generated samples. If include_intermediate is True,
            then steps x n x *image_shape.
        """
        u = nd.zeros((self._batch_size, *self._upsample_output_shape),
                     ctx=self._ctx)  # canvas (reconstruction)
        h_dec = nd.zeros((self._batch_size, *self._rnn_hidden_shape),
                         ctx=self._ctx)
        c_dec = nd.zeros((self._batch_size, *self._rnn_hidden_shape),
                         ctx=self._ctx)

        # reshape camera information so we can concat it to image data
        v_q = nd.broadcast_to(
            nd.expand_dims(nd.expand_dims(v_q, axis=-1), axis=-1),
            (0, 0, *self._downsample_output_shape[1:]))

        outs = []  # sample(s) over time

        r = self._representation_nn(x_context, v_context)
        for i in range(self._num_steps):
            outs.append(self._out_layer(u))

            # Eq. S11
            p = self._p_layer(h_dec)
            p = nd.reshape(p, (self._batch_size, -1))
            z = self._latent_layer(p)

            gen_z = nd.reshape(z, (self._batch_size, self._num_latent_maps,
                                   *self._downsample_output_shape[1:]))
            _, (h_dec, c_dec) = self._gen_rnn(nd.concat(gen_z, v_q, r, dim=1),
                                              [h_dec, c_dec])

            u = u + self._upsample_nn(h_dec)

        outs.append(self._out_layer(u))

        if include_intermediate:
            samples = nd.stack(*outs, axis=0)
        else:
            samples = outs[-1]

        return nd.clip(samples, a_min=0.0, a_max=1.0)
Example #7
File: net.py Project: anzhao0503/gluon-cv
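 # NOTE: `F` below is assumed to be mxnet.ndarray imported at module scope; it is not defined in this snippet.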
 def forward(self, x):
     batch_size, c, h, w = x.shape
     if batch_size is None:
         batch_size = -1
     out_c = c // (self.up_scale * self.up_scale)
     out_h = h * self.up_scale
     out_w = w * self.up_scale
     out = F.reshape(
         x, (batch_size, self.up_scale, self.up_scale, out_c, h, w))
     out = F.transpose(out, (0, 3, 4, 1, 5, 2))
     out = F.reshape(out, (batch_size, out_c, out_h, out_w))
     return out
Example #8
File: reshaper.py Project: roya0045/cvar2
    def var(array, W=_W, B=None, square=0, sqrt=0, V=False, order='NCHW', sizz=0):
        arrs = array.shape
        ashp = W.shape
        xi = (-2, -1)
        x2 = (-2, -1, -3)
        sb = (ashp[1], 1, 1)
        WV = ashp[-2:]
        print(sb)

        mnc = mnd.tile(mnd.reshape(mnd.array([WV[0] * WV[1]]), shape=(1, 1, 1)), ashp[1])
        print(mnc)

        if V:
            print(W)  # NDArray has no .eval(); print the array directly
        print(arrs, ashp)
        mul = mnd.broadcast_mul(array, W)
        if V:
            print('Wsamp', W[-1, -1])
            print('array*w', mul[0, -1])
        size = mnd.sum(W, axis=xi, keepdims=True)  # shape = (outputs, channel)
        if V:
            print("sizesamp", size.shape, size)
        if B is None:
            B = mnd.zeros(W.shape[0:2], dtype=np.float32)  # channel
        B = mnd.reshape(B, (*B.shape, *[1 for _ in range(len(ashp) - len(B.shape))]))
        if sizz == 1:
            mean = mnd.sum(mul, axis=xi, keepdims=True) / size
        else:
            mean = mnd.sum(mul, axis=xi, keepdims=True) / mnc
        if V:
            print("meansamp", mean[0, -1])
        if square:
            i = mnd.square(mnd.broadcast_add(mnd.broadcast_minus(mul, mean), B))
        else:
            i = mnd.broadcast_add(mnd.broadcast_minus(mul, mean), B)
        di = i / size
        if V == 2:
            print("i", i, "i")
            print("di", di, "di")
        if V:
            print('isamp', i.shape, i[-1, -1, ])
        out = mnd.sum(mnd.broadcast_add(i, B), axis=x2)
        # out = np.rollaxis(np.sum(i + B, axis=x2), -1, 1)
        if sqrt:
            out = mnd.sqrt(out)
        out = mnd.swapaxes(out, 3, 1)
        assert out.shape == (arrs[0], ashp[0], arrs[1], arrs[2])
        return out
Example #9
 def hybrid_forward(self, F, x, *args, **kwargs):
     # batch_size, channels, height, width = x.shape
     # assert channels % 2 == 0
     # mid_channels = channels // 2
     data = F.reshape(x, shape=(0, -4, self.groups, -1, -2))  # (N, groups, C/groups, H, W)
     data = F.swapaxes(data, 1, 2)                            # (N, C/groups, groups, H, W)
     data = F.reshape(data, shape=(0, -3, -2))                # (N, C, H, W), channels shuffled
     data_project = F.slice(data,
                            begin=(None, None, None, None),
                            end=(None, self.mid_channel, None, None))
     data_x = F.slice(data,
                      begin=(None, self.mid_channel, None, None),
                      end=(None, None, None, None))
     return data_project, data_x
Example #10
def rgb_to_lab(image_srgb, ctx=None):

    if ctx is None:
        raise ValueError("ctx can not be None")

    if image_srgb is None:
        raise ValueError("image_srgb can not be None")

    with mx.Context(ctx):

        srgb = __check_image(image_srgb)

        if nd.max(srgb).asscalar() > 1:
            srgb = __normalize_rgb_image(srgb)

        srgb_pixels = nd.reshape(srgb, [-1, 3])

        linear_mask = nd.cast(srgb_pixels <= 0.04045, dtype='float32')
        exponential_mask = nd.cast(srgb_pixels > 0.04045, dtype='float32')
        rgb_pixels = (srgb_pixels / 12.92 * linear_mask) + (((srgb_pixels + 0.055) / 1.055) ** 2.4) * exponential_mask
        rgb_to_xyz = nd.array([
            #    X        Y          Z
            [0.412453, 0.212671, 0.019334],  # R
            [0.357580, 0.715160, 0.119193],  # G
            [0.180423, 0.072169, 0.950227],  # B
        ])
        xyz_pixels = nd.linalg_gemm2(rgb_pixels, rgb_to_xyz)

        # https://en.wikipedia.org/wiki/Lab_color_space#CIELAB-CIEXYZ_conversions
        # convert to fx = f(X/Xn), fy = f(Y/Yn), fz = f(Z/Zn)
        # normalize for D65 white point
        xyz_normalized_pixels = nd.multiply(xyz_pixels, nd.array([1 / 0.950456, 1.0, 1 / 1.088754]))

        epsilon = 6 / 29
        linear_mask = nd.cast(xyz_normalized_pixels <= (epsilon ** 3), dtype='float32')
        exponential_mask = nd.cast(xyz_normalized_pixels > (epsilon ** 3), dtype='float32')
        fxfyfz_pixels = (xyz_normalized_pixels / (3 * epsilon ** 2) + 4 / 29) * linear_mask + \
                        (xyz_normalized_pixels ** (1 / 3)) * exponential_mask

        # convert to lab
        fxfyfz_to_lab = nd.array([
            #  l       a       b
            [0.0, 500.0, 0.0],  # fx
            [116.0, -500.0, 200.0],  # fy
            [0.0, 0.0, -200.0],  # fz
        ])
        lab_pixels = nd.linalg_gemm2(fxfyfz_pixels, fxfyfz_to_lab) + nd.array([-16.0, 0.0, 0.0])

        return nd.reshape(lab_pixels, srgb.shape)
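
A usage sketch (mine; assumes the __check_image helper shown further below lives in the same module): a single white sRGB pixel should map to roughly L=100, a=0, b=0.

import mxnet as mx
from mxnet import nd

white = nd.ones((1, 1, 3))          # one white pixel, H x W x 3
lab = rgb_to_lab(white, ctx=mx.cpu())
print(lab)                          # approx. [[[100., 0., 0.]]]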
Example #11
File: compile.py Project: tzechienchu/tvm
def verify_loaded_model(net):
    """Run inference using ten random images.
    Print both input and output of the model"""

    def transform(data, label):
        return data.astype(np.float32)/255, label.astype(np.float32)

    # Load ten random images from the test dataset
    sample_data = mx.gluon.data.DataLoader(mx.gluon.data.vision.MNIST(train=False, transform=transform),
                                  10, shuffle=True)

    for data, label in sample_data:

        # Display the images
        img = nd.transpose(data, (1,0,2,3))
        img = nd.reshape(img, (28,10*28,1))
        imtiles = nd.tile(img, (1,1,3))
        plt.imshow(imtiles.asnumpy())
        plt.show()

        # Display the predictions
        data = nd.transpose(data, (0, 3, 1, 2))
        out = net(data.as_in_context(ctx))
        predictions = nd.argmax(out, axis=1)
        print('Model predictions: ', predictions.asnumpy())

        break
Example #12
def merge(conv_w, gamma, beta, running_mean, running_var):
    gamma_over_var = gamma / nd.sqrt(running_var + 1e-5)
    gamma_over_var_expanded = nd.reshape(gamma_over_var,
                                         (gamma_over_var.shape[0], 1, 1, 1))
    new_w = gamma_over_var_expanded * nd.cast(conv_w, 'float32')
    new_b = beta - running_mean * gamma_over_var
    return new_w, new_b
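
A quick check (my sketch, not from the source): with identity BatchNorm statistics the folded weights change only by the 1e-5 epsilon, and the new bias is zero.

from mxnet import nd

conv_w = nd.random.normal(shape=(8, 3, 3, 3))
gamma, beta = nd.ones(8), nd.zeros(8)
running_mean, running_var = nd.zeros(8), nd.ones(8)
new_w, new_b = merge(conv_w, gamma, beta, running_mean, running_var)
print(nd.max(nd.abs(new_w - conv_w)).asscalar())   # tiny (order 1e-5)
print(new_b)                                       # all zeros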
Example #13
    def forward(self, x, mask):
        """

        Parameters
        ----------
        F
        x: Shape(batch_size, num_node, input_dim)
        mask: Shape(batch_size, num_node, num_set, 1)

        Returns
        -------

        """
        layer_in_l = [x]
        layer_out = None
        for i in range(self._layer_num):
            if len(layer_in_l) == 1:
                layer_in = layer_in_l[0]
            else:
                layer_in = nd.concat(*layer_in_l, dim=-1)
            ### TODO: assumes batch_size=1
            x_mW = nd.reshape(self.layers[i](layer_in),
                              shape=(0, 0, self._num_set, self._units))
            layer_out = self._act(nd.sum(nd.broadcast_mul(x_mW, mask),
                                         axis=-2))
            layer_in_l.append(layer_out)
        return layer_out
Example #14
def proto_loss(embedding, nc, ns, nq):
    embedding = embedding.astype('float64')
    cls_data = nd.reshape(embedding[0:nc*ns], (nc, ns, -1))
    cls_data.attach_grad()
    cls_center = nd.mean(cls_data, axis=1)
    data_center_dis = nd.norm(embedding[nc*ns:].expand_dims(axis=1) - cls_center.expand_dims(axis=0),
                              axis=2) ** 2

    weight = nd.zeros((nc*nq, nc), ctx=embedding.context, dtype='float64')
    pick_vec = nd.zeros((nc*nq), ctx=embedding.context)
    for i in range(0, nc):
        weight[i*nq:i*nq+nq, i] = 1
        pick_vec[i*nq:i*nq+nq] = i
    """
    temp = nd.SoftmaxOutput(-data_center_dis, label)
    temp = nd.log(temp) * weight
    temp = nd.sum(-temp, axis=1)
    predict = nd.argmin(data_center_dis, axis=1)
    return -temp * nd.log(temp), predict
    """

    temp1 = nd.log_softmax(-data_center_dis, axis=1)
    temp2 = nd.pick(temp1, index=pick_vec, axis=1)
    temp3 = nd.sum(-temp2)
    label = nd.argmin(data_center_dis, axis=1)
    return temp3 / (nc * nq), label
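
A usage sketch (shapes assumed from the code): embeddings are stacked as nc*ns support rows followed by nc*nq query rows.

from mxnet import nd

nc, ns, nq, dim = 3, 5, 2, 16
embedding = nd.random.normal(shape=(nc * (ns + nq), dim))
loss, label = proto_loss(embedding, nc, ns, nq)
print(loss.asscalar(), label.shape)   # scalar loss, (nc*nq,) predicted classes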
Example #15
def my_loss(data, nc, ns, nq):
    data = data.astype('float64')
    cls_data = nd.reshape(data[0:nc * ns], (nc, ns, -1))
    cls_center = nd.mean(cls_data, axis=1) + 1e-10
    data_center_dis = nd.norm(data[nc * ns:].expand_dims(axis=1) -
                              cls_center.expand_dims(axis=0),
                              axis=2)**2

    weight = nd.zeros((nc * nq, nc), ctx=data.context, dtype='float64')
    for i in range(0, nc):
        weight[i * nq:i * nq + nq, i] = 1
    weight2 = 1 - weight

    temp1 = nd.log_softmax(-data_center_dis, axis=1)
    temp2 = nd.sum(temp1, axis=1)
    temp3 = nd.sum(-temp2)
    label = nd.argmin(data_center_dis, axis=1)
    return temp3 / (nc * nq), label

    # NOTE: everything below is unreachable (the function returns above). Also,
    # `loss1 is np.nan` can never detect a NaN; that would require np.isnan().
    loss1 = nd.sum(data_center_dis * weight)

    temp = nd.sum(nd.exp(-data_center_dis), axis=1)
    loss2 = nd.sum(nd.log(temp))

    if loss1 is np.nan or loss2 is np.nan:
        raise StopIteration

    return (loss1 + loss2) / (nc * nq), label
Example #16
def __check_image(image):
    assert image.shape[-1] == 3
    if len(image.shape) not in (3, 4):
        raise ValueError("image must be either 3 or 4 dimensions")
    shape = list(image.shape)
    shape[-1] = 3
    return nd.reshape(image, shape)
Example #17
def test_selective_attention_write():
    # filter 3x3
    # image 4x5
    # batch size 2
    ctx = mx.cpu()
    attn_block = SelectiveAttentionWrite(filter_size=3,
                                         input_shape=(4, 5),
                                         batch_size=2)
    attn_block.collect_params().initialize(ctx=ctx)

    h_dec = nd.random.normal(shape=(2, 3), ctx=ctx)
    c_dec = nd.random.normal(shape=(2, 3), ctx=ctx)

    attn_params = attn_block._attention_params_layer(
        nd.concat(h_dec, c_dec, dim=1))
    Fx, Fy = attn_block._build_filter(nd, attn_params)

    write, _ = attn_block(h_dec, c_dec)

    # calculate expected
    w = nd.reshape(attn_block._patch_layer(nd.concat(h_dec, c_dec, dim=1)),
                   (-1, 3, 3))
    expected_1 = (
        np.dot(np.dot(Fx[0].asnumpy().T, w[0].asnumpy()), Fy[0].asnumpy()) /
        np.exp(attn_params[0, 4].asscalar()))
    expected_2 = (
        np.dot(np.dot(Fx[1].asnumpy().T, w[1].asnumpy()), Fy[1].asnumpy()) /
        np.exp(attn_params[1, 4].asscalar()))
    expected = np.stack([expected_1.flatten(), expected_2.flatten()], axis=0)

    assert np.allclose(expected, write.asnumpy())
Example #18
    def get(self, pred, label):
        embedding = nd.L2Normalization(pred, mode='instance')
        self.acc = 0
        nc = self.nc
        ns = self.ns
        nq = self.nq
        margin = self.margin

        s_embedding = embedding.slice_axis(axis=0, begin=0, end=nc * ns)
        q_embedding = embedding.slice_axis(axis=0, begin=nc * ns, end=None)
        s_cls_data = nd.reshape(s_embedding, (nc, ns, -1))
        q_cls_data = nd.reshape(q_embedding, (nc, nq, -1))

        s_cls_center = nd.mean(s_cls_data, axis=1)
        s_cls_center = nd.L2Normalization(s_cls_center, mode='instance')

        temp = q_embedding.expand_dims(axis=1) * s_cls_center.expand_dims(
            axis=0)
        data_center_dis = nd.sum(temp, axis=2)
        cur_label = nd.argmax(data_center_dis, axis=1)

        loss = 0
        # Calculating loss
        for i in range(nc):
            temp = data_center_dis[i * nq:(i + 1) * nq, i]
            loss += nd.sum(
                nd.LeakyReLU(margin - temp, act_type='leaky', slope=0.1))

        for i in range(nc):
            self.acc += nd.sum(cur_label[nq * i:nq * (i + 1)] == i).asscalar()
        self.acc /= (nc * nq)

        # NOTE: the block below recomputes the support/query split and the class centers to
        # build a second hinge term (loss1), but only `loss` is returned
        s_embedding = embedding.slice_axis(axis=0, begin=0, end=nc * ns)
        q_embedding = embedding.slice_axis(axis=0, begin=nc * ns, end=None)

        s_cls_data = nd.reshape(s_embedding, (nc, ns, -1))
        q_cls_data = nd.reshape(q_embedding, (nc, nq, -1))

        s_cls_center = nd.mean(s_cls_data, axis=1)
        s_cls_center = nd.L2Normalization(s_cls_center, mode='instance')
        s_center_broadcast = s_cls_center.expand_dims(axis=1)
        s_center_dis = nd.sum(nd.broadcast_mul(q_cls_data, s_center_broadcast),
                              axis=2)
        temp = nd.LeakyReLU(margin - s_center_dis, act_type='leaky', slope=0.1)
        loss1 = nd.sum(temp)

        return (self.acc, cur_label, loss)
Example #19
 def hybrid_forward(self, F, x, block_channel_mask, *args, **kwargs):
     block_channel_mask = F.slice(block_channel_mask,
                                  begin=(None, None),
                                  end=(None, self.channel_number))
     block_channel_mask = F.reshape(block_channel_mask,
                                    shape=(1, self.channel_number, 1, 1))
     x = F.broadcast_mul(x, block_channel_mask)
     return x
Example #20
 def forward(self, signals) -> NDArray:
     outputs = []
     for signal, RFmapper in zip(signals, self._RFmappers):
         for i in range(signal.shape[1]):
             outputs.append(RFmapper(signal[:,i,:]))
     outputs = stack(*outputs, axis=1)
     outputs = reshape(outputs, (outputs.shape[0], outputs.shape[1], *self.output_shape))
     return outputs
Example #21
def calculate_norm(x, y):
    assert x.shape == y.shape
    ndims = np.prod(x.shape)
    x = nd.reshape(x, shape=(ndims, ))
    y = nd.reshape(y, shape=(ndims, ))
    res = x - y
    nx = nd.norm(x)
    ny = nd.norm(y)
    nr = nd.norm(res)
    print("saving...")
    f = "/home/ryt/data/cmp_"
    names = ["nx", "ny", "nr"]
    objs = [nx, ny, nr]
    for obj in objs:
        print(type(obj), obj.shape)
    for i in range(3):
        nd.save(f + names[i], objs[i])
    print('success')
Example #22
def kr(matrices):
    """Khatri-Rao product of a list of matrices

        This can be seen as a column-wise kronecker product.

    Parameters
    ----------
    matrices : ndarray list
        list of matrices with the same number of columns, i.e.::

            for i in len(matrices):
                matrices[i].shape = (n_i, m)

    Returns
    -------
    khatri_rao_product: matrix of shape ``(prod(n_i), m)``
        where ``prod(n_i) = prod([m.shape[0] for m in matrices])``
        i.e. the product of the number of rows of all the matrices in the product.

    Notes
    -----
    Mathematically:

    .. math::
         \\text{If every matrix } U_k \\text{ is of size } (I_k \\times R),\\\\
         \\text{Then } \\left(U_1 \\bigodot \\cdots \\bigodot U_n \\right) \\text{ is of size } (\\prod_{k=1}^n I_k \\times R)
    """
    if len(matrices) < 2:
        raise ValueError(
            'kr requires a list of at least 2 matrices, but {} given.'.format(
                len(matrices)))

    n_col = shape(matrices[0])[1]
    for i, e in enumerate(matrices[1:]):
        if not i:
            res = matrices[0]
        s1, s2 = shape(res)
        s3, s4 = shape(e)
        if not s2 == s4 == n_col:
            raise ValueError(
                'All matrices should have the same number of columns.')
        res = reshape(
            reshape(res, (s1, 1, s2)) * reshape(e, (1, s3, s4)), (-1, n_col))
    return res
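
A usage sketch (assuming the backend's shape/reshape behave like np.shape/np.reshape on NumPy inputs): each column of the Khatri-Rao product is the Kronecker product of the corresponding columns.

import numpy as np

A, B = np.random.rand(2, 3), np.random.rand(4, 3)
out = kr([A, B])
assert out.shape == (8, 3)
assert np.allclose(out[:, 0], np.kron(A[:, 0], B[:, 0]))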
Example #23
 def forward(self, x):
     if not self.pretrained:
         input_ = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3] * x.shape[4])
     else:
         input_ = x
         
     output_1 = self.lstm_1(input_)
     output_2 = self.lstm_2(output_1)
     dense_1 = self.dense(output_2)
     # F is assumed to be mxnet.ndarray at module scope; it is not defined in this snippet
     output = F.reshape(dense_1, (dense_1.shape[0], self.caption_length, self.caption_length))
     return output
Example #24
def get_minibatch(data_iter):
    try:
        batch = data_iter.next()
    except StopIteration:
        data_iter.reset()
        batch = data_iter.next()

    x = batch.data[0]
    x = nd.reshape(x, (x.shape[0], -1))
    y = nd.one_hot(batch.label[0], 10)
    return x, y
Example #25
File: reshaper.py Project: roya0045/cvar2
 def mxwindow(mna, window):
     mnas = mna.shape
     mnout = (*mnas[:-2], *window, (mnas[-2] - window[-2]) + 1, (mnas[-1] - window[-1]) + 1)
     mne2 = None
     for R in range(window[0]):
         j_lim = R + mnout[-2]
         for H in range(window[1]):
             tdata = mnd.slice(mna, begin=(None, None, R, H),
                               end=(None, None, j_lim, H + mnout[-1]),
                               step=(None, None, 1, 1))
             if mne2 is None:
                 mne2 = tdata
             else:
                 mne2 = mnd.concat(mne2, tdata, dim=1)
     return mnd.expand_dims(
         mnd.transpose(mnd.reshape(mne2, shape=mnout), axes=(0, 5, 4, 3, 2, 1)), 3)
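
A usage sketch (mine): all 2x2 windows of a (1, 1, 3, 3) input; the final transpose and expand_dims give shape (1, 2, 2, 1, 2, 2, 1).

import mxnet.ndarray as mnd

a = mnd.arange(9).reshape((1, 1, 3, 3))
print(mxwindow(a, (2, 2)).shape)   # (1, 2, 2, 1, 2, 2, 1)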
Example #26
    def forward(self, is_train, req, in_data, out_data, aux):
        arm_cls_preds = in_data[0]
        odm_cls_target = in_data[1]
        odm_loc_target_mask = in_data[2]

        arm_cls_preds = nd.softmax(data=arm_cls_preds)
        arm_cls_preds_classes = nd.split(data=arm_cls_preds,axis=1,num_outputs=2)
        # arm_cls_preds_bg shape: (batch, h*w*num_anchors[:layers]); negative class is channel [0]
        arm_cls_preds_bg = nd.reshape(data=arm_cls_preds_classes[0],shape=(0,-1))
        prob_temp = nd.ones_like(arm_cls_preds_bg)*0.99
        cond1 = arm_cls_preds_bg >= prob_temp # > 0.99 idx is 1
        temp1 = nd.ones_like(odm_cls_target) * (-1)  ### TODO: does 0 or -1 denote background?
        # If the ARM classifies an anchor as negative with confidence > 0.99, drop it from the
        # ODM anchor labels (replace with -1), i.e. convert the negative anchor to background
        odm_cls_target_mask = nd.where(condition=cond1,x=temp1,y=odm_cls_target)

        # apply filtering to odm_loc_target_mask
        # odm_loc_target_mask_shape: (batch, num_anchors, 4)

        arm_cls_preds_bg = nd.reshape(data=arm_cls_preds_bg,shape=(0,-1,1))#(batch , h*w*num_anchors[:layers],1)
        # (batch , h*w*num_anchors[:layers] , 4 )
        odm_loc_target_mask = nd.reshape(data=odm_loc_target_mask,shape=(0,-1,4))
        odm_loc_target_mask = odm_loc_target_mask[:,:,0] #(batch , h*w*num_anchors[:layers])
        #(batch , h*w*num_anchors[:layers], 1)
        ## take the first column of every row in the batch; the four identical labels
        ## [0 0 0 0], [1 1 1 1] collapse to [0], [1]
        odm_loc_target_mask = nd.reshape(data=odm_loc_target_mask,shape=(0,-1,1))
        loc_temp = nd.ones_like(odm_loc_target_mask)*0.99
        cond2 = arm_cls_preds_bg >= loc_temp
        temp2 = nd.zeros_like(odm_loc_target_mask)  # use 0
        # If the ARM's negative confidence exceeds 0.99, zero out the ODM location mask.
        ## In effect, regardless of the IoU-based assignment, any anchor the ARM classifies
        ## as negative with confidence > 0.99 is forced to background.
        odm_loc_target_bg_mask = nd.where(cond2,temp2,odm_loc_target_mask)
        odm_loc_target_bg_mask = nd.concat(*[odm_loc_target_bg_mask]*4,dim=2)
        # restore the original layout
        odm_loc_target_bg_mask = nd.reshape(odm_loc_target_bg_mask,shape=(0,-1))

        for ind, val in enumerate([odm_cls_target_mask, odm_loc_target_bg_mask]):
            self.assign(out_data[ind], req[ind], val)
Example #27
def unsorted_1d_segment_sum(input, seg_id, n_segs, dim):
    # TODO: support other dimensions
    assert dim == 0, 'MXNet only supports segment sum on first dimension'

    # Use SPMV to simulate segment sum
    ctx = input.context
    n_inputs = input.shape[0]
    input_shape_suffix = input.shape[1:]
    input = input.reshape(n_inputs, -1)
    n_range = nd.arange(n_inputs, dtype='int64').as_in_context(ctx)
    w_nnz = nd.ones(n_inputs).as_in_context(ctx)
    # build an (n_segs, n_inputs) one-hot selection matrix in CSR form
    w = nd.sparse.csr_matrix((w_nnz, (seg_id, n_range)), (n_segs, n_inputs))
    w = w.as_in_context(ctx)
    y = nd.dot(w, input)
    y = nd.reshape(y, (n_segs, ) + input_shape_suffix)
    return y
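
A usage sketch: rows 0 and 1 fall into segment 0 and row 2 into segment 1.

from mxnet import nd

x = nd.array([[1, 2], [3, 4], [5, 6]])
seg_id = nd.array([0, 0, 1], dtype='int64')
print(unsorted_1d_segment_sum(x, seg_id, n_segs=2, dim=0))
# [[4. 6.]
#  [5. 6.]]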
Example #28
def _group_norm_func(input_data, num_groups, eps, gamma, beta):
    n, c, h, w = input_data.shape

    input_data = nd.reshape(data=input_data,
                            shape=(n, num_groups, c // num_groups, h, w))

    # mean
    mean = nd.mean(input_data, axis=2, keepdims=True)

    # std
    temp = nd.square((input_data - mean))
    std = nd.sqrt(nd.sum(temp, axis=2, keepdims=True) / (c // num_groups))

    input_data = (input_data - mean) / (std + eps)
    out = input_data.reshape((n, c, h, w))
    gamma = gamma.reshape((c, 1, 1))
    beta = beta.reshape((c, 1, 1))
    out = out * gamma + beta
    return out
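
A usage sketch (gamma and beta are per-channel): normalizing a (2, 4, 3, 3) batch with two groups leaves the shape unchanged.

from mxnet import nd

x = nd.random.normal(shape=(2, 4, 3, 3))
out = _group_norm_func(x, num_groups=2, eps=1e-5, gamma=nd.ones(4), beta=nd.zeros(4))
print(out.shape)   # (2, 4, 3, 3)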
Example #29
    def forward(self, X, stride=1):
        filters = []
        for i in range(self._n_scales):
            kernel = (i * 2 + 1, ) * 2
            pad = (i, ) * 2
            f = nd.Pooling(data=X,
                           pool_type='max',
                           kernel=kernel,
                           stride=(stride, stride),
                           pad=pad,
                           cudnn_off=True)
            f = nd.reshape(f, (f.shape[0], 1) + f.shape[1:])
            filters.append(f)

        filters = nd.concat(*filters, dim=1)
        # the learned softmax weights are currently unused; plain mean pooling is applied
        weight = nd.softmax(self._get_param(self.weight), axis=1)
        filters = nd.mean(filters, axis=1)
        #   filters = nd.sum(filters * weight, axis=1)

        return filters
Example #30
def verify_reshape_like_dynamic(
    xshp, wshp, lhs_begin, lhs_end, rhs_begin, rhs_end):
    x_np = np.random.uniform(size=xshp)
    w_np = np.random.uniform(size=wshp)
    x = nd.array(x_np)
    w = nd.array(w_np)

    # org op
    y = nd.reshape_like(
        x, w, lhs_begin=lhs_begin, lhs_end=lhs_end,
        rhs_begin=rhs_begin, rhs_end=rhs_end)

    # rewrite op
    xndims = len(xshp)
    lhs_begin = lhs_begin + xndims if lhs_begin < 0 else lhs_begin
    lhs_end = lhs_end + xndims if lhs_end < 0 else lhs_end
    assert 0 <= lhs_begin < lhs_end <= xndims

    wndims = len(wshp)
    rhs_begin = rhs_begin + wndims if rhs_begin < 0 else rhs_begin
    rhs_end = rhs_end + wndims if rhs_end < 0 else rhs_end
    assert 0 <= rhs_begin < rhs_end <= wndims

    rshp = xshp[:lhs_begin] + \
        wshp[rhs_begin:rhs_end] + xshp[lhs_end:]

    batch_axes = [0]
    assert len(batch_axes) == 1, \
        "Dynamic batch shape fusion only supports " + \
        "a single dimension of batch. Provided: (%s)" % batch_axes
    batch_axis = batch_axes[0]
    rshp = rshp[:batch_axis] + (-1,) + rshp[batch_axis+1:]

    z = nd.reshape(x, shape=rshp)

    # compare
    assert z.shape == y.shape
    zn, zp = get_norm(z)
    yn, yp = get_norm(y)
    rn = np.linalg.norm(zp-yp)
    print(zn, yn, rn)
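
A usage sketch (assumes the get_norm helper from the source project is available): replacing x's second axis (6) with w's axes 1:3, i.e. (2, 3).

verify_reshape_like_dynamic((4, 6), (2, 2, 3, 2),
                            lhs_begin=1, lhs_end=2, rhs_begin=1, rhs_end=3)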
Example #31
def get_sample_point():
    """Grabs a single input/label pair from MNIST"""
    def transform(data, label):
        return data.astype(np.float32) / 255.0, label.astype(np.float32)

    # Load one random image from the test dataset
    sample_data = mx.gluon.data.DataLoader(mx.gluon.data.vision.MNIST(
        train=False, transform=transform),
                                           1,
                                           shuffle=True)
    for data, label in sample_data:
        img = nd.transpose(data, (1, 0, 2, 3))
        img = nd.reshape(img, (28, 28, 1))
        imtiles = nd.tile(img, (1, 1, 3))
        plt.imshow(imtiles.asnumpy())
        plt.savefig("test_input.png")

        data = nd.transpose(data, (0, 3, 1, 2))
        data = data.as_in_context(ctx).asnumpy()
        label = int(label.asnumpy()[0])
        return data, label