Example 1
def extract_multi_position_matrix_nd(bbox):
    bbox = nd.transpose(bbox, axes=(1, 0, 2))
    xmin, ymin, xmax, ymax = nd.split(data=bbox, num_outputs=4, axis=2)
    # [num_fg_classes, num_boxes, 1]
    bbox_width = xmax - xmin + 1.
    bbox_height = ymax - ymin + 1.
    center_x = 0.5 * (xmin + xmax)
    center_y = 0.5 * (ymin + ymax)
    # [num_fg_classes, num_boxes, num_boxes]
    delta_x = nd.broadcast_minus(lhs=center_x,
                                 rhs=nd.transpose(center_x, axes=(0, 2, 1)))
    delta_x = nd.broadcast_div(delta_x, bbox_width)
    delta_x = nd.log(nd.maximum(nd.abs(delta_x), 1e-3))

    delta_y = nd.broadcast_minus(lhs=center_y,
                                 rhs=nd.transpose(center_y, axes=(0, 2, 1)))
    delta_y = nd.broadcast_div(delta_y, bbox_height)
    delta_y = nd.log(nd.maximum(nd.abs(delta_y), 1e-3))

    delta_width = nd.broadcast_div(lhs=bbox_width,
                                   rhs=nd.transpose(bbox_width,
                                                    axes=(0, 2, 1)))
    delta_width = nd.log(delta_width)

    delta_height = nd.broadcast_div(lhs=bbox_height,
                                    rhs=nd.transpose(bbox_height,
                                                     axes=(0, 2, 1)))
    delta_height = nd.log(delta_height)
    concat_list = [delta_x, delta_y, delta_width, delta_height]
    for idx, sym in enumerate(concat_list):
        concat_list[idx] = nd.expand_dims(sym, axis=3)
    position_matrix = nd.concat(*concat_list, dim=3)
    return position_matrix
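A quick shape check for the function above (a sketch, assuming `from mxnet import nd`): input boxes are laid out as [num_boxes, num_fg_classes, 4], and the result is the pairwise position matrix.

bbox = nd.random.uniform(low=10, high=90, shape=(3, 2, 4))  # 3 boxes, 2 classes
pos = extract_multi_position_matrix_nd(bbox)
print(pos.shape)  # (2, 3, 3, 4): pairwise (dx, dy, dw, dh) per class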
Example 2
def norm(tensor, order=2, axis=None):
    """Computes the l-`order` norm of tensor

    Parameters
    ----------
    tensor : ndarray
    order : int
    axis : int or tuple

    Returns
    -------
    float or tensor
        If `axis` is provided returns a tensor.
    """
    # handle difference in default axis notation
    if axis is None:
        axis = ()

    if order == 'inf':
        res = nd.max(nd.abs(tensor), axis=axis)
    elif order == 1:
        res = nd.sum(nd.abs(tensor), axis=axis)
    elif order == 2:
        res = nd.sqrt(nd.sum(tensor**2, axis=axis))
    else:
        res = nd.sum(nd.abs(tensor)**order, axis=axis)**(1/order)

    if res.shape == (1,):
        return res.asscalar()
    return res
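A minimal usage sketch (assuming `from mxnet import nd`):

t = nd.array([[3., 4.], [0., 1.]])
print(norm(t))                   # 5.0990195, the global l2 norm, as a float
print(norm(t, order=1, axis=1))  # [7. 1.], per-row l1 norms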
Example 3
    def quantize(self, x):
        # Fixed-point quantization: split self.num_bit bits between integer
        # and fractional parts based on the largest magnitude in x.
        max_abs = nd.max(nd.abs(x))
        if max_abs != 0:
            int_len = nd.ceil(nd.log2(max_abs)).astype('float32')
            num_bit = self.num_bit.as_in_context(x.context)
            frac_len = num_bit - int_len
            f = (2 ** frac_len).astype('float32')
            return (x * f).floor() * (1 / f)
        return x
Example 4
    def int_inference(self, x):
        # Quantize to int8 and also return the fractional bit length.
        max_abs = nd.max(nd.abs(x))
        if max_abs != 0:
            int_len = nd.ceil(nd.log2(max_abs)).astype('float32')
            num_bit = self.num_bit.as_in_context(x.context)
            frac_len = num_bit - int_len
            f = (2 ** frac_len).astype('float32')
            y = (x * f).round()
            return y.astype('int8'), frac_len
        return x.astype('int8'), 0
Example 5
    def int_quantize(self, x):
        # As above, but clip the integer representation to the int8 range.
        max_abs = nd.max(nd.abs(x))
        if max_abs != 0:
            int_len = nd.ceil(nd.log2(max_abs)).astype('float32')
            num_bit = self.num_bit.as_in_context(x.context)
            frac_len = num_bit - int_len
            f = (2 ** frac_len).astype('float32')
            y = nd.clip((x * f).floor(), a_min=-128, a_max=127)
            return y, frac_len
        return x, 0
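Examples 3-5 share one fixed-point scheme. A standalone sketch of it, with `num_bit` passed as a plain float instead of the `self.num_bit` attribute the originals assume:

from mxnet import nd

def quantize_sketch(x, num_bit=8.0):
    max_abs = nd.max(nd.abs(x))
    if max_abs == 0:
        return x
    int_len = nd.ceil(nd.log2(max_abs)).astype('float32')  # bits for the integer part
    frac_len = num_bit - int_len                           # bits left for the fraction
    f = (2 ** frac_len).astype('float32')
    return (x * f).floor() * (1 / f)                       # snap down onto the grid

print(quantize_sketch(nd.array([0.337, -1.25, 0.])))  # [ 0.3359375 -1.25  0. ]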
Example 6
    def update(self, data):
        # Masked relative-error metric: only points with label >= 10 contribute.
        pred = self.scaler.inverse_transform(data[self.pred_name])
        label = self.scaler.inverse_transform(data[self.label_name])
        mask = label >= 10

        _cnt = nd.sum(mask).as_in_context(mx.cpu())
        _loss = nd.sum(nd.abs(pred - label) / (nd.abs(pred) + nd.abs(label) + 1e-5) * mask).as_in_context(mx.cpu())
        if self.cnt is None:
            self.cnt = self.loss = 0

        self.cnt += _cnt
        self.loss += _loss
Example 7
    def forward(self, is_train, req, in_data, out_data, aux):
        # Fixed-point round-to-nearest quantization of the input.
        x = in_data[0]
        y = x  # fall through unchanged when the input is all zeros
        in_max = nd.max(nd.abs(x))
        if in_max != 0:
            int_len = nd.ceil(nd.log2(in_max)).astype('float32')
            num_bit = self.num_bit.as_in_context(x.context)
            frac_len = num_bit - int_len
            f = (2 ** frac_len).astype('float32')
            y = (x * f).round() * (1 / f)
        self.assign(out_data[0], req[0], y)
Example 8
def bbox_iou(box1, box2, transform=True):
    """Calculate the IoU Error
    """

    # Convert inputs to NDArray if needed
    if not isinstance(box1, nd.NDArray):
        box1 = nd.array(box1)
    if not isinstance(box2, nd.NDArray):
        box2 = nd.array(box2)

    # Ensure coordinates are non-negative
    box1 = nd.abs(box1)
    box2 = nd.abs(box2)
    # Convert (cx, cy, w, h) boxes to corner format (x1, y1, x2, y2)
    if transform:
        tmp_box1 = box1.copy()
        tmp_box1[:, 0] = box1[:, 0] - box1[:, 2] / 2.0
        tmp_box1[:, 1] = box1[:, 1] - box1[:, 3] / 2.0
        tmp_box1[:, 2] = box1[:, 0] + box1[:, 2] / 2.0
        tmp_box1[:, 3] = box1[:, 1] + box1[:, 3] / 2.0
        box1 = tmp_box1

        tmp_box2 = box2.copy()
        tmp_box2[:, 0] = box2[:, 0] - box2[:, 2] / 2.0
        tmp_box2[:, 1] = box2[:, 1] - box2[:, 3] / 2.0
        tmp_box2[:, 2] = box2[:, 0] + box2[:, 2] / 2.0
        tmp_box2[:, 3] = box2[:, 1] + box2[:, 3] / 2.0
        box2 = tmp_box2

    # Get the coordinates of bounding boxes (xStart,yStart,xEnd,yEnd)
    b1_x1, b1_y1, b1_x2, b1_y2 = box1[:, 0], box1[:, 1], box1[:, 2], box1[:, 3]
    b2_x1, b2_y1, b2_x2, b2_y2 = box2[:, 0], box2[:, 1], box2[:, 2], box2[:, 3]

    # get the coordinates of the intersection rectangle
    # (nd.where with these comparisons acts as an element-wise max / min)
    inter_rect_x1 = nd.where(b1_x1 > b2_x1, b1_x1, b2_x1)
    inter_rect_y1 = nd.where(b1_y1 > b2_y1, b1_y1, b2_y1)
    inter_rect_x2 = nd.where(b1_x2 < b2_x2, b1_x2, b2_x2)
    inter_rect_y2 = nd.where(b1_y2 < b2_y2, b1_y2, b2_y2)

    # Intersection area
    inter_area = nd.clip(
        inter_rect_x2 - inter_rect_x1 + 1, a_min=0, a_max=10000) * nd.clip(
            inter_rect_y2 - inter_rect_y1 + 1, a_min=0, a_max=10000)

    # Union Area
    b1_area = (b1_x2 - b1_x1 + 1) * (b1_y2 - b1_y1 + 1)
    b2_area = (b2_x2 - b2_x1 + 1) * (b2_y2 - b2_y1 + 1)
    iou = inter_area / (b1_area + b2_area - inter_area)

    return nd.clip(iou, 1e-5, 1. - 1e-5)
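A usage sketch with (cx, cy, w, h) inputs, which `transform=True` converts to corners:

b1 = nd.array([[50., 50., 20., 20.]])
b2 = nd.array([[55., 55., 20., 20.]])
print(bbox_iou(b1, b2))  # about 0.41 under the +1 pixel convention used here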
Example 9
def test_513():
    # Small edge-detection example
    X = nd.ones((8, 8))
    X[2:6, 2:6] = 0
    print(X)
    '''
    Original image; the task is edge detection:
        [
         [1. 1. 1. 1. 1. 1. 1. 1.]
         [1. 1. 1. 1. 1. 1. 1. 1.]
         [1. 1. 0. 0. 0. 0. 1. 1.]
         [1. 1. 0. 0. 0. 0. 1. 1.]
         [1. 1. 0. 0. 0. 0. 1. 1.]
         [1. 1. 0. 0. 0. 0. 1. 1.]
         [1. 1. 1. 1. 1. 1. 1. 1.]
         [1. 1. 1. 1. 1. 1. 1. 1.]
        ]
     '''
    # Build the kernel
    K1 = nd.array([[1, -1], [1, -1]])
    Y1 = corr2d(X, K1)
    print(Y1)
    '''
    Detects vertical edges:
    [[ 0.  0.  0.  0.  0.  0.  0.]
     [ 0.  1.  0.  0.  0. -1.  0.]
     [ 0.  2.  0.  0.  0. -2.  0.]
     [ 0.  2.  0.  0.  0. -2.  0.]
     [ 0.  2.  0.  0.  0. -2.  0.]
     [ 0.  1.  0.  0.  0. -1.  0.]
     [ 0.  0.  0.  0.  0.  0.  0.]]
     '''
    # Build the kernel
    K2 = nd.array([[1, 1], [-1, -1]])
    Y2 = corr2d(X, K2)
    print(Y2)
    '''
    Detects horizontal edges:
    [[ 0.  0.  0.  0.  0.  0.  0.]
     [ 0.  1.  2.  2.  2.  1.  0.]
     [ 0.  0.  0.  0.  0.  0.  0.]
     [ 0.  0.  0.  0.  0.  0.  0.]
     [ 0.  0.  0.  0.  0.  0.  0.]
     [ 0. -1. -2. -2. -2. -1.  0.]
     [ 0.  0.  0.  0.  0.  0.  0.]]
    
    '''
    # Combine both magnitudes to detect edges in either direction
    Y3 = nd.add(nd.abs(Y1), nd.abs(Y2))
    print(Y3)
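The example assumes a `corr2d` helper; a minimal 2-D cross-correlation in the style of the "Dive into Deep Learning" book would be:

def corr2d(X, K):
    h, w = K.shape
    Y = nd.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))
    for i in range(Y.shape[0]):
        for j in range(Y.shape[1]):
            Y[i, j] = (X[i:i + h, j:j + w] * K).sum()
    return Y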
Example 10
def bbox_iou(box1, box2, transform=True, ctx=None):
    '''
    Measure the overlap between predicted and ground-truth boxes;
    an IoU above 0.5 indicates a reasonably good prediction.
    '''
    if not isinstance(box1, nd.NDArray):
        box1 = nd.array(box1, ctx=ctx)
    if not isinstance(box2, nd.NDArray):
        box2 = nd.array(box2, ctx=ctx)
    box1 = nd.abs(box1)
    box2 = nd.abs(box2)

    if transform:
        tmp_box1 = box1.copy()
        tmp_box1[:, 0] = box1[:, 0] - box1[:, 2] / 2.0
        tmp_box1[:, 1] = box1[:, 1] - box1[:, 3] / 2.0
        tmp_box1[:, 2] = box1[:, 0] + box1[:, 2] / 2.0
        tmp_box1[:, 3] = box1[:, 1] + box1[:, 3] / 2.0
        box1 = tmp_box1
        tmp_box2 = box2.copy()
        tmp_box2[:, 0] = box2[:, 0] - box2[:, 2] / 2.0
        tmp_box2[:, 1] = box2[:, 1] - box2[:, 3] / 2.0
        tmp_box2[:, 2] = box2[:, 0] + box2[:, 2] / 2.0
        tmp_box2[:, 3] = box2[:, 1] + box2[:, 3] / 2.0
        box2 = tmp_box2
    # Get the coordinates of bounding boxes
    b1_x1, b1_y1, b1_x2, b1_y2 = box1[:, 0], box1[:, 1], box1[:, 2], box1[:, 3]
    b2_x1, b2_y1, b2_x2, b2_y2 = box2[:, 0], box2[:, 1], box2[:, 2], box2[:, 3]

    # get the coordinates of the intersection rectangle
    inter_rect_x1 = nd.where(b1_x1 > b2_x1, b1_x1, b2_x1)
    inter_rect_y1 = nd.where(b1_y1 > b2_y1, b1_y1, b2_y1)
    inter_rect_x2 = nd.where(b1_x2 < b2_x2, b1_x2, b2_x2)
    inter_rect_y2 = nd.where(b1_y2 < b2_y2, b1_y2, b2_y2)

    # Intersection area
    inter_area = nd.clip(
        inter_rect_x2 - inter_rect_x1 + 1, a_min=0, a_max=10000) * nd.clip(
            inter_rect_y2 - inter_rect_y1 + 1, a_min=0, a_max=10000)

    # Union Area
    b1_area = (b1_x2 - b1_x1 + 1) * (b1_y2 - b1_y1 + 1)
    b2_area = (b2_x2 - b2_x1 + 1) * (b2_y2 - b2_y1 + 1)
    iou = inter_area / (b1_area + b2_area - inter_area)

    # iou[inter_area >= b1_area] = 0.8
    # iou[inter_area >= b2_area] = 0.8
    return nd.clip(iou, 1e-5, 1. - 1e-5)
Example 11
    def forward(self, output, mask, ind, target):
        # Masked L1 loss over gathered feature-map predictions.
        pred = _tranpose_and_gather_feat(output, ind)
        pred = pred.swapaxes(dim1=0, dim2=1)
        mask = mask.astype('float32')
        loss = nd.abs(pred * mask - target * mask).sum()
        loss = loss / (mask.sum() + 1e-4)
        return loss
Example 12
def accuracy(predictions, targets):
    # predictions = nd.argmax(predictions, 1)
    # targets = nd.argmax(targets, 1)
    # return nd.mean(nd.equal(predictions, targets)).asscalar() * 100
    predictions = nd.where(predictions > 0.5, nd.ones_like(predictions),
                           nd.zeros_like(predictions))
    return 100 - nd.mean(nd.abs(predictions - targets)).asscalar() * 100
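Quick check: thresholding at 0.5 makes two of the three predictions correct.

p = nd.array([0.9, 0.2, 0.7])
t = nd.array([1., 0., 0.])
print(accuracy(p, t))  # ~66.7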
Example 13
    def acc(output, label):
        # Worst-case per-sample accuracy: each sample has 96 outputs, and its
        # accuracy is 1 minus the mean absolute deviation from the label.
        min_acc = 1.0
        for i in range(output.shape[0]):
            x = nd.sum(nd.abs(output[i] - label[i]), axis=0).asscalar()
            min_acc = min(min_acc, 1 - x / 96)
        return min_acc
Example 14
    def add_split(x, leaf, p_tau):
        center = leaf.parent['node'].center.data()
        radius = leaf.parent['node'].radius.data()
        tau = p_tau + nd.random.exponential(radius**-1)
        while True:
            s = nd.random.normal(shape=(2, x.shape[-1]))
            s = s / nd.norm(s, axis=-1, keepdims=True)
            r = nd.random.uniform(low=nd.array([0]), high=radius)
            r = r * nd.random.uniform()**(1 / 3)
            if nd.sign(s[0][-1]) > 0:
                weight = s[0]
                bias = nd.dot(s[0], -1 * r * (s[1] + center))
                y = nd.sign(nd.dot(x, weight) + bias)
                if nd.abs(nd.sum(y)) != len(y):
                    break

        split = Split(weight=weight,
                      bias=bias,
                      sharpness=3 / radius,
                      tau=tau,
                      decision=leaf.parent['decision'],
                      side=leaf.parent['side'])
        tree.splits.add(split)
        leaf.parent['node'].child['decision'] = split
        leaf.parent['decision'] = split
Example 15
    def int_quantize_double(self, x, w):
        # Quantize activations x and weights w with a shared fractional length,
        # derived from the larger of their maximum magnitudes.
        max_abs = nd.maximum(nd.max(nd.abs(x)), nd.max(nd.abs(w)))
        if max_abs != 0:
            int_len = nd.ceil(nd.log2(max_abs)).astype('float32')
            num_bit = self.num_bit.as_in_context(x.context)
            frac_len = num_bit - int_len
            f = (2 ** frac_len).astype('float32')
            int_x = (x * f).floor()
            int_w = (w * f).floor()
            return int_x, int_w, frac_len
        return x, w, 0
Example 16
def nd_std(x, axis=-1):
    """Standard deviation (SD) along `axis`.
    Note: do not use axis=0.
    """
    mean = x.mean(axis=axis).expand_dims(axis=axis)
    return nd.sqrt(nd.square(x - mean).mean(axis=axis))
Example 17
def calc_loss(pred, label):
    # Mean absolute error between a soft-argmax expectation and the label;
    # stable_softmax, idx_tensor and NEAR_0 are defined elsewhere in the source.
    softmax_pred = stable_softmax(pred) + NEAR_0
    expectation = idx_tensor * softmax_pred
    expectation = nd.sum(expectation, 1) * 3 - 102
    loss = nd.abs(expectation - label)
    return nd.sum(loss) / len(loss)
Example 18
    def compute(self, input):
        """Compute the output of the neural net given the input.
        The input has to be a ndarray of shape input_size or (input_size, N) where N is the batch_size."""
        if len(input.shape) == 1:
            input = input.reshape((input.shape[0], 1))

        X = input[0]
        Y = input[1]
        for i in range(self.n):
            _X = nd.cos(self.thetas[i]) * X + nd.sin(
                self.thetas[i]) * Y  # symmetry about the axis (cos θ)u_x + (sin θ)u_y
            Y = -nd.sin(self.thetas[i]) * X + nd.cos(self.thetas[i]) * Y

            X = nd.abs(_X + self.biases[i]) - self.biases[i]

            if False:  # optimize (disabled branch)
                _X = nd.cos(self.thetas[i]) * X - nd.sin(
                    self.thetas[i]) * Y  # Sym / (cos(O)u_x + sin(O)u_y)
                Y = nd.sin(self.thetas[i]) * X + nd.cos(self.thetas[i]) * Y
                X = _X

        _X = nd.cos(self.thetas[self.n]) * X + nd.sin(self.thetas[self.n]) * Y
        Y = -nd.sin(self.thetas[self.n]) * X + nd.cos(self.thetas[self.n]) * Y

        return nd.sigmoid(Y - _X)
Example 19
def test(ctx=mx.cpu()):
    from mxboard import SummaryWriter
    sw = SummaryWriter(logdir='sphere_dynamic', flush_secs=5)

    net = nn.Sequential()
    b1 = base_net(48,
                  3,
                  fun=special_conv,
                  kernel_size=(3, 3),
                  same_shape=False)
    b2 = base_net(1,
                  48,
                  fun=special_conv,
                  kernel_size=(3, 3),
                  same_shape=False)
    fc = nn.Dense(3, in_units=9)
    net.add(b1, b2, fc)
    init_s(net, ctx)

    from mxnet import gluon, autograd
    trainer = gluon.Trainer(net.collect_params(), 'sgd',
                            {'learning_rate': 0.01})
    for i in range(10000):
        with autograd.record():
            out = net(img)
            loss = nd.sum(nd.abs(out - target))
        loss.backward()
        trainer.step(2)
        sw.add_scalar(tag='loss', value=loss.asscalar(), global_step=i)
        if i % 100 == 0:
            print(i, loss.asscalar())
    sw.close()
Example 20
def bbox_iou(box1, box2, transform=True):
    """
    Returns the IoU of two bounding boxes
    """
    box1 = nd.array(box1)
    box2 = nd.array(box2)
    if box1.size == 0 or box2.size == 0:
        raise ValueError("box1 and box2 must be non-empty")
    box1 = nd.abs(box1)
    box2 = nd.abs(box2)
    if transform:
        tmp_box1 = box1.copy()
        tmp_box1[:, 0] = box1[:, 0] - box1[:, 2] / 2.0
        tmp_box1[:, 1] = box1[:, 1] - box1[:, 3] / 2.0
        tmp_box1[:, 2] = box1[:, 0] + box1[:, 2] / 2.0
        tmp_box1[:, 3] = box1[:, 1] + box1[:, 3] / 2.0
        box1 = tmp_box1
        tmp_box2 = box2.copy()
        tmp_box2[:, 0] = box2[:, 0] - box2[:, 2] / 2.0
        tmp_box2[:, 1] = box2[:, 1] - box2[:, 3] / 2.0
        tmp_box2[:, 2] = box2[:, 0] + box2[:, 2] / 2.0
        tmp_box2[:, 3] = box2[:, 1] + box2[:, 3] / 2.0
        box2 = tmp_box2
    # Get the coordinates of bounding boxes
    b1_x1, b1_y1, b1_x2, b1_y2 = box1[:, 0], box1[:, 1], box1[:, 2], box1[:, 3]
    b2_x1, b2_y1, b2_x2, b2_y2 = box2[:, 0], box2[:, 1], box2[:, 2], box2[:, 3]

    # get the coordinates of the intersection rectangle
    inter_rect_x1 = nd.where(b1_x1 > b2_x1, b1_x1, b2_x1)
    inter_rect_y1 = nd.where(b1_y1 > b2_y1, b1_y1, b2_y1)
    inter_rect_x2 = nd.where(b1_x2 < b2_x2, b1_x2, b2_x2)
    inter_rect_y2 = nd.where(b1_y2 < b2_y2, b1_y2, b2_y2)

    # Intersection area
    inter_area = nd.clip(
        inter_rect_x2 - inter_rect_x1 + 1, a_min=0, a_max=10000) * nd.clip(
            inter_rect_y2 - inter_rect_y1 + 1, a_min=0, a_max=10000)

    # Union Area
    b1_area = (b1_x2 - b1_x1 + 1) * (b1_y2 - b1_y1 + 1)
    b2_area = (b2_x2 - b2_x1 + 1) * (b2_y2 - b2_y1 + 1)
    iou = inter_area / (b1_area + b2_area - inter_area)
    # iou[inter_area >= b1_area] = 0.8
    # iou[inter_area >= b2_area] = 0.8
    return nd.clip(iou, 1e-5, 1. - 1e-5)
Example 21
def custom_loss(pred, label):
    # Weighted L1: later channels count more, and points with larger labels
    # are emphasized exponentially.
    channel_weights = nd.arange(1, 1+label.shape[1], ctx=label.context).reshape(1, -1, 1, 1) * 0.5
    # point_weights = 0.1 + label + (nd.clip(label, 0.5, 1.0) - 0.5) * 10
    point_weights = nd.exp(label*4) - 0.8
    tp = nd.abs(pred - label) * point_weights * channel_weights
    cloud_ratio = nd.mean(label > 0.1, axis=(0, 1), exclude=True)
    bc = nd.mean(tp, axis=(0, 1), exclude=True) * cloud_ratio
    return nd.mean(bc, axis=0, exclude=True)
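A shape sketch: a batch of 2 samples with 3 channels of 8x8 maps yields one loss value per sample.

pred = nd.random.uniform(shape=(2, 3, 8, 8))
label = nd.random.uniform(shape=(2, 3, 8, 8))
print(custom_loss(pred, label).shape)  # (2,)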
Example 22
    def forward(self, cls_pred, box_pred, cls_target, box_target):
        """Compute loss in entire batch across devices."""
        # require results across different devices at this time
        cls_pred, box_pred, cls_target, box_target = [
            _as_list(x) for x in (cls_pred, box_pred, cls_target, box_target)]
        # cross device reduction to obtain positive samples in entire batch
        num_pos = []
        for cp, bp, ct, bt in zip(
                *[cls_pred, box_pred, cls_target, box_target]):
            pos_samples = (ct > 0)
            num_pos.append(pos_samples.sum())
        num_pos_all = sum([p.asscalar() for p in num_pos])
        if num_pos_all < 1:
            # no positive samples found, return dummy losses
            return nd.zeros((1,)), nd.zeros((1,)), nd.zeros((1,))

        # compute element-wise cross entropy loss and sort, then perform
        # negative mining
        cls_losses = []
        box_losses = []
        sum_losses = []
        for cp, bp, ct, bt in zip(
                *[cls_pred, box_pred, cls_target, box_target]):
            pred = nd.log_softmax(cp, axis=-1)
            pos = ct > 0
            cls_loss = -nd.pick(pred, ct, axis=-1, keepdims=False)
            rank = (cls_loss * (pos - 1)).argsort(axis=1).argsort(axis=1)
            hard_negative = rank < (
                pos.sum(axis=1) * self._negative_mining_ratio).expand_dims(-1)
            # mask out if not positive or negative
            cls_loss = nd.where((pos + hard_negative) > 0, cls_loss,
                                nd.zeros_like(cls_loss))
            cls_losses.append(
                nd.sum(cls_loss, axis=0, exclude=True) / num_pos_all)

            bp = _reshape_like(nd, bp, bt)
            box_loss = nd.abs(bp - bt)
            box_loss = nd.where(
                box_loss > self._rho,
                box_loss - 0.5 * self._rho,
                (0.5 / self._rho) * nd.square(box_loss))
            # box loss only apply to positive samples
            box_loss = box_loss * pos.expand_dims(axis=-1)
            box_losses.append(
                nd.sum(box_loss, axis=0, exclude=True) / num_pos_all)
            sum_losses.append(cls_losses[-1] + self._lambd * box_losses[-1])

        return sum_losses, cls_losses, box_losses
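The box term above is a smooth-L1 (Huber-style) penalty. Isolated as a sketch, with the class attribute self._rho as a plain `rho` argument:

def smooth_l1(x, rho=1.0):
    ax = nd.abs(x)
    # quadratic near zero, linear beyond rho
    return nd.where(ax > rho, ax - 0.5 * rho, (0.5 / rho) * nd.square(ax))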
Example 23
def fmap_lossfunc(fmap, focus_lbl):
    """
    calculate loss among feature-map generated by focus branch
    :param fmap: mxnet ndarray, feature map
    :param focus_lbl: mxnet ndarray, label map generated by affine_fmap-n-gt
    :return: loss.
    """
    res = nd.abs(nd.sum(focus_lbl - fmap) / nd.sum(focus_lbl))
    return res
Example 24
def accuracy(y_hat, y):
    # Average the per-element `equal` score (helper defined elsewhere).
    diff = nd.abs(y_hat - y)
    total = 0
    for val in diff:
        total = total + equal(val)
    return total / len(diff)
Example 25
def contrastive_loss(net, data, label):
    label = label.reshape(-1, 1)
    # Pairwise match matrix: 1 where labels agree, 0 otherwise
    label_mat = nd.relu(-nd.abs(label - label.T) + 1).astype('float32')
    vec = nd.Flatten(net(data))
    # Pairwise squared Euclidean distances between embeddings
    dist_self = nd.sum(nd.square(vec), axis=1, keepdims=True)
    dist_mat = nd.broadcast_add(dist_self, dist_self.T) - 2 * nd.dot(vec, vec.T)
    # Pull matching pairs together; push non-matching pairs past margin 1
    loss = label_mat * dist_mat + (1 - label_mat) * nd.relu(1.0 - dist_mat)
    return loss
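A usage sketch with an identity function standing in for `net`:

data = nd.random.normal(shape=(4, 8))
label = nd.array([0., 0., 1., 1.])
net = lambda x: x  # placeholder "network" for illustration
print(contrastive_loss(net, data, label).shape)  # (4, 4) pairwise loss matrix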
Example 26
    def norm(tensor, order=2, axis=None):
        # handle difference in default axis notation
        if axis is None:
            axis = ()

        if order == 'inf':
            res = nd.max(nd.abs(tensor), axis=axis)
        elif order == 1:
            res = nd.sum(nd.abs(tensor), axis=axis)
        elif order == 2:
            res = nd.sqrt(nd.sum(tensor**2, axis=axis))
        else:
            res = nd.sum(nd.abs(tensor)**order, axis=axis)**(1 / order)

        if res.shape == (1,):
            return res.asscalar()

        return res
Example 27
    def forward(self, output, mask, ind, target):
        # Relative L1 loss: divide predictions by the target so the effective
        # target becomes all-ones, then apply a masked L1.
        pred = _tranpose_and_gather_feat(output, ind)
        pred = pred.swapaxes(dim1=0, dim2=1)
        mask = mask.expand_dims(axis=2).broadcast_like(pred).astype('float32')
        pred = pred / (target + 1e-4)
        target = nd.ones_like(target)
        loss = nd.abs(pred * mask - target * mask).sum()
        loss = loss / (mask.sum() + 1e-4)
        return loss
Example 28
    def compare():
        # Total absolute difference between the saved params and the
        # BatchNorm running means.
        diff = 0.
        print('p, b {} {}'.format(params[0][0],
                                  BNs[0].params.get('running_mean').data()[0]))
        for param, bn in zip(params, BNs):
            nparam = bn.params.get('running_mean').data()
            diff += nd.sum(nd.abs(nparam - param)).asscalar()
        return diff
Example 29
def quantize_to(x, bits=8):
    # Symmetric fixed-point quantization to `bits` bits (requires numpy as np).
    max_v = nd.max(nd.abs(x))
    if max_v == 0:
        return x.astype(np.int8), 8
    int_len = nd.ceil(nd.log2(max_v)).asscalar()
    sb = bits - int_len
    f = 2**sb
    y = nd.floor(x * f)
    y = nd.clip(y, a_min=-2**(bits - 1), a_max=2**(bits - 1) - 1)
    return y, sb
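Usage sketch (note the int8-range clipping on the second value):

x = nd.array([0.3, -1.7, 0.05])
q, sb = quantize_to(x)
print(q, sb)  # [ 38. -128. 6.] and 7.0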
Example 30
    def forward(self, cls_pred, box_pred, cls_target, box_target):
        """Compute loss in entire batch across devices."""
        # require results across different devices at this time
        cls_pred, box_pred, cls_target, box_target = [_as_list(x) \
            for x in (cls_pred, box_pred, cls_target, box_target)]
        # cross device reduction to obtain positive samples in entire batch
        pos_ct = [ct > 0 for ct in cls_target]
        num_pos = [ct.sum() for ct in pos_ct]
        num_pos_all = sum([p.asscalar() for p in num_pos])
        # print ('num_pos_all: {}'.format(num_pos_all))
        if num_pos_all < 1 and self._min_hard_negatives < 1:
            # no positive samples and no hard negatives, return dummy losses
            cls_losses = [nd.sum(cp * 0) for cp in cls_pred]
            box_losses = [nd.sum(bp * 0) for bp in box_pred]
            sum_losses = [
                nd.sum(cp * 0) + nd.sum(bp * 0)
                for cp, bp in zip(cls_pred, box_pred)
            ]
            return sum_losses, cls_losses, box_losses

        # compute element-wise cross entropy loss and sort, then perform negative mining
        cls_losses = []
        box_losses = []
        sum_losses = []
        for cp, bp, ct, bt in zip(
                *[cls_pred, box_pred, cls_target, box_target]):
            # print ('cp shape: {}'.format(cp.shape))
            # print ('bp shape: {}'.format(bp.shape))
            # print ('ct shape: {}'.format(ct.shape))
            # print ('bt shape: {}'.format(bt.shape))
            pred = nd.log_softmax(cp, axis=-1)
            pos = ct > 0
            cls_loss = -nd.pick(pred, ct, axis=-1, keepdims=False)
            rank = (cls_loss * (pos - 1)).argsort(axis=1).argsort(axis=1)
            hard_negative = rank < nd.maximum(
                self._min_hard_negatives,
                pos.sum(axis=1) * self._negative_mining_ratio).expand_dims(-1)
            # mask out if not positive or negative
            cls_loss = nd.where((pos + hard_negative) > 0, cls_loss,
                                nd.zeros_like(cls_loss))
            cls_losses.append(
                nd.sum(cls_loss, axis=0, exclude=True) / max(1., num_pos_all))

            bp = _reshape_like(nd, bp, bt)
            box_loss = nd.abs(bp - bt)
            box_loss = nd.where(box_loss > self._rho,
                                box_loss - 0.5 * self._rho,
                                (0.5 / self._rho) * nd.square(box_loss))
            # box loss only apply to positive samples
            box_loss = box_loss * pos.expand_dims(axis=-1)
            box_losses.append(
                nd.sum(box_loss, axis=0, exclude=True) / max(1., num_pos_all))
            sum_losses.append(cls_losses[-1] + self._lambd * box_losses[-1])

        return sum_losses, cls_losses, box_losses
Example 31
    def forward(self, cls_pred, box_pred, cls_target, box_target):
        """Compute loss in entire batch across devices."""
        # require results across different devices at this time
        cls_pred, box_pred, cls_target, box_target = [_as_list(x) \
            for x in (cls_pred, box_pred, cls_target, box_target)]
        # cross device reduction to obtain positive samples in entire batch
        num_pos = []
        for cp, bp, ct, bt in zip(*[cls_pred, box_pred, cls_target, box_target]):
            pos_samples = (ct > 0)
            num_pos.append(pos_samples.sum())
        num_pos_all = sum([p.asscalar() for p in num_pos])
        if num_pos_all < 1 and self._min_hard_negatives < 1:
            # no positive samples and no hard negatives, return dummy losses
            cls_losses = [nd.sum(cp * 0) for cp in cls_pred]
            box_losses = [nd.sum(bp * 0) for bp in box_pred]
            sum_losses = [nd.sum(cp * 0) + nd.sum(bp * 0) for cp, bp in zip(cls_pred, box_pred)]
            return sum_losses, cls_losses, box_losses

        # compute element-wise cross entropy loss and sort, then perform negative mining
        cls_losses = []
        box_losses = []
        sum_losses = []
        for cp, bp, ct, bt in zip(*[cls_pred, box_pred, cls_target, box_target]):
            pred = nd.log_softmax(cp, axis=-1)
            pos = ct > 0
            cls_loss = -nd.pick(pred, ct, axis=-1, keepdims=False)
            rank = (cls_loss * (pos - 1)).argsort(axis=1).argsort(axis=1)
            hard_negative = rank < nd.maximum(self._min_hard_negatives, pos.sum(axis=1)
                                              * self._negative_mining_ratio).expand_dims(-1)
            # mask out if not positive or negative
            cls_loss = nd.where((pos + hard_negative) > 0, cls_loss, nd.zeros_like(cls_loss))
            cls_losses.append(nd.sum(cls_loss, axis=0, exclude=True) / max(1., num_pos_all))

            bp = _reshape_like(nd, bp, bt)
            box_loss = nd.abs(bp - bt)
            box_loss = nd.where(box_loss > self._rho, box_loss - 0.5 * self._rho,
                                (0.5 / self._rho) * nd.square(box_loss))
            # box loss only apply to positive samples
            box_loss = box_loss * pos.expand_dims(axis=-1)
            box_losses.append(nd.sum(box_loss, axis=0, exclude=True) / max(1., num_pos_all))
            sum_losses.append(cls_losses[-1] + self._lambd * box_losses[-1])

        return sum_losses, cls_losses, box_losses