Code example #1
File: box.py Project: ydlstartx/MultiDet
def sample(match, cls_pred, iou, ratio=3, min_sample=0, threshold=0.5, do=True):
    if do is False:
        ones = nd.ones_like(match)
        sample = nd.where(match > -0.5, ones, ones*-1)
        return sample
    sample = nd.zeros_like(match)
    num_pos = nd.sum(match > -0.5, axis=-1)
    requre_neg = ratio * num_pos
    neg_mask = nd.where(match < -0.5, nd.max(iou, axis=-1) < threshold, sample)
    max_neg = neg_mask.sum(axis=-1)
    num_neg = nd.minimum(max_neg, nd.maximum(requre_neg, min_sample)).astype('int')
   
    neg_prob = cls_pred[:,:,0]
    max_value = nd.max(cls_pred, axis=-1, keepdims=True)
    score = max_value[:,:,0] - neg_prob + nd.log(
        nd.sum(nd.exp(cls_pred - max_value), axis=-1))

    score = nd.where(neg_mask, score, nd.zeros_like(score))
    argmax = nd.argsort(score, axis=-1, is_ascend=False)
    sample = nd.where(match > -0.5, nd.ones_like(sample), sample)
    
    for i, num in enumerate(num_neg):
        sample[i, argmax[i,:num.asscalar()]] = -1
    
    return sample
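
Note on the score above: it is the negative log-probability of the background class (class 0), written in the numerically stable log-sum-exp form. A minimal sketch, assuming mxnet is available, that checks the equivalence against nd.log_softmax:

from mxnet import nd

cls_pred = nd.random.normal(shape=(2, 4, 3))            # (batch, anchors, classes)
max_value = nd.max(cls_pred, axis=-1, keepdims=True)
score = (max_value[:, :, 0] - cls_pred[:, :, 0]
         + nd.log(nd.sum(nd.exp(cls_pred - max_value), axis=-1)))
# the same quantity via log_softmax: -log p(class 0)
reference = -nd.log_softmax(cls_pred, axis=-1)[:, :, 0]
print(nd.abs(score - reference).max())                   # ~0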
Code example #2
File: loss.py Project: zhzhuangxue/Gluon-PSENet
    def _ohem_single(self, score_gt, score_pred, training_masks):
        if self.debug:
            print("score_gt_shape:", score_gt.shape, "score_pred_shape:", score_pred.shape, \
                "train_mask_shape:", training_masks.shape)
        pos_gt_thres = F.where(score_gt > 0.5, F.ones_like(score_gt), F.zeros_like(score_gt))
        pos_num = F.sum(pos_gt_thres) - F.sum(pos_gt_thres * training_masks)

        if pos_num == 0:
            selected_mask = training_masks
            return selected_mask

        neg_lt_thres = F.where(score_gt <= 0.5, F.ones_like(score_gt), F.zeros_like(score_gt))
        neg_num = F.sum(neg_lt_thres)
        neg_num = min(pos_num * 3, neg_num)

        if neg_num == 0:
            selected_mask = training_masks
            return training_masks
        neg_score = neg_lt_thres * score_pred
        neg_score_sorted = F.sort(neg_score.reshape(-1), is_ascend=0, axis=None)
        threshold = neg_score_sorted[neg_num - 1]
        score_gt_thres = F.where(score_pred >= threshold, F.ones_like(score_pred), F.zeros_like(score_pred))

        trained_sample_mask = F.logical_or(score_gt_thres, pos_gt_thres)
        selected_mask = F.logical_and(trained_sample_mask, training_masks)

        return selected_mask
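
The OHEM step above keeps only the highest-scoring negatives: negative predictions are sorted in descending order and the value at position neg_num - 1 becomes the cut-off. A minimal standalone sketch of that selection step (made-up values, mxnet's nd used as F):

from mxnet import nd as F

score_pred = F.array([0.9, 0.1, 0.7, 0.4, 0.3])     # hypothetical negative scores
neg_num = 2
neg_score_sorted = F.sort(score_pred, is_ascend=0)   # descending
threshold = neg_score_sorted[neg_num - 1]            # 0.7
selected = score_pred >= threshold
print(selected)                                      # [1. 0. 1. 0. 0.]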
Code example #3
def concentration_transfer(class_pre_l, class_true_l, con_pre_l, con_true_l,
                           data_utils):
    eth_co_me_limit = nd.array([[
        data_utils.scale_CO[1], data_utils.scale_CO[0], data_utils.scale_Me[0]
    ]])
    concentration_mat_pre = nd.where(class_pre_l > 0.5,
                                     nd.repeat(eth_co_me_limit, repeats=class_pre_l.shape[0], axis=0), \
                                     nd.zeros_like(class_pre_l))
    concentration_mat_true = nd.where(class_true_l == 1,
                                      nd.repeat(eth_co_me_limit, repeats=class_true_l.shape[0], axis=0), \
                                      nd.zeros_like(class_true_l))

    eth_con_pre = concentration_mat_pre[:, 0] * con_pre_l[:, 1]
    eth_con_true = concentration_mat_true[:, 0] * con_true_l[:, 1]
    co_con_pre = concentration_mat_pre[:, 1] * con_pre_l[:, 0]
    co_con_true = concentration_mat_true[:, 1] * con_true_l[:, 0]
    me_con_pre = concentration_mat_pre[:, 2] * con_pre_l[:, 0]
    me_con_true = concentration_mat_true[:, 2] * con_true_l[:, 0]

    eth_co_me_con_pre = nd.concat(nd.expand_dims(eth_con_pre, axis=0), nd.expand_dims(co_con_pre, axis=0), \
                                  nd.expand_dims(me_con_pre, axis=0), dim=0).transpose()
    eth_co_me_con_true = nd.concat(nd.expand_dims(eth_con_true, axis=0), nd.expand_dims(co_con_true, axis=0), \
                                   nd.expand_dims(me_con_true, axis=0), dim=0).transpose()
    return eth_co_me_con_pre, eth_co_me_con_true
Code example #4
File: box.py Project: ydlstartx/MultiDet
def label_offset(anchors, bbox, match, sample, 
                 means=(0,0,0,0), stds=(0.1,0.1,0.2,0.2), flatten=True):
    anchors = anchors.reshape((-1,4))
    N, _ = anchors.shape
    B, M, _ = bbox.shape
    anchor_x, anchor_y, anchor_w, anchor_h = corner_to_center(anchors, split=True)
    
    bbox = bbox.reshape((B,1,M,4))
    bbox = nd.broadcast_to(bbox, (B,N,M,4))
    bbox = nd.stack(*[nd.pick(bbox[:,:,:,p], match) for p in range(4)], axis=-1)
    bbox_x, bbox_y, bbox_w, bbox_h = corner_to_center(bbox, split=True)
    
    offset_x = ((bbox_x - anchor_x) / anchor_w - means[0]) / stds[0]
    offset_y = ((bbox_y - anchor_y) / anchor_h - means[1]) / stds[1]
    offset_w = (nd.log(bbox_w/anchor_w) - means[2]) / stds[2]
    offset_h = (nd.log(bbox_h/anchor_h) - means[3]) / stds[3]
    offset = nd.concat(*(offset_x, offset_y, offset_w, offset_h), dim=-1)
    sample = sample.reshape((B,N,1))
    sample = nd.broadcast_to(sample, (B,N,4)) > 0.5
    
    anchor_offset = nd.where(sample, offset, nd.zeros_like(offset))
    anchor_mask = nd.where(sample, nd.ones_like(offset), nd.zeros_like(offset))
    
    if flatten:
        anchor_offset = anchor_offset.reshape((B,-1))
        anchor_mask = anchor_mask.reshape((B,-1))
        
    return anchor_mask, anchor_offset
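
The offsets above follow the standard SSD/Faster R-CNN box encoding: the center shift is normalized by the anchor size, the scale change is log-encoded, and both are standardized with the given means and stds. A hedged numeric sketch for a single anchor/box pair (all values made up):

import math

# anchor and matched ground-truth box in center format (cx, cy, w, h)
anchor = (0.50, 0.50, 0.20, 0.20)
gt = (0.55, 0.48, 0.25, 0.18)
means, stds = (0, 0, 0, 0), (0.1, 0.1, 0.2, 0.2)

offset_x = ((gt[0] - anchor[0]) / anchor[2] - means[0]) / stds[0]   # 2.5
offset_y = ((gt[1] - anchor[1]) / anchor[3] - means[1]) / stds[1]   # ~-1.0
offset_w = (math.log(gt[2] / anchor[2]) - means[2]) / stds[2]       # ~1.12
offset_h = (math.log(gt[3] / anchor[3]) - means[3]) / stds[3]       # ~-0.53
print(offset_x, offset_y, offset_w, offset_h)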
Code example #5
def get_accuracy(pre_l, true_l):
    one_zero_pre = nd.where(pre_l > 0.5, nd.ones_like(pre_l),
                            nd.zeros_like(pre_l))
    compare = nd.equal(one_zero_pre, true_l).sum(axis=1)
    samples_right = nd.where(compare == 3, nd.ones_like(compare),
                             nd.zeros_like(compare)).sum()
    all_num = pre_l.shape[0]
    return samples_right / all_num
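
get_accuracy above thresholds each of the three outputs at 0.5 and counts a sample as correct only if all three binary labels match (compare == 3). A brief usage sketch, assuming the function is in scope:

from mxnet import nd

pre_l = nd.array([[0.9, 0.2, 0.7],    # thresholds to [1, 0, 1]
                  [0.4, 0.6, 0.1]])   # thresholds to [0, 1, 0]
true_l = nd.array([[1, 0, 1],
                   [0, 0, 0]])
print(get_accuracy(pre_l, true_l))    # 0.5: only the first sample matches on all three labels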
Code example #6
    def forward(self, cls_pred, box_pred, cls_target, box_target):
        """Compute loss in entire batch across devices."""
        # require results across different devices at this time
        cls_pred, box_pred, cls_target, box_target = [
            _as_list(x) for x in (cls_pred, box_pred, cls_target, box_target)]
        # cross device reduction to obtain positive samples in entire batch
        num_pos = []
        for cp, bp, ct, bt in zip(
                *[cls_pred, box_pred, cls_target, box_target]):
            pos_samples = (ct > 0)
            num_pos.append(pos_samples.sum())
        num_pos_all = sum([p.asscalar() for p in num_pos])
        if num_pos_all < 1:
            # no positive samples found, return dummy losses
            return nd.zeros((1,)), nd.zeros((1,)), nd.zeros((1,))

        # compute element-wise cross entropy loss and sort, then perform
        # negative mining
        cls_losses = []
        box_losses = []
        sum_losses = []
        for cp, bp, ct, bt in zip(
                *[cls_pred, box_pred, cls_target, box_target]):
            pred = nd.log_softmax(cp, axis=-1)
            pos = ct > 0
            cls_loss = -nd.pick(pred, ct, axis=-1, keepdims=False)
            rank = (cls_loss * (pos - 1)).argsort(axis=1).argsort(axis=1)
            hard_negative = rank < (
                pos.sum(axis=1) * self._negative_mining_ratio).expand_dims(-1)
            # mask out if not positive or negative
            cls_loss = nd.where(
                (pos + hard_negative) > 0,
                cls_loss,
                nd.zeros_like(cls_loss))
            cls_losses.append(
                nd.sum(
                    cls_loss,
                    axis=0,
                    exclude=True) /
                num_pos_all)

            bp = _reshape_like(nd, bp, bt)
            box_loss = nd.abs(bp - bt)
            box_loss = nd.where(
                box_loss > self._rho,
                box_loss - 0.5 * self._rho,
                (0.5 / self._rho) * nd.square(box_loss))
            # box loss only apply to positive samples
            box_loss = box_loss * pos.expand_dims(axis=-1)
            box_losses.append(
                nd.sum(
                    box_loss,
                    axis=0,
                    exclude=True) /
                num_pos_all)
            sum_losses.append(cls_losses[-1] + self._lambd * box_losses[-1])

        return sum_losses, cls_losses, box_losses
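
The double argsort above is a rank trick: cls_loss * (pos - 1) is zero for positive anchors and the negated loss for negatives, so an ascending sort puts the hardest negatives first, and the second argsort turns sort positions into per-element ranks. A small isolated sketch of the trick (made-up values):

from mxnet import nd

cls_loss = nd.array([[0.2, 1.5, 0.1, 0.9]])
pos = nd.array([[1.0, 0.0, 0.0, 0.0]])       # only the first anchor is positive
rank = (cls_loss * (pos - 1)).argsort(axis=1).argsort(axis=1)
print(rank)   # [[3. 0. 2. 1.]]: the hardest negative (loss 1.5) gets rank 0, the positive ranks last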
Code example #7
    def forward(self, cls_pred, box_pred, cls_target, box_target):
        """Compute loss in entire batch across devices."""
        # require results across different devices at this time
        cls_pred, box_pred, cls_target, box_target = [_as_list(x) \
            for x in (cls_pred, box_pred, cls_target, box_target)]
        # cross device reduction to obtain positive samples in entire batch
        pos_ct = [ct > 0 for ct in cls_target]
        num_pos = [ct.sum() for ct in pos_ct]
        num_pos_all = sum([p.asscalar() for p in num_pos])
        # print ('num_pos_all: {}'.format(num_pos_all))
        if num_pos_all < 1 and self._min_hard_negatives < 1:
            # no positive samples and no hard negatives, return dummy losses
            cls_losses = [nd.sum(cp * 0) for cp in cls_pred]
            box_losses = [nd.sum(bp * 0) for bp in box_pred]
            sum_losses = [
                nd.sum(cp * 0) + nd.sum(bp * 0)
                for cp, bp in zip(cls_pred, box_pred)
            ]
            return sum_losses, cls_losses, box_losses

        # compute element-wise cross entropy loss and sort, then perform negative mining
        cls_losses = []
        box_losses = []
        sum_losses = []
        for cp, bp, ct, bt in zip(
                *[cls_pred, box_pred, cls_target, box_target]):
            # print ('cp shape: {}'.format(cp.shape))
            # print ('bp shape: {}'.format(bp.shape))
            # print ('ct shape: {}'.format(ct.shape))
            # print ('bt shape: {}'.format(bt.shape))
            pred = nd.log_softmax(cp, axis=-1)
            pos = ct > 0
            cls_loss = -nd.pick(pred, ct, axis=-1, keepdims=False)
            rank = (cls_loss * (pos - 1)).argsort(axis=1).argsort(axis=1)
            hard_negative = rank < nd.maximum(
                self._min_hard_negatives,
                pos.sum(axis=1) * self._negative_mining_ratio).expand_dims(-1)
            # mask out if not positive or negative
            cls_loss = nd.where((pos + hard_negative) > 0, cls_loss,
                                nd.zeros_like(cls_loss))
            cls_losses.append(
                nd.sum(cls_loss, axis=0, exclude=True) / max(1., num_pos_all))

            bp = _reshape_like(nd, bp, bt)
            box_loss = nd.abs(bp - bt)
            box_loss = nd.where(box_loss > self._rho,
                                box_loss - 0.5 * self._rho,
                                (0.5 / self._rho) * nd.square(box_loss))
            # box loss only apply to positive samples
            box_loss = box_loss * pos.expand_dims(axis=-1)
            box_losses.append(
                nd.sum(box_loss, axis=0, exclude=True) / max(1., num_pos_all))
            sum_losses.append(cls_losses[-1] + self._lambd * box_losses[-1])

        return sum_losses, cls_losses, box_losses
Code example #8
def distanceAA2(regions,i,binnum,dibins,dibins4):
#Initialize empty arrays for storing the histogram of directions, the sum of distances, and the number of counted pairs in each distance range bin
    co0=nd.zeros(binnum-1,gpu(0),dtype="float32")
    codi0=nd.zeros((5,binnum-1),gpu(0),dtype="float32")
    count0=nd.zeros(binnum-1,gpu(0),dtype="float32")
    count4=nd.zeros((5,binnum-1),gpu(0),dtype="float32")
    co4=nd.zeros((5,binnum-1),gpu(0),dtype="float32")
    seed=nd.zeros((1,2),gpu(0))
#Calculate index coordinates and directions by chunks
    a=regions[i[0]*broadcdp:min((i[0]+1)*broadcdp,regions.shape[0]),:]
    b=regions[i[1]*broadcdp:min((i[1]+1)*broadcdp,regions.shape[0]),:]
    a1=nd.array(a,gpu(0))
    b1=nd.array(b,gpu(0))
#    print ("a1",a1,"b1",b1)
    for ii in range (a1.shape[0]-1):
        a1_b1=(nd.expand_dims(a1[ii].reshape((1,2)),axis=1)-b1[ii+1:,:]).reshape((a1[ii+1:,:].shape[0],2))
        seed=nd.concat(seed,a1_b1,dim=0)
    if seed.shape[0]>1:
        x1_x2=seed[1:,0]
        y1_y2=seed[1:,1]
        labels=nd.zeros(x1_x2.shape[0],gpu(0),dtype="float32")
        sdi0=(nd.degrees(nd.arctan((y1_y2)/(x1_x2)))+90).reshape((-1,))
        ldis=nd.broadcast_hypot(x1_x2,y1_y2).reshape((-1,))

#Change 0 to 180 so the boolean-mask sum can be applied without losing values
        sdi0=nd.where(condition=(sdi0==0),x=labels+180,y=sdi0)

#Store sum of distances co0 and histogram of directions in each range bin
        for p in range (0,binnum-1):
            booleanmask=nd.equal((ldis>=bins[p]),(ldis<bins[p+1]))
            count0[p]+=nd.nansum(booleanmask)
            co0[p]+=nd.nansum(ldis*booleanmask)

#Exclude values not in the distance range bin
            sdi1=nd.where(condition=(booleanmask==0),x=labels-1,y=sdi0)
            for q in range (0,5):
                booleanmaskdi=nd.equal((sdi1>=dibins[q]),(sdi1<dibins[q+1]))            
                codi0[q,p]+=nd.nansum(booleanmaskdi)
            
        for k in range (0,5):
            booleanmaskdi=nd.equal((sdi0>=dibins4[k]),(sdi0<dibins4[k+1]))
            ldis0=ldis*booleanmaskdi
            for l in range (0,binnum-1):
                booleanmask=nd.equal((ldis0>=bins[l]),(ldis0<bins[l+1]))
                count4[k,l]+=nd.nansum(booleanmask)
                co4[k,l]+=nd.nansum(ldis0*booleanmask)

    codi0[0,:]+=codi0[4,:]
    codi0=codi0[0:4,:]
    count4[0,:]+=count4[4,:]
    count4=count4[0:4,:]
    co4[0,:]+=co4[4,:]
    co4=co4[0:4,:]
    return(co0,codi0,count0,co4,count4)
Code example #9
File: loss.py Project: xcgoner/gluon-exp
    def forward(self, cls_pred, box_pred, cls_target, box_target):
        """Compute loss in entire batch across devices."""
        # require results across different devices at this time
        cls_pred, box_pred, cls_target, box_target = [_as_list(x) \
            for x in (cls_pred, box_pred, cls_target, box_target)]
        # cross device reduction to obtain positive samples in entire batch
        num_pos = []
        for cp, bp, ct, bt in zip(*[cls_pred, box_pred, cls_target, box_target]):
            pos_samples = (ct > 0)
            num_pos.append(pos_samples.sum())
        num_pos_all = sum([p.asscalar() for p in num_pos])
        # synchronize across different machines
        # print('before sync:', num_pos_all)
        if self._distributed:
            num_pos_out = nd.zeros(1, mx.cpu())
            num_pos_in = nd.zeros(1, mx.cpu()) + num_pos_all
            # allreduce only supports pushpull
            if 'allreduce' in self._kv_store_type:
                self._kv_store.pushpull(self._num_pos_key, num_pos_in, num_pos_out)
            else:
                self._kv_store.push(self._num_pos_key, num_pos_in)
                # self._kv_store._barrier()
                self._kv_store.pull(self._num_pos_key, out=num_pos_out)
            num_pos_all = num_pos_out.asscalar()
        # print('after sync:', num_pos_all)
        if num_pos_all < 1:
            # no positive samples found, return dummy losses
            return nd.zeros((1,)), nd.zeros((1,)), nd.zeros((1,))

        # compute element-wise cross entropy loss and sort, then perform negative mining
        cls_losses = []
        box_losses = []
        sum_losses = []
        for cp, bp, ct, bt in zip(*[cls_pred, box_pred, cls_target, box_target]):
            pred = nd.log_softmax(cp, axis=-1)
            pos = ct > 0
            cls_loss = -nd.pick(pred, ct, axis=-1, keepdims=False)
            rank = (cls_loss * (pos - 1)).argsort(axis=1).argsort(axis=1)
            hard_negative = rank < (pos.sum(axis=1) * self._negative_mining_ratio).expand_dims(-1)
            # mask out if not positive or negative
            cls_loss = nd.where((pos + hard_negative) > 0, cls_loss, nd.zeros_like(cls_loss))
            cls_losses.append(nd.sum(cls_loss, axis=0, exclude=True) / num_pos_all)

            bp = _reshape_like(nd, bp, bt)
            box_loss = nd.abs(bp - bt)
            box_loss = nd.where(box_loss > self._rho, box_loss - 0.5 * self._rho,
                                (0.5 / self._rho) * nd.square(box_loss))
            # box loss only apply to positive samples
            box_loss = box_loss * pos.expand_dims(axis=-1)
            box_losses.append(nd.sum(box_loss, axis=0, exclude=True) / num_pos_all)
            sum_losses.append(cls_losses[-1] + self._lambd * box_losses[-1])

        return sum_losses, cls_losses, box_losses
Code example #10
File: utils.py Project: ngochieu642/yolov3-mxnet
def bbox_iou(box1, box2, transform=True):
    """Calculate the IoU Error
    """

    #Change to NDArray if not
    if not isinstance(box1, nd.NDArray):
        box1 = nd.array(box1)
    if not isinstance(box2, nd.NDArray):
        box2 = nd.array(box2)

    #Make sure > 0
    box1 = nd.abs(box1)
    box2 = nd.abs(box2)
    '''Calculate the IoU'''
    if transform:
        tmp_box1 = box1.copy()
        tmp_box1[:, 0] = box1[:, 0] - box1[:, 2] / 2.0
        tmp_box1[:, 1] = box1[:, 1] - box1[:, 3] / 2.0
        tmp_box1[:, 2] = box1[:, 0] + box1[:, 2] / 2.0
        tmp_box1[:, 3] = box1[:, 1] + box1[:, 3] / 2.0
        box1 = tmp_box1

        tmp_box2 = box2.copy()
        tmp_box2[:, 0] = box2[:, 0] - box2[:, 2] / 2.0
        tmp_box2[:, 1] = box2[:, 1] - box2[:, 3] / 2.0
        tmp_box2[:, 2] = box2[:, 0] + box2[:, 2] / 2.0
        tmp_box2[:, 3] = box2[:, 1] + box2[:, 3] / 2.0
        box2 = tmp_box2

    # Get the coordinates of bounding boxes (xStart,yStart,xEnd,yEnd)
    b1_x1, b1_y1, b1_x2, b1_y2 = box1[:, 0], box1[:, 1], box1[:, 2], box1[:, 3]
    b2_x1, b2_y1, b2_x2, b2_y2 = box2[:, 0], box2[:, 1], box2[:, 2], box2[:, 3]

    # get the coordinates of the intersection rectangle
    inter_rect_x1 = nd.where(
        b1_x1 > b2_x1, b1_x1, b2_x1
    )  #if b1_x1 > b2_x1 => x1 of the intersection rectangle must be b1_x1, otherwise it will be b2_x1. Basically it's just a max function!
    inter_rect_y1 = nd.where(b1_y1 > b2_y1, b1_y1, b2_y1)
    inter_rect_x2 = nd.where(b1_x2 < b2_x2, b1_x2, b2_x2)
    inter_rect_y2 = nd.where(b1_y2 < b2_y2, b1_y2, b2_y2)

    # Intersection area
    inter_area = nd.clip(
        inter_rect_x2 - inter_rect_x1 + 1, a_min=0, a_max=10000) * nd.clip(
            inter_rect_y2 - inter_rect_y1 + 1, a_min=0, a_max=10000)

    # Union Area
    b1_area = (b1_x2 - b1_x1 + 1) * (b1_y2 - b1_y1 + 1)
    b2_area = (b2_x2 - b2_x1 + 1) * (b2_y2 - b2_y1 + 1)
    iou = inter_area / (b1_area + b2_area - inter_area)

    return nd.clip(iou, 1e-5, 1. - 1e-5)
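
A brief usage sketch for bbox_iou above: with transform=True the boxes are passed as (cx, cy, w, h) and converted to corner format internally; the +1 in the area terms treats coordinates as inclusive pixel indices (values here are made up):

from mxnet import nd

box1 = nd.array([[50.0, 50.0, 20.0, 20.0]])
box2 = nd.array([[55.0, 50.0, 20.0, 20.0]])
print(bbox_iou(box1, box2))   # partial overlap, roughly 0.62 under the inclusive +1 convention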
Code example #11
def balance_sampler(samples):
    """ignore extra negative samples to keep batch balance"""
    num_pos = nd.sum(samples == 1, axis=0)
    num_neg = nd.sum(samples == 0, axis=0)
    drop_prob = (num_neg - num_pos) / num_neg
    drop_prob = nd.where(nd.lesser(drop_prob, 0), nd.zeros_like(drop_prob),
                         drop_prob)
    mask = nd.where(
        nd.greater(
            nd.random.uniform(0, 1, shape=samples.shape, ctx=samples.context),
            drop_prob), nd.ones_like(samples), nd.zeros_like(samples))
    mask = nd.where(nd.equal(samples, 1), samples, mask)
    return mask
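
balance_sampler above keeps every positive and drops each negative with probability (num_neg - num_pos) / num_neg, so the expected number of surviving negatives matches the number of positives. A small hedged usage sketch with a 1-D label vector:

from mxnet import nd

samples = nd.array([1, 0, 0, 0, 1, 0, 0, 0])   # 2 positives, 6 negatives
mask = balance_sampler(samples)
print(mask)   # positives are always kept; each negative survives with probability ~1/3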
Code example #12
File: box.py Project: ydlstartx/MultiDet
def label_box_cls(match, sample, gt_cls, ignore_label=-1):
    B, N = match.shape
    B, M = gt_cls.shape
    # (B,N,M)
    gt_cls = gt_cls.reshape((B,1,M))
    gt_cls = nd.broadcast_to(gt_cls, (B,N,M))
    # (B,N)
    label_cls = nd.pick(gt_cls, match, axis=-1) + 1
    label_cls = nd.where(sample > 0.5, label_cls, nd.ones_like(label_cls)*ignore_label)
    label_cls = nd.where(sample < -0.5, nd.zeros_like(label_cls), label_cls)
    # (B,N)
    label_mask = label_cls > -0.5
    return label_cls, label_mask
Code example #13
def distanceAATOPO(regions,i,binnum,dibins,dibins4,x,y,ctx):
#Initialize empty arrays for storing the histogram of directions, the sum of distances, and the number of counted pairs in each distance range bin
    co0=nd.zeros(binnum-1,ctx[0],dtype="float32")
    codi0=nd.zeros((5,binnum-1),ctx[0],dtype="float32")
    count0=nd.zeros(binnum-1,ctx[0],dtype="float32")
    count4=nd.zeros((5,binnum-1),ctx[0],dtype="float32")
    co4=nd.zeros((5,binnum-1),ctx[0],dtype="float32")
    
#Calculate index coordinates and directions by chunks
    a=regions[i*broadcdp:min((i+1)*broadcdp,regions.shape[0]),:]
    a1=nd.array(a,ctx[0])
    b1=nd.array([x,y],ctx[0])
    a1_b1=(nd.expand_dims(a1,axis=1)-b1).reshape((-1,2))
    x1_x2=a1_b1[:,0]
    y1_y2=a1_b1[:,1]
#Find the rows where both coordinates equal zero
    boolmask=(x1_x2==0)*(y1_y2==0)
    labels=nd.zeros(boolmask.shape[0],ctx[0],dtype="float32")
    sdi0=(nd.degrees(nd.arctan((y1_y2)/(x1_x2)))+90).reshape((-1,))
    ldis=nd.broadcast_hypot(x1_x2,y1_y2).reshape((-1,))
#Change the zeros into -1
    sdi0=nd.where(condition=boolmask,x=labels-1,y=sdi0)
    ldis=nd.where(condition=boolmask,x=labels-1,y=ldis)
#Change 0 to 180 so the boolean-mask sum can be applied without losing values
    sdi0=nd.where(condition=(sdi0==0),x=labels+180,y=sdi0)
#Store sum of distances co0 and histogram of directions in each range bin
    for p in range (0,binnum-1):
        booleanmask=nd.equal((ldis>=bins[p]),(ldis<bins[p+1]))
        count0[p]+=nd.sum(booleanmask)
        co0[p]+=nd.sum(ldis*booleanmask)
#Exclude values not in the distance range bin
        sdi1=nd.where(condition=(booleanmask==0),x=labels-1,y=sdi0)
        for q in range (0,5):
            booleanmaskdi=nd.equal((sdi1>=dibins[q]),(sdi1<dibins[q+1]))            
            codi0[q,p]+=nd.nansum(booleanmaskdi)
            
    for k in range (0,5):
        booleanmaskdi=nd.equal((sdi0>=dibins4[k]),(sdi0<dibins4[k+1]))
        ldis0=ldis*booleanmaskdi
        for l in range (0,binnum-1):
            booleanmask=nd.equal((ldis0>=bins[l]),(ldis0<bins[l+1]))
            count4[k,l]+=nd.sum(booleanmask)
            co4[k,l]+=nd.sum(ldis0*booleanmask)
            
    codi0[0,:]+=codi0[4,:]
    codi0=codi0[0:4,:]
    count4[0,:]+=count4[4,:]
    count4=count4[0:4,:]
    co4[0,:]+=co4[4,:]
    co4=co4[0:4,:]
    return(co0.asnumpy(),codi0.asnumpy(),count0.asnumpy(),co4.asnumpy(),count4.asnumpy())
Code example #14
def bbox_iou(box1, box2, transform=True, ctx=None):
    '''
        Measure the overlap between the predicted box and the ground-truth box; > 0.5 is a fairly good prediction.
    '''

    ctx = ctx
    if not isinstance(box1, nd.NDArray):
        box1 = nd.array(box1, ctx=ctx)
    if not isinstance(box2, nd.NDArray):
        box2 = nd.array(box2, ctx=ctx)
    box1 = nd.abs(box1)
    box2 = nd.abs(box2)

    if transform:
        tmp_box1 = box1.copy()
        tmp_box1[:, 0] = box1[:, 0] - box1[:, 2] / 2.0
        tmp_box1[:, 1] = box1[:, 1] - box1[:, 3] / 2.0
        tmp_box1[:, 2] = box1[:, 0] + box1[:, 2] / 2.0
        tmp_box1[:, 3] = box1[:, 1] + box1[:, 3] / 2.0
        box1 = tmp_box1
        tmp_box2 = box2.copy()
        tmp_box2[:, 0] = box2[:, 0] - box2[:, 2] / 2.0
        tmp_box2[:, 1] = box2[:, 1] - box2[:, 3] / 2.0
        tmp_box2[:, 2] = box2[:, 0] + box2[:, 2] / 2.0
        tmp_box2[:, 3] = box2[:, 1] + box2[:, 3] / 2.0
        box2 = tmp_box2
    # Get the coordinates of bounding boxes
    b1_x1, b1_y1, b1_x2, b1_y2 = box1[:, 0], box1[:, 1], box1[:, 2], box1[:, 3]
    b2_x1, b2_y1, b2_x2, b2_y2 = box2[:, 0], box2[:, 1], box2[:, 2], box2[:, 3]

    # get the coordinates of the intersection rectangle
    inter_rect_x1 = nd.where(b1_x1 > b2_x1, b1_x1, b2_x1)
    inter_rect_y1 = nd.where(b1_y1 > b2_y1, b1_y1, b2_y1)
    inter_rect_x2 = nd.where(b1_x2 < b2_x2, b1_x2, b2_x2)
    inter_rect_y2 = nd.where(b1_y2 < b2_y2, b1_y2, b2_y2)

    # Intersection area
    inter_area = nd.clip(
        inter_rect_x2 - inter_rect_x1 + 1, a_min=0, a_max=10000) * nd.clip(
            inter_rect_y2 - inter_rect_y1 + 1, a_min=0, a_max=10000)

    # Union Area
    b1_area = (b1_x2 - b1_x1 + 1) * (b1_y2 - b1_y1 + 1)
    b2_area = (b2_x2 - b2_x1 + 1) * (b2_y2 - b2_y1 + 1)
    iou = inter_area / (b1_area + b2_area - inter_area)

    # iou[inter_area >= b1_area] = 0.8
    # iou[inter_area >= b2_area] = 0.8
    return nd.clip(iou, 1e-5, 1. - 1e-5)
Code example #15
 def get_cls_targets(cls_targets_1, cls_targets_2):
     cls_targets = []
     for (cls_target_1, cls_target_2) in zip(cls_targets_1, cls_targets_2):
         cls_target_1_idx = nd.where(cls_target_1 > 0,
                                     nd.ones_like(cls_target_1),
                                     nd.zeros_like(cls_target_1))
         cls_target_2_idx = nd.where(cls_target_2 > 0,
                                     nd.ones_like(cls_target_2),
                                     nd.zeros_like(cls_target_2))
         cls_target_idx = nd.where(cls_target_1_idx == cls_target_2_idx, nd.ones_like(cls_target_1_idx),\
                                   nd.zeros_like(cls_target_1_idx))
         cls_target = nd.where(cls_target_idx, cls_target_1,
                               nd.ones_like(cls_target_1) * -1)
         cls_targets.append(cls_target)
     return cls_targets
Code example #16
File: box.py Project: ydlstartx/MultiDet
def match(iou, threshould=0.5, share_max=False):
    B, N, M = iou.shape
    
    if share_max:
        result = nd.argmax(iou, axis=-1)
        result = nd.where(nd.max(iou, axis=-1) > threshould, result, nd.ones_like(result)*-1)
    else:
        match = [getUniqueMatch(i) for i in iou]
        result = nd.concat(*match, dim=0)
        argmax_row = nd.argmax(iou, axis=-1)
        max_row = nd.max(iou, axis=-1)
        argmax_row = nd.where(max_row > threshould, argmax_row, nd.ones_like(argmax_row)*-1)
        result = nd.where(result > -0.5, result, argmax_row)
        
    return result
Code example #17
File: training.py Project: braindotai/Dr-Deep
def accuracy(predictions, targets):
    # predictions = nd.argmax(predictions, 1)
    # targets = nd.argmax(targets, 1)
    # return nd.mean(nd.equal(predictions, targets)).asscalar() * 100
    predictions = nd.where(predictions > 0.5, nd.ones_like(predictions),
                           nd.zeros_like(predictions))
    return 100 - nd.mean(nd.abs(predictions - targets)).asscalar() * 100
Code example #18
def distance2(regions,i,binnum,bins,ctx):
#Initialize an empty array for storing the number of counted pairs in each distance range bin
    count0=nd.zeros(binnum-1,ctx[0],dtype="float32")
    seed=nd.zeros((1,2),ctx[0])
#Calculate index coordinates and directions by chunks
    a=regions[i[0]*broadcdp:min((i[0]+1)*broadcdp,regions.shape[0]),:]
    b=regions[i[1]*broadcdp:min((i[1]+1)*broadcdp,regions.shape[0]),:]
    a1=nd.array(a,ctx[0])
    b1=nd.array(b,ctx[0])    
    for i in range (a1.shape[0]):
        if i<a1.shape[0]-1:
            a1_b1=(nd.expand_dims(a1[i].reshape((1,2)),axis=1)-b1[i+1:,:]).reshape((a1[i+1:,:].shape[0],2))
            seed=nd.concat(seed,a1_b1,dim=0)
    if seed.shape[0]>1:
        x1_x2=seed[:,0]
        y1_y2=seed[:,1]
#Find the rows where both coordinates equal zero and assign them label -1
        boolmask=(x1_x2==0)*(y1_y2==0)
        labels=nd.zeros(boolmask.shape[0],ctx[0],dtype="float32")-1
        ldis=nd.broadcast_hypot(x1_x2,y1_y2).reshape((-1,))
#Change the zeros into -1
        ldis=nd.where(condition=boolmask,x=labels,y=ldis)
        for p in range (0,binnum-1):
            booleanmask=nd.equal((ldis>=bins[p]),(ldis<bins[p+1]))
            count0[p]+=nd.sum(booleanmask)
    return(count0.asnumpy())
Code example #19
File: box.py Project: ydlstartx/MultiDet
def corner_to_center(box, split=False, eps=1e-12):
    shape = box.shape
    assert len(shape) in (2,3) and shape[-1] == 4
    # (B,N,1) or (N,1)
    xmin, ymin, xmax, ymax = nd.split(box, 4, axis=-1)
    width = xmax - xmin
    height = ymax - ymin
    cx = xmin + width / 2
    cy = ymin + height / 2
    width = nd.where(width==0, nd.full(width.shape, eps), width)
    height = nd.where(height==0, nd.full(height.shape, eps), height)
    result = [cx, cy, width, height]
    if split:
        return result
    else:
        return nd.concat(*result, dim=-1)
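
corner_to_center above converts (xmin, ymin, xmax, ymax) boxes to (cx, cy, w, h), replacing zero widths and heights with eps so later divisions and logs stay finite. A brief usage sketch:

from mxnet import nd

boxes = nd.array([[10., 20., 30., 60.],
                  [ 5.,  5.,  5.,  5.]])   # second box is degenerate (zero width and height)
print(corner_to_center(boxes))
# [[20. 40. 20. 40.]
#  [ 5.  5. ~1e-12 ~1e-12]]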
Code example #20
File: loss.py Project: zhzhuangxue/Gluon-PSENet
    def hybrid_forward(self, F, score_gt, kernel_gt, score_pred, training_masks, *args, **kwargs):
        """
        kernels map's order: [1, ..., 0.5]
        """
        C_pred = score_pred[:, 0, :, :]
        self.pixel_acc = batch_pix_accuracy(C_pred, score_gt)
        # classification loss
        eps = 1e-5
        intersection = F.sum(score_gt * C_pred * training_masks, axis=(1, 2))
        union = F.sum(training_masks * score_gt * score_gt, axis=(1, 2)) + F.sum(training_masks * C_pred * C_pred, axis=(1, 2)) + eps
        
        C_dice_loss = 1. - (2 * intersection) / (union)

        # loss for kernel
        kernel_mask = F.where(training_masks * C_pred > 0.5, F.ones_like(C_pred), F.zeros_like(C_pred))
        kernel_mask = F.expand_dims(kernel_mask, axis=1)
        kernel_mask = F.repeat(kernel_mask, repeats=self.num_kernels-1, axis=1)
        self.kernel_acc = batch_pix_accuracy(score_pred[:, 1, :, :] * score_gt, kernel_gt[:, 0, :, :])
        kernel_intersection = F.sum(kernel_gt * score_pred[:, 1:, :, :] * kernel_mask, axis=(2, 3))
        kernel_union = F.sum(kernel_gt * kernel_gt * kernel_mask, axis=(2, 3)) + F.sum(score_pred[:, 1:, :, :] * score_pred[:, 1:, :, :] * kernel_mask, axis=(2, 3)) + eps
        kernel_dice = 1. - (2 * kernel_intersection) / kernel_union
    
        kernel_dice_loss = F.mean(kernel_dice, axis=1)
        self.C_loss = C_dice_loss
        self.kernel_loss = kernel_dice_loss
        
        loss = self.lam * C_dice_loss + (1. - self.lam) *  kernel_dice_loss

        return loss
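
The classification term above is a dice loss, 1 - 2*sum(gt*pred) / (sum(gt^2) + sum(pred^2)), computed per image over the region selected by training_masks. A minimal sketch of that computation on toy masks (made-up values):

from mxnet import nd

eps = 1e-5
score_gt = nd.array([[1., 1., 0., 0.]])
score_pred = nd.array([[0.9, 0.2, 0.1, 0.0]])
mask = nd.ones_like(score_gt)

intersection = nd.sum(score_gt * score_pred * mask, axis=1)
union = nd.sum(mask * score_gt * score_gt, axis=1) + nd.sum(mask * score_pred * score_pred, axis=1) + eps
dice_loss = 1. - (2 * intersection) / union
print(dice_loss)   # ~0.23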
Code example #21
    def greedy_action_choice(self, context, e=0.10, with_dropout=False):
        """Sample actions randomly with e probability. Exploit with 1-e probability."""

        context = self.check_input(context)
        self.batch_size = context.shape[0]

        _mode = "train" if with_dropout else "predict"
        self.mode = self.modes[_mode]

        take_greedy_choice = nd.array(
            np.random.uniform(0, 1, size=self.batch_size)) > e
        rnd_selections = nd.array(
            np.random.choice(self.selections,
                             size=self.batch_size,
                             replace=True))

        with autograd.record():
            with self.mode:
                yhat = self.forward(context)
                greedy_selections = nd.argmax(yhat, 1)

                actions = nd.where(take_greedy_choice, greedy_selections,
                                   rnd_selections).astype("long")
                preds = yhat.pick(actions)

        return preds, actions
Code example #22
File: hsi_rgb.py Project: tensionfly/colorizenet
def bgr2hsi(x):
    """ x:n,c(b,g,r),w,h
        return n,c(h,s,i),w,h
    """
    sum_RGB = nd.sum(x.astype('float32'), axis=1)
    R = x[:, 0, :, :].astype('float32')
    G = x[:, 1, :, :].astype('float32')
    B = x[:, 2, :, :].astype('float32')

    r = (R + eps) / (sum_RGB + 3 * eps)
    g = (G + eps) / (sum_RGB + 3 * eps)
    b = (B + eps) / (sum_RGB + 3 * eps)

    cossita = (2 * r - g - b) / (2 * ((r - g)**2 + (r - b) *
                                      (g - b))**(1.0 / 2) + eps)
    cossita_clip = nd.clip(cossita, -1.0, 1.0)

    sita = nd.arccos(cossita_clip)

    h = (nd.where(g >= b, sita, 2 * math.pi - sita)).expand_dims(axis=1)

    s = (1 - 3 * nd.minimum(nd.minimum(r, g), b)).expand_dims(axis=1)
    s = nd.clip(s, 0., 1.)

    i = ((R + G + B) / 3).expand_dims(axis=1)

    return nd.concat(h, s, i, dim=1)
Code example #23
    def forward(self, bboxes, anchors, height, width):
        # compute IoUs between anchors and ground-truth boxes
        with autograd.pause():
            ious = mx.nd.contrib.box_iou(anchors, bboxes)
            # drop invalid anchors (those that extend beyond the image boundary)
            x_min, y_min, x_max, y_max = self._spliter(anchors)
            invalid_mask = (x_min < 0) + (y_min < 0) + (x_max >= width) + (
                y_max >= height)
            # set the IoUs of all invalid anchors to -1
            invalid_mask = nd.repeat(invalid_mask,
                                     repeats=bboxes.shape[0],
                                     axis=-1)
            ious = nd.where(invalid_mask > 0, nd.ones_like(ious) * -1, ious)

            # sample the anchors
            samples, matches = self._sampler(ious)
            # now generate the labels

            cls_label, _ = self._cls_encoder(samples)
            targets, masks = self._bbox_encoder(samples.expand_dims(axis=0),
                                                matches.expand_dims(axis=0),
                                                anchors.expand_dims(axis=0),
                                                bboxes.expand_dims(axis=0))

        return cls_label, targets[0], masks[0]
Code example #24
File: model.py Project: heechul90/Mxnet_Learning
def dot_attention(query, key, value, mask, dropout=0.0):
    # query: (batch_size, h, length_q, model_dim/h)
    # key:   (batch_size, h, length_k, model_dim/h)
    # value: (batch_size, h, length_k, model_dim/h)

    query_shape = query.shape
    query = query.reshape(-3, -2)
    key = key.reshape(-3, -2)
    value = value.reshape(-3, -2)

    # matmul, t: (batch_size*h, length_q, length_k)
    t = nd.batch_dot(query, key.swapaxes(1, 2)) / math.sqrt(query.shape[-1])

    # masked
    # mask PAD and future words
    m = nd.full(t.shape, LARGE_NEGATIVE_VALUE)
    mask = nd.ones(t.shape) * mask
    t = nd.where(mask, t, m)

    # softmax
    t = nd.softmax(t, axis=-1)
    if dropout > 0.0:
        t = nd.dropout(t, p=dropout)

    # (batch_size, h, length_q, model_dim/h)
    return nd.batch_dot(t, value).reshape(query_shape)
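
The masking step above replaces disallowed positions with a large negative value before the softmax, so they receive essentially zero attention weight. A standalone sketch of masked softmax with nd.where (LARGE_NEGATIVE_VALUE is defined elsewhere in the original file; -1e9 is assumed here):

from mxnet import nd

LARGE_NEGATIVE_VALUE = -1e9            # assumed value
scores = nd.array([[0.5, 1.0, 2.0]])
mask = nd.array([[1.0, 1.0, 0.0]])     # last position (e.g. PAD) is masked out
masked = nd.where(mask, scores, nd.full(scores.shape, LARGE_NEGATIVE_VALUE))
print(nd.softmax(masked, axis=-1))     # weight on the masked position is ~0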
Code example #25
File: loss.py Project: zhzhuangxue/Gluon-PSENet
    def hybrid_forward(self, F, score_gt, kernel_gt, score_pred, training_masks, *args, **kwargs):

        # cal ohem mask
        selected_masks = []
        for i in range(score_gt.shape[0]):
            # cal for text region
            selected_mask = self._ohem_single(score_gt[i:i+1], score_pred[i:i+1], training_masks[i:i+1])
            selected_masks.append(selected_mask)
        selected_masks = F.concat(*selected_masks, dim=0)

        C_pred = score_pred[:, 0, :, :]
        self.pixel_acc = batch_pix_accuracy(C_pred, score_gt)
        # classification loss
        eps = 1e-5
        intersection = F.sum(score_gt * C_pred * selected_masks, axis=(1, 2))
        union = F.sum(selected_masks * score_gt * score_gt, axis=(1, 2)) + F.sum(selected_masks * C_pred * C_pred, axis=(1, 2)) + eps
        
        C_dice_loss = 1. - (2 * intersection) / (union)

        # loss for kernel
        kernel_mask = F.where(training_masks * C_pred > 0.5, F.ones_like(C_pred), F.zeros_like(C_pred))
        kernel_mask = F.expand_dims(kernel_mask, axis=1)
        kernel_mask = F.repeat(kernel_mask, repeats=self.num_kernels-1, axis=1)
        self.kernel_acc = batch_pix_accuracy(score_pred[:, 1, :, :] * score_gt, kernel_gt[:, 0, :, :])
        kernel_intersection = F.sum(kernel_gt * score_pred[:, 1:, :, :] * kernel_mask, axis=(2, 3))
        kernel_union = F.sum(kernel_gt * kernel_gt * kernel_mask, axis=(2, 3)) + F.sum(score_pred[:, 1:, :, :] * score_pred[:, 1:, :, :] * kernel_mask, axis=(2, 3)) + eps
        kernel_dice = 1. - (2 * kernel_intersection) / kernel_union
    
        kernel_dice_loss = F.mean(kernel_dice, axis=1)
        self.C_loss = C_dice_loss
        self.kernel_loss = kernel_dice_loss
        
        loss = self.lam * C_dice_loss + (1. - self.lam) *  kernel_dice_loss

        return loss
Code example #26
    def forward(self, ious):

        matches = nd.argmax(ious, axis=-1)
        # highest IoU for each anchor
        max_iou_pre_anchor = nd.max(ious, axis=-1)
        # initialize all anchors to 0 (ignore)
        samples = nd.zeros_like(max_iou_pre_anchor)

        # compute the highest IoU for each ground truth
        max_all_ious = nd.max(ious, axis=0, keepdims=True)
        # mark the entries with the highest IoU as 1 in the mask
        mask = nd.broadcast_greater(ious + self._eps, max_all_ious)
        mask = nd.sum(mask, axis=-1)
        # mark the highest-scoring anchors as positive (1)
        samples = nd.where(mask, nd.ones_like(samples), samples)

        # mark samples with IoU above pos_iou_thresh as positive
        samples = nd.where(max_iou_pre_anchor > self._pos_iou_thresh,
                           nd.ones_like(samples), samples)

        # mark samples with IoU below neg_iou_thresh as negative
        tmp = (max_iou_pre_anchor < self._neg_iou_thresh) * (max_iou_pre_anchor
                                                             > 0)

        samples = nd.where(tmp, nd.ones_like(samples) * -1, samples)
        # convert to numpy
        samples = samples.asnumpy()
        # subsample below
        # first subsample the positives
        num_pos = int((samples > 0).sum())
        if num_pos > self._max_pos:
            discard_indices = np.random.choice(np.where((samples > 0))[0],
                                               size=(num_pos - self._max_pos),
                                               replace=False)
            samples[discard_indices] = 0  # set the surplus positives to ignore
        num_neg = int((samples < 0).sum())
        max_neg = self._num_sample - min(self._max_pos, num_pos)

        if num_neg > max_neg:
            discard_indices = np.random.choice(np.where((samples < 0))[0],
                                               size=(num_neg - max_neg),
                                               replace=False)
            samples[discard_indices] = 0

        # finally convert back to an ndarray
        samples = nd.array(samples, ctx=matches.context)
        return samples, matches
Code example #27
def test_where():
    a = nd.ones(shape=(LARGE_X, SMALL_Y))
    b = nd.arange(0, LARGE_X * SMALL_Y).reshape(LARGE_X, SMALL_Y)
    res = nd.where(b > 100, a, b)
    assert np.sum(res[-1].asnumpy() == 1) == b.shape[1]
    csr_cond = nd.sparse.cast_storage(b < 10, 'csr')
    res = nd.sparse.where(csr_cond, a, b)
    assert np.sum(res[0].asnumpy() == 1) == 10
Code example #28
def bbox_iou(box1, box2, transform=True):
    """
    Returns the IoU of two bounding boxes
    """
    box1 = nd.array(box1)
    box2 = nd.array(box2)
    if box1.size == 0 or box2.size == 0:
        raise ValueError
    box1 = nd.abs(box1)
    box2 = nd.abs(box2)
    if transform:
        tmp_box1 = box1.copy()
        tmp_box1[:, 0] = box1[:, 0] - box1[:, 2] / 2.0
        tmp_box1[:, 1] = box1[:, 1] - box1[:, 3] / 2.0
        tmp_box1[:, 2] = box1[:, 0] + box1[:, 2] / 2.0
        tmp_box1[:, 3] = box1[:, 1] + box1[:, 3] / 2.0
        box1 = tmp_box1
        tmp_box2 = box2.copy()
        tmp_box2[:, 0] = box2[:, 0] - box2[:, 2] / 2.0
        tmp_box2[:, 1] = box2[:, 1] - box2[:, 3] / 2.0
        tmp_box2[:, 2] = box2[:, 0] + box2[:, 2] / 2.0
        tmp_box2[:, 3] = box2[:, 1] + box2[:, 3] / 2.0
        box2 = tmp_box2
    # Get the coordinates of bounding boxes
    b1_x1, b1_y1, b1_x2, b1_y2 = box1[:, 0], box1[:, 1], box1[:, 2], box1[:, 3]
    b2_x1, b2_y1, b2_x2, b2_y2 = box2[:, 0], box2[:, 1], box2[:, 2], box2[:, 3]

    # get the coordinates of the intersection rectangle
    inter_rect_x1 = nd.where(b1_x1 > b2_x1, b1_x1, b2_x1)
    inter_rect_y1 = nd.where(b1_y1 > b2_y1, b1_y1, b2_y1)
    inter_rect_x2 = nd.where(b1_x2 < b2_x2, b1_x2, b2_x2)
    inter_rect_y2 = nd.where(b1_y2 < b2_y2, b1_y2, b2_y2)

    # Intersection area
    inter_area = nd.clip(
        inter_rect_x2 - inter_rect_x1 + 1, a_min=0, a_max=10000) * nd.clip(
            inter_rect_y2 - inter_rect_y1 + 1, a_min=0, a_max=10000)

    # Union Area
    b1_area = (b1_x2 - b1_x1 + 1) * (b1_y2 - b1_y1 + 1)
    b2_area = (b2_x2 - b2_x1 + 1) * (b2_y2 - b2_y1 + 1)
    iou = inter_area / (b1_area + b2_area - inter_area)
    # iou[inter_area >= b1_area] = 0.8
    # iou[inter_area >= b2_area] = 0.8
    return nd.clip(iou, 1e-5, 1. - 1e-5)
Code example #29
 def mean(self) -> Tensor:
     result = (
         self.mu.exp() * self.sigma * np.pi / (self.sigma * np.pi).sin()
     )
     # Expectation diverges if sigma > 1
     return nd.where(
         self.sigma > 1.0, nd.full(result.shape, np.inf), result
     )
Code example #30
    def forward(self, x, target):
        assert x.shape[1] == self.size #sequence length

        with autograd.pause():
            true_dist = nd.zeros_like(x) + self.smoothing / (self.size - 2)
            target_mask = nd.zeros_like(true_dist)
            for r, c in enumerate(target):
                target_mask[r,c] = 1
            true_dist = nd.where(target_mask, nd.zeros_like(true_dist) + self.confidence, true_dist)
            true_dist[:, self.padding_idx] = 0
            mask = nd.equal(target,self.padding_idx)

            if len(mask.shape) > 0:
                true_dist = nd.where(nd.squeeze(mask), nd.zeros_like(true_dist), true_dist)

        self.true_dist = true_dist
        return self.criterion(x, true_dist.as_in_context(cfg.ctx))
Code example #31
File: loss.py Project: xiayongtao/gluon-cv
    def forward(self, cls_pred, box_pred, cls_target, box_target):
        """Compute loss in entire batch across devices."""
        # require results across different devices at this time
        cls_pred, box_pred, cls_target, box_target = [_as_list(x) \
            for x in (cls_pred, box_pred, cls_target, box_target)]
        # cross device reduction to obtain positive samples in entire batch
        num_pos = []
        for cp, bp, ct, bt in zip(*[cls_pred, box_pred, cls_target, box_target]):
            pos_samples = (ct > 0)
            num_pos.append(pos_samples.sum())
        num_pos_all = sum([p.asscalar() for p in num_pos])
        if num_pos_all < 1 and self._min_hard_negatives < 1:
            # no positive samples and no hard negatives, return dummy losses
            cls_losses = [nd.sum(cp * 0) for cp in cls_pred]
            box_losses = [nd.sum(bp * 0) for bp in box_pred]
            sum_losses = [nd.sum(cp * 0) + nd.sum(bp * 0) for cp, bp in zip(cls_pred, box_pred)]
            return sum_losses, cls_losses, box_losses


        # compute element-wise cross entropy loss and sort, then perform negative mining
        cls_losses = []
        box_losses = []
        sum_losses = []
        for cp, bp, ct, bt in zip(*[cls_pred, box_pred, cls_target, box_target]):
            pred = nd.log_softmax(cp, axis=-1)
            pos = ct > 0
            cls_loss = -nd.pick(pred, ct, axis=-1, keepdims=False)
            rank = (cls_loss * (pos - 1)).argsort(axis=1).argsort(axis=1)
            hard_negative = rank < nd.maximum(self._min_hard_negatives, pos.sum(axis=1)
                                              * self._negative_mining_ratio).expand_dims(-1)
            # mask out if not positive or negative
            cls_loss = nd.where((pos + hard_negative) > 0, cls_loss, nd.zeros_like(cls_loss))
            cls_losses.append(nd.sum(cls_loss, axis=0, exclude=True) / max(1., num_pos_all))

            bp = _reshape_like(nd, bp, bt)
            box_loss = nd.abs(bp - bt)
            box_loss = nd.where(box_loss > self._rho, box_loss - 0.5 * self._rho,
                                (0.5 / self._rho) * nd.square(box_loss))
            # box loss only apply to positive samples
            box_loss = box_loss * pos.expand_dims(axis=-1)
            box_losses.append(nd.sum(box_loss, axis=0, exclude=True) / max(1., num_pos_all))
            sum_losses.append(cls_losses[-1] + self._lambd * box_losses[-1])

        return sum_losses, cls_losses, box_losses
Code example #32
def test_where():
    a = nd.ones(shape=(LARGE_X, SMALL_Y))
    b = nd.arange(0, LARGE_X).reshape(LARGE_X, 1)
    b = nd.broadcast_to(b, shape=(b.shape[0], SMALL_Y))
    res = nd.where(b > 100, a, b)
    assert np.sum(res[-1].asnumpy() == 1) == b.shape[1]

    csr_cond = nd.sparse.cast_storage(b < 10, 'csr')
    res = nd.sparse.where(csr_cond, a, b)
    assert np.sum(res[0].asnumpy() == 1) == b.shape[1]
Code example #33
File: func.py Project: chr5tphr/ecGAN
def fuzzy_one_hot(arr, size):
    x = arr.reshape((-1, ))
    return nd.where(nd.one_hot(x, size),
                    nd.uniform(low=0.7, high=1.2, shape=(x.shape[0], size), ctx=x.context),
                    nd.uniform(low=0.0, high=0.3, shape=(x.shape[0], size), ctx=x.context))
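
fuzzy_one_hot above builds soft one-hot labels of the kind often used for GAN label smoothing: the hot entry is drawn from U(0.7, 1.2) and all other entries from U(0.0, 0.3). A brief usage sketch, assuming the function is in scope:

from mxnet import nd

labels = nd.array([0, 2, 1])
print(fuzzy_one_hot(labels, 3))
# each row has one entry in [0.7, 1.2) at the label index and the rest in [0.0, 0.3)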