コード例 #1
0
def distanceAA2(regions,i,binnum,dibins,dibins4):
    """Accumulate distance/direction histograms for one pair of coordinate chunks.

    Slices chunks i[0] and i[1] out of ``regions`` (chunk size: module-level
    ``broadcdp``), forms the offset of every point in chunk a against all
    *later* points in chunk b (unique pairs only), then bins pair distances
    into the module-level ``bins`` edges and pair directions (degrees in
    (0, 180]) into the ``dibins`` / ``dibins4`` edges.  All work runs on gpu(0).

    Returns a 5-tuple of NDArrays:
        co0    -- (binnum-1,)   summed distances per distance bin
        codi0  -- (4, binnum-1) pair counts per direction bin per distance bin
        count0 -- (binnum-1,)   pair counts per distance bin
        co4    -- (4, binnum-1) summed distances per direction/distance bin
        count4 -- (4, binnum-1) pair counts per direction/distance bin
    """
#Initiate empty arrays for the direction histograms, summed distances, and
#pair counts in each distance-range bin (5 direction rows; the 5th is folded
#into the 1st before returning).
    co0=nd.zeros(binnum-1,gpu(0),dtype="float32")
    codi0=nd.zeros((5,binnum-1),gpu(0),dtype="float32")
    count0=nd.zeros(binnum-1,gpu(0),dtype="float32")
    count4=nd.zeros((5,binnum-1),gpu(0),dtype="float32")
    co4=nd.zeros((5,binnum-1),gpu(0),dtype="float32")
    # Accumulator for pair offsets; the dummy first row is skipped below.
    seed=nd.zeros((1,2),gpu(0))
#Slice this call's chunks out of `regions` and move them onto the GPU.
    a=regions[i[0]*broadcdp:min((i[0]+1)*broadcdp,regions.shape[0]),:]
    b=regions[i[1]*broadcdp:min((i[1]+1)*broadcdp,regions.shape[0]),:]
    a1=nd.array(a,gpu(0))
    b1=nd.array(b,gpu(0))
#    print ("a1",a1,"b1",b1)
    # Offsets of each point against all later points (upper-triangle pairs).
    for ii in range (a1.shape[0]-1):
        a1_b1=(nd.expand_dims(a1[ii].reshape((1,2)),axis=1)-b1[ii+1:,:]).reshape((a1[ii+1:,:].shape[0],2))
        seed=nd.concat(seed,a1_b1,dim=0)
    if seed.shape[0]>1:
        x1_x2=seed[1:,0]
        y1_y2=seed[1:,1]
        # All-zeros helper used to build the sentinel/replacement arrays below.
        labels=nd.zeros(x1_x2.shape[0],gpu(0),dtype="float32")
        # Pair direction: arctan's (-90, 90) degrees shifted into (0, 180).
        sdi0=(nd.degrees(nd.arctan((y1_y2)/(x1_x2)))+90).reshape((-1,))
        ldis=nd.broadcast_hypot(x1_x2,y1_y2).reshape((-1,))

#Change 0 to 180 so the boolean-mask sums below do not lose those pairs.
        sdi0=nd.where(condition=(sdi0==0),x=labels+180,y=sdi0)

#Store sum of distances co0 and histogram of directions in each range bin.
        for p in range (0,binnum-1):
            # equal() of the two edge masks acts as AND here: with ordered
            # bin edges the masks agree (both 1) only for in-bin values.
            booleanmask=nd.equal((ldis>=bins[p]),(ldis<bins[p+1]))
            count0[p]+=nd.nansum(booleanmask)
            co0[p]+=nd.nansum(ldis*booleanmask)

#Exclude values not in this distance-range bin by marking them -1.
            sdi1=nd.where(condition=(booleanmask==0),x=labels-1,y=sdi0)
            for q in range (0,5):
                booleanmaskdi=nd.equal((sdi1>=dibins[q]),(sdi1<dibins[q+1]))            
                codi0[q,p]+=nd.nansum(booleanmaskdi)
            
        # Per direction bin (dibins4), histogram the distances again.
        for k in range (0,5):
            booleanmaskdi=nd.equal((sdi0>=dibins4[k]),(sdi0<dibins4[k+1]))
            # NOTE(review): masked-out pairs become distance 0 here, so a bin
            # whose range covers 0 would also count excluded pairs -- this
            # assumes bins[0] > 0; confirm against the caller.
            ldis0=ldis*booleanmaskdi
            for l in range (0,binnum-1):
                booleanmask=nd.equal((ldis0>=bins[l]),(ldis0<bins[l+1]))
                count4[k,l]+=nd.nansum(booleanmask)
                co4[k,l]+=nd.nansum(ldis0*booleanmask)

    # Fold the 5th direction row into the 1st (presumably the 0/180 degree
    # wrap-around bin -- confirm against the dibins edges) and keep 4 rows.
    codi0[0,:]+=codi0[4,:]
    codi0=codi0[0:4,:]
    count4[0,:]+=count4[4,:]
    count4=count4[0:4,:]
    co4[0,:]+=co4[4,:]
    co4=co4[0:4,:]
    return(co0,codi0,count0,co4,count4)
コード例 #2
0
        def log_softmax_likelihood(yhat_linear, y):
            """Log-likelihood of labels ``y`` under the log-softmax of ``yhat_linear``.

            Both arguments are NDArrays; the product is summed over every
            axis except axis 0 (one value per row).
            """
            log_probs = nd.log_softmax(yhat_linear)
            weighted = y * log_probs
            return nd.nansum(weighted, axis=0, exclude=True)
コード例 #3
0
def distanceAATOPO(regions,i,binnum,dibins,dibins4,x,y,ctx):
    """Distance/direction histograms of chunk i of ``regions`` against point (x, y).

    The offset of every point in the chunk (chunk size: module-level
    ``broadcdp``) to the single point (x, y) is computed on device ctx[0].
    Zero offsets (the point paired with itself) are flagged and excluded by
    setting their direction and distance to -1, which falls outside every
    bin.  Distance-bin edges come from module-level ``bins``; direction
    edges from ``dibins`` and ``dibins4``.

    Returns a 5-tuple of NumPy arrays:
        co0    -- (binnum-1,)   summed distances per distance bin
        codi0  -- (4, binnum-1) pair counts per direction bin per distance bin
        count0 -- (binnum-1,)   pair counts per distance bin
        co4    -- (4, binnum-1) summed distances per direction/distance bin
        count4 -- (4, binnum-1) pair counts per direction/distance bin
    """
#Initiate empty arrays for the direction histograms, summed distances, and
#pair counts in each distance-range bin (5 direction rows; the 5th is folded
#into the 1st before returning).
    co0=nd.zeros(binnum-1,ctx[0],dtype="float32")
    codi0=nd.zeros((5,binnum-1),ctx[0],dtype="float32")
    count0=nd.zeros(binnum-1,ctx[0],dtype="float32")
    count4=nd.zeros((5,binnum-1),ctx[0],dtype="float32")
    co4=nd.zeros((5,binnum-1),ctx[0],dtype="float32")
    
#Slice this call's chunk out of `regions` and move it onto the device.
    a=regions[i*broadcdp:min((i+1)*broadcdp,regions.shape[0]),:]
    a1=nd.array(a,ctx[0])
    b1=nd.array([x,y],ctx[0])
    # Offset of every chunk point to the single reference point.
    a1_b1=(nd.expand_dims(a1,axis=1)-b1).reshape((-1,2))
    x1_x2=a1_b1[:,0]
    y1_y2=a1_b1[:,1]
#Find the rows where both components are zero (self-pair).
    boolmask=(x1_x2==0)*(y1_y2==0)
    # All-zeros helper used to build the sentinel/replacement arrays below.
    labels=nd.zeros(boolmask.shape[0],ctx[0],dtype="float32")
    # Pair direction: arctan's (-90, 90) degrees shifted into (0, 180).
    sdi0=(nd.degrees(nd.arctan((y1_y2)/(x1_x2)))+90).reshape((-1,))
    ldis=nd.broadcast_hypot(x1_x2,y1_y2).reshape((-1,))
#Replace self-pairs with -1 so they fall outside every bin.
    sdi0=nd.where(condition=boolmask,x=labels-1,y=sdi0)
    ldis=nd.where(condition=boolmask,x=labels-1,y=ldis)
#Change 0 to 180 so the boolean-mask sums below do not lose those pairs.
    sdi0=nd.where(condition=(sdi0==0),x=labels+180,y=sdi0)
#Store sum of distances co0 and histogram of directions in each range bin.
    for p in range (0,binnum-1):
        # equal() of the two edge masks acts as AND: with ordered bin edges
        # the masks agree (both 1) only for in-bin values.
        booleanmask=nd.equal((ldis>=bins[p]),(ldis<bins[p+1]))
        # NOTE(review): this function mixes nd.sum here with nd.nansum
        # below/elsewhere -- confirm the distinction is intentional.
        count0[p]+=nd.sum(booleanmask)
        co0[p]+=nd.sum(ldis*booleanmask)
#Exclude values not in this distance-range bin by marking them -1.
        sdi1=nd.where(condition=(booleanmask==0),x=labels-1,y=sdi0)
        for q in range (0,5):
            booleanmaskdi=nd.equal((sdi1>=dibins[q]),(sdi1<dibins[q+1]))            
            codi0[q,p]+=nd.nansum(booleanmaskdi)
            
    # Per direction bin (dibins4), histogram the distances again.
    for k in range (0,5):
        booleanmaskdi=nd.equal((sdi0>=dibins4[k]),(sdi0<dibins4[k+1]))
        # NOTE(review): masked-out pairs become distance 0 here; assumes
        # bins[0] > 0 so they are never counted -- confirm in the caller.
        ldis0=ldis*booleanmaskdi
        for l in range (0,binnum-1):
            booleanmask=nd.equal((ldis0>=bins[l]),(ldis0<bins[l+1]))
            count4[k,l]+=nd.sum(booleanmask)
            co4[k,l]+=nd.sum(ldis0*booleanmask)
            
    # Fold the 5th direction row into the 1st (presumably the 0/180 degree
    # wrap-around bin -- confirm against the dibins edges) and keep 4 rows.
    codi0[0,:]+=codi0[4,:]
    codi0=codi0[0:4,:]
    count4[0,:]+=count4[4,:]
    count4=count4[0:4,:]
    co4[0,:]+=co4[4,:]
    co4=co4[0:4,:]
    return(co0.asnumpy(),codi0.asnumpy(),count0.asnumpy(),co4.asnumpy(),count4.asnumpy())
コード例 #4
0
def total_loss(output, params, mus, sigmas, label_one_hot, log_prior):
    """Variational-Bayes objective: scaled KL estimate minus data log-likelihood.

    Relies on the module-level ``num_batches`` constant and ``log_gaussian``
    helper.  ``params``, ``mus`` and ``sigmas`` are parallel sequences.
    """
    # Data term: log-likelihood of the one-hot labels under the softmax.
    log_likelihood_s = nd.sum(
        nd.nansum(label_one_hot * nd.log_softmax(output), axis=0,
                  exclude=True))
    # Prior term, totalled over all parameter tensors.
    log_prior_sum = sum(nd.sum(log_prior(p)) for p in params)
    # Variational posterior term, totalled over all parameter tensors.
    log_var_posterior_sum = sum(
        nd.sum(log_gaussian(params[idx], mus[idx], sigmas[idx]))
        for idx in range(len(params)))
    kl_term = 1.0 / num_batches * (log_var_posterior_sum - log_prior_sum)
    return kl_term - log_likelihood_s
コード例 #5
0
def distance11(regions_high,regions_low,i,binnum,bins):
    """Count cross-set point pairs falling in each distance-range bin.

    Chunk i[0] of ``regions_high`` is paired against chunk i[1] of
    ``regions_low`` (chunk size: module-level ``broadcdp``); all work runs
    on gpu(0).  Returns a (binnum-1,) NDArray of pair counts.
    """
    counts = nd.zeros(binnum - 1, gpu(0), dtype="float32")
    # Slice this call's chunks out of each point set and move them to GPU.
    hi_chunk = regions_high[i[0] * broadcdp:min((i[0] + 1) * broadcdp, regions_high.shape[0]), :]
    lo_chunk = regions_low[i[1] * broadcdp:min((i[1] + 1) * broadcdp, regions_low.shape[0]), :]
    chunk_a = nd.array(hi_chunk, gpu(0))
    chunk_b = nd.array(lo_chunk, gpu(0))
    # All pairwise coordinate differences via broadcasting.
    diffs = (nd.expand_dims(chunk_a, axis=1) - chunk_b).reshape((-1, 2))
    dists = nd.broadcast_hypot(diffs[:, 0], diffs[:, 1]).reshape((-1,))
    for b in range(0, binnum - 1):
        # equal() of the two edge masks is 1 exactly when both hold (in-bin).
        in_bin = nd.equal((dists >= bins[b]), (dists < bins[b + 1]))
        counts[b] += nd.nansum(in_bin)
    return (counts)
コード例 #6
0
def distance2(regions,i,binnum,bins):
    """Count unique within-set point pairs falling in each distance-range bin.

    Chunks i[0] and i[1] of ``regions`` (chunk size: module-level
    ``broadcdp``) are paired upper-triangle style (each point against all
    later points) on gpu(0).  Returns a (binnum-1,) NDArray of pair counts.
    """
    counts = nd.zeros(binnum - 1, gpu(0), dtype="float32")
    # Offset accumulator; the dummy first row is dropped before binning.
    pair_diffs = nd.zeros((1, 2), gpu(0))
    # Slice this call's chunks out of `regions` and move them to GPU.
    chunk_a = nd.array(regions[i[0] * broadcdp:min((i[0] + 1) * broadcdp, regions.shape[0]), :], gpu(0))
    chunk_b = nd.array(regions[i[1] * broadcdp:min((i[1] + 1) * broadcdp, regions.shape[0]), :], gpu(0))
    # Differences of each point against all later points (unique pairs only).
    for row in range(chunk_a.shape[0] - 1):
        tail = chunk_b[row + 1:, :]
        d = (nd.expand_dims(chunk_a[row].reshape((1, 2)), axis=1) - tail).reshape((chunk_a[row + 1:, :].shape[0], 2))
        pair_diffs = nd.concat(pair_diffs, d, dim=0)
    if pair_diffs.shape[0] > 1:
        dists = nd.broadcast_hypot(pair_diffs[1:, 0], pair_diffs[1:, 1]).reshape((-1,))
        for b in range(0, binnum - 1):
            # equal() of the edge masks is 1 exactly when both hold (in-bin).
            in_bin = nd.equal((dists >= bins[b]), (dists < bins[b + 1]))
            counts[b] += nd.nansum(in_bin)
    return (counts)
コード例 #7
0
File: nn4_vb.py  Project: j1nma/counterfactuals
    def hybrid_forward(self,
                       F,
                       output,
                       label,
                       params,
                       lambdas,
                       sigmas,
                       num_batches,
                       sample_weight=None):
        """Return the KL term of the variational objective for one batch.

        The prior named by ``self.log_prior`` ("gaussian", "scale_mixture"
        or "exponential") is summed over all parameter samples and
        subtracted from the summed log-exponential variational posterior;
        the difference is scaled by 1/num_batches.

        NOTE(review): only the KL term is returned (the negative
        log-likelihood term used with the gaussian setup is omitted here,
        per the exponential-prior variant).  ``sigmas``, ``label``,
        ``output`` and ``sample_weight`` are currently unused in this path.
        """
        # Pick the log-prior density function by name; stays None (and will
        # fail below) if the name is unrecognised.
        prior = None
        if self.log_prior == "gaussian":
            prior = self.gaussian_prior
        elif self.log_prior == "scale_mixture":
            prior = self.scale_mixture_prior
        elif self.log_prior == "exponential":
            prior = self.exponential_prior

        # Prior log-density, totalled over every parameter tensor.
        log_prior_sum = sum(
            nd.sum(prior(mx.nd.array(param))) for param in params)

        # Variational posterior: log-exponential density of each parameter
        # under its rate lambda, nansum'd per tensor then totalled.
        log_var_posterior_sum = sum(
            nd.nansum(
                self.log_exponential(mx.nd.array(params[idx]),
                                     mx.nd.array(lambdas[idx])))
            for idx in range(len(params)))

        kl_loss = (1.0 / num_batches) * (log_var_posterior_sum - log_prior_sum)
        return kl_loss
コード例 #8
0
def log_loss(output, y):
    """Binary cross-entropy of targets ``y`` against ``logistic(output)``."""
    prob = logistic(output)
    pos_term = y * nd.log(prob)
    neg_term = (1 - y) * nd.log(1 - prob)
    return -nd.nansum(pos_term + neg_term)
コード例 #9
0
File: mlp_train.py  Project: liangfu/mxnet-sparse-mlp
def softmax_cross_entropy(yhat_linear, y):
    """Cross-entropy between labels ``y`` and the softmax of ``yhat_linear``.

    Summed over every axis except axis 0 (one value per row).
    """
    log_probs = nd.log_softmax(yhat_linear)
    return -nd.nansum(y * log_probs, axis=0, exclude=True)
コード例 #10
0
File: mlp_train.py  Project: liangfu/mxnet-sparse-mlp
def cross_entropy(yhat, y):
    """Cross-entropy of labels ``y`` against predicted probabilities ``yhat``.

    Summed over every axis except axis 0 (one value per row).
    """
    weighted_log = y * nd.log(yhat)
    return -nd.nansum(weighted_log, axis=0, exclude=True)
コード例 #11
0
File: mlp_train.py  Project: liangfu/mxnet-sparse-mlp
def softmax(y_linear):
    """Row-wise softmax of ``y_linear``, stabilised by subtracting the global max."""
    shifted = y_linear - nd.max(y_linear)
    exps = nd.exp(shifted)
    # Per-row normaliser: sum over every axis except 0, kept as a column.
    partition = nd.nansum(exps, axis=0, exclude=True).reshape((-1, 1))
    return exps / partition
コード例 #12
0
def softmax1(vector):
    """Softmax over all elements of ``vector`` (global max subtracted for stability)."""
    shifted_exp = nd.exp(vector - nd.max(vector))
    total = nd.nansum(shifted_exp)
    return shifted_exp / total
コード例 #13
0
 def softmax_cross_entropy(self, yhat_linear, y):
     """Cross-entropy between labels ``y`` and the softmax of ``yhat_linear``, summed over all elements."""
     weighted = y * nd.log_softmax(yhat_linear)
     return -nd.nansum(weighted)
def log_loss(output, y):
    """Binary cross-entropy loss; ``logistic`` squashes ``output`` into (0, 1)."""
    prob = logistic(output)  # sigmoid squashing
    return -nd.nansum(y * nd.log(prob) + (1 - y) * nd.log(1 - prob))
コード例 #15
0
File: glu.py  Project: kbtksk/mx-sandbox
def softmax(x):
    """Row-wise softmax of NDArray ``x``, stabilised by subtracting the global max.

    Fix: the original called ``np.exp`` on an MXNet NDArray while every
    other value here flows through ``nd`` (including the sibling softmax
    implementations in this file); NumPy ufuncs do not operate on MXNet
    NDArrays, so use ``nd.exp`` to keep the whole computation in MXNet and
    return an NDArray.
    """
    exp = nd.exp(x - nd.max(x))  # shift by global max for numerical stability
    # Per-row normaliser: sum over every axis except 0, kept as a column.
    partition = nd.nansum(exp, axis=0, exclude=True).reshape((-1, 1))
    return exp / partition
 def log_softmax_likelihood(self, yhat_linear, y):
     """Log-likelihood of labels ``y`` under the log-softmax of ``yhat_linear``, summed over all axes except 0."""
     log_probs = nd.log_softmax(yhat_linear)
     return nd.nansum(y * log_probs, axis=0, exclude=True)
コード例 #17
0
 def sum_squared_error(self, yhat, y):
     """Sum of squared residuals between ``y`` and ``yhat`` over every axis except 0."""
     residual = y - yhat
     return nd.nansum(nd.power(residual, 2), axis=0, exclude=True)
コード例 #18
0
def tderror(rt, qval2, qval1, l):
    """Summed squared temporal-difference error: (rt + l*qval2 - qval1)^2."""
    delta = rt + l * qval2 - qval1
    return nd.nansum(delta ** 2)