import numpy as sp  # `sp` is the scipy/numpy namespace; these ufuncs live in numpy on modern installs


def nms(dets, proba, T):
    """Greedy non-maximum suppression: keep the highest-scoring boxes and
    drop any box whose IoU with an already-kept box exceeds T."""
    dets = dets.astype("float")
    if len(dets) == 0:
        return []

    x1 = dets[:, 0]
    y1 = dets[:, 1]
    x2 = dets[:, 2]
    y2 = dets[:, 3]
    scores = proba

    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.argsort()[::-1]  # descending score order

    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        # Intersection of the top-scoring box with all remaining boxes
        xx1 = sp.maximum(x1[i], x1[order[1:]])
        yy1 = sp.maximum(y1[i], y1[order[1:]])
        xx2 = sp.minimum(x2[i], x2[order[1:]])
        yy2 = sp.minimum(y2[i], y2[order[1:]])

        w = sp.maximum(0.0, xx2 - xx1 + 1)
        h = sp.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h
        ovr = inter / (areas[i] + areas[order[1:]] - inter)  # IoU

        # Keep only the boxes whose overlap with box i is at most T
        inds = sp.where(ovr <= T)[0]
        order = order[inds + 1]
    return keep

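# A minimal usage sketch (not from the original source): three boxes, the
# second overlapping the first heavily, checked at an IoU threshold of 0.5.
boxes = sp.array([[10., 10., 50., 50.],
                  [12., 12., 52., 52.],       # IoU with box 0 is ~0.83
                  [100., 100., 150., 150.]])
scores = sp.array([0.9, 0.8, 0.7])
print(nms(boxes, scores, T=0.5))  # -> [0, 2]; box 1 is suppressed
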
def test_Wells_and_Coppersmith_94(self):
    Wells_and_Copper_94 = conversion_functions['Wells_and_Coppersmith_94']
    calc_max_width_in_slab = conversion_functions['calc_max_width_in_slab']
    fault_type = 'reverse'
    max_width = 15
    max_length = 20
    Mw = array([4.1, 5.2, 6.3, 5.3, 9.5, 6.0, 3.3, 7])
    (area, width) = Wells_and_Copper_94(fault_type, Mw, max_width)
    length = minimum((area / width), max_length)
    msg = 'Unexpected width or length values'
    self.failUnlessAlmostEqual(length[2], 16.25548756, 7, msg)
    self.failUnlessAlmostEqual(width[2], 9.39723311, 7, msg)

    slab_width = 8
    out_of_dip = array([0.0, 10, 90, 30, 110.0, 0.0, 111.0, 90.0])
    (area, width) = Wells_and_Copper_94(fault_type, Mw, max_width)
    # Additionally cap the width by what fits in the slab at each angle
    width = minimum(
        calc_max_width_in_slab(out_of_dip, slab_width, max_width), width)
    length = minimum((area / width), max_length)
    self.failUnlessAlmostEqual(length[2], 19.09457573, 1, msg)
    self.failUnlessAlmostEqual(width[2], 8., 1, msg)

import numpy as S  # `S` alias for the scipy/numpy namespace used by this snippet


def wPGP_single(gt, in_pred):
    """wPGP score for a single target: 0.5 + 0.5 * (reward - penalty)."""
    if gt.ndim == 2:
        gt = gt[0]
    r_max = gt.max()
    in_pred = S.minimum(in_pred, 1.0)
    # Reward: agreement between prediction and ground truth
    reward = (gt * S.minimum(gt, in_pred)).sum() / S.power(gt, 2.0).sum()
    # Penalty: over-prediction, weighted by distance from the maximum
    penalty_num = ((r_max - gt)
                   * (in_pred - gt)
                   * S.array(in_pred > gt, dtype=S.float64)).sum()
    penalty_denom = S.power(r_max - gt, 2.0).sum()
    penalty = penalty_num / penalty_denom
    wpgp_score = 0.5 + 0.5 * (reward - penalty)
    # assert 0.0 <= wpgp_score <= 1.0, "PGP score not in acceptable range"
    if wpgp_score > 1.0 or wpgp_score < 0.0:
        import sys
        sys.stderr.write(
            "There was a problem with the pgp score {}\n".format(wpgp_score))
        return 0.5
    return wpgp_score

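# Hypothetical sanity check (not from the original source): a prediction
# identical to the ground truth earns the maximum wPGP score of 1.0, since
# the reward term is 1 and the over-prediction penalty vanishes.
gt = S.array([0.0, 0.5, 1.0])
print(wPGP_single(gt, gt.copy()))  # -> 1.0
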
import numpy as sp  # `sp` is the scipy/numpy namespace; these ufuncs live in numpy


def nms(boxes, T=0.5):
    """Non-maximum suppression without scores: boxes are processed from the
    bottom-most box upward and suppressed at IoU above T."""
    if len(boxes) == 0:
        return []
    boxes = boxes.astype("float")
    pick = []

    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 2]
    y2 = boxes[:, 3]
    area = (x2 - x1 + 1) * (y2 - y1 + 1)
    idxs = sp.argsort(y2)  # sort by bottom-right y-coordinate

    while len(idxs) > 0:
        last = len(idxs) - 1
        i = idxs[last]
        pick.append(i)
        # Intersection of the current box with all remaining boxes
        xx1 = sp.maximum(x1[i], x1[idxs[:last]])
        yy1 = sp.maximum(y1[i], y1[idxs[:last]])
        xx2 = sp.minimum(x2[i], x2[idxs[:last]])
        yy2 = sp.minimum(y2[i], y2[idxs[:last]])
        w = sp.maximum(0, xx2 - xx1 + 1)
        h = sp.maximum(0, yy2 - yy1 + 1)
        I = w * h
        # IoU rather than the one-sided ratio:
        # overlap_ratio = I / area[idxs[:last]]
        overlap_ratio = I / (area[i] + area[idxs[:last]] - I)
        # Drop the current box plus everything it suppresses
        idxs = sp.delete(
            idxs, sp.concatenate(([last], sp.where(overlap_ratio > T)[0])))
    return boxes[pick].astype("int")

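# Usage sketch (not from the original source) for this score-free variant:
# without confidence scores, the processing order comes from the bottom-most
# (largest y2) box, per the classic Malisiewicz-style recipe.
boxes = sp.array([[10, 10, 50, 50],
                  [12, 12, 52, 52],
                  [100, 100, 150, 150]])
print(nms(boxes, T=0.5))  # two boxes survive, returned as int coordinates
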
def calcglobalcost_WDTW(self, Wmax, g):
    """Calculate the weighted DTW (WDTW) global cost matrix.

    Takes the maximum weight Wmax and the restriction constant g (the
    smaller g is, the more constrained the DTW becomes). Note: `math` is
    assumed to be the NumPy namespace here; square, exp, absolute and
    minimum are NumPy functions, not stdlib math.
    """
    self.Globalcost_[0, 0] = math.square(self.FirstVector_[0]
                                         - self.SecondVector_[0])
    temp = 0
    # First row: cumulative cost along FirstVector_
    for i in range(1, self.FirstVector_.shape[0]):
        weight = Wmax / (1 + math.exp(-g))
        self.Globalcost_[0, i] = math.square(
            weight * (self.FirstVector_[i]
                      - self.SecondVector_[0])) + self.Globalcost_[0, i - 1]
    # First column: cumulative cost along SecondVector_
    for i in range(1, self.SecondVector_.shape[0]):
        weight = Wmax / (1 + math.exp(-g))
        self.Globalcost_[i, 0] = math.square(
            weight * (self.SecondVector_[i]
                      - self.FirstVector_[0])) + self.Globalcost_[i - 1, 0]
    # Interior: the weight grows with the phase difference |i - j|
    for i in range(1, self.SecondVector_.shape[0]):
        for j in range(1, self.FirstVector_.shape[0]):
            weight = Wmax / (1 + math.exp(-g * math.absolute(i - j)))
            self.Globalcost_[i, j] = math.square(
                weight * (self.FirstVector_[j] - self.SecondVector_[i]))
            temp = math.minimum(self.Globalcost_[i - 1, j],
                                self.Globalcost_[i, j - 1])
            temp = math.minimum(self.Globalcost_[i - 1, j - 1], temp)
            self.Globalcost_[i, j] += temp

def fitPairwiseModel(Y, XX=None, S_XX=None, U_XX=None, verbose=False):
    N, P = Y.shape
    # 1. initialize parameters from the single-trait fits
    RV = fitSingleTraitModel(Y, XX=XX, S_XX=S_XX, U_XX=U_XX, verbose=verbose)
    Cg = covariance.freeform(2)
    Cn = covariance.freeform(2)
    gp = gp2kronSum(mean(Y[:, 0:2]), Cg, Cn, XX=XX, S_XX=S_XX, U_XX=U_XX)
    conv2 = SP.ones((P, P), dtype=bool)
    rho_g = SP.ones((P, P))
    rho_n = SP.ones((P, P))
    # 2. fit each trait pair and record genetic/noise correlations
    for p1 in range(P):
        for p2 in range(p1):
            if verbose:
                print('.. fitting correlation (%d,%d)' % (p1, p2))
            gp.setY(Y[:, [p1, p2]])
            Cg_params0 = SP.array([SP.sqrt(RV['varST'][p1, 0]),
                                   1e-6 * SP.randn(),
                                   SP.sqrt(RV['varST'][p2, 0])])
            Cn_params0 = SP.array([SP.sqrt(RV['varST'][p1, 1]),
                                   1e-6 * SP.randn(),
                                   SP.sqrt(RV['varST'][p2, 1])])
            params0 = {'Cg': Cg_params0, 'Cn': Cn_params0}
            conv2[p1, p2], info = OPT.opt_hyper(gp, params0, factr=1e3)
            rho_g[p1, p2] = Cg.K()[0, 1] / SP.sqrt(Cg.K().diagonal().prod())
            rho_n[p1, p2] = Cn.K()[0, 1] / SP.sqrt(Cn.K().diagonal().prod())
            conv2[p2, p1] = conv2[p1, p2]
            rho_g[p2, p1] = rho_g[p1, p2]
            rho_n[p2, p1] = rho_n[p1, p2]
    RV['Cg0'] = rho_g * SP.dot(SP.sqrt(RV['varST'][:, 0:1]),
                               SP.sqrt(RV['varST'][:, 0:1].T))
    RV['Cn0'] = rho_n * SP.dot(SP.sqrt(RV['varST'][:, 1:2]),
                               SP.sqrt(RV['varST'][:, 1:2].T))
    RV['conv2'] = conv2
    # 3. regularize the covariance matrices to be positive definite
    offset_g = abs(SP.minimum(LA.eigh(RV['Cg0'])[0].min(), 0)) + 1e-4
    offset_n = abs(SP.minimum(LA.eigh(RV['Cn0'])[0].min(), 0)) + 1e-4
    RV['Cg0_reg'] = RV['Cg0'] + offset_g * SP.eye(P)
    RV['Cn0_reg'] = RV['Cn0'] + offset_n * SP.eye(P)
    RV['params0_Cg'] = LA.cholesky(RV['Cg0_reg'])[SP.tril_indices(P)]
    RV['params0_Cn'] = LA.cholesky(RV['Cn0_reg'])[SP.tril_indices(P)]
    return RV

def computeColorMapPval(listeElectrodes, zVal, pVal, minZZ=-0.02, maxZZ=0.02):
    import pylab
    zz = getZZ(listeElectrodes, zVal)
    pp = getZZ(listeElectrodes, pVal)
    zz[where(isnan(zz))] = 0.0
    pp[where(isnan(pp))] = 0.0

    circle = getHeadCircle()
    pylab.plot(circle[:, 0], circle[:, 1], 'k')

    # Map p-values to saturation and z-values to hue, both clipped to [0, 1]
    s = (pp - 0.2) / (0.8 - 0.2)
    h = (zz - minZZ) / (maxZZ - minZZ)
    Mat = ones((zz.shape[0], zz.shape[1], 3))
    Mat[:, :, 0] = minimum(maximum(h, 0.0), 1.0)  # H (the upper clip was missing)
    Mat[:, :, 1] = minimum(maximum(s, 0.0), 1.0)  # S
    # V = 1
    Mat = hsv_to_rgb(Mat)
    # extent: [ None | scalars (left, right, bottom, top) ]
    pylab.imshow(Mat, origin="upper", extent=[0.025, 0.97, -0.015, 0.935])
    img = mpimg.imread(
        os.path.dirname(os.path.realpath(__file__))
        + "\\eeg_electrodes_10-20_small.png")
    pylab.imshow(img, extent=[0.0, 1.0, 0.0, 1.0])

def calcglobalcost_SDTW(self, gamma):
    """Calculate the soft-DTW (SDTW) global cost matrix.

    Takes the smoothing factor gamma; the smaller gamma is, the closer the
    soft minimum is to the true min function. `math` is assumed to be the
    NumPy namespace (square, exp, log, minimum).
    """
    self.Globalcost_[0, 0] = math.square(self.FirstVector_[0]
                                         - self.SecondVector_[0])
    for i in range(1, self.FirstVector_.shape[0]):
        self.Globalcost_[0, i] = math.square(
            self.FirstVector_[i]
            - self.SecondVector_[0]) + self.Globalcost_[0, i - 1]
    for i in range(1, self.SecondVector_.shape[0]):
        self.Globalcost_[i, 0] = math.square(
            self.SecondVector_[i]
            - self.FirstVector_[0]) + self.Globalcost_[i - 1, 0]
    for i in range(1, self.SecondVector_.shape[0]):
        for j in range(1, self.FirstVector_.shape[0]):
            self.Globalcost_[i, j] = math.square(self.FirstVector_[j]
                                                 - self.SecondVector_[i])
            # Soft minimum via the log-sum-exp trick: subtracting the hard
            # minimum first is preferred over the direct formula for
            # numerical stability.
            res1 = math.minimum(self.Globalcost_[i, j - 1],
                                self.Globalcost_[i - 1, j])
            res1 = math.minimum(res1, self.Globalcost_[i - 1, j - 1])
            res2 = (math.exp(-1 / gamma * (self.Globalcost_[i, j - 1] - res1))
                    + math.exp(-1 / gamma * (self.Globalcost_[i - 1, j] - res1))
                    + math.exp(-1 / gamma
                               * (self.Globalcost_[i - 1, j - 1] - res1)))
            # Direct: res1 = -gamma * math.log(
            #     math.exp(-self.Globalcost_[i - 1, j - 1] / gamma)
            #     + math.exp(-self.Globalcost_[i - 1, j] / gamma)
            #     + math.exp(-self.Globalcost_[i, j - 1] / gamma))
            self.Globalcost_[i, j] += (-gamma * math.log(res2)) + res1

def calcglobalcost_DDTW(self):
    """Derivative DTW: align estimated derivatives instead of raw values.
    `math`/`npy` are assumed to be the NumPy namespace."""
    derv1 = npy.array(self.FirstVector_)
    derv2 = npy.array(self.SecondVector_)
    # Derivative estimate at each interior point: average of the one-step
    # backward difference and the centered two-step difference
    for indx in range(1, derv1.shape[0] - 1):
        derv1[indx] = ((self.FirstVector_[indx + 1]
                        - self.FirstVector_[indx - 1]) / 2
                       + (self.FirstVector_[indx]
                          - self.FirstVector_[indx - 1])) / 2
    for indx in range(1, derv2.shape[0] - 1):
        derv2[indx] = ((self.SecondVector_[indx + 1]
                        - self.SecondVector_[indx - 1]) / 2
                       + (self.SecondVector_[indx]
                          - self.SecondVector_[indx - 1])) / 2
    # Pad the endpoints with the nearest interior derivative
    derv1[0] = derv1[1]
    derv1[derv1.shape[0] - 1] = derv1[derv1.shape[0] - 2]
    derv2[0] = derv2[1]
    derv2[derv2.shape[0] - 1] = derv2[derv2.shape[0] - 2]

    self.Globalcost_[0, 0] = math.square(derv1[0] - derv2[0])
    temp = 0
    for i in range(1, self.FirstVector_.shape[0]):
        self.Globalcost_[0, i] = math.square(
            derv1[i] - derv2[0]) + self.Globalcost_[0, i - 1]
    for i in range(1, self.SecondVector_.shape[0]):
        self.Globalcost_[i, 0] = math.square(
            derv2[i] - derv1[0]) + self.Globalcost_[i - 1, 0]
    for i in range(1, self.SecondVector_.shape[0]):
        for j in range(1, self.FirstVector_.shape[0]):
            self.Globalcost_[i, j] = math.square(derv1[j] - derv2[i])
            temp = math.minimum(self.Globalcost_[i - 1, j],
                                self.Globalcost_[i, j - 1])
            temp = math.minimum(self.Globalcost_[i - 1, j - 1], temp)
            self.Globalcost_[i, j] += temp

def analize_graphs(N, xfile, y1file, y2file, y3file,
                   maxfile="max_filename.txt",
                   medfile="medium_filename.txt",
                   minfile="min_filename.txt",
                   srcfile="screenshot.png"):
    x = scipy.genfromtxt(xfile, delimiter=', ')[:N]
    y1 = scipy.genfromtxt(y1file, delimiter=', ')[:N]
    y2 = scipy.genfromtxt(y2file, delimiter=', ')[:N]
    y3 = scipy.genfromtxt(y3file, delimiter=', ')[:N]

    plt.rcParams["figure.figsize"] = (20, 10)

    # Left panel: the three raw curves
    l = plt.subplot(121)  # numrows, numcols, fignum
    l.plot(x, y1, c="red")
    l.plot(x, y2, c="green")
    l.plot(x, y3, c="blue")
    y1_patch = mpatches.Patch(color='red', label="y1")
    y2_patch = mpatches.Patch(color='green', label="y2")
    y3_patch = mpatches.Patch(color='blue', label="y3")
    plt.legend(handles=[y1_patch, y2_patch, y3_patch])
    plt.xlabel("Abscissa")
    plt.ylabel("Ordinate")
    plt.title("Graph")

    # Element-wise extrema and average across the three curves
    mx = scipy.maximum(y1, y2)
    mx = scipy.maximum(mx, y3)
    mn = scipy.minimum(y1, y2)
    mn = scipy.minimum(mn, y3)
    av = scipy.average([y1, y2, y3], axis=0)

    # Right panel: minimum, maximum, and average envelopes
    r = plt.subplot(122)
    r.plot(x, mn, c="blue")
    r.plot(x, mx, c="red")
    r.plot(x, av, c="green")
    y1_patch = mpatches.Patch(color='red', label="Maximum")
    y2_patch = mpatches.Patch(color='green', label="Average")
    y3_patch = mpatches.Patch(color='blue', label="Minimum")
    plt.legend(handles=[y1_patch, y2_patch, y3_patch])
    plt.xlabel("Abscissa")
    plt.ylabel("Ordinate")
    plt.title("Graph")

    scipy.savetxt(maxfile, mx, delimiter=", ")
    scipy.savetxt(minfile, mn, delimiter=", ")
    scipy.savetxt(medfile, av, delimiter=", ")
    plt.savefig(srcfile)
    plt.show()

def _computeBGDiff(self):
    self._flow.update(self._imageBuffer.getLast())
    n = len(self._imageBuffer)

    # Compose optical-flow transforms from the first frame to the middle
    prev_im = self._imageBuffer[0]
    forward = None
    for i in range(0, n // 2):
        if forward is None:
            forward = self._imageBuffer[i].to_next
        else:
            forward = forward * self._imageBuffer[i].to_next

    w, h = size = prev_im.size
    mask = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
    cv.Set(mask, 0)
    interior = cv.GetSubRect(mask, pv.Rect(2, 2, w - 4, h - 4).asOpenCV())
    cv.Set(interior, 255)
    mask = pv.Image(mask)

    prev_im = forward(prev_im)
    prev_mask = forward(mask)

    # Compose transforms from the last frame back to the middle
    next_im = self._imageBuffer[n - 1]
    back = None
    for i in range(n - 1, n // 2, -1):
        if back is None:
            back = self._imageBuffer[i].to_prev
        else:
            back = back * self._imageBuffer[i].to_prev

    next_im = back(next_im)
    next_mask = back(mask)

    curr_im = self._imageBuffer[n // 2]

    prevImg = prev_im.asMatrix2D()
    curImg = curr_im.asMatrix2D()
    nextImg = next_im.asMatrix2D()
    prevMask = prev_mask.asMatrix2D()
    nextMask = next_mask.asMatrix2D()

    # Compute transformed images
    delta1 = sp.absolute(curImg - prevImg)   # frame diff 1
    delta2 = sp.absolute(nextImg - curImg)   # frame diff 2

    delta1 = sp.minimum(delta1, prevMask)
    delta2 = sp.minimum(delta2, nextMask)

    # Use the element-wise minimum of the two difference images, which is
    # what gets compared to the threshold to yield the foreground mask.
    return sp.minimum(delta1, delta2)

def calcglobalcost_UDTW(self):
    """Unconstrained (standard) DTW global cost matrix.
    `math` is assumed to be the NumPy namespace (square, minimum)."""
    self.Globalcost_[0, 0] = math.square(self.FirstVector_[0]
                                         - self.SecondVector_[0])
    temp = 0
    for i in range(1, self.FirstVector_.shape[0]):
        self.Globalcost_[0, i] = math.square(
            self.FirstVector_[i]
            - self.SecondVector_[0]) + self.Globalcost_[0, i - 1]
    for i in range(1, self.SecondVector_.shape[0]):
        self.Globalcost_[i, 0] = math.square(
            self.SecondVector_[i]
            - self.FirstVector_[0]) + self.Globalcost_[i - 1, 0]
    for i in range(1, self.SecondVector_.shape[0]):
        for j in range(1, self.FirstVector_.shape[0]):
            self.Globalcost_[i, j] = math.square(self.FirstVector_[j]
                                                 - self.SecondVector_[i])
            temp = math.minimum(self.Globalcost_[i - 1, j],
                                self.Globalcost_[i, j - 1])
            temp = math.minimum(self.Globalcost_[i - 1, j - 1], temp)
            self.Globalcost_[i, j] += temp

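# For reference, a self-contained NumPy sketch (not part of the class above)
# of the same unconstrained DTW recurrence, handy for testing the idea in
# isolation:
import numpy as np

def dtw_cost(a, b):
    """Cumulative squared-difference DTW cost between sequences a and b."""
    n, m = len(a), len(b)
    D = np.empty((m, n))
    D[0, 0] = (a[0] - b[0]) ** 2
    for j in range(1, n):  # first row: only horizontal moves allowed
        D[0, j] = (a[j] - b[0]) ** 2 + D[0, j - 1]
    for i in range(1, m):  # first column: only vertical moves allowed
        D[i, 0] = (b[i] - a[0]) ** 2 + D[i - 1, 0]
    for i in range(1, m):
        for j in range(1, n):
            D[i, j] = (a[j] - b[i]) ** 2 + min(D[i - 1, j],
                                               D[i, j - 1],
                                               D[i - 1, j - 1])
    return D[-1, -1]

print(dtw_cost([1, 2, 3], [1, 2, 2, 3]))  # 0.0 -- the sequences align exactly
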
def set_measured_activity(self):
    """ Set the full measured activity, from nonlinear response. """

    # Learned background firing only utilizes average background signal
    self.Yy0 = receptor_activity(self.Ss0, self.Kk1, self.Kk2, self.eps,
                                 self.binding_competitive,
                                 self.num_binding_sites)
    self.Yy = receptor_activity(self.Ss, self.Kk1, self.Kk2, self.eps,
                                self.binding_competitive,
                                self.num_binding_sites)
    self.Yy += sp.random.normal(0, self.meas_noise, self.Mm)

    # Apply temporal kernel
    if self.temporal_run:
        kernel_params = [self.kernel_T, self.kernel_dt, self.kernel_tau_1,
                         self.kernel_tau_2, self.kernel_shape_1,
                         self.kernel_shape_2, self.kernel_alpha,
                         self.kernel_scale]
        self.Yy0, self.memory_Yy0 = temporal_kernel(
            self.Yy0, self.memory_Yy0, self.signal_trace_Tt, kernel_params)
        self.Yy, self.memory_Yy = temporal_kernel(
            self.Yy, self.memory_Yy, self.signal_trace_Tt, kernel_params)

    # Nonlinearities: threshold, scale, and saturate the firing rates
    self.Yy0 *= self.NL_scale * (self.Yy > self.NL_threshold)
    self.Yy *= self.NL_scale * (self.Yy > self.NL_threshold)
    self.Yy0 = sp.minimum(self.Yy0, self.firing_max)
    self.Yy = sp.minimum(self.Yy, self.firing_max)

    # Measured response above background
    self.dYy = self.Yy - self.Yy0

    # Add effects of divisive normalization if called.
    if self.divisive_normalization:
        self.Yy0 = inhibitory_normalization(self.Yy0, self.inh_C, self.inh_D,
                                            self.inh_eta, self.inh_R)
        self.Yy = inhibitory_normalization(self.Yy, self.inh_C, self.inh_D,
                                           self.inh_eta, self.inh_R)
        self.dYy = self.Yy - self.Yy0

def logloss(self, y, pred):
    epsilon = 1e-15
    pred = sp.maximum(epsilon, pred)
    pred = sp.minimum(1 - epsilon, pred)
    ll = sum(y * sp.log(pred) + sp.subtract(1, y) * sp.log(sp.subtract(1, pred)))
    ll = ll * -1.0 / len(y)
    return ll

import numpy as sp  # `sp` alias for the scipy/numpy namespace used by these snippets


def logloss(p, y):
    """Binary cross-entropy; predictions are clipped away from 0 and 1 so
    the logs stay finite."""
    epsilon = 1e-15
    p = sp.maximum(epsilon, p)
    p = sp.minimum(1 - epsilon, p)
    ll = sum(y * sp.log(p) + sp.subtract(1, y) * sp.log(sp.subtract(1, p)))
    ll = ll * -1.0 / len(y)
    return ll

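# Sanity check (not from the original source): for probabilities away from
# 0 and 1 the clipping is a no-op, so the result matches scikit-learn's
# log_loss exactly.
from sklearn.metrics import log_loss

y = sp.array([1, 0, 1, 1])
p = sp.array([0.9, 0.1, 0.8, 0.65])
print(logloss(p, y))   # ~0.2162
print(log_loss(y, p))  # same value
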
def find_dt_artifacts(dt):
    r"""
    Finds points in a distance transform that are closer to the edge of the
    image than to solid. These points could *potentially* be erroneously
    high since their distance values do not reflect the possibility that
    solid may have been present beyond the border of the image but lost by
    trimming.

    Parameters
    ----------
    dt : ND-array
        The distance transform of the phase of interest.

    Returns
    -------
    image : ND-array
        An ND-array the same shape as ``dt`` with numerical values
        indicating the maximum amount of error in each voxel, which is
        found by subtracting the distance to the nearest edge of the image
        from the distance transform value. In other words, this is the
        error that would be found if there were a solid voxel lurking just
        beyond the nearest edge of the image. Obviously, voxels with a
        value of zero have no error.
    """
    # Distance to the nearest image edge along any axis
    temp = sp.ones(shape=dt.shape) * sp.inf
    for ax in range(dt.ndim):
        dt_lin = distance_transform_lin(sp.ones_like(temp, dtype=bool),
                                        axis=ax, mode='both')
        temp = sp.minimum(temp, dt_lin)
    result = sp.clip(dt - temp, a_min=0, a_max=sp.inf)
    return result

def llfun(act, pred, idx):
    epsilon = 1e-15
    pred = sp.maximum(epsilon, pred[idx])
    pred = sp.minimum(1 - epsilon, pred)
    ll = sum(act[idx] * sp.log(pred)
             + sp.subtract(1, act[idx]) * sp.log(sp.subtract(1, pred)))
    ll = ll * -1.0 / len(act[idx])
    return ll

def log_loss(self, act, pred, epsilon=1e-07):
    pred = sp.maximum(epsilon, pred)
    pred = sp.minimum(1 - epsilon, pred)
    ll = sum(act * sp.log(pred)
             + sp.subtract(1, act) * sp.log(sp.subtract(1, pred)))
    ll = ll * -1.0 / len(act)
    return ll

import numpy as sp  # `sp` alias for the scipy/numpy namespace


def benjamini_hochberg_yekutieli(p_values=None, q_value=0.05, sort_idx=None,
                                 return_sort_idx=False):
    """Benjamini-Hochberg-Yekutieli FDR control under arbitrary dependence."""
    p_values = p_values.ravel()
    if sort_idx is None:
        sort_idx = sp.argsort(p_values)
    else:
        sort_idx = sort_idx.ravel()
    p_values = p_values[sort_idx]
    m = p_values.shape[0]
    idx_line = sp.arange(1, m + 1)
    cV = (1.0 / idx_line).sum()  # harmonic correction c(m) for dependence
    # BY step-up threshold: p_(i) <= i * q / (m * c(m))
    thr_line = (idx_line * q_value) / (float(m) * cV)
    thr_ind = sp.where(p_values <= thr_line)[0]
    if thr_ind.shape[0] == 0:
        thr = 0.0
    else:
        thr = p_values[thr_ind.max()]
    # Adjust p-values, enforcing monotonicity from the largest down
    p_values_adjusted = sp.ones(m)
    prev = 1.0
    for i in range(m, 0, -1):
        p_values_adjusted[i - 1] = sp.minimum(
            prev, p_values[i - 1] * float(m) * cV / float(i))
        if p_values_adjusted[i - 1] > 1:
            p_values_adjusted[i - 1] = 1
        prev = p_values_adjusted[i - 1]
    # Restore the original ordering of the p-values
    p_tmp = p_values_adjusted.copy()
    p_values_adjusted[sort_idx] = p_tmp
    if return_sort_idx:
        return [thr, p_values_adjusted, sort_idx]
    else:
        return [thr, p_values_adjusted]

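# Hypothetical usage (not from the original source) on a toy set of
# p-values; thr is the largest p-value still declared significant (0.0 when
# nothing passes) and p_adj holds the BY-adjusted p-values in input order.
pvals = sp.array([0.001, 0.008, 0.039, 0.041, 0.042, 0.06, 0.074, 0.205])
thr, p_adj = benjamini_hochberg_yekutieli(p_values=pvals, q_value=0.05)
print(thr, p_adj)
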
from numpy import array, minimum  # provided by the scipy/numpy namespace in the original module


def Wells_and_Coppersmith_94(fault_type, Mw, max_width):
    """Calculate the areas and widths for ruptures, given fault_type
    and magnitude. The widths are limited by the fault width.

    fault_type  fault type, e.g. 'reverse'
    Mw          magnitudes of the ruptures
    max_width   width of the fault

    Returns an area for the rupture and the max width of rupture.
    """
    if fault_type == "normal":
        area = 10 ** (-2.87 + (0.82 * Mw))
        widthWC = 10 ** (-1.14 + (0.35 * Mw))
    elif fault_type == "reverse":
        area = 10 ** (-3.99 + (0.98 * Mw))
        widthWC = 10 ** (-1.61 + (0.41 * Mw))
    elif fault_type == "strike_slip":
        area = 10 ** (-3.42 + (0.90 * Mw))
        widthWC = 10 ** (-0.76 + (0.27 * Mw))
    else:  # "unspecified" or any unrecognised fault type
        area = 10 ** (-3.497 + (0.91 * Mw))
        widthWC = 10 ** (-1.01 + (0.32 * Mw))
    width = minimum(widthWC, max_width)
    return area, width

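# Hypothetical usage (not from the original source): for a reverse fault at
# Mw 6.3 this reproduces the unclamped width of ~9.397 km checked in the
# unit test above.
area, width = Wells_and_Coppersmith_94('reverse', array([5.0, 6.3, 7.0]),
                                       max_width=15)
print(area)   # rupture areas, km^2
print(width)  # rupture widths, capped at the 15 km fault width
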
def logloss(Y_true, Y_pred):
    epsilon = 1e-15
    # Clip, then use the clipped values throughout so the logs stay finite
    pred = sp.maximum(epsilon, Y_pred)
    pred = sp.minimum(1 - epsilon, pred)
    ll = sum(Y_true * sp.log(pred)
             + sp.subtract(1, Y_true) * sp.log(sp.subtract(1, pred)))
    ll = ll * -1.0 / len(Y_true)
    return ll

def generateThumbnail(inputFile, thumbSize):
    global size
    # logging.debug('Input File: %s\n' % inputFile)
    # logging.debug('Output File: %s\n' % outputFile)
    # logging.debug('Thumb Size: %s\n' % thumbSize)
    h5f = tables.open_file(inputFile)
    dataSource = HDFDataSource.DataSource(inputFile, None)
    md = MetaData.genMetaDataFromSourceAndMDH(
        dataSource, MetaDataHandler.HDFMDHandler(h5f))

    xsize = h5f.root.ImageData.shape[1]
    ysize = h5f.root.ImageData.shape[2]

    if xsize > ysize:
        zoom = float(thumbSize) / xsize
    else:
        zoom = float(thumbSize) / ysize
    size = (int(xsize * zoom), int(ysize * zoom))

    # Grab a frame shortly after the laser turns on, clamped to the stack end
    im = h5f.root.ImageData[min(md.EstimatedLaserOnFrameNo + 10,
                                h5f.root.ImageData.shape[0] - 1), :, :].astype('f')
    im = im.T - min(md.Camera.ADOffset, im.min())
    h5f.close()

    # Scale to the 8-bit range and clip
    im = maximum(minimum(1 * (255 * im) / im.max(), 255), 0)
    return im.astype('uint8')

def Leonard_SCR_rup_width(dip, Mw, area, max_rup_width, **kwargs):
    """
    From: Earthquake Fault Scaling: Self-Consistent Relating of Rupture
    Length, Width, Average Displacement, and Moment Release.
    Author: Mark Leonard

    parameters:
      area: the rupture area, km2

    returns:
      the rupture width, km.
    """
    # First the length is calculated
    e, f = Leonard_SCR_constants(Mw)
    length = 10. ** (e * Mw + f)  # length is in km
    if area is None:
        area = Leonard_SCR_rup_area(Mw)
    width = area / length
    if max_rup_width is not None:
        return minimum(width, max_rup_width)
    else:
        return width

def set_adapted_free_energy(self):
    """ Set free energy based on adapted activity. """

    activity_stats = [self.adapted_activity_mu, self.adapted_activity_sigma]
    adapted_activity = random_matrix([self.Mm], params=activity_stats,
                                     seed=self.seed_adapted_activity)
    self.eps = free_energy(self.Ss, self.Kk1, self.Kk2, adapted_activity,
                           self.binding_competitive, self.num_binding_sites)

    # Apply max and min epsilon value to each component
    self.min_eps = random_matrix(self.Mm,
                                 params=[self.mu_min_eps, self.sigma_min_eps],
                                 seed=self.seed_eps)
    self.max_eps = random_matrix(self.Mm,
                                 params=[self.mu_max_eps, self.sigma_max_eps],
                                 seed=self.seed_eps)
    self.eps = sp.maximum(self.eps.T, self.min_eps).T
    self.eps = sp.minimum(self.eps.T, self.max_eps).T

def generateThumbnail(inputFile, thumbSize):
    global size
    # logging.debug('Input File: %s\n' % inputFile)
    # logging.debug('Output File: %s\n' % outputFile)
    # logging.debug('Thumb Size: %s\n' % thumbSize)
    h5f = tables.openFile(inputFile)  # pre-3.0 PyTables API
    dataSource = HDFDataSource.DataSource(inputFile, None)
    md = MetaData.genMetaDataFromSourceAndMDH(
        dataSource, MetaDataHandler.HDFMDHandler(h5f))

    xsize = h5f.root.ImageData.shape[1]
    ysize = h5f.root.ImageData.shape[2]

    if xsize > ysize:
        zoom = float(thumbSize) / xsize
    else:
        zoom = float(thumbSize) / ysize
    size = (int(xsize * zoom), int(ysize * zoom))

    im = h5f.root.ImageData[min(md.EstimatedLaserOnFrameNo + 10,
                                h5f.root.ImageData.shape[0] - 1), :, :].astype("f")
    im = im.T - min(md.Camera.ADOffset, im.min())
    h5f.close()

    im = maximum(minimum(1 * (255 * im) / im.max(), 255), 0)
    return im.astype("uint8")

def watershed(i_d, logger):
    # compute neighbours
    logger.info('Computing differences...')
    nbs = compute_neighbours(i_d)

    # compute min altitude map
    logger.info('Computing minimal altitude map...')
    minaltitude = nbs[0]
    for nb in nbs[1:]:
        minaltitude = scipy.minimum(minaltitude, nb)

    # watershed
    logger.info('Watershed with minaltitude pre-computation...')
    result = scipy.zeros(i_d.shape, dtype=scipy.uint16)
    nb_labs = 0
    for x in range(result.shape[0]):
        for y in range(result.shape[1]):
            for z in range(result.shape[2]):
                if result[x, y, z] != 0:
                    continue  # ~10% of runtime
                L, lab = stream_set(i_d, result, minaltitude, (x, y, z))  # ~90%
                if -1 == lab:
                    # new catchment basin: assign a fresh label
                    nb_labs += 1
                    for p in L:
                        result[p] = nb_labs
                else:
                    # stream reached an existing basin: propagate its label
                    for p in L:
                        result[p] = lab
    return result

def set_reach_dist(SetOfObjects, point_index, epsilon):
    # Assumes that the query returns ordered (smallest distance first)
    # entries. This is the case for the balltree query...
    # distances, indices = SetOfObjects.query(SetOfObjects.data[point_index],
    #                                         SetOfObjects._nneighbors[point_index])
    row = [SetOfObjects.data[point_index, :]]
    indices = np.argsort(row)
    distances = np.sort(row)

    # Checks to see if there is more than one member in the neighborhood
    if scipy.iterable(distances):
        # Mask processed values
        unprocessed = indices[(SetOfObjects._processed[indices] < 1)[0].T]
        rdistances = scipy.maximum(
            distances[(SetOfObjects._processed[indices] < 1)[0].T],
            SetOfObjects._core_dist[point_index])
        SetOfObjects._reachability[unprocessed] = scipy.minimum(
            SetOfObjects._reachability[unprocessed], rdistances)

        # Checks to see if everything is already processed;
        # if so, return control to the main loop
        if unprocessed.size > 0:
            # Define return order based on reachability distance
            return sorted(zip(SetOfObjects._reachability[unprocessed],
                              unprocessed),
                          key=lambda reachability: reachability[0])[0][1]
        else:
            return point_index
    else:
        # Not sure if this else statement is actually needed...
        return point_index

def set_reach_dist(SetOfObjects, point_index, epsilon):
    """
    Sets reachability distance and ordering. This function is the primary
    workhorse of the OPTICS algorithm.

    SetofObjects: Instantiated and prepped instance of 'setOfObjects' class
    epsilon: Determines maximum object size that can be extracted. Smaller
        epsilons reduce run time. (float)
    """
    row = [SetOfObjects.data[point_index, :]]
    indices = np.argsort(row)
    distances = np.sort(row)

    if scipy.iterable(distances):
        unprocessed = indices[(SetOfObjects._processed[indices] < 1)[0].T]
        rdistances = scipy.maximum(
            distances[(SetOfObjects._processed[indices] < 1)[0].T],
            SetOfObjects._core_dist[point_index])
        SetOfObjects._reachability[unprocessed] = scipy.minimum(
            SetOfObjects._reachability[unprocessed], rdistances)
        if unprocessed.size > 0:
            return unprocessed[np.argsort(np.array(
                SetOfObjects._reachability[unprocessed]))[0]]
        else:
            return point_index
    else:
        return point_index

def set_normal_free_energy(self):
    """ Set free energy as a function of odorant; normal tuning curve. """

    self.eps_base = self.mu_eps + self.normal_eps_tuning_prefactor * \
        sp.exp(-(1. * sp.arange(self.Mm)) ** 2.0
               / (2.0 * self.normal_eps_tuning_width) ** 2.0)
    self.eps_base += random_matrix(self.Mm, params=[0, self.sigma_eps],
                                   seed=self.seed_eps)

    # If dual signal, use the average of the FULL signal nonzero components
    if self.Kk_split == 0:
        self.eps = self.WL_scaling * sp.log(self.mu_Ss0) + self.eps_base
    else:
        self.eps = self.WL_scaling * sp.log(
            sp.average(self.Ss[self.Ss != 0])) + self.eps_base

    # Apply max and min epsilon value to each component
    self.min_eps = random_matrix(self.Mm,
                                 params=[self.mu_min_eps, self.sigma_min_eps],
                                 seed=self.seed_eps)
    self.max_eps = random_matrix(self.Mm,
                                 params=[self.mu_max_eps, self.sigma_max_eps],
                                 seed=self.seed_eps)
    self.eps = sp.maximum(self.eps, self.min_eps)
    self.eps = sp.minimum(self.eps, self.max_eps)

    # If an array of signals, replicate for each signal.
    if len(self.Ss.shape) > 1:
        self.eps = sp.tile(self.eps, [self.Ss.shape[1], 1]).T

def set_reach_dist(SetOfObjects, point_index, epsilon):
    # Assumes that the query returns ordered (smallest distance first)
    # entries. This is the case for the balltree query...
    # ...switching to a query structure that does not do this will break
    # things! And break in a non-obvious way: for cases where multiple
    # entries are tied in reachability distance, it will cause the next
    # point to be processed in random order instead of the closest point.
    # This may manifest in edge cases where different runs of OPTICS give
    # different ordered lists and hence different clustering structure,
    # removing reproducibility.
    distances, indices = SetOfObjects.query(SetOfObjects.data[point_index],
                                            SetOfObjects._nneighbors[point_index])

    # Checks to see if there is more than one member in the neighborhood
    if scipy.iterable(distances):
        # Mask processed values
        unprocessed = indices[(SetOfObjects._processed[indices] < 1)[0].T]
        rdistances = scipy.maximum(
            distances[(SetOfObjects._processed[indices] < 1)[0].T],
            SetOfObjects._core_dist[point_index])
        SetOfObjects._reachability[unprocessed] = scipy.minimum(
            SetOfObjects._reachability[unprocessed], rdistances)

        # Checks to see if everything is already processed;
        # if so, return control to the main loop
        if unprocessed.size > 0:
            # Define return order based on reachability distance
            return sorted(zip(SetOfObjects._reachability[unprocessed],
                              unprocessed),
                          key=lambda reachability: reachability[0])[0][1]
        else:
            return point_index
    else:
        # Not sure if this else statement is actually needed...
        return point_index

def find_largest_hole(parameters, ar):
    minimal_distances = []
    all_pixels = sp.array(range(len(ar)))
    empty_pixels = all_pixels[(ar[all_pixels] == parameters.badval)]
    if len(empty_pixels) == 0:
        print("no empty pixels")
        return 2 * sp.pi / (6 * parameters.nside)
    print("the number of empty pixels is", len(empty_pixels))
    nonempty_pixels = all_pixels[(ar[all_pixels] != parameters.badval)
                                 & (ar[all_pixels] != parameters.unseen)]
    # For every empty pixel, find the angular distance to the nearest
    # non-empty pixel; the largest of these minima is the hole radius.
    for p in empty_pixels:
        minimal_distance = 3.14
        theta, phi = hp.pix2ang(parameters.nside, p)
        for p_i in nonempty_pixels:
            theta_i, phi_i = hp.pix2ang(parameters.nside, p_i)
            angular_distance = hp.rotator.angdist([theta, phi],
                                                  [theta_i, phi_i])
            minimal_distance = sp.minimum(minimal_distance, angular_distance)
        minimal_distances.append(minimal_distance)
    radius_of_largest_hole = sp.amax(minimal_distances)
    print("The angular radius of largest hole =",
          radius_of_largest_hole, "rad.")
    return radius_of_largest_hole

def _set_reach_dist(SetOfObjects, point_index, epsilon):
    # Assumes that the query returns ordered (smallest distance first)
    # entries. This is the case for the balltree query...
    distances, indices = SetOfObjects.query(
        SetOfObjects.data[point_index],
        SetOfObjects._nneighbors[point_index])

    # Checks to see if there is more than one member in the neighborhood
    if scipy.iterable(distances):
        # Mask processed values
        unprocessed = indices[(SetOfObjects._processed[indices] < 1)[0].T]
        rdistances = scipy.maximum(
            distances[(SetOfObjects._processed[indices] < 1)[0].T],
            SetOfObjects._core_dist[point_index])
        SetOfObjects._reachability[unprocessed] = scipy.minimum(
            SetOfObjects._reachability[unprocessed], rdistances)

        # Checks to see if everything is already processed;
        # if so, return control to the main loop
        if unprocessed.size > 0:
            # Define return order based on reachability distance
            return sorted(zip(SetOfObjects._reachability[unprocessed],
                              unprocessed),
                          key=lambda reachability: reachability[0])[0][1]
        else:
            return point_index
    else:
        # Not sure if this else statement is actually needed...
        return point_index

def histogram_intersection(h1, h2):  # 6 us @array, 30 us @list \w 100 bins
    """
    Calculate the common part of two histograms.

    The histogram intersection between two histograms \f$H\f$ and \f$H'\f$
    of size \f$M\f$ is defined as:
    \f[
        d_{\cap}(H, H') = \sum_{m=1}^{M} \min(H_m, H'_m)
    \f]

    Attributes:
    - a real metric

    Attributes for normalized histograms:
    - \f$d(H, H') \in [0, 1]\f$
    - \f$d(H, H) = 1\f$
    - \f$d(H, H') = d(H', H)\f$

    Attributes for not-normalized histograms:
    - not applicable

    Attributes for not-equal histograms:
    - not applicable

    @param h1 the first histogram, normalized
    @type h1 array-like sequence
    @param h2 the second histogram, normalized, same bins as h1
    @type h2 array-like sequence

    @return histogram intersection
    @rtype float
    """
    h1, h2 = __prepare_histogram(h1, h2)
    return scipy.sum(scipy.minimum(h1, h2))

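# Toy check of the core operation (not from the original source; the
# module-private __prepare_histogram helper is bypassed here):
import numpy

h1 = numpy.array([0.5, 0.3, 0.2])
h2 = numpy.array([0.4, 0.4, 0.2])
print(numpy.minimum(h1, h2).sum())  # 0.9; identical histograms give 1.0
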
def llfun(act, pred):
    epsilon = 1e-15
    pred = sp.maximum(epsilon, pred)
    pred = sp.minimum(1 - epsilon, pred)
    ll = sum(act * sp.log(pred)
             + sp.subtract(1, act) * sp.log(sp.subtract(1, pred)))
    ll = ll * -1.0 / len(act)
    return ll

import numpy as sp  # `sp` alias for the scipy/numpy namespace


def num_segregating_sites(gene_matrix):
    """
    Input: SNP matrix (individuals x sites, 0/1 coded).

    A site segregates (is polymorphic) when its column sum is neither 0 nor
    the number of rows. Builds the folded site frequency spectrum (SFS)
    from the minor allele counts and returns it as a {count: n_sites} dict.
    """
    from collections import Counter
    from decimal import Decimal

    freqs = sp.mean(gene_matrix, 0)
    mafs = sp.minimum(freqs, 1 - freqs)  # minor allele frequencies
    # Filtering very rare variants?
    # maf_filter = mafs > 0.001
    # mafs = mafs[maf_filter]
    sum_list = mafs * gene_matrix.shape[0]  # minor allele counts per site
    data = [float(Decimal("%.2f" % e)) for e in sum_list]
    SFS = Counter(data)
    del SFS[0.0]  # drop fixed (non-segregating) sites
    total = sum(SFS.values(), 0.0)
    SFS_freq = {k: v / total for k, v in SFS.items()}
    SFS_counts = {k: v for k, v in SFS.items()}
    SFS_counts = dict(sorted(SFS_counts.items()))
    return SFS_counts

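# Toy illustration (not from the original source) of the minor-allele-
# frequency step: only the third column segregates, with MAF 0.5.
gene_matrix = sp.array([[0, 1, 1],
                        [0, 1, 0],
                        [0, 1, 0],
                        [0, 1, 1]])
freqs = sp.mean(gene_matrix, 0)      # column means: [0.0, 1.0, 0.5]
print(sp.minimum(freqs, 1 - freqs))  # MAFs: [0.0, 0.0, 0.5]
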
def findnext(self):
    if self.nsam == 0:
        return [0.5 * (sp.matrix(self.upper) + sp.matrix(self.lower)), 0]
    if self.finished:
        raise RuntimeError("opt is finished")
    self.cc = 0
    fudge = 2.
    EIwrap = lambda x, y: (-self.evalWEI(sp.matrix(x)), 0)
    [x, EImin, ierror] = DIRECT.solve(EIwrap, self.lower, self.upper,
                                      user_data=[], algmethod=1, maxf=4000)
    # If no nonzero EIs were found, progressively shrink the search box
    # around the current best point and retry.
    while self.cc == 0 and fudge <= self.fudgelimit:
        print("no nonzero EIs found over full range. trying closer to "
              "current min with lengthfactor: " + str(fudge))
        u = sp.matrix(self.upper)
        l = sp.matrix(self.lower)
        dia = u - l
        lw = sp.maximum(l, self.best[0] - dia / fudge)
        up = sp.minimum(u, self.best[0] + dia / fudge)
        [x, EImin, ierror] = DIRECT.solve(EIwrap, lw, up, user_data=[],
                                          algmethod=1, maxf=4000)
        fudge *= 2.
    print("nonzero EIs: " + str(self.cc))
    if self.cc == 0:
        print("done. no nonzero EIs")
        self.finished = True
        # raise RuntimeError("opt is finished")
        return [self.best[0], 0.]
    return sp.matrix(x), -EImin

def binary_logloss(p, y):
    epsilon = 1e-15
    p = sp.maximum(epsilon, p)
    p = sp.minimum(1 - epsilon, p)
    res = sum(y * sp.log(p) + sp.subtract(1, y) * sp.log(sp.subtract(1, p)))
    res *= -1.0 / len(y)
    return res

def entropyloss(act, pred):
    epsilon = 1e-15
    pred = sp.maximum(epsilon, pred)
    pred = sp.minimum(1 - epsilon, pred)
    # Note: base-10 logs, unlike the natural-log variants elsewhere
    el = sum(act * sp.log10(pred)
             + sp.subtract(1, act) * sp.log10(sp.subtract(1, pred)))
    el = el * -1.0 / len(act)
    return el

def plotgpsonly(TEClist, gpslist, plotdir, m, ax, fig, latlim, lonlim):
    """ Makes a set of plots when only GPS data is available."""
    maxplot = len(gpslist)
    strlen = int(sp.ceil(sp.log10(maxplot)) + 1)
    fmstr = '{0:0>' + str(strlen) + '}_'
    plotnum = 0
    for gps_cur in gpslist:
        gpshands = []
        gpsmin = sp.inf
        gpsmax = -sp.inf
        for igpsn, (igps, igpslist) in enumerate(zip(TEClist, gps_cur)):
            print('Plotting GPS data from rec {0} of {1}'.format(
                igpsn, len(gps_cur)))
            # check if there's anything to plot
            if len(igpslist) == 0:
                continue
            (sctter, scatercb) = scatterGD(igps, 'alt', 3.5e5,
                                           vbounds=[0, 20], time=igpslist,
                                           gkey='vTEC', cmap='plasma',
                                           fig=fig, ax=ax, title='',
                                           cbar=True, err=.1, m=m)
            gpsmin = sp.minimum(igps.times[igpslist, 0].min(), gpsmin)
            gpsmax = sp.maximum(igps.times[igpslist, 0].max(), gpsmax)
            gpshands.append(sctter)
            scatercb.set_label('vTEC in TECu')
        # change the z order
        print('Plotting {0} of {1} plots'.format(plotnum, maxplot))
        plt.savefig(os.path.join(plotdir, fmstr.format(plotnum) + 'GPSonly.png'))
        plotnum += 1
        for i in reversed(gpshands):
            i.set_zorder(i.get_zorder() + 1)

def evaluate_ll(y, yhat):
    epsilon = 1e-15
    yhat = sp.maximum(epsilon, yhat)
    yhat = sp.minimum(1 - epsilon, yhat)
    ll = sum(y * sp.log(yhat) + sp.subtract(1, y) * sp.log(sp.subtract(1, yhat)))
    ll = ll * -1.0 / len(y)
    return ll

def logloss(act, pred):
    epsilon = 1e-15
    pred = sp.maximum(epsilon, pred)
    pred = sp.minimum(1 - epsilon, pred)
    ll = sum(act * sp.log(pred) + sp.subtract(1, act) * sp.log(sp.subtract(1, pred)))
    ll = ll * -1.0 / len(act)
    return ll

import numpy as sp  # `sp` alias for the scipy/numpy namespace
import matplotlib.pyplot as plt
from scipy.spatial import distance


def plot_dist_grid(length=20, start=[(0, 0)], n=1, metric='euclidean',
                   interp=None, cmap=None, cbar=False,
                   title='Distance to Closest Point',
                   xlabel="X Coord", ylabel="Y Coord"):
    # Create arrays of x and y coordinates for every cell of the grid
    x_array = sp.zeros((length, length)) + sp.arange(length)
    y_array = sp.zeros((length, length)) + sp.expand_dims(sp.arange(length), 1)
    coords = list(zip(x_array.ravel(), y_array.ravel()))

    # Iterate over start points, keeping the distance to the closest one
    minima = []
    for i in range(len(start)):
        val = distance.cdist([start[i]], coords, metric).reshape(length, length)
        if i == 0:
            minima = sp.copy(val)  # assume all are minimums
        else:
            minima = sp.minimum(minima, val)  # take the smaller

    # Create plot from the 'minima' array
    fig, ax = plt.subplots()
    cax = ax.imshow(minima, interpolation=interp, cmap=cmap)
    if cbar:
        cbar = fig.colorbar(cax, ticks=list(range(int(sp.amax(minima)))))
    ax.set_title(title)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    fig.savefig('figure' + str(n) + '.pdf')

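# Minimal check (not from the original source) of the cdist-based distance
# field the grid is built from: distance of each cell of a 3x3 grid from
# the origin.
coords = [(x, y) for y in range(3) for x in range(3)]
print(distance.cdist([(0, 0)], coords, 'euclidean').reshape(3, 3))
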
def logloss(act, pred):
    epsilon = 1e-4
    pred = sp.maximum(epsilon, pred)
    pred = sp.minimum(1 - epsilon, pred)
    ll = -1.0 / len(act) * sum(act * sp.log(pred)
                               + sp.subtract(1, act) * sp.log(sp.subtract(1, pred)))
    return ll

def _set_reach_dist(setofobjects, point_index, epsilon):
    # Assumes that the query returns ordered (smallest distance first)
    # entries. This is the case for the balltree query...
    dists, indices = setofobjects.query(setofobjects.data[point_index],
                                        setofobjects._nneighbors[point_index])

    # Checks to see if there is more than one member in the neighborhood
    if sp.iterable(dists):
        # Mask processed values; n_pr is 'not processed'
        n_pr = indices[(setofobjects._processed[indices] < 1)[0].T]
        rdists = sp.maximum(dists[(setofobjects._processed[indices] < 1)[0].T],
                            setofobjects.core_dists_[point_index])
        new_reach = sp.minimum(setofobjects.reachability_[n_pr], rdists)
        setofobjects.reachability_[n_pr] = new_reach

        # Checks to see if everything is already processed;
        # if so, return control to the main loop
        if n_pr.size > 0:
            # Define return order based on reachability distance
            return n_pr[sp.argmin(setofobjects.reachability_[n_pr])]
    return point_index

def logloss(act, pred):
    epsilon = 1e-6
    pred = sp.maximum(epsilon, pred)
    pred = sp.minimum(1 - epsilon, pred)
    # print np.mean(pred)
    ll = sum(act * sp.log(pred) + sp.subtract(1, act) * sp.log(sp.subtract(1, pred)))
    ll = ll * -1.0 / len(act)
    return ll

def logloss(self, act, pred):
    epsilon = 1e-15
    pred = sp.maximum(epsilon, pred)
    pred = sp.minimum(1 - epsilon, pred)
    # Redundant: the clip above already bounds pred strictly below 1
    pred[pred >= 1] = 0.9999999
    ll = sum(act * sp.log(pred) + sp.subtract(1, act) * sp.log(sp.subtract(1, pred)))
    ll = ll * -1.0 / len(act)
    return ll

def llfun(act, pred):
    p_true = pred[:, 1]  # probability of the positive class
    epsilon = 1e-15
    p_true = sp.maximum(epsilon, p_true)
    p_true = sp.minimum(1 - epsilon, p_true)
    ll = sum(act * sp.log(p_true)
             + sp.subtract(1, act) * sp.log(sp.subtract(1, p_true)))
    ll = ll * -1.0 / len(act)
    return ll

def log_loss(act, pred):
    epsilon = 1e-15
    pred = sp.maximum(epsilon, pred)
    pred = sp.minimum(1 - epsilon, pred)
    ll = sum(act * sp.log(pred.astype(float))
             + sp.subtract(1, act.astype(float))
             * sp.log(sp.subtract(1, pred.astype(float))))
    ll = ll * -1.0 / len(act)
    return ll