def expand(self):
    node_cpy = dp(self.config)
    config_lst = []
    # Locate the blank tile (0) in the flat 3x3 board.
    pos = 0
    for i in range(9):
        if node_cpy[i] == 0:
            pos = i
            break
    # Candidate moves: up, left, right, down.
    for add in [-3, -1, 1, 3]:
        new_pos = pos + add
        col = pos % 3
        if col == 2 and add == 1:
            continue  # cannot move right off the board edge
        if col == 0 and add == -1:
            continue  # cannot move left off the board edge
        if -1 < new_pos < 9:
            nxt_config = dp(node_cpy)
            nxt_config[pos], nxt_config[new_pos] = nxt_config[new_pos], nxt_config[pos]
            config_lst.append(Node(nxt_config))
    return config_lst
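# --- Usage sketch (assumption: not part of the original source) ---
# A minimal, self-contained version of the same blank-tile move generation,
# without the Node/self machinery; demo_expand is illustrative only.
def demo_expand(config):
    """Return the flat 3x3 configs reachable by sliding the blank (0)."""
    pos = config.index(0)
    out = []
    for add in (-3, -1, 1, 3):
        new_pos = pos + add
        col = pos % 3
        if (col == 2 and add == 1) or (col == 0 and add == -1):
            continue  # the blank cannot wrap across a row edge
        if 0 <= new_pos < 9:
            nxt = list(config)
            nxt[pos], nxt[new_pos] = nxt[new_pos], nxt[pos]
            out.append(nxt)
    return out

# demo_expand([1, 2, 3, 4, 0, 5, 6, 7, 8]) -> the 4 neighbours of the centre blank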
def wSAT(self, k13, sym1, p=0.5, max_flips=10000):
    k4 = dp(k13)
    # Random initial assignment for every symbol.
    d = dict([(s, random.choice([True, False])) for s in sym1])
    for i in range(max_flips):
        satisfied, unsatisfied = [], []
        for a1 in k4:
            if self.kb_conv(a1, d):
                satisfied.append(a1)
            else:
                unsatisfied.append(a1)
        if not unsatisfied:
            print("Satisfiable")
            print("Model = ", d, file=sys.stderr)
            return
        # Pick a random unsatisfied clause.
        a1 = random.choice(unsatisfied)
        if random.random() > p:
            # Random walk: flip a random symbol of the clause.
            sym2 = random.choice(self.symbols(a1.split(" ")))
        else:
            # Greedy step: flip the symbol that maximises the number of satisfied clauses.
            sym11 = list(set(self.symbols(a1.split(" "))))
            l = []
            for i in range(len(sym11)):
                j = 0
                d1 = dp(d)
                d1[sym11[i]] = not d1[sym11[i]]
                for a2 in k4:
                    if self.kb_conv(a2, d1):
                        j += 1
                l.append(j)
            sym2 = sym11[l.index(max(l))]
        d[sym2] = not d[sym2]
    # WalkSAT is incomplete: running out of flips does not prove unsatisfiability.
    print("No model found within max_flips (the formula may still be satisfiable)")
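# --- Illustrative sketch (assumption: not from the original source) ---
# A standalone WalkSAT over CNF clauses encoded as tuples of signed integers
# (e.g. (1, -2) means x1 OR NOT x2); walksat is a hypothetical helper showing
# the same random-walk / greedy-flip loop as wSAT above.
import random

def walksat(clauses, n_vars, p=0.5, max_flips=10000):
    assign = {v: random.choice([True, False]) for v in range(1, n_vars + 1)}
    def sat(c):
        return any(assign[abs(lit)] == (lit > 0) for lit in c)
    for _ in range(max_flips):
        unsat = [c for c in clauses if not sat(c)]
        if not unsat:
            return assign
        clause = random.choice(unsat)
        if random.random() < p:
            var = abs(random.choice(clause))          # random walk
        else:                                         # greedy flip
            def score(v):
                assign[v] = not assign[v]
                s = sum(1 for c in clauses if sat(c))
                assign[v] = not assign[v]
                return s
            var = max({abs(lit) for lit in clause}, key=score)
        assign[var] = not assign[var]
    return None  # incomplete: no proof of unsatisfiability

# walksat([(1,), (1, 2)], 2) -> a model such as {1: True, 2: False}
# walksat([(1, 2), (-1, 2), (-2,)], 2) -> None (the instance is unsatisfiable)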
def thr3(Sr, Ntilde, sigma, dof, pvalue, J):
    import scipy.special
    gm = np.zeros((J))
    g1 = thr2(Sr, sigma, dof, pvalue, J)
    gammaValue = np.sqrt(2) * scipy.special.gamma((dof + 1.) / 2.) / scipy.special.gamma(dof / 2.)
    gstd = np.sqrt(dof - gammaValue**2)
    prodN = norm_rec(np.dot(Ntilde, np.diag(Sr)), J)
    for j in range(J):
        if np.sum(prodN[:, j] > g1[j]) > 1:
            print(np.sum(prodN[:, j] > g1[j]))
            prodF = dp(prodN[prodN[:, j] > g1[j]])
            sigma_n = mad(prodF)
            sigmaF = float(sigma_n) / float(gstd)
            gm[j] = np.maximum(stats.chi.ppf(pvalue, dof, scale=sigmaF), g1[j])
        else:
            gm[j] = dp(g1[j])
    return gm
def Denoise_FBS(X, Yin=None, nmax=100, kmad=3, tol=1e-6, gamma=0.5, J=3, verb=0, L0=False, Fixed=None, wscale=None, WithRef=None):
    """
    Solves min_{Y in Sn} lambda ||F_Sn(Y)||_1 + 0.5*||X - Y||_F^2
    """
    if Yin is not None:
        Y = dp(Yin)
    else:
        Y = dp(X)
    Go_On = 1
    it = 0
    dtol = 1.
    L = 1.
    alpha = gamma / L
    Yold = dp(Y)
    f = []
    while Go_On:
        it += 1
        if it > nmax:
            Go_On = 0
        if dtol < tol:
            Go_On = 0
        # Compute the gradient / gradient step
        dg = X - Y
        Y = Y + alpha * dg  # The update could also be done on the hypersphere - should not change
        # Projecting onto the hypersphere
        mY = np.maximum(0, 1e-32 + np.sqrt(np.sum(Y**2, axis=0)))
        Y = Y / mY  # Keep the modulus constant
        # Thresholding
        if Fixed is None:
            thf = None
        else:
            thf = alpha * Fixed
        # NB: Yin is passed as the reference; the WithRef argument itself is unused here.
        Y = Threshold_Sn(Y, kmad=kmad, J=J, L0=L0, Fixed=thf, wscale=wscale, WithRef=Yin)
        #mY = np.maximum(0,1e-32+np.sqrt(np.sum(Y**2,axis=0)))  # Keep the modulus constant
        #Y = Y/mY
        # convergence criterion
        #dtol = np.mean(abs(np.arccos(np.minimum(1.,np.sum(Y*Yold,axis=0)))))  # Angular variation
        dtol = np.linalg.norm(Y - Yold) / np.linalg.norm(Yold)
        Yold = dp(Y)
        f.append(dtol)
        if verb:
            print("It. #", it, " - dtol = ", dtol)
    return Y, f
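# --- Illustrative sketch (assumption: not part of the original source) ---
# The same forward-backward splitting scheme on the simplest instance it can
# take, min_y 0.5*||x - y||^2 + lam*||y||_1, whose proximal operator is the
# soft threshold; the iterates converge to soft(x, lam). fbs_denoise_l1 is
# a hypothetical helper for illustration.
import numpy as np

def fbs_denoise_l1(x, lam, gamma=0.5, nmax=100, tol=1e-6):
    y = x.copy()
    for _ in range(nmax):
        y_old = y.copy()
        y = y + gamma * (x - y)                                   # gradient step
        y = np.sign(y) * np.maximum(np.abs(y) - gamma * lam, 0)   # prox step
        if np.linalg.norm(y - y_old) <= tol * (np.linalg.norm(y_old) + 1e-32):
            break
    return y

# fbs_denoise_l1(np.array([3., 0.1, -2.]), lam=0.5) -> approx [2.5, 0., -1.5]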
def TTCheck(self, kb3, al3, sym1):
    kb4 = dp(kb3)
    al4 = dp(al3)
    sym4 = dp(sym1)
    l = self.ttval(len(sym4))  # all rows of the truth table
    d = {}
    mod = []
    kl = 1
    for i in l:
        for m in range(len(i)):
            d[sym4[m]] = i[m]
        t1 = True
        t2 = True
        for i1 in kb4.split(","):
            t1 = t1 & self.kb_conv(i1, d)
        for i2 in al4.split(","):
            t2 = t2 & self.kb_conv(i2, d)
        if t1:
            if not t2:
                # A model of the KB falsifies alpha: entailment fails.
                kl = 0
                break
            else:
                mod.append(dp(d))
    if kl == 0:
        print(al4, "cannot be inferred")
    else:
        print(al4, " can be inferred")
        for i, j in enumerate(mod):
            print("Model", i + 1, ":", j, file=sys.stderr)
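# --- Illustrative sketch (assumption: not from the original source) ---
# The same truth-table entailment check in standalone form: KB |= alpha iff
# every assignment satisfying the KB also satisfies alpha. tt_entails is a
# hypothetical helper; kb and alpha are callables over an assignment dict.
from itertools import product

def tt_entails(kb, alpha, symbols):
    models = []
    for row in product([True, False], repeat=len(symbols)):
        d = dict(zip(symbols, row))
        if kb(d):
            if not alpha(d):
                return False, []   # a KB model falsifies alpha
            models.append(d)
    return True, models

# Example: KB = (a -> b) AND a  entails  b:
# tt_entails(lambda d: (not d['a'] or d['b']) and d['a'], lambda d: d['b'], ['a', 'b'])
# -> (True, [{'a': True, 'b': True}])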
def threshold_finalstep(Option, sigma, perc, Si, Ai, Arefi, X, dof, Weights, stepg=.8, pvalue=.996, eps=1e-3, J=2):
    """
    Option :
        1 : Threshold computed on the MAD operator of the norm of the propagated noise
        2 : Threshold based on the statistics of the input noise
        3 : Threshold based on the MAD operator of the residual of the noise over the noise-dependent threshold
    """
    (nb_obs, nb_pix, nb_sources) = np.shape(Ai)
    seuil, ww = threshold_interm(Option, sigma, Si, Ai, Arefi, X, dof, Weights, eps, stepg, pvalue, J)
    for H in range(nb_sources):
        norme = norm_rec(Ai[:, :, H] - Arefi[:, :, H], J)
        seuilT = dp(seuil[:, :, H])
        for j in range(J):
            normeR = norme[:, j]
            seuil_i = seuilT[:, j]
            if Weights:
                indNZ = np.where(abs(normeR) - seuil_i / ww[:, j, H] > 0)[0]
            else:
                indNZ = np.where(abs(normeR) - seuil_i > 0)[0]
            if len(indNZ) == 0:
                seuil[:, j, H] = dp(seuil_i)
                print('no elt detected')
            else:
                I = abs(normeR[indNZ]).argsort()[::-1]
                Kval = int(np.floor(perc * len(indNZ)))  # np.int is deprecated; plain int is equivalent
                if Kval > len(I) - 1 or I[Kval] > len(indNZ) - 1:
                    seuil[:, j, H] = dp(seuil_i)
                    print('threshold source-dpdt only')
                else:
                    print('threshold based on the nbr of coefs', Kval, len(indNZ))
                    IndIX = int(indNZ[I[Kval]])
                    thr = abs(normeR[IndIX])
                    if Weights:
                        seuil[:, j, H] = ww[:, j, H] * dp(thr)
                    else:
                        seuil[:, j, H] = dp(thr)
    return seuil
def lasso_direct(X, Ain, Sin, kend=3, stepgg=1., resol=3, lim=2e-6):
    """
    FISTA with sources sparse in starlets
    """
    S = dp(Sin)
    A = dp(Ain)
    (m, h, n) = np.shape(A)
    # Lipschitz constant: largest spectral norm of A_k^T A_k over the channels.
    ee = np.zeros((h))
    for k in range(h):
        ee[k] = LA.norm(np.dot(A[:, k, :].T, A[:, k, :]), ord=2)
    L = np.max(ee)
    convS_sub = 1
    t_ = 1
    y = S.copy()
    while convS_sub > lim:
        S_old = S.copy()
        diff = np.zeros((n, h))
        for k in range(h):
            diff[:, k] = np.dot(A[:, k, :].T,
                                np.reshape((X - np.sum(A * y.T, axis=2))[:, k], (m, 1)))[:, 0]
        prod = y + stepgg / L * diff
        for i in range(n):
            S[i, :] = seuillage(prod[i, :], diff[i, :] * stepgg / L, kend)
        # FISTA momentum update
        t = (1. + np.sqrt(1 + 4 * (t_)**2)) / 2.
        y = S + (t_ - 1.) / t * (S - S_old)
        t_ = t
        convS_sub = np.linalg.norm(S_old - S) / np.linalg.norm(S)
        print(convS_sub)
    return S
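# --- Illustrative sketch (assumption: not from the original source) ---
# The same FISTA recursion on a toy problem, min_s 0.5||x - A s||^2 + lam||s||_1,
# with the standard t-sequence and extrapolation step; fista_l1 is a
# hypothetical helper for illustration.
import numpy as np

def fista_l1(A, x, lam, n_iter=200):
    L = np.linalg.norm(A.T @ A, ord=2)   # Lipschitz constant of the gradient
    s = np.zeros(A.shape[1])
    y, t = s.copy(), 1.0
    for _ in range(n_iter):
        s_old = s.copy()
        grad = A.T @ (A @ y - x)
        z = y - grad / L
        s = np.sign(z) * np.maximum(np.abs(z) - lam / L, 0)  # soft threshold
        t_new = (1. + np.sqrt(1. + 4. * t**2)) / 2.
        y = s + (t - 1.) / t_new * (s - s_old)
        t = t_new
    return s

# rng = np.random.default_rng(0); A = rng.standard_normal((20, 10))
# fista_l1(A, A @ np.r_[3., np.zeros(9)], lam=0.1) approximately recovers the sparse vector.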
def Sources(t, w1, ptot, ptot1=.2, dynamic=2, n=2, J=3, Opt=1, w2=1):
    '''
    Creation of the sources, exactly sparse in DCT.
    Output: an n*t matrix, sparse in DCT.
    '''
    if Opt == 1:
        Sw = np.zeros((n, t, J + 1))
        for l in range(J):
            S = np.zeros((n, t))
            X, X0, A0, S, N, sigma_noise, kern = bssJ.Make_Experiment_Coherent(
                t_samp=t, ptot=ptot, w=w1, dynamic=dynamic)
            Sw[:, :, l] = dp(S)
        X, X0, A0, S, N, sigma_noise, kern = bssJ.Make_Experiment_Coherent(
            t_samp=t, ptot=ptot1, w=w2, dynamic=dynamic)
        Sw[:, :, -1] = dp(S)
        Su = ps.backward1d(Sw)
    else:
        Sw = np.zeros((n, t, J + 1))
        for l in range(J):
            S = np.zeros((n, t))
            X, X0, A0, S, N, sigma_noise, kern = bssJ.Make_Experiment_Coherent(
                t_samp=t, ptot=ptot, w=w1, dynamic=dynamic)
            Sw[:, :, l] = dp(S)
        Su = ps.backward1d(Sw)
    return Su
def OPT_Rand(self, id, pos):
    import random
    from copy import deepcopy as dp
    cnt = 0
    tmp = dp(self.psoRoute)
    flag = False
    while cnt < 100:
        s, f = random.randint(0, len(tmp) - 1), random.randint(0, len(tmp) - 1)
        tmp[s], tmp[f] = tmp[f], tmp[s]
        fitNewRoute, routesNewRoute = self.calcFitnessPsoRoute(tmp)
        if fitNewRoute < self.fitness:
            # self.psoRoute = dp(tmp)
            self.fitness = fitNewRoute
            # self.routes = dp(routesNewRoute)
            flag = True
            cnt = 0
        else:
            tmp[s], tmp[f] = tmp[f], tmp[s]
            cnt += 1
    if flag:
        self.psoRoute = dp(tmp)
        # fitNewRoute, routesNewRoute = self.calcFitnessPsoRoute(tmp)
        # self.fitness = fitNewRoute
        self.fitness, self.routes = self.calcFitnessPsoRoute(tmp)
    return flag
def main():
    color_dict = {}
    color_dict['All_variants'] = "#fa6525"
    color_dict['GWAS_only'] = "#00b8a5"
    df = pd.read_csv("snp_data.tsv", sep="\t")
    target = "HbF"
    df = df.fillna(0)
    mu = df[target].mean()
    sigma = df[target].std()
    print(mu, sigma)
    df['label'] = [define_high_low(x, mu, sigma) for x in df[target]]
    df = df[df['label'] >= 0]
    Y = df['label']
    print(Y[Y == 0].shape)
    print(Y[Y == 1].shape)
    X = df.drop([target, 'label'], axis=1)
    sel_columns = []
    for c in X.columns:
        if "chr" not in c:
            sel_columns.append(c)
    X1 = X[sel_columns]
    model, params = sklearn_RF()
    auROC_list_a = []
    auROC_list_b = []
    auPRC_list_a = []
    auPRC_list_b = []
    df_list = []
    for i in range(100):
        sample_index = X.sample(frac=1).index.tolist()
        ddf, a, c = simple_CV_evaluation(model, params, X.loc[sample_index], Y.loc[sample_index])
        ddf.to_csv("%s_prediction.csv" % ("ALL"), index=False)
        plot_top_features(dp(model), X, Y, "ALL")
        ddf['label'] = "All_variants"
        auROC_list_a += a
        auPRC_list_a += c
        ddf2, b, d = simple_CV_evaluation(model, params, X1.loc[sample_index], Y.loc[sample_index])
        ddf2.to_csv("%s_prediction.csv" % ("GWAS"), index=False)
        plot_top_features(dp(model), X1, Y, "GWAS")
        ddf2['label'] = "GWAS_only"
        df_list.append(ddf)
        df_list.append(ddf2)
        auROC_list_b += b
        auPRC_list_b += d
    plot_df = pd.concat(df_list)
    plot_auROC_multi(plot_df, color_dict)
    plot_auPRC_multi(plot_df, color_dict)
    boxplot_paired_t_test(auROC_list_a, auROC_list_b, color_dict,
                          "Area under ROC curve", "auROC_boxplot")
    boxplot_paired_t_test(auPRC_list_a, auPRC_list_b, color_dict,
                          "Area under Precision-Recall curve", "auPRC_boxplot")
def OPT_Rand(self, id, pos, itera=-1, totItera=-1):
    import random
    from copy import deepcopy as dp
    cnt = 0
    tmp = dp(self.psoRoute)
    flag = False
    prob = itera / float(totItera)
    while cnt < 100:
        s, f = random.randint(0, len(tmp) - 1), random.randint(0, len(tmp) - 1)
        tmp[s], tmp[f] = tmp[f], tmp[s]
        fitNewRoute, routesNewRoute = self.geraRotas(tmp)
        if itera != -1:
            aux = random.uniform(0, 1)
            flag2 = aux > prob
        else:
            flag2 = False
        if flag2 or fitNewRoute < self.fitness:
            # self.psoRoute = dp(tmp)
            self.fitness = fitNewRoute
            # self.routes = dp(routesNewRoute)
            flag = True
            cnt += 1
        else:
            tmp[s], tmp[f] = tmp[f], tmp[s]
            cnt += 1
    if flag:
        self.psoRoute = dp(tmp)
        # fitNewRoute, routesNewRoute = self.calcFitnessPsoRoute(tmp)
        # self.fitness = fitNewRoute
        self.fitness, self.routes = self.geraRotas(tmp)
    return flag
def thr3(Sr, Ntilde, sigma, dof, pvalue):
    import scipy.special
    g1 = thr2(Sr, sigma, dof, pvalue)
    gammaValue = np.sqrt(2) * scipy.special.gamma((dof + 1.) / 2.) / scipy.special.gamma(dof / 2.)
    gstd = np.sqrt(dof - gammaValue**2)
    prodN = LA.norm(ft.dct(np.dot(Ntilde, np.diag(Sr)), axis=1, norm='ortho'), axis=0)
    if np.sum(prodN > g1) > 1:
        print(np.sum(prodN > g1))
        prodF = dp(prodN[prodN > g1])
        sigma_n = mad(prodF)
        sigmaF = float(sigma_n) / float(gstd)
        gm = np.maximum(stats.chi.ppf(pvalue, dof, scale=sigmaF), g1)
    else:
        gm = dp(g1)
    return gm
def kb_conv(self, kb2, dict1):
    d1 = dp(dict1)
    k2 = dp(kb2)
    if len(k2.split()) > 3:
        # Split on the lowest-precedence connective present: <> before -> before ^ before v.
        if "<>" in k2.split():
            l = k2.split("<>")
            return self.comp(self.kb_conv(l[0], d1), self.kb_conv(l[1], d1), "<>")
        if "->" in k2.split():
            l = k2.split("->")
            return self.comp(self.kb_conv(l[0], d1), self.kb_conv(l[1], d1), "->")
        if "^" in k2.split():
            l = k2.split("^", 1)
            return self.comp(self.kb_conv(l[0], d1), self.kb_conv(l[1], d1), "^")
        if "v" in k2.split():
            l = k2.split("v", 1)
            return self.comp(self.kb_conv(l[0], d1), self.kb_conv(l[1], d1), "v")
    else:
        k3 = k2.split()
        if len(k3) == 3:
            return self.comp(d1[k3[0]], d1[k3[2]], k3[1])
        elif len(k3) == 2:
            return not d1[k3[1]]  # a two-token clause is a negated symbol
        elif len(k3) == 1:
            return d1[k3[0]]
        else:
            print("Error empty clause", file=sys.stderr)
def merge_send(self, channel, channel2):
    """
    Send own process data to another process and suicide.
    """
    channel.put(dp(self.qubits))
    channel.put(dp(self.qubit))
    channel2.put(dp(self.qubits))
    return
def give_statevector(self, channel):
    """
    Sends the Qubit IDs and their state vectors over a channel.

    Args:
        channel (Queue): Channel to return the requested data to.
    """
    channel.put((dp(self.qubits), dp(self.qubit)))
def seuillage(Sini, diffi, K):
    S_ = dp(Sini)
    grad_ = dp(diffi)
    S_ = softThres(S_, K * mad(grad_))
    return S_
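# --- Illustrative sketch (assumption: not from the original source) ---
# Standalone stand-ins for the two helpers seuillage() relies on: a MAD scale
# estimator and a soft-thresholding operator. The actual mad/softThres used
# in this code base may differ (e.g. in the 1.4826 rescaling).
import numpy as np

def mad_demo(x):
    """Median absolute deviation, rescaled to estimate a Gaussian sigma."""
    return 1.4826 * np.median(np.abs(x - np.median(x)))

def soft_thres_demo(x, thr):
    """Shrink x toward zero by thr (the prox of thr*||.||_1)."""
    return np.sign(x) * np.maximum(np.abs(x) - thr, 0)

# soft_thres_demo(x, 3 * mad_demo(g)) mirrors seuillage(x, g, K=3) under these assumptions.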
def BSSEval(A0, S0, gA, gS):
    import numpy as np
    from copy import deepcopy as dp
    na = np.shape(A0)
    n = na[1]
    ns = np.shape(S0)
    t = ns[1]
    A, S = CorrectPerm(A0, S0, gA, gS)
    s_target = np.zeros((n, t))
    e_interf = dp(s_target)
    e_noise = dp(s_target)  # stays zero: the noise term is not estimated here
    e_artif = dp(s_target)
    s_target = np.dot(
        np.dot(np.diag(1. / np.diag(np.dot(S0, S0.T))),
               np.diag(np.diag(np.dot(S0, S.T)))), S0)
    Ps = np.dot(np.dot(np.linalg.inv(np.dot(S0, S0.T)), np.dot(S0, S.T)), S0)
    e_interf = Ps - s_target
    Psn = dp(Ps)
    SNR = 1e24
    e_artif = S - Psn
    SDR = 10 * np.log10(
        np.linalg.norm(s_target, ord='fro')**2 /
        (np.linalg.norm(e_interf, ord='fro')**2 +
         np.linalg.norm(e_artif, ord='fro')**2 +
         np.linalg.norm(e_noise, ord='fro')**2))
    SIR = 10 * np.log10(
        np.linalg.norm(s_target, ord='fro')**2 /
        np.linalg.norm(e_interf, ord='fro')**2)
    SAR = 10 * np.log10(
        (np.linalg.norm(s_target, ord='fro')**2 +
         np.linalg.norm(e_interf, ord='fro')**2 +
         np.linalg.norm(e_noise, ord='fro')**2) /
        np.linalg.norm(e_interf, ord='fro')**2)
    sRES = np.zeros((n, 3))
    for r in range(n):
        sRES[r, 0] = 10 * np.log10(
            np.linalg.norm(s_target[r, :], ord=2)**2 /
            (np.linalg.norm(e_interf[r, :], ord=2)**2 +
             np.linalg.norm(e_artif[r, :], ord=2)**2 +
             np.linalg.norm(e_noise[r, :], ord=2)**2))
        sRES[r, 1] = 10 * np.log10(
            np.linalg.norm(s_target[r, :], ord=2)**2 /
            np.linalg.norm(e_interf[r, :], ord=2)**2)
        sRES[r, 2] = 10 * np.log10(
            (np.linalg.norm(s_target[r, :], ord=2)**2 +
             np.linalg.norm(e_interf[r, :], ord=2)**2 +
             np.linalg.norm(e_noise[r, :], ord=2)**2) /
            np.linalg.norm(e_interf[r, :], ord=2)**2)
    return SDR, SAR, SIR, sRES
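# --- Illustrative sketch (assumption: not from the original source) ---
# The core of the SDR metric on a single source: project the estimate onto
# the target signal and compare the energies of the two parts. sdr_demo is a
# hypothetical helper and skips the permutation correction done above.
import numpy as np

def sdr_demo(s_true, s_est):
    s_target = (s_true @ s_est) / (s_true @ s_true) * s_true
    e = s_est - s_target
    return 10 * np.log10((s_target @ s_target) / (e @ e))

# t = np.linspace(0, 1, 1000); s = np.sin(2 * np.pi * 5 * t)
# sdr_demo(s, s + 0.1 * np.random.randn(1000)) is roughly 17 dB.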
def Inpaint_Sn(b, kend=3, mask=None, nmax=100, J=3, gamma=1, tol=1e-6, xinit=None, xref=None, verb=0):
    #
    # x belongs to Sn and is sparse in the manifold-based starlet domain
    #
    # Initialize useful parameters
    nb = np.shape(b)
    x = prox_Sn((1. - mask) * np.random.rand(nb[0], nb[1], nb[2]) + mask * b)
    if xinit is not None:
        x = dp(xinit)
    xold = dp(x)
    tk = 1.
    Go_On = True
    it = 0
    # Main loop
    while Go_On:
        it += 1
        # Compute the gradient of the data fidelity term
        g = mask * (x - b)
        # Project onto Sn
        xp_half = prox_Sn(x - gamma * g)
        xp = prox_StarletSn(xp_half, kmad=kend, W=None, xref=xref, J=J)
        # Update x
        tkp = 0.5 * (1 + np.sqrt(1 + 4 * tk**2))
        x = xp + (tk - 1) / tkp * (xp - xold)
        tk = dp(tkp)  # carry the FISTA momentum parameter over (the original wrote tkp = dp(tk), freezing tk at 1)
        d_x = abs(np.sum(np.sum(xp * xold, axis=0)) / (32.**2) - 1)  # -- better adapted to signals that belong to S1
        #d_x = np.linalg.norm(xp - xold)/np.linalg.norm(xold)
        if d_x < tol:
            Go_On = False
        if it > nmax:
            Go_On = False
        if verb:
            print('It. #: ', it, ' - Relative variation: ', d_x)
        xold = dp(xp)
    return x
def seuillage_weights(Sini, diffi, K):
    S_ = dp(Sini)
    grad_ = dp(diffi)
    thr = K * mad(grad_)
    WS = thr / (thr + np.abs(S_))
    S_ret = softThres(S_, WS * thr)
    return S_ret
def Inpaint_FBS_Rn(X, Mask=None, Yin=None, nmax=100, kmad=3, tol=1e-6, gamma=0.5, J=3, verb=0, L0=False, Fixed=None):
    """
    Solves min_{Y in Sn} lambda ||F_Sn(Y)||_1 + 0.5*||X - Y||_F^2
    """
    if Yin is not None:
        Y = dp(Yin)
    else:
        Y = dp(X)
    Go_On = 1
    it = 0
    dtol = 1.
    L = 1.
    alpha = gamma / L
    Yold = dp(Y)
    f = []
    while Go_On:
        it += 1
        if it > nmax:
            Go_On = 0
        if dtol < tol:
            Go_On = 0
        # Compute the gradient / gradient step
        if Mask is None:
            dg = X - Y
        else:
            dg = Mask * (X - Mask * Y)
        Y = Y + alpha * dg  # The update could also be done on the hypersphere - should not change
        # Thresholding
        if Fixed is None:
            thf = None
        else:
            thf = alpha * Fixed
        Y = Threshold_Rn(Y, kmad=kmad, J=J, L0=L0, Fixed=thf)
        # convergence criterion
        #dtol = np.mean(abs(np.arccos(np.minimum(1.,np.sum(Y*Yold,axis=0)))))  # Angular variation
        dtol = np.linalg.norm(Y - Yold) / np.linalg.norm(Yold)
        Yold = dp(Y)
        f.append(dtol)
        if verb:
            print("It. #", it, " - dtol = ", dtol)
    return Y, f
def symbols(self, ja1, jb1):
    ja2 = dp(ja1)
    jb2 = dp(jb1)
    sm = []
    for a1 in ja2:
        if a1 not in {",", "^", "v", "->", "<>", "!", " ", '', "|="}:
            sm.append(a1)
    for b1 in jb2:
        if b1 not in {",", "^", "v", "->", "<>", "!", " ", '', "|="}:
            sm.append(b1)
    return list(set(sm))
def merge_send(self, channel, channel2):
    """
    Send own process data to another process and suicide.

    Args:
        channel (Queue): Channel to send own data to other Qubit Thread.
        channel2 (Queue): Channel to send qubit ids to parent, to update
            the qubit ids in its dictionary.
    """
    channel.put(dp(self.qubits))
    channel.put(dp(self.qubit))
    channel2.put(dp(self.qubits))
    return
def Denoise_Sn(b, kend=3, nmax=100, J=3, gamma=1, tol=1e-6, xinit=None, verb=0):
    # Initialize useful parameters
    x = prox_Sn(dp(b))
    if xinit is not None:
        x = dp(xinit)
    xold = dp(x)
    tk = 1.
    Go_On = True
    it = 0
    # Main loop
    while Go_On:
        it += 1
        # Compute the gradient of the data fidelity term
        g = x - b
        # Project onto Sn
        xp_half = prox_Sn(x - gamma * g)
        xp = prox_StarletSn(xp_half, kmad=kend, W=None, J=J)
        # Update x
        tkp = 0.5 * (1 + np.sqrt(1 + 4 * tk**2))
        x = xp + (tk - 1) / tkp * (xp - xold)
        tk = dp(tkp)  # carry the FISTA momentum parameter over (the original wrote tkp = dp(tk), freezing tk at 1)
        d_x = abs(np.max(np.sum(xp * xold, axis=0)) - 1)  # -- better adapted to signals that belong to S1
        #d_x = np.linalg.norm(xp - xold)/np.linalg.norm(xold)
        if d_x < tol:
            Go_On = False
        if it > nmax:
            Go_On = False
        if verb:
            print('It. #: ', it, ' - Relative variation: ', d_x)
        xold = dp(xp)
    return x
def Prox_Inp(b, mask, nmax=100, J=2, k_mad=3, tol=1e-4):
    import numpy as np
    from copy import deepcopy as dp
    x = dp(b)
    y = dp(x)
    L = 1
    tk = 1
    Go_On = 1
    it = 0
    while Go_On:
        it += 1
        # -- computation of the gradient
        g = -(b - mask * y)
        # -- gradient descent
        x_half = y - 1 / L * g
        # -- thresholding / or applying mask
        c, w = Starlet_Forward(x=x_half, J=J)
        for s in range(0, J):
            thrd = k_mad * mad(w[:, :, s])
            w[:, :, s] = (w[:, :, s] - thrd * np.sign(w[:, :, s])) * (abs(w[:, :, s]) > thrd)
        xp = Starlet_Inverse(c=c, w=w)
        tkp = 0.5 * (1 + np.sqrt(1 + 4 * tk * tk))
        y = xp + (tk - 1) / tkp * (xp - x)
        d_iff = np.linalg.norm(xp - x) / (1e-12 + np.linalg.norm(xp))
        if d_iff < tol:
            Go_On = 0
        if it > nmax:
            Go_On = 0
        x = dp(xp)
        tk = dp(tkp)
    return xp
def OPT_Inter(self, id, pos, itera=-1, totItera=-1):
    import random
    from copy import deepcopy as dp
    import networkx as nx
    import matplotlib.pyplot as plt
    n = len(self.routes)
    # print("Fitness agora: {}".format(self.fitness))
    flag = True
    ultraFlag = False
    prob = itera / float(totItera)
    while flag:
        flag = False
        for x in range(n):
            for y in range(x + 1, n):
                if x == y:
                    continue
                a = dp(self.routes[x][0])
                capA = self.routes[x][1]
                b = dp(self.routes[y][0])
                capB = self.routes[y][1]
                for i in range(1, len(a) - 1):
                    dA = self.demanda_clientes[a[i] - 1]
                    for j in range(1, len(b) - 1):
                        dB = self.demanda_clientes[b[j] - 1]
                        if (capA - dA + dB <= self.capacidade_max
                                and capB - dB + dA <= self.capacidade_max):
                            fitA = self.calcFitnessOneRoute(a)
                            fitB = self.calcFitnessOneRoute(b)
                            a[i], b[j] = b[j], a[i]
                            fitAA = self.calcFitnessOneRoute(a)
                            fitBB = self.calcFitnessOneRoute(b)
                            if itera != -1:
                                tmp = random.uniform(0, 1)
                                flag2 = tmp > prob
                            else:
                                flag2 = False
                            if flag2 or fitAA + fitBB < fitA + fitB:
                                self.fitness = self.fitness - ((fitA + fitB) - (fitAA + fitBB))
                                self.psoRoute[a[i] - 1], self.psoRoute[b[j] - 1] = \
                                    self.psoRoute[b[j] - 1], self.psoRoute[a[i] - 1]
                                capA = capA - dA + dB
                                capB = capB - dB + dA
                                self.routes[x] = (a, capA)
                                self.routes[y] = (b, capB)
                                flag = True
                                ultraFlag = True
                            else:
                                a[i], b[j] = b[j], a[i]
    return ultraFlag
def successors(self):
    node_cpy = dp(self.config)
    config_lst = []
    # `keys` is assumed to be a module-level table of per-coordinate offset vectors.
    for i, key in enumerate(keys):
        new_config = dp(node_cpy)
        for j, x in enumerate(key):
            new_config[j] += x
            new_config[j] = min(new_config[j], 0)
        config_lst.append(Node(new_config))
    return config_lst
def Denoise_FBS_Rn(X, Yin=None, nmax=100, kmad=3, tol=1e-6, gamma=0.5, J=3, verb=0, L0=False, Fixed=None, wscale=None):
    """
    Solves min_{Y in Sn} lambda ||F_Sn(Y)||_1 + 0.5*||X - Y||_F^2
    """
    if Yin is not None:
        Y = dp(Yin)
    else:
        Y = dp(X)
    Go_On = 1
    it = 0
    dtol = 1.
    L = 1.
    alpha = gamma / L
    Yold = dp(Y)
    f = []
    while Go_On:
        it += 1
        if it > nmax:
            Go_On = 0
        if dtol < tol:
            Go_On = 0
        # Compute the gradient / gradient step
        dg = X - Y
        Y = Y + alpha * dg  # The update could also be done on the hypersphere - should not change
        # Thresholding
        if Fixed is None:
            thf = None
        else:
            thf = alpha * Fixed
        Y = Threshold_Rn(Y, kmad=kmad, J=J, L0=L0, Fixed=thf, wscale=wscale)
        # convergence criterion
        dtol = np.linalg.norm(Y - Yold) / np.linalg.norm(Yold)
        Yold = dp(Y)
        f.append(dtol)
        if verb:
            print("It. #", it, " - dtol = ", dtol)
    return Y, f
def seuillage_star(Sini, diffi, K, res, t, eps=1e-3):
    Si = dp(Sini)
    diff = dp(diffi)
    S_ = ps.forward1d(Si.reshape(1, t), J=res)
    grad_ = ps.forward1d(diff.reshape(1, t), J=res)
    ww = np.zeros((t))
    for j in range(res):
        thr = K * mad(grad_[0, :, j])
        valmax = np.max(abs(S_[0, :, j]))
        ww = eps / (eps + abs(S_[0, :, j]) / valmax)
        S_[0, :, j] = softThres(S_[0, :, j], thr * ww)
    S_ret = ps.backward1d(S_.reshape(1, t, res + 1))[0, :]
    return S_ret
def sc(train, test, mark=None):
    train = dp(train)  # deepcopy
    test = dp(test)
    if mark:
        train.x = train.x[mark]
        test.x = test.x[mark]
    s = StandardScaler().fit(train.x)
    train.x = s.transform(train.x)
    test.x = s.transform(test.x)
    # train.x = train.x.get_values()
    # test.x = test.x.get_values()
    return train, test
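# --- Usage sketch (assumption: not from the original source) ---
# The core of sc(): fit a StandardScaler on the training features only and
# apply the same transform to both splits, avoiding test-set leakage.
import numpy as np
from sklearn.preprocessing import StandardScaler

X_train = np.array([[1., 10.], [2., 20.], [3., 30.]])
X_test = np.array([[2., 25.]])
scaler = StandardScaler().fit(X_train)   # statistics come from the training set only
X_train_std = scaler.transform(X_train)
X_test_std = scaler.transform(X_test)    # test data reuses the training mean/std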
def get_message(message, user, level=None, guild_id=None):
    global messages_cache
    try:
        if message not in messages_cache:
            with open(MESSAGES_PATH, "r") as f:
                messages_cache = json.load(f)
        msg = dp(messages_cache[message])
        msg = msg.replace("{user}", user.mention)
        if level is not None:
            msg = msg.replace("{level}", level)
        if guild_id is not None:
            warns = get_warns(user.id, guild_id)
            if warns is None:
                warns = 0
            msg = msg.replace("{warns}", str(warns))
        return msg
    except Exception:  # narrowed from a bare except, which would also swallow KeyboardInterrupt
        traceback.print_exc()
        return None
def train(self, errorRate):
    realRate = 999
    J1 = PosIf  # sentinel: assumed to be a module-level "positive infinity" constant
    J2 = 0
    ptr = 0
    times = 0
    while realRate >= errorRate:
        # Build the next mini-batch of k samples, wrapping around the data set.
        trainSetX = []
        trainSetY = []
        for i in xrange(self.k):
            if ptr + i >= self.m:
                ptr -= self.m
            trainSetX.append(self.x[ptr + i])
            trainSetY.append(self.y[ptr + i])
        ptr += self.k
        tmpParam = dp(self.param)
        J2 = 0
        for j in xrange(len(self.param)):
            sumX = 0
            for i in xrange(self.k):
                cost = 0
                for t in xrange(len(trainSetX[i])):
                    cost += trainSetX[i][t] * self.param[t]
                cost -= trainSetY[i]
                if j == 0:
                    J2 += cost ** 2
                sumX += cost * trainSetX[i][j]
            sumX /= self.k
            # Decaying learning rate: alfa * 50000 / (times + 50000)
            tmpParam[j] = tmpParam[j] - self.alfa * 50000 / (times + 50000) * sumX
        self.param = tmpParam
        if J1 != PosIf:
            realRate = abs(J1 - J2) / J2
        J1 = J2
        times += 1
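# --- Illustrative sketch (assumption: not from the original source) ---
# The same training loop in vectorized standalone form: mini-batch gradient
# descent for linear least squares with the 50000/(t+50000) learning-rate
# decay used above. train_minibatch is a hypothetical helper; it samples
# batches randomly instead of sequentially.
import numpy as np

def train_minibatch(x, y, k=10, alfa=0.1, tol=1e-6, max_steps=10000):
    m, d = x.shape
    param = np.zeros(d)
    j_prev, times = None, 0
    for _ in range(max_steps):
        idx = np.random.choice(m, k, replace=False)   # one mini-batch
        err = x[idx] @ param - y[idx]                 # batch residuals
        grad = x[idx].T @ err / k
        param -= alfa * 50000. / (times + 50000.) * grad
        j = np.sum(err**2)
        if j_prev is not None and abs(j_prev - j) / max(j, 1e-12) < tol:
            break
        j_prev, times = j, times + 1
    return param

# x = np.hstack([np.ones((100, 1)), np.random.rand(100, 1)])
# train_minibatch(x, x @ np.array([1., 2.])) approaches [1, 2].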
def paint(src, tgt=None):
    """
    Paint the parameter values of a source network onto the target network.
    If the target is None, a new network is automatically instantiated;
    otherwise the caller must ensure the two are homogeneous.
    src, tgt: source and target networks.
    """
    if tgt is None:
        return dp(src)
    # the stack to hold recursive networks
    stk = [(src, tgt)]
    while len(stk) > 0:
        # pop up the networks at the top
        s, d = stk.pop()
        # do a shallow painting
        s.__pcpy__(d)
        # push in child networks
        stk.extend(zip(s, d))
    # dummy return
    return tgt
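# --- Illustrative sketch (assumption: not from the original source) ---
# The same stack-based traversal pattern, demonstrated on plain nested lists
# instead of networks with a __pcpy__ hook: copy leaf values from src into
# tgt without reallocating tgt's structure. paint_lists is illustrative only.
from copy import deepcopy as dp

def paint_lists(src, tgt=None):
    if tgt is None:
        return dp(src)
    stk = [(src, tgt)]
    while stk:
        s, d = stk.pop()
        for i, v in enumerate(s):
            if isinstance(v, list):
                stk.append((v, d[i]))   # descend into the child structure
            else:
                d[i] = v                # shallow "paint" of one value
    return tgt

# a = [1, [2, 3]]; b = [0, [0, 0]]; paint_lists(a, b) -> b becomes [1, [2, 3]]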
def main():
    n = 10
    _num = []
    length = 0
    while 1:
        num = solve(n)
        if len(num) == 4 and num != _num:
            _num = dp(num)
            length += 1
            print 'length =', length
            print n
        else:
            length = 0
        if length == 4:
            break
        n += 1
    print ''
    print n - 3
def change(n):
    s = str(n)
    length = len(s)
    for i in xrange(length - 1):
        for k in xrange(i + 1, length):   # second replacement position, after i
            nums = []                     # primes produced by this replacement pattern
            for j in xrange(1, 10):
                # Replace positions i and k with the same digit j
                # (the original wrote str(i) here, which ignored the digit loop,
                # and reset nums inside this loop so it could never reach 8).
                _n = int(s[:i] + str(j) + s[i+1:k] + str(j) + s[k+1:])
                if isprime_2(_n):
                    nums.append(_n)
            if len(nums) > 1:
                print len(nums)
            if len(nums) == 8:
                return min(nums)
    return False
def test_sorted(self):
    i = '1324756890'
    correct = '0123456789'
    j = dp(i)
    l, inversions = merge_sort(j)
    self.assertSequenceEqual(correct, l)
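# --- Illustrative sketch (assumption: not from the original source) ---
# A merge_sort of the shape the test expects: it returns the sorted sequence
# together with the number of inversions counted during the merges.
# merge_sort_demo is a hypothetical stand-in for the tested implementation.
def merge_sort_demo(seq):
    seq = list(seq)
    if len(seq) <= 1:
        return seq, 0
    mid = len(seq) // 2
    left, inv_l = merge_sort_demo(seq[:mid])
    right, inv_r = merge_sort_demo(seq[mid:])
    merged, inv = [], inv_l + inv_r
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
            inv += len(left) - i   # right[j] jumps over every remaining left item
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged, inv

# merge_sort_demo('1324756890') -> (list('0123456789'), 12)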
def stability_test():
    project_name = raw_input('\n+++ Enter project name (project that returned the best results):\n\n')
    model_mod = raw_input('\n+++ Which velocity model should be used?\n\n1- Mean Model.\n2- Minimum Model (default).\n\n')
    max_dep = raw_input('\n+++ Maximum depth to plot velocity model (def=30.0km):\n\n')
    print '\n+++ Please set the following damping and inversion mode parameters:\n\n'
    othet = raw_input('Origin Time (def=0.01) = ')
    xythet = raw_input('Epicenter (def=0.01) = ')
    zthet = raw_input('Depth (def=0.01) = ')
    vthet = raw_input('Velocity (def=1.0) = ')
    stathet = raw_input('Station Correction (def=1.0) = ')
    invrat = raw_input('Invert ratio (def=2) = ')
    numitr = raw_input('Number of iteration (def=7) = ')
    dmp_def_val = {'othet': 0.01, 'xythet': 0.01, 'zthet': 0.01, 'vthet': 1.0,
                   'stathet': 1.0, 'invrat': 2, 'numitr': 7, 'max_dep': 30.0}
    for i in dmp_def_val.keys():
        dmp = eval(i)
        if dmp:
            dmp_def_val[i] = float(dmp)
    if not os.path.exists(os.path.join('Check-Test', project_name)):
        os.makedirs(os.path.join('Check-Test', project_name))
    # PREPARE REQUIRED FILES
    vel_mod_file = open(os.path.join('Check-Test', project_name, 'model.mod'), 'w')
    with open(os.path.join('figs', project_name, 'model.mod')) as f:
        flag = False
        for l in f:
            if model_mod == '1':
                if 'Model: mean.mod' in l:
                    flag = True
                if flag and not l.strip():
                    flag = False
                if flag:
                    vel_mod_file.write(l)
            else:
                if 'Model: min.mod' in l:
                    flag = True
                if flag and not l.strip():
                    flag = False
                if flag:
                    vel_mod_file.write(l)
    vel_mod_file.close()
    with open(os.path.join('figs', project_name, 'report.dat')) as f:
        for l in f:
            if 'ID number' in l:
                ind = float(l.split()[-1])
    copy(os.path.join('velout', project_name, 'velest%d.out' % (ind)), 'tmp.dat')
    velestcmn = open(os.path.join('Check-Test', project_name, 'velest.cmn'), 'w')
    velestcmn.write('Velocity Stability Test')
    with open('tmp.dat') as f:
        h0 = '*** othet xythet zthet vthet stathet'
        h1 = '*** Modelfile:'
        h2 = '*** Stationfile:'
        h3 = '*** File with Earthquake data:'
        h4 = '*** Main print output file:'
        h5 = '*** File with final hypocenters in *.cnv format:'
        h6 = '*** File with new station corrections:'
        h7 = '*** delmin ittmax invertratio'
        f0 = False
        f1, f2, f3 = False, False, False
        f4, f5, f6 = False, False, False
        f7 = False
        c = 0
        for l in f:
            if 31 <= c <= 121:
                if h0 in l:
                    f0 = True
                if f0 and h0 not in l:
                    velestcmn.write(' %.3f %.3f %.3f %.3f %.3f\n' % (dmp_def_val['othet'],
                                                                     dmp_def_val['xythet'],
                                                                     dmp_def_val['zthet'],
                                                                     dmp_def_val['vthet'],
                                                                     dmp_def_val['stathet']))
                    f0 = False
                    continue
                if h1 in l:
                    f1 = True
                if f1 and h1 not in l:
                    velestcmn.write('model.mod\n')
                    f1 = False
                    continue
                if h2 in l:
                    f2 = True
                if f2 and h2 not in l:
                    velestcmn.write(os.path.join('..' + os.sep + '..', 'figs', project_name, 'sta_cor.out') + '\n')
                    f2 = False
                    continue
                if h3 in l:
                    f3 = True
                if f3 and h3 not in l:
                    velestcmn.write(os.path.join('..' + os.sep + '..', 'velinp', 'noisy.cnv') + '\n')
                    f3 = False
                    continue
                if h4 in l:
                    f4 = True
                if f4 and h4 not in l:
                    velestcmn.write('velest.out\n')
                    f4 = False
                    continue
                if h5 in l:
                    f5 = True
                if f5 and h5 not in l:
                    velestcmn.write('final_loc.cnv\n')
                    f5 = False
                    continue
                if h6 in l:
                    f6 = True
                if f6 and h6 not in l:
                    velestcmn.write('station_cor.sta\n')
                    f6 = False
                    continue
                if h7 in l:
                    f7 = True
                if f7 and h7 not in l:
                    velestcmn.write(' 0.010 %d %d\n' % (dmp_def_val['numitr'], dmp_def_val['invrat']))
                    f7 = False
                    continue
            velestcmn.write(l)
            c += 1
    velestcmn.close()
    # RUN VELEST TO PERFORM TEST
    here = os.getcwd()
    os.chdir(os.path.join('Check-Test', project_name))
    os.system('velest > /dev/null')
    os.chdir(here)
    # PLOT STABILITY RESULTS
    x_ini = []
    y_ini = []
    z_ini = []
    x_nsy = []
    y_nsy = []
    z_nsy = []
    x_fin = []
    y_fin = []
    z_fin = []
    # DEFINE RESOURCES
    d_ini = os.path.join('figs', project_name, 'fin_hyp.cnv')
    d_nsy = os.path.join('velinp', 'noisy.cnv')
    d_fin = os.path.join('Check-Test', project_name, 'final_loc.cnv')
    for x, y, z, inp in zip([x_ini, x_nsy, x_fin], [y_ini, y_nsy, y_fin],
                            [z_ini, z_nsy, z_fin], [d_ini, d_nsy, d_fin]):
        with open(inp) as f:
            for l in f:
                if l[25:26] == 'N' and l[35:36] == 'E':
                    y.append(float(l[17:25]))
                    x.append(float(l[26:35]))
                    z.append(float(l[36:43]))
    vel_ini = [[], []]
    with open(os.path.join('Check-Test', project_name, 'model.mod')) as f:
        flag = False
        for l in f:
            if 'P-VELOCITY MODEL' in l:
                flag = True
            if flag and len(l.split()) == 1:
                break
            if flag:
                vel_ini[0].append(float(l.split()[0]))
                vel_ini[1].append(float(l.split()[1]))
    vi = dp(vel_ini[0])
    y = [-1 * d for d in vel_ini[1]]
    x = list(hstack([[m, n] for m, n in zip(vel_ini[0], vel_ini[0])]))
    y = list(hstack([[m, n] for m, n in zip(y, y)]))
    y.pop(0)
    y.append(-1 * dmp_def_val['max_dep'])
    vel_ini = [x, y]
    with open(os.path.join('Check-Test', project_name, 'velest.out')) as f:
        flag = False
        for l in f:
            if ' Velocity model 1' in l:
                flag = True
                vel_fin = [[], []]
                continue
            if flag and not l.strip():
                flag = False
            if flag and l.split()[0][0].isdigit():
                vel_fin[0].append(float(l.split()[0]))
                vel_fin[1].append(float(l.split()[2]))
    vf = dp(vel_fin[0])
    y = [-1 * d for d in vel_fin[1]]
    x = list(hstack([[m, n] for m, n in zip(vel_fin[0], vel_fin[0])]))
    y = list(hstack([[m, n] for m, n in zip(y, y)]))
    y.pop(0)
    y.append(-1 * dmp_def_val['max_dep'])
    vel_fin = [x, y]
    init_plotting_isi(17, 9)
    plt.rcParams['axes.labelsize'] = 7
    ax1 = plt.subplot(221)
    x_diff = d2k(array(x_nsy) - array(x_ini))
    xma, xmi = max(x_ini), min(x_ini)
    yma, ymi = max(x_diff), min(x_diff)
    ax1.plot(x_ini, x_diff, color='r', marker='x', ms=3, linestyle='', zorder=102)
    x_diff = d2k(array(x_fin) - array(x_ini))
    ax1.plot(x_ini, x_diff, color='b', marker='x', ms=3, linestyle='', zorder=102)
    ax1.set_xlim(xmi - (xma - xmi) * .05, xma + (xma - xmi) * .05)
    ax1.set_ylim(ymi - (yma - ymi) * .05, yma + (yma - ymi) * .05)
    ax1.set_xlabel('Longitude')
    ax1.set_ylabel('Dislocation (km)')
    ax1.grid(True, linestyle='--', linewidth=.5, color='k', alpha=.3)
    ax1.locator_params(axis='x', nbins=6)
    ax1.locator_params(axis='y', nbins=6)
    # Plot KDE
    xmin, xmax = ax1.get_xlim()
    ymin, ymax = ax1.get_ylim()
    X, Y = mgrid[xmin:xmax:100j, ymin:ymax:100j]
    positions = vstack([X.ravel(), Y.ravel()])
    values = vstack([x_ini, x_diff])
    kernel = gaussian_kde(values)
    Z = reshape(kernel(positions).T, X.shape)
    im = ax1.contourf(X, Y, Z, cmap=plt.cm.gist_earth_r, alpha=.9, zorder=101)
    divider = make_axes_locatable(ax1)
    cax = divider.append_axes("right", size="4%", pad=0.05)
    cb = plt.colorbar(im, ax=ax1, cax=cax)
    tk_locator = ticker.MaxNLocator(nbins=6)
    cb.locator = tk_locator
    cb.update_ticks()
    cb.outline.set_linewidth(.75)
    cb.set_label(label='PDF', size=6)
    cb.ax.tick_params(labelsize=6)
    ax2 = plt.subplot(222)
    y_diff = d2k(array(y_nsy) - array(y_ini))
    xma, xmi = max(y_ini), min(y_ini)
    yma, ymi = max(y_diff), min(y_diff)
    ax2.plot(y_ini, y_diff, color='r', marker='x', ms=3, linestyle='', zorder=102)
    y_diff = d2k(array(y_fin) - array(y_ini))
    ax2.plot(y_ini, y_diff, color='b', marker='x', ms=3, linestyle='', zorder=102)
    ax2.set_xlim(xmi - (xma - xmi) * .05, xma + (xma - xmi) * .05)
    ax2.set_ylim(ymi - (yma - ymi) * .05, yma + (yma - ymi) * .05)
    ax2.set_xlabel('Latitude')
    ax2.set_ylabel('Dislocation (km)')
    ax2.grid(True, linestyle='--', linewidth=.5, color='k', alpha=.3)
    ax2.locator_params(axis='x', nbins=6)
    ax2.locator_params(axis='y', nbins=6)
    # Plot KDE
    xmin, xmax = ax2.get_xlim()
    ymin, ymax = ax2.get_ylim()
    X, Y = mgrid[xmin:xmax:100j, ymin:ymax:100j]
    positions = vstack([X.ravel(), Y.ravel()])
    values = vstack([y_ini, y_diff])
    kernel = gaussian_kde(values)
    Z = reshape(kernel(positions).T, X.shape)
    im = ax2.contourf(X, Y, Z, cmap=plt.cm.gist_earth_r, alpha=.9, zorder=101)
    divider = make_axes_locatable(ax2)
    cax = divider.append_axes("right", size="4%", pad=0.05)
    cb = plt.colorbar(im, ax=ax2, cax=cax)
    tk_locator = ticker.MaxNLocator(nbins=5)
    cb.locator = tk_locator
    cb.update_ticks()
    cb.outline.set_linewidth(.75)
    cb.set_label(label='PDF', size=6)
    cb.ax.tick_params(labelsize=6)
    ax3 = plt.subplot(223)
    z_diff = array(z_nsy) - array(z_ini)
    xma, xmi = max(z_ini), min(z_ini)
    yma, ymi = max(z_diff), min(z_diff)
    ax3.plot(z_ini, z_diff, color='r', marker='x', ms=3, linestyle='', zorder=102)
    z_diff = array(z_fin) - array(z_ini)
    ax3.plot(z_ini, z_diff, color='b', marker='x', ms=3, linestyle='', zorder=102)
    ax3.set_xlim(xmi - (xma - xmi) * .05, xma + (xma - xmi) * .05)
    ax3.set_ylim(ymi - (yma - ymi) * .05, yma + (yma - ymi) * .05)
    ax3.set_xlabel('Depth (km)')
    ax3.set_ylabel('Dislocation (km)')
    ax3.grid(True, linestyle='--', linewidth=.5, color='k', alpha=.3)
    ax3.locator_params(axis='x', nbins=6)
    ax3.locator_params(axis='y', nbins=6)
    # Plot KDE
    xmin, xmax = ax3.get_xlim()
    ymin, ymax = ax3.get_ylim()
    X, Y = mgrid[xmin:xmax:100j, ymin:ymax:100j]
    positions = vstack([X.ravel(), Y.ravel()])
    values = vstack([z_ini, z_diff])
    kernel = gaussian_kde(values)
    Z = reshape(kernel(positions).T, X.shape)
    im = ax3.contourf(X, Y, Z, cmap=plt.cm.gist_earth_r, alpha=.9, zorder=101)
    divider = make_axes_locatable(ax3)
    cax = divider.append_axes("right", size="4%", pad=0.05)
    cb = plt.colorbar(im, ax=ax3, cax=cax)
    tk_locator = ticker.MaxNLocator(nbins=6)
    cb.locator = tk_locator
    cb.update_ticks()
    cb.outline.set_linewidth(.75)
    cb.set_label(label='PDF', size=6)
    cb.ax.tick_params(labelsize=6)
    ax4 = plt.subplot(224)
    [i.set_linewidth(0.6) for i in ax4.spines.itervalues()]
    ax4.plot(vel_ini[0], -array(vel_ini[1]), 'r-', linewidth=1.5, label='Initial')
    ax4.plot(vel_fin[0], -array(vel_fin[1]), 'b-', linewidth=1.5, label='Inverted')
    model_rms_fit = norm(array(vf) - array(vi))
    ax4.set_xlabel('Velocity (km/s)')
    ax4.set_ylabel('Depth (km)')
    ax4.invert_yaxis()
    ax4.text(0.82, 0.9, '||model||=%.2f (km/s)' % (model_rms_fit),
             ha='center', va='center', transform=ax4.transAxes,
             bbox=dict(facecolor='w', alpha=0.5, pad=2), fontsize=6)
    ax4.grid(True, linestyle='--', linewidth=.5, color='k', alpha=.3)
    ax4.locator_params(axis='x', nbins=6)
    ax4.locator_params(axis='y', nbins=5)
    ax4.set_ylim(-min(vel_ini[1]), 0)
    plt.legend(loc=3, fontsize=6)
    plt.tight_layout()
    plt.savefig(os.path.join('Check-Test', project_name, 'StabilityTest.tiff'), dpi=300)
    plt.close()
    print '\n+++ Result was saved in "Check-Test%s" directory.\n' % (os.sep + project_name)
def main():
    f = codecs.open(SOURCE, "r", "utf-8")
    users = json.load(f)
    udict = {}  # list of each user's topics
    f.close()
    topics = {}
    ufmt = u'<li><a href="%s">%s</a> (<a href="%s/answers%s">%d answers</a>)</li>\n'
    for u in users:
        if len(u["topics"]) == 0:
            print "No topics for %s" % u["name"]
            continue
        u["topics"].sort(key=lambda t: t["count"], reverse=True)
        primary = u["topics"][0]
        pc = primary["count"]
        u2 = dp(u)
        u2["count"] = primary["count"]
        del u2["topics"]
        topics.setdefault(primary["name"], {"href": primary["href"], "users": []})
        topics[primary["name"]]["users"].append(u2)
        udict[u["href"]] = [primary["href"]]
        threshold = pc * RATIO
        for tp in u["topics"][1:]:
            if tp["count"] < MIN_ANS or tp["count"] < threshold:
                continue
            u3 = dp(u2)
            u3["count"] = tp["count"]
            topics.setdefault(tp["name"], {"href": tp["href"], "users": []})
            topics[tp["name"]]["users"].append(u3)
            udict[u["href"]].append(tp["href"])
    topics = [t for t in topics.iteritems()]
    topics.sort(key=lambda t: t[0].lower())
    for _, v in topics:
        v["users"].sort(key=lambda u: u["count"], reverse=True)
    # This removes people from popular topics if they are included anywhere else,
    # to keep each topic's list small (i.e. < MAX_PER_TOPIC).
    for t, v in topics:
        for i, u in enumerate(dp(v["users"])):
            if i >= MAX_PER_TOPIC and len(udict[u["href"]]) > 1:
                udict[u["href"]].remove(v["href"])
                v["users"].remove(u)
    # stats
    stats = {"users": 0, "average": 0, "median": 0}
    tss = []
    for u, ts in udict.iteritems():
        l = len(ts)
        tss.append(l)
        stats["users"] += 1
        stats["average"] += l
        # print "%25s: %2d" % (u, len(ts))
    tss.sort(reverse=True)
    stats["median"] = tss[len(tss) / 2]
    stats["average"] /= 0.0 + stats["users"]  # float division under Python 2
    del tss
    del udict
    # /stats
    f = codecs.open("tw-topics.out.html", "w", "utf-8")
    f.write('<html><head><meta charset="utf-8"/></head><body>')
    cpt = 0
    for t, v in topics:
        href = v["href"]
        f.write('<h1><a href="%s"/>%s</a></h1>\n' % (href, t))
        f.write("<ul>\n")
        for i, u in enumerate(v["users"]):
            f.write(ufmt % (u["href"], u["name"], u["href"], href, u["count"]))
        f.write("</ul>\n<br/>\n")
    f.write("</body></html>")
    f.close()
    fmt = "Wrote %d users (average topics/user: %.2f, med: %.2f)."
    print fmt % (stats["users"], stats["average"], stats["median"])