def H_not_split_width(Hfile, numrs, width): with open(Hfile) as f: data = f.readlines() # data=data[9309:] H = np.array([-3.0]) max = 0 min = 1000 for row in data: if (float(row.split()[0]) != -3.0): H = np.append(H, [float(row.split()[0]) * 3.96], axis=0) if (float(row.split()[0]) < min): min = float(row.split()[0]) print min if (float(row.split()[0]) > max): max = float(row.split()[0]) print max H = H[1:] print "" print max print min print max - min print(max - min) * 3.96 / width print int((max - min) / width) v = u.histogram(H, int((max - min) * 3.96 / width)) v = v[1:] return v
def H_not_split(Hfile, numrs, boxesperR): with open(Hfile) as f: data = f.readlines() data = data[9309:] H = np.array([-3.0]) for row in data: if (float(row.split()[0]) != -3.0): H = np.append(H, [float(row.split()[0]) * 3.96], axis=0) H = H[1:] print H[1] v = u.histogram(H, int(boxesperR * numrs)) v = v[1:] return v
def E_not_split(Hfile, numrs, boxesperR): with open(Hfile) as f: data = f.readlines() with open(Hfile) as dist: data2 = dist.readlines() H = np.array([-3.0]) for x in range(0, data.shape[0]): if (float(data2[x].split()[0]) != -3.0): H = np.append(H, [float(data[x].split()[2]) * .002872], axis=0) H = H[1:] print H[1] v = u.histogram(H, int(boxesperR * numrs)) v = v[1:] return v
def LJEsplit(Hfile, numrs, boxesperR):
    """Split per-step LJ energies (column 6 of mylog.log, scaled by 0.002872)
    into `numrs` windows delimited by -3.0 sentinel rows in `Hfile`, and
    histogram the second half of each window into `boxesperR` bins.

    Returns `splitt`, a (numrs*2, boxesperR+2) array holding, per window,
    one row of bin centers and one row of bin counts.
    """
    # Energies come from the fixed log file; both files carry 2 header lines.
    with open("mylog.log") as f:
        data = f.readlines()
    data = data[2:]
    #print data[1].split()
    with open(Hfile) as dist:
        data2 = dist.readlines()
    data2 = data2[2:]
    H = np.array([-3.0])       # energies (leading sentinel stripped below)
    search = np.array([-3.0])  # scaled distances used to find window breaks
    for row in data2:
        search = np.append(search, [float(row.split()[0]) * 3.96], axis=0)
    for x in range(0, len(data)):
        H = np.append(H, [float(data[x].split()[6]) * 0.002872], axis=0)
    array = np.array([0.0])    # accumulator for the current window
    index = 0
    rnum = 0                   # window (row-pair) counter into splitt
    H = H[1:]
    print len(H)
    search = search[1:]
    #print 'H array made'
    splitt = np.zeros((numrs * 2, boxesperR + 2))
    # Only the second half of each window is histogrammed (equilibration cut).
    perwindow = len(H) / numrs
    cutoff = perwindow / 2
    while (index < len(H)):
        if (float(search[index]) != -3.0 * 3.96 and index % perwindow > cutoff):
            array = np.append(array, [float(H[index])], axis=0)
            print 'enter'
        elif (float(search[index]) == -3.0 * 3.96):
            # Sentinel row: flush the accumulated window into a histogram.
            print "Gonna make a histogram"
            array = array[1:]
            v = u.histogram(array, int(boxesperR))
            v = v[1:]
            Ni = 0  # total count in this window's histogram
            for c in range(0, len(v)):
                Ni += v[c][1]
            print Ni
            for g in range(0, len(v)):
                splitt[2 * rnum][g] = v[g][0]      # bin center
                splitt[2 * rnum + 1][g] = v[g][1]  # bin count
            rnum += 1
            print(len(array))
            array = np.array([0.0])
        index += 1
    return splitt
def Hsplit(Hfile, numrs, boxesperR): with open(Hfile) as f: data = f.readlines() H = np.array([-3.0]) for row in data: H = np.append(H, [float(row.split()[0]) * 3.96], axis=0) array = np.array([0.0]) index = 0 rnum = 0 H = H[1:] #print 'H array made' splitt = np.zeros((numrs * 2, boxesperR + 2)) while (index < len(H)): if (float(H[index]) != -3.0 * 3.96): #print(H[index]) array = np.append(array, [float(H[index])], axis=0) index += 1 else: print "Gonna make a histogram" array = array[1:] v = u.histogram(array, int(boxesperR)) v = v[1:] Ni = 0 for c in range(0, len(v)): Ni += v[c][1] print Ni for g in range(0, len(v)): splitt[2 * rnum][g] = v[g][0] splitt[2 * rnum + 1][g] = v[g][1] rnum += 1 print(len(array)) array = np.array([0.0]) index += 1 return splitt
def Hsplitwidth(Hfile, numrs, width):
    """Split the scaled first column of `Hfile` into `numrs` windows at
    -3.0 sentinel rows, then histogram each window with bins of ~`width`.

    Pass 1 stores each window's values in `notsplitt` and tracks the widest
    per-window value range (`widthmax`), which sizes the output array.
    Pass 2 histograms each window with a per-window bin count.
    Returns `splitt`: alternating rows of bin centers and counts.
    """
    with open(Hfile) as f:
        data = f.readlines()
    #data= data[199328:]
    H = np.array([-3.0])
    for row in data:
        H = np.append(H, [float(row.split()[0]) * 3.96], axis=0)
    index = 0   # position in H
    aindex = 0  # position within the current window
    rnum = 0    # window counter
    H = H[1:]
    #print 'H array made'
    # NOTE: `min`/`max` shadow the builtins throughout this function.
    min = 10000
    max = 0
    widthmax = 0  # largest (max-min) range seen over any window
    notsplitt = np.zeros((numrs, (len(H) / numrs) - 1))
    while (index < len(H)):
        if (float(H[index]) != -3.0 * 3.96):
            #print(H[index])
            #print rnum
            #print aindex
            notsplitt[rnum][aindex] = float(H[index])
            if (float(H[index]) > max):
                max = float(H[index])
            # NOTE(review): paren placement — float(H[index] < min) floats the
            # *comparison* (0.0/1.0); truthiness matches the intended
            # float(H[index]) < min, so behavior is accidentally correct.
            if float(H[index] < min):
                min = float(H[index])
            index += 1
            aindex += 1
        else:
            # Window boundary: record its range and reset the trackers.
            if ((max - min) > widthmax):
                widthmax = max - min
            print max
            print min
            rnum += 1
            index += 1
            aindex = 0
            min = 10000
            max = 0
    print width
    print widthmax
    splitt = np.zeros((numrs * 2, (int(widthmax / width) + 1)))
    print "Gonna make a histogram"
    for z in range(0, numrs):
        max = 0
        min = 10000
        # Recompute this window's range from the stored values.
        for y in range(0, len(notsplitt[z])):
            if (notsplitt[z][y] > max):
                max = notsplitt[z][y]
            if (notsplitt[z][y] < min):
                min = notsplitt[z][y]
                print min
        # NOTE(review): the -2 here shrinks the bin count below range/width;
        # combined with the +2 loop bound below it appears to be a manual
        # fit to u.histogram's output length — confirm against u.histogram.
        boxes = int((max - min) / width) - 2
        v = u.histogram(notsplitt[z], int(boxes))
        v = v[1:]
        print boxes
        print splitt.shape
        print v.shape
        for c in range(0, boxes + 2):
            splitt[2 * z][c] = v[c][0]      # bin center
            print splitt[2 * z][c]
            splitt[2 * z + 1][c] = v[c][1]  # bin count
            print splitt[2 * z + 1][c]
    return splitt
# NOTE(review): this chunk appears to be the tail of a truncated
# `if len(sys.argv) < N:` argument check — the guard line is not visible
# here, so the usage/exit pair is reproduced as-is.
print "usage: trace_path.py database natoms nsteps"
sys.exit()
# Parse command-line arguments: database path, atom count, step count,
# optional number of paths and distance cutoff.
db_file = sys.argv[1]
# NOTE(review): rstrip(".db") strips any trailing '.', 'd', 'b' characters,
# not the literal suffix — e.g. "abd.db" -> "a". Verify intended behavior.
stem = db_file.rstrip(".db")
natoms = int(sys.argv[2])
nsteps = int(sys.argv[3])
ntotal = natoms*nsteps
npaths = 1
if (len(sys.argv) > 4):
    npaths = int(sys.argv[4])
dmax = 1e10  # default: effectively no distance cutoff
if (len(sys.argv) > 5):
    dmax = float(sys.argv[5])
# Compute one distance path per requested atom index.
paths = []
db.init(db_file,False)
for i in range(npaths):
    path = db.compute_path_distances(range(i,ntotal,natoms))
    paths.append(path)
# Write per-step distances; entries beyond dmax are marked with '*'.
f = open(stem+".paths","w")
for i in range(nsteps-1):
    print >>f, i,
    for j in range(npaths):
        d = paths[j][i]
        if d < dmax:
            print >>f," {0:12g}".format(d),
        else :
            print >>f," *",
    print >>f
# Histogram all path distances together.
util.histogram(util.flatten(paths),stem+".phist",[0,0.05])
import matplotlib.pyplot as plt
import radial_distribution as rad
import util as u
"""
This graphs the rho sub i in the WHAM method for the pair force. It also prints out a H, which is the histogram not normallized by all the N states
"""
# Read the raw distance samples (first column of H.txt).
with open("H.txt") as f:
    data = f.readlines()
H=np.array([-3.0])  # leading sentinel, stripped below
for row in data:
    H=np.append(H,[float(row.split()[0])],axis=0)
H=H[1:]
# 10*3.96*10 bins: 10 bins per unit over a 3.96*10 range (unscaled values).
v=u.histogram(H,int(10*3.96*10))
v=v[1:]
print v
# Scale bin centers to physical units; normalize counts to probabilities.
x = [float(row[0])*3.96 for row in v]
y1 = [float(row[1])/len(H) for row in v]
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
var1='probability'
ax1.set_title(var1)
ax1.set_xlabel('r')
ax1.set_ylabel(var1)
def histogram(self): print "> histogram of distances" util.histogram(self.Pair_distances) print "> histogram of min distances" util.histogram(self.Min_distances)
# Make the parent directory importable for the project modules below.
lib_path = os.path.abspath('..')
sys.path.append(lib_path)
import metric_search
import util
#####################################################################
## MAIN
#####################################################################
if __name__ == "__main__":
    # parse
    if (len(sys.argv) < 2):
        print "usage: database_test.py database [nbins]"
        sys.exit()
    # NOTE(review): rstrip(".db") strips trailing '.', 'd', 'b' characters,
    # not the literal suffix; and sys.argv[2] is read unconditionally even
    # though the usage string marks [nbins] as optional — IndexError if
    # omitted. Flagged, not changed.
    db_file = (sys.argv[1]).rstrip(".db")
    nb = int(sys.argv[2])
    stem = (db_file.split("/"))[-1]
    # Run the full analysis once per metric.
    for m in ["RMSD","OGTO"]:
        print "================= METRIC:",m,"==================="
        tag = stem+"_"+m
        s = metric_search.metric_search()
        N_neighborhoods = s.init(db_file,m)
        # min distances for each neighborhood
        #print "============ minimum distances ============="
        min_ds = s.Db.min_distances()
        util.histogram(min_ds,tag+".min_distances",nbins=nb)
        # histogram
        #print "============ distance histogram ============="
        util.histogram(s.Db.Pair_distances,tag+".histogram",nbins=nb)
        print
def get_histogram():
    """Fetch the histogram from util and return it JSON-encoded."""
    return json.dumps(util.histogram())
# NOTE(review): fragment — starts inside a larger script (`dfile`, `drange`,
# `nmax` defined upstream). A near-identical copy of this chunk appears
# elsewhere in the file.
# If the distance file is missing, build the database (which may create it),
# then re-check and bail out if it still does not exist.
if not os.path.isfile(dfile):
    stem = (dfile.split("."))[0]
    db = database.database()
    db.init(stem)
if not os.path.isfile(dfile):
    print dfile, "does not exist"
    sys.exit()
# init
distances = util.read_distances(dfile, drange, nmax)
util.save_distances(distances, dfile + ".npy")
# histogram
print "============ distance histogram ============="
util.histogram(distances, dfile + ".dhist", drange)
sys.exit()
# NOTE(review): everything below the sys.exit() above is unreachable, and
# `db` is only bound when the first branch ran.
# min distances for each neighborhood
print "============ minimum distances ============="
min_ds = db.min_distances()
util.histogram(min_ds, db_file + ".min_dhist", drange)
print "============ mean distances ============="
mean_ds = db.mean_distances()
util.histogram(mean_ds, db_file + ".mean_dhist", drange)
print "============ max distances ============="
max_ds = db.max_distances()
util.histogram(max_ds, db_file + ".max_dhist", drange)
# NOTE(review): fragment — starts inside a larger script (`dfile`, `drange`,
# `nmax` defined upstream). A near-identical copy of this chunk appears
# elsewhere in the file; consider deduplicating.
if not os.path.isfile(dfile):
    stem = (dfile.split("."))[0]
    db = database.database()
    db.init(stem)
if not os.path.isfile(dfile):
    print dfile,"does not exist"
    sys.exit()
# init
distances = util.read_distances(dfile,drange,nmax)
util.save_distances(distances,dfile+".npy")
# histogram
print "============ distance histogram ============="
util.histogram(distances,dfile+".dhist",drange)
sys.exit()
# NOTE(review): code below the sys.exit() above is unreachable.
# min distances for each neighborhood
print "============ minimum distances ============="
min_ds = db.min_distances()
util.histogram(min_ds,db_file+".min_dhist",drange)
print "============ mean distances ============="
mean_ds = db.mean_distances()
util.histogram(mean_ds,db_file+".mean_dhist",drange)
print "============ max distances ============="
max_ds = db.max_distances()
util.histogram(max_ds,db_file+".max_dhist",drange)
    # NOTE(review): fragment — the lines above `def TSBP` are the tail of an
    # enclosing packing function whose header is not visible here
    # (`W`, `H`, `anim`, `lmao`, `active` are defined upstream).
    packed = []
    # Total area demanded by all unpacked rectangles (count-weighted).
    area = sum([w*h*unpacked[(w,h)] for w,h in unpacked])
    window = animation.LMAO_Window((W,H))
    if anim:
        # Step the animation window on each packer callback.
        on_update = lambda active, packed, lmao, rect, unpacked, msg: window.update_on_next_click(active, packed, lmao, rect, unpacked, msg)
    else:
        # No-op callback when animation is disabled.
        on_update = lambda active, packed, lmao, rect, unpacked, msg: 0
    ret = lmao.pack((W,H), unpacked, packed, active, area, on_update)
    window.quit()
    return ret

def TSBP((W,H), unpacked, anim1=True, anim2=True):
    # Two-stage bin packing driver; stage 1 uses a histogram-guided packer.
    # NOTE(review): this definition runs past the end of the visible chunk —
    # the while-loop body below is truncated.
    hist = util.histogram((W,H))
    packed = []
    area = sum([w*h*unpacked[(w,h)] for w,h in unpacked])
    window = animation.TSBP_Window((W,H))
    if anim1:
        on_update1 = lambda hist, item, unpacked, msg: window.update_on_next_click((hist, item), [], [], None, None, unpacked, msg)
    else:
        on_update1 = lambda hist, item, unpacked, msg: 0
    partialpack, state = tsbp.pack1((W,H), unpacked, packed, hist, area, on_update1)
    while partialpack:
        active = util.heapset([util.ActivePoint(0,0,axis=None)])
        packed = []
        #area = sum([w*h*unpacked[(w,h)] for w,h in unpacked])
# NOTE(review): fragment — starts mid-loop; `npath`, `nSteps`, `Steps`,
# `path`, `j`, `evals`, `rs`, `dfile`, `rfile`, `tag`, `count`, `N_refs`,
# `N_clusters`, `Evals` are all defined upstream of this chunk.
if (npath>0) :
    # Accumulate per-step path distances; steps beyond the path's length
    # reuse its last entry.
    for k in range(nSteps):
        if (k < npath) :
            Steps[k] += path[ k][2]
            print >>dfile,path[k][0],path[k][1],N_refs
        else :
            Steps[k] += path[-1][2]
if (j < 10):
    print evals,
j += 1
# Running sums for mean/variance of evaluation counts.
sumEvals += evals
sum2Evals += evals*evals
if len(rs) > 9:
    print "...",
print
print >>dfile
print >>dfile
util.histogram(Evals,tag+"_histogram.dat")
# Write cumulative step statistics.
sfile = open(tag+"_steps.dat","w")
sprev = N_refs
for j in Steps:
    print >>sfile, j, float(j)/count,(j-sprev)
    sprev = j
sfile.close()
# Mean, sample variance, and standard error of the evaluation counts.
ave = sumEvals/count
eff = ave/N_clusters
var = 0.
if (count > 2):
    var = (sum2Evals-sumEvals*ave)/(count-1)
sd = math.sqrt(var/count)
sdeff = sd/N_clusters
print >>rfile,N_clusters,N_refs,ave,sd
print "> average evaluations {0:8g} +/-{1:<8g} ".format(ave,math.sqrt(var/count))
print "> percentage explored {0:8.2f} +/-{1:<4.2f} for size {2:<8d} ".format(eff,sdeff,N_clusters)
# NOTE(review): fragment — starts mid-loop, and appears to be a reformatted
# duplicate of another chunk in this file; `k`, `npath`, `Steps`, `path`,
# `j`, `evals`, `rs`, `dfile`, `rfile`, `tag`, `count`, `N_refs`,
# `N_clusters`, `Evals` are defined upstream.
if k < npath:
    Steps[k] += path[k][2]
    print >> dfile, path[k][0], path[k][1], N_refs
else:
    # Past the end of this path: carry its last entry forward.
    Steps[k] += path[-1][2]
if j < 10:
    print evals,
j += 1
# Running sums for mean/variance of evaluation counts.
sumEvals += evals
sum2Evals += evals * evals
if len(rs) > 9:
    print "...",
print
print >> dfile
print >> dfile
util.histogram(Evals, tag + "_histogram.dat")
# Write cumulative step statistics.
sfile = open(tag + "_steps.dat", "w")
sprev = N_refs
for j in Steps:
    print >> sfile, j, float(j) / count, (j - sprev)
    sprev = j
sfile.close()
# Mean, sample variance, and standard error of the evaluation counts.
ave = sumEvals / count
eff = ave / N_clusters
var = 0.0
if count > 2:
    var = (sum2Evals - sumEvals * ave) / (count - 1)
sd = math.sqrt(var / count)
sdeff = sd / N_clusters
print >> rfile, N_clusters, N_refs, ave, sd
print "> average evaluations {0:8g} +/-{1:<8g} ".format(ave, math.sqrt(var / count))
## MAIN ##################################################################### if __name__ == "__main__": s = metric_search.metric_search() # parse if (len(sys.argv) < 2): print "usage: database_test.py database [queries]" sys.exit() db_file = sys.argv[1] # init N_neighborhoods = s.init(db_file) # min distances for each neighborhood print "============ minimum distances =============" min_ds = s.Db.min_distance(s.Db.Pair_distances,s.Db.N_neighborhoods) util.histogram(min_ds,db_file+".min_distances") # histogram print "============ distance histogram =============" util.histogram(s.Db.Pair_distances,db_file+".histogram") # graph cluster print "============ cluster =======================" #dcut = 1.5*min(min_ds) dcut = 1.5*max(min_ds) dcut = 3.0 print "using d_cut=",dcut #dcut = 3. #dcut = 2. #dcut = 2.0 s.Db.cluster(s.Db.N_neighborhoods,s.Db.Pair_distances,dcut)
    # NOTE(review): fragment — begins inside an if-branch whose condition is
    # not visible; `numfiles`, `first`, `second`, `length`, `last`, `rstep`,
    # `rmax`, `volume_division`, `numsites`, `boxes` and the modules
    # `d`, `nd`, `dnc`, `ia`, `u` are all defined upstream.
    z = d.density_from_V(str(last), rstep, rmax, volume_division)
    u.write_array(z, 'density.txt')
else:
    # Per-timestep translation variances.
    x = nd.trans_v_timestep_matrix(numfiles, first, second, length)
    y = nd.n_files(numfiles, first, second)
    for i in range(0, length - 2):
        u.write_array(nd.trans_v_timestep_combine(y, i, x, length), 'trans_Tvar' + str(i) + '.txt')
    # Nth-neighbor distances on the lattice.
    x = dnc.dist_v_timestep_lat_matrix(numfiles, first, second, length, 1, 1, numsites)
    y = nd.n_files(numfiles, first, second)
    for i in range(1, length):
        u.write_array(dnc.nth_distance_lat(y, i, x, length), 'dist' + str(i) + 'thC.txt')
    # Consecutive carbon-carbon distances.
    x = dnc.dist_c_c_v_timestep_lat_matrix(numfiles, first, second, length, 1, 1, numsites)
    y = nd.n_files(numfiles, first, second)
    for i in range(1, length):
        u.write_array(dnc.nth_distance_lat(y, i, x, length), 'dist_' + str(i - 1) + '_' + str(i) + '.txt')
    # Density profile and inter-angle histogram for the final frame.
    z = d.density_from_V(str(last), rstep, rmax, volume_division)
    u.write_array(z, 'density.txt')
    a = u.histogram(ia.inter_angle(last, length - 1), boxes)
    u.write_array(a, 'angle12thC.txt')
#from operator import itemgetter lib_path = os.path.abspath('..') sys.path.append(lib_path) import metric_search import util ##################################################################### ## MAIN ##################################################################### if __name__ == "__main__": # parse if (len(sys.argv) < 2): print "usage: database_test.py database [nbins]" sys.exit() db_file = (sys.argv[1]).rstrip(".db") nb = int(sys.argv[2]) stem = (db_file.split("/"))[-1] for m in ["RMSD", "OGTO"]: print "================= METRIC:", m, "===================" tag = stem + "_" + m s = metric_search.metric_search() N_neighborhoods = s.init(db_file, m) # min distances for each neighborhood #print "============ minimum distances =============" min_ds = s.Db.min_distances() util.histogram(min_ds, tag + ".min_distances", nbins=nb) # histogram #print "============ distance histogram =============" util.histogram(s.Db.Pair_distances, tag + ".histogram", nbins=nb) print
import util
import model

#TODO: Normalize RGB channels to zero mean and unit variance
tf.logging.set_verbosity(tf.logging.INFO)
dataset_path = 'dataset'
gen = BatchGenerator()
gen.load_dataset(dataset_path)
# Sanity-check the generator: look at the first batch's shape, then stop.
i = 0
for batch, labels in gen.train_input_gen(num_triplets=2):
    i += 1
    if i > 1:
        break
    print(np.shape(batch['x']))
cnn = tf.estimator.Estimator(
    model_fn=model.cnn_model_fn)  #,model_dir="/tmp/logg3")
# Train in short bursts; every 100th iteration recompute the embedding
# space and its histogram.
for i in range(200):
    print("runn nr: ", i)
    cnn.train(input_fn=lambda: next(gen.train_input_gen(num_triplets=2)), steps=10)
    if i % 100 == 0:
        print("Calculate the db space")
        db_space_np = util.get_db_space_np(gen, cnn)
        print("Get histogram array")
        histogram_arrya = util.get_histogram_array(gen, cnn, db_space_np)
        # NOTE(review): `bin` shadows the builtin; bucket edges for the
        # histogram — presumably distance thresholds, confirm against
        # util.histogram.
        bin = [10, 20, 40, 180]
        util.histogram(histogram_arrya, bin)