import numpy as np

import analysis
import generator
import plotter


def do():
    duration_in_ms = 10 * 1000
    rate_in_ms = 100 / 1000  # 100 Hz expressed as spikes per millisecond

    # Generate spikes for 10s (or longer if you want better statistics)
    # using a Poisson spike generator with a constant rate of 100Hz, and
    # record their times of occurrence.
    spikes = generator.short_way(duration_in_ms, rate_in_ms)

    # Compute the coefficient of variation of the interspike intervals,
    coefficient_of_variation = analysis.coefficient_of_variation(spikes)
    print(f'Coefficient of Variation: {coefficient_of_variation:.3f}')

    # and the Fano factor for spike counts obtained over counting
    # intervals ranging from 1 to 100ms.
    windows = [1, 10, 25, 50, 100]
    for window in windows:
        fano_factor = analysis.fano_factor(window, duration_in_ms, spikes)
        print(f'Fano factor ({window}ms window): {fano_factor:.3f}')

    # Plot the interspike interval histogram.
    window = 10
    bins = np.linspace(window, duration_in_ms, int(duration_in_ms / window))
    values, _ = np.histogram(spikes, bins)
    plotter.show(spikes, bins, f'Bucket size of {window}ms')
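# analysis.coefficient_of_variation is not shown here. A minimal sketch,
# assuming `spikes` is a sorted 1-D array of spike times in ms: the CV is
# std(ISI) / mean(ISI) over the interspike intervals, and a homogeneous
# Poisson process gives CV close to 1.
import numpy as np

def coefficient_of_variation(spikes):
    isis = np.diff(spikes)  # interspike intervals
    return np.std(isis) / np.mean(isis)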
from sys import argv

import plotter

# Perceptron, AlternativePerceptron and random_color are defined
# elsewhere in the project.


def main():
    training_data_file = open(argv[1], 'r')
    test_data_file = open(argv[2], 'r')
    algorithm_number = int(argv[3])

    # Each training line holds the feature values followed by the
    # target class in the last column.
    training_data = []
    for line in training_data_file.readlines():
        split_line = line.split()
        target_class = int(split_line[-1])
        training_data.append(
            (tuple(float(x) for x in split_line[:-1]), target_class))

    # Test lines hold feature values only.
    test_data = []
    for line in test_data_file.readlines():
        test_data.append(tuple(float(x) for x in line.split()))

    if algorithm_number == 1:
        class_count = int(argv[4])
        perceptron = Perceptron(vector_size=2, class_count=class_count)
    else:
        class_count = 2
        perceptron = AlternativePerceptron(vector_size=2)

    # Train until a full epoch passes with no classification errors.
    has_classification_errors = True
    while has_classification_errors:
        has_classification_errors = perceptron.train(training_data)

    classified_data = []
    for vector in test_data:
        classified_data.append((vector, perceptron.guess(vector)))

    colors = [random_color() for _ in range(class_count)]
    plotter.draw_classes(training_data, 'Perceptron Classifier: Training Data', 1, colors)
    plotter.draw_classes(classified_data, 'Perceptron Classifier: Classified Data', 2, colors)
    plotter.show()
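# The Perceptron class itself is not shown. A minimal sketch of the
# interface the training loop above assumes -- train() runs one epoch and
# returns whether it saw any misclassification, guess() returns a class
# index -- using a standard multi-class perceptron update. This is an
# illustrative stand-in, not the project's implementation.
import numpy as np

class Perceptron:
    def __init__(self, vector_size, class_count, learning_rate=0.1):
        # One weight row per class; the extra column is the bias weight.
        self.weights = np.zeros((class_count, vector_size + 1))
        self.learning_rate = learning_rate

    def guess(self, vector):
        x = np.append(vector, 1.0)  # append the bias input
        return int(np.argmax(self.weights @ x))

    def train(self, training_data):
        had_errors = False
        for vector, target in training_data:
            prediction = self.guess(vector)
            if prediction != target:
                x = np.append(vector, 1.0)
                self.weights[target] += self.learning_rate * x
                self.weights[prediction] -= self.learning_rate * x
                had_errors = True
        return had_errors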
def input_graph():
    global graph
    from sphereofinfluence import SoIGraph

    # Build a random sphere-of-influence graph and hand it to the GUI.
    graph = SoIGraph.randomize(0, 100, 100)
    plotter.start_gui(graph)

    plotter.plt.ion()
    plotter.plot_graph(graph)
    plotter.plot_arrows(graph)
    plotter.show()

    # Alternative traversals, currently disabled:
    # breadth_first_search(graph)
    # depth_first_search(graph)
    dijkstra(graph)
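# The project's dijkstra() is not shown. A generic sketch of the algorithm
# it presumably runs, assuming the graph exposes vertices() and
# neighbors(u) yielding (v, weight) pairs (both hypothetical names):
import heapq

def dijkstra(graph, source=0):
    dist = {v: float('inf') for v in graph.vertices()}
    dist[source] = 0.0
    queue = [(0.0, source)]
    while queue:
        d, u = heapq.heappop(queue)
        if d > dist[u]:
            continue  # stale queue entry
        for v, weight in graph.neighbors(u):
            if d + weight < dist[v]:
                dist[v] = d + weight
                heapq.heappush(queue, (dist[v], v))
    return dist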
def plot(x, y):
    # Scatter the data points, then draw the least-squares line with the
    # correlation and the regression coefficients in the legend.
    vetor = []
    for i in range(len(x)):
        vetor.append([x[i], y[i]])
    plot_dot(vetor, c='m')
    corr = correlacao(x, y)
    reg = regressao(x, y)
    plot_line(calc(reg, x, y), c='r',
              label=" Correlation = " + str(corr) +
                    "\n ß0 = " + str(reg[0]) +
                    "\n ß1 = " + str(reg[1]))


# Three of Anscombe's quartet datasets: nearly identical correlation and
# regression line, very different shapes.
x1 = [10, 8, 13, 9, 11, 14, 6, 4, 12, 7, 5]
y1 = [8.04, 6.95, 7.58, 8.81, 8.33, 9.96, 7.24, 4.26, 10.84, 4.82, 5.68]
x2 = [10, 8, 13, 9, 11, 14, 6, 4, 12, 7, 5]
y2 = [9.14, 8.14, 8.74, 8.77, 9.26, 8.10, 6.13, 3.10, 9.13, 7.26, 4.74]
x3 = [8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 19]
y3 = [6.58, 5.76, 7.71, 8.84, 8.47, 7.04, 5.25, 5.56, 7.91, 6.89, 12.50]

plot(x1, y1)
show()
plot(x2, y2)
show()
plot(x3, y3)
show()
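# correlacao and regressao are not shown here. Under the usual definitions
# (Pearson's r; ordinary least squares for ß0 and ß1) they might look like
# this sketch:
import math

def correlacao(x, y):
    n = len(x)
    mx, my = sum(x) / n, sum(y) / n
    sxy = sum((xi - mx) * (yi - my) for xi, yi in zip(x, y))
    sxx = sum((xi - mx) ** 2 for xi in x)
    syy = sum((yi - my) ** 2 for yi in y)
    return sxy / math.sqrt(sxx * syy)

def regressao(x, y):
    n = len(x)
    mx, my = sum(x) / n, sum(y) / n
    b1 = (sum((xi - mx) * (yi - my) for xi, yi in zip(x, y))
          / sum((xi - mx) ** 2 for xi in x))
    b0 = my - b1 * mx
    return b0, b1  # (ß0, ß1), matching reg[0] and reg[1] above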
import compute
import loader
import plotter

dataset = loader.load_dataset('patient_1')
data = dataset["data"]
mask = dataset["mask"]
threshold = 95.0

# Convert the raw volumes to Hounsfield units, then mask them.
scaled = compute.to_hounsfield_units(data)
masked = list(map(lambda i: compute.apply_mask(i, mask, threshold), scaled))

# Gradients of the masked volumes, masked again to suppress values
# outside the region of interest.
gradients = compute.compute_gradients(masked)
masked_gradients = list(
    map(lambda i: compute.apply_mask(i, mask, threshold), gradients))


def slicer(arr):
    # Take a single axial slice (index 30) from each volume.
    return arr[30, :, :]


scaled_slices = list(map(slicer, scaled))
masked_slices = list(map(slicer, masked))
gradient_slices = list(map(slicer, masked_gradients))

plotter.plot(scaled_slices, masked_slices, gradient_slices)
plotter.show()
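# compute.to_hounsfield_units is not shown here. A minimal sketch of the
# standard DICOM linear rescale, HU = raw * RescaleSlope + RescaleIntercept
# (slope 1 and intercept -1024 are common CT defaults, but the real values
# come from the DICOM headers):
import numpy as np

def to_hounsfield_units(volumes, slope=1.0, intercept=-1024.0):
    return [np.asarray(v, dtype=np.float64) * slope + intercept
            for v in volumes]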
import argparse

import numpy as np

import functions
import plotter


def main():
    choices = ['func1', 'func2', 'func3', 'func4', 'func5', 'func6', 'func7']
    parser = argparse.ArgumentParser()
    parser.add_argument('fn', help='function to plot', choices=choices)
    parser.add_argument('vars', nargs='*', type=int, help='function variables')
    parser.add_argument('-a', '--anim', action='store_true', help='animate plot')
    args = parser.parse_args()

    # The first positional variable is the number of terms nt; any
    # remaining ones are parameters of the chosen phase function.
    nt, *params = args.vars
    func = getattr(functions, args.fn)

    # Partial sums of exp(2*pi*i*f(n)) for n = 1..nt.
    z = np.array([
        np.exp(2 * np.pi * 1j * func(n, *params)) for n in range(1, nt + 1)
    ])
    z = z.cumsum()

    plotter.plot_expsum(z)
    if args.anim:
        plotter.animate_expsum(z)
    plotter.show()
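# The phase functions in functions.py are not shown here. A classic
# three-parameter choice that matches func1's (n, a, b, c) signature is
# f(n) = n/a + n^2/b + n^3/c, which traces the familiar exponential-sum
# figures. This is an illustrative guess, not the project's definition:
def func1(n, a, b, c):
    return n / a + n ** 2 / b + n ** 3 / c

# Hypothetical invocation, plotting and animating the first 10000 terms:
#   python main.py func1 10000 100 7919 365 -a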
import sys
sys.path.insert(1, '../')

import plotter as plt

# Two stacked subplots (position appears to follow matplotlib's
# [rows, cols, index] convention), each drawing a red and a blue series.
p1 = plt.plot([1, 2, 3], [1, 3, 2], 'r-o',
              [1, 2, 2.5, 3], [3, 2, 1.5, 1], 'b-o',
              new=True, position=[2, 1, 1])
p2 = plt.plot([1, 2, 3], [1, 3, 2], 'r-o',
              [1, 2, 3], [3, 2, 1], 'b-o',
              new=True, position=[2, 1, 2])

plt.show()
def preParse(file, tagAndProbe):
    # Try to open the input file.
    try:
        f = open(file, "r")
    except IOError:
        print "File not found."
        exit(0)

    # Optional: progress display (initialization).
    lineNums = getLen(file) / 2  # 1 event = 2 lines
    #lineNums = 4967428
    currentLine = 0
    currentPercent = 0
    startTime = int(time.time())
    lastNumber = 0
    lastSeconds = startTime
    print "Starting. Parsing %i events" % lineNums

    fh = plotter.FilterHisto()
    diagrams = ((0, "Charge criterion", (0, 0)),
                (1, "Direction criterion", (0, 1)),
                (2, "Track quality", (0, 2)),
                (3, "Detector criterion", (1, 0)),
                (4, "Chamber count", (1, 1)),
                (5, "Rapidity criterion", (1, 2)),
                (6, "Momentum criterion", (2, 0)),
                (7, "Isolation criterion", (2, 1)),
                (8, "Mass filter", (2, 2)))
    for i in diagrams:
        fh.initSubdiagram(str(i[0]), i[1], i[2])
    spektrum = plotter.Histo("Spectrum")
    detaildiagram = plotter.DetailDiagram("Filtered events", MIN_M_INV, MAX_M_INV, NBINS)

    # Skip the file header.
    l = f.readline()
    while l[0] == "#":
        l = f.readline()

    # Filter order used in this implementation:
    #  1. same charge
    #  2. same jet
    #  3. track quality
    #  4. tracker and pixel detector hits present
    #  5. at least 10 chambers
    #  6. pseudorapidity below 2.1
    #  7. minimum transverse momentum of 20 GeV
    #  8. muon isolation factor
    #  9. mass filter (tag-and-probe only)
    # plus the full spectrum (10) and the filtered spectrum (11).
    while l:
        # Progress display - optional.
        currentLine += 1
        if int(float(currentLine) * 100 / lineNums) != round(currentPercent):
            curRate = (currentLine - lastNumber) / (time.time() - lastSeconds) / 1000
            avgRate = (currentLine) / (time.time() - startTime) / 1000
            lastNumber = currentLine
            lastSeconds = time.time()
            currentPercent = round(float(currentLine) * 100 / lineNums)
            totalRate = currentLine / (time.time() - startTime)
            estimated = round((lineNums - currentLine) / totalRate)
            print "%i percent done. Current rate: %.3f kHz, avg: %.3f kHz, estimated remaining time: %is" % (currentPercent, curRate, avgRate, estimated)

        # Name this line and read the next one.
        l1 = l
        l2 = f.readline()
        # Raise an error if the file does not hold two lines per event.
        if not l2:
            raise IndexError("Invalid input file: the number of lines must be divisible by 2")

        # Build particles from the two input lines.
        m1 = particleFromFileLine(l1)
        m2 = particleFromFileLine(l2)

        # Run the muons through the implemented filters.
        if tagAndProbe:
            tagAndProbeFill(fh, spektrum, detaildiagram, m1, m2)
        else:
            normalFill(fh, spektrum, detaildiagram, m1, m2)

        # Read the next line.
        l = f.readline()

    # Statistics.
    print "Parsing finished. Elapsed time: %i seconds. Avg rate: %.3f kHz" % (int(time.time() - startTime), (currentLine) / (time.time() - startTime) / 1000)
    print "Drawing plots ..."
    t = time.time()
    binContents = detaildiagram.getBinContents()
    detaildiagram.drawErrors(binContents, np.sqrt(binContents))
    print "Drawing finished. Elapsed time: %i seconds" % int(time.time() - t)
    fh.plot()
    spektrum.addLabels()
    spektrum.plot()
    fp = Fitpanel(detaildiagram, binContents, MIN_M_INV, MAX_M_INV, NBINS,
                  np.sqrt(binContents), '../diagrams-2/fits.txt')
    detaildiagram.plot()
    #detaildiagram.save("../diagrams-2/zoomed.png")
    plotter.show()
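# The mass filter cuts on the dimuon invariant mass. For reference, a
# minimal sketch of that computation in natural units, assuming each
# particle exposes energy e and momentum components px, py, pz (the actual
# particle class built by particleFromFileLine is not shown):
import math

def invariant_mass(m1, m2):
    e = m1.e + m2.e
    px = m1.px + m2.px
    py = m1.py + m2.py
    pz = m1.pz + m2.pz
    # m^2 = (E1+E2)^2 - |p1+p2|^2; clamp at 0 against rounding error.
    return math.sqrt(max(e ** 2 - (px ** 2 + py ** 2 + pz ** 2), 0.0))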
# Old-style pylab script: load, meshgrid, griddata and the plotting calls
# presumably come from a star import, e.g. `from pylab import *`
# (assumption; the import block is not shown here).
X = load(filename)
flowWEA = X[:, 2]
flowNSA = X[:, 3]
avgDelayWEA = X[:, 4]
avgDelayNSA = X[:, 5]

# Interpolate the scattered samples onto a regular 10x10 flow grid and
# take the first grid row as the delay curve.
flow = range(300, 1300, 100)
[X, Y] = meshgrid(flow, flow)
Z = griddata(flowWEA, flowNSA, avgDelayWEA, X, Y)
delayFCWE = Z[0]
Z = griddata(flowWEA, flowNSA, avgDelayNSA, X, Y)
delayFCNS = Z[0]

# delayVAWE and delayVANS (the vehicle-actuated delays plotted below) are
# not computed in this snippet; presumably they are built the same way
# from a second dataset.
figure(figsize=(12, 6))

subplot(1, 2, 1)
plot(flow, delayFCWE, flow, delayVAWE)
ylim(0, 60)
xlabel("Flow")
ylabel("Average Delay WE")
legend(("FC", "VA"), loc='upper left')

subplot(1, 2, 2)
plot(flow, delayFCNS, flow, delayVANS)
ylim(0, 60)
xlabel("Flow")
ylabel("Average Delay NS")
legend(("FC", "VA"), loc='upper left')

show()