import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.colors import LogNorm


def display_isophotes(data, isolist, bad='black', cbar_label='',
                      cmap=cm.gray, norm=LogNorm(), vmin=None, vmax=None):

    global currentFig
    fig = plt.figure(currentFig)
    currentFig += 1
    plt.clf()

    ax = fig.add_subplot(111)

    cmap.set_bad(bad, 1)  # color used for masked (bad) pixels
    frame = ax.imshow(data, origin='lower', cmap=cmap, norm=norm,
                      vmin=vmin, vmax=vmax)
    cbar = plt.colorbar(frame)
    cbar.set_label(cbar_label)

    # overlay every isophote that converged cleanly
    for i in range(len(isolist)):
        if (isolist.stop_code[i] == 0) and (isolist.nflag[i] == 0):
            iso = isolist.get_closest(isolist.sma[i])
            x, y = iso.sampled_coordinates()
            ax.plot(x, y, color='white')

    plt.tight_layout()
    plt.show()
    return
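# A minimal usage sketch (assumptions: the image array and geometry values
# below are illustrative, and `isolist` comes from photutils' isophote
# fitting, which is what the stop_code/nflag/sampled_coordinates attributes
# above suggest):
#
#   from photutils.isophote import EllipseGeometry, Ellipse
#
#   currentFig = 1  # module-level figure counter used by display_isophotes
#   geometry = EllipseGeometry(x0=50., y0=50., sma=10., eps=0.2, pa=0.5)
#   isolist = Ellipse(image_data, geometry).fit_image()
#   display_isophotes(image_data, isolist, cbar_label='counts')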
def main():
    """ execute task according to program options """
    ustring = "%prog [options]\n\nAvailable tasks:"
    keys = sorted(tasklist.keys())
    for i in keys:
        ustring += "\n\n" + i + ": " + tasklist[i].get_doc()

    optparser = OptionParser(ustring)
    optparser.add_option("-t", "--task",
                         help="Run task T (see above for info)", metavar="T")
    optparser.add_option("--all", help="Run all tasks (see above)",
                         action="store_true", default=False)
    optparser = process_script_options(optparser)
    options, args = optparser.parse_args()

    if options.print_tasks:
        print_tasks()
        tasks = []
    elif options.all:
        tasks = list(tasklist)  # all task names
    elif options.task is None:
        optparser.error("Either --all or --task option must be used.")
    else:
        tasks = [os.path.splitext(os.path.basename(options.task))[0]]

    for i in tasklist.values():
        i.options = options
        i.args = args

    memoize.set_config(readcache=options.memoize)
    run(options=options, tasks=tasks)
    if options.show:
        plotting.show()
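# Hypothetical command lines (the script name is illustrative; --show and
# --memoize are assumed to be registered by process_script_options, since
# options.show and options.memoize are read above):
#
#   python tasks.py --task my_task --show
#   python tasks.py --all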
def main():
    #~ # basic test
    #~ if len(sys.argv) > 1:
    #~     state_id = int(sys.argv[1])
    #~ else:
    #~     state_id = 3
    #~ print "state " + str(state_id) + " distance to goal is: " + str(hw.distance_to_goal(state_id))
    #plt.show()

    #~ hwd = houseWorldAnalysis.load_data()
    #~ #subjects = ['181']
    #~ subjects = hwd.get_subjects()  #[0:16]
    #~ cps = houseWorldAnalysis.consecutive_ps(hwd, subjects)
    #~ splitavs, splitdevs, splitps = houseWorldAnalysis.split_todays(cps)
    #~ plt.bar([-0.5, 1.5], [splitavs[0], splitavs[1]], color='r', yerr=splitdevs)
    #~ print len(splitps[0]), len(splitps[1])
    #~ plt.show()

    #print cps
    #avs, bins, nums = utils.binned_average(cps, 2)
    #plt.plot(bins, nums, 'rx')
    #plt.show()
    #plt.plot(bins, avs, 'ko')
    #plt.ylim([0, 1])
    #plt.show()

    #~ plt.plot([cp[0] for cp in cps], [cp[1] for cp in cps], 'ko')
    #~ plt.ylim([-0.1, 1.1])
    #~ plt.show()
    #~ return

    hwd = houseWorldAnalysis.load_data()
    subs = hwd.get_subjects()[0:3]
    print houseWorldAnalysis.single_G(hwd, subs)
    #return

    # kbs, choices = houseWorldAnalysis.subject_kullbacks(hwd, subs)
    # skbs = houseWorldAnalysis.surrogate_subject_kullbacks(hwd, subs)
    # pvs = houseWorldAnalysis.subject_pvals(hwd, subs)
    gs, pvs, choices = houseWorldAnalysis.subject_Gs(hwd, subs)

    ## SNIPPET
    for sub in subs:
        try:
            if len(choices[sub][0]) > 1:
                print sub, len(choices[sub][0]), gs[sub], pvs[sub]
        except KeyError:  # skip subjects without recorded choices
            pass
    #####

    print pvs
    print str.format("mean p: {0}, median p: {1}",
                     np.mean(pvs.values()), np.median(pvs.values()))
    return

    # everything below is unreachable exploratory code, kept for reference
    plt.hist(pvs.values())
    plt.xlim([0, 1])
    #plt.hist(gs.values())
    plt.show()
    return

    # relies on the commented-out subject_kullbacks calls above
    for i, sub in enumerate(subs):
        if sub in kbs.keys():
            print str.format("subject {0}, D={1}. Days: {2}; total choices: {3}\n entropy={4}, p={5}",
                             sub, kbs[sub], len(choices[sub]), np.sum(choices[sub]),
                             utils.H([sum(choice) for choice in choices[sub]]),
                             pvs[sub])
            plt.subplot(4, 4, i + 1)
            plt.hist(skbs[sub])
            plt.axvline(x=kbs[sub], linewidth=2, color='r')
            plt.xlim([0, 1])
    plt.show()
    return

    pvs = houseWorldAnalysis.subject_pvals(hwd)  #, subject)
    print pvs
    print str.format("mean p: {0}, median p: {1}", np.mean(pvs), np.median(pvs))
    plt.hist(pvs.values())
    plt.show()
    return

    ## pvals vs entropies
    #~ entropies = []
    #~ pvls = []
    #~ for sub in subs:
    #~     entropies.append(utils.H([sum(choice) for choice in choices[sub]]))
    #~     pvls.append(pvs[sub])
    #~ plt.plot(entropies, pvls, 'ko')
    #~ plt.show()
    #~ return

    # test move choice
    hw = HouseWorld.HouseWorld()
    hwd = HouseWorldData.HouseWorldData()
    hwd.load_from_mat()
    dates, moves = hwd.select_actions(312, '181')
    days, choices, multiplicities = houseWorldAnalysis.parse_in_days(dates, moves)
    kullbacks = houseWorldAnalysis.compute_kullbacks(choices)
    seconds = [(day - min(days)).total_seconds() for day in days]
    #plt.plot(seconds, kullbacks)
    print np.mean(kullbacks)
    print multiplicities
    print kullbacks
    plt.hist(kullbacks)
    plt.show()
    return

    #choices = [hw.action_to_id(move) for move in moves]
    #print dates
    #print moves
    #~ plotting.running_plot(dates, choices)
    #~ plt.hist(choices)
    #~ plt.show()
    #~ plotting.joint_plot(dates, choices)
    plotting.show()
    return

    all_moves = list(set(moves))  # I do this before to prevent altering the ordering
    print all_moves
    move_codes = [all_moves.index(move) for move in moves]
    plt.hist(move_codes)
    plt.show()

    # relies on the commented-out running_bar_plot call for `intervals`
    #intervals, choices = running_bar_plot(dates, moves)
    #print intervals
    #print choices
    choicesT = map(list, zip(*choices))
    bot2 = [choicesT[0][i] + choicesT[1][i] for i in range(len(choicesT[0]))]
    #width = 100000
    width = 0.5
    plt.bar(intervals, choicesT[0], width, color='b')
    plt.bar(intervals, choicesT[1], width, color='r', bottom=choicesT[0])
    plt.bar(intervals, choicesT[2], width, color='y', bottom=bot2)
    plt.show()
t = f['t']

# read yml file
yml_path = files.find(args[0], 'yml')
with open(yml_path) as yml_file:
    ps = yaml.safe_load(yml_file)  # safe_load: the file holds plain config data
plmat = ps['plot_matrix'] if 'plot_matrix' in ps else None

if save:
    files.delete_images()

# show/save a graph for every connection,
# or a single graph if plot_matrix is specified
if plmat:
    conns = {}
    for n in nodes:
        conns[n.name] = {}
    for c in connections:
        conns[c.origin_node.name][c.dest_node.name] = c
    pl.plot_matrix(plmat, conns, t)
    if save:
        pl.savefig(files.image_path())
else:
    for c in connections:
        pl.plot_connection(c, t)
        if save:
            pl.savefig(files.image_path(c))

if not save:
    pl.show()
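# Sketch of a matching yml file (the exact 'plot_matrix' layout consumed by
# pl.plot_matrix is an assumption; node names are illustrative):
#
#   plot_matrix:
#     - [input_node, filter_node]
#     - [filter_node, output_node]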
def plot_error(error_array):
    plotting.plot(error_array, None, "Error plane for nu", "nu_e coded",
                  "nu_i coded", "nu_error_space.png")
    plotting.show(error_array, "nu_error_image_test_300.png")
import preprocess
import plotting
import matplotlib.pyplot as plt
import pickle
import os
import sys
import database

model_pat = os.path.dirname(os.path.realpath(__file__)) + "/model.sav"
model = pickle.load(open(model_pat, "rb"))

env = preprocess.Preprocess("test_image/car4.jpg")
env.plate_detection()
segmented_characters = env.character_segmentation()
plotting.show()

segmented_characters.sort()
ans = []
for char in segmented_characters:
    #print(plt.imshow(char[1]))
    ans.append(model.predict(char[1].reshape(1, -1)))

license_plate = []
for val in ans:
    license_plate.append(val[0])

# positions 0, 1, 4 and 5 hold letters, so digits there are OCR confusions
for idx in range(len(license_plate)):
    if (idx == 0 or idx == 1 or idx == 4 or idx == 5):
        if (license_plate[idx] == '0'):
            license_plate[idx] = str('O')
        elif (license_plate[idx] == '1'):
            license_plate[idx] = str('I')  # assumed: mirrors the '0' -> 'O' fix above
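# The chained elifs above could also be written as a lookup table; a sketch
# (only the '0'->'O' and '1'->'I' confusions are taken from this script,
# anything further would need checking against the model's failure modes):
#
#   DIGIT_TO_LETTER = {'0': 'O', '1': 'I'}
#   for idx in (0, 1, 4, 5):  # letter positions on the plate
#       if idx < len(license_plate):
#           license_plate[idx] = DIGIT_TO_LETTER.get(license_plate[idx],
#                                                    license_plate[idx])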
with gzip.open(filename, 'rb') as f:
    train, valid, test = pickle.load(f)

if 0:  # make each pixel zero mean and unit std
    for images, labels in [train, valid, test]:
        images -= images.mean(axis=0, keepdims=True)
        images /= np.maximum(images.std(axis=0, keepdims=True), 1e-3)

if 1:
    plt.figure(1)
    plt.clf()
    # print train[0][10]
    plotting.show(train[0][10].reshape(28, 28))

# --- train
images, labels = train

n_epochs = 10
n_vis = images.shape[1]
n_hid = 500

batch_size = 100
batches = images.reshape(
    images.shape[0] // batch_size, batch_size, images.shape[1])

rbm = RBM(n_vis, n_hid)
persistent = theano.shared(np.zeros((batch_size, n_hid), dtype=rbm.dtype),
                           name='persistent')
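# The `persistent` shared variable holds the fantasy-particle chains of
# persistent contrastive divergence (PCD). A numpy sketch of one PCD update,
# independent of the RBM class used here (W, b_vis, b_hid and lr are
# illustrative; mean-field probabilities stand in for binary samples):
#
#   def sigmoid(x):
#       return 1. / (1. + np.exp(-x))
#
#   def pcd_step(v_data, h_chain, W, b_vis, b_hid, lr=0.01):
#       h_data = sigmoid(v_data.dot(W) + b_hid)      # positive phase
#       v_model = sigmoid(h_chain.dot(W.T) + b_vis)  # negative phase, driven
#       h_model = sigmoid(v_model.dot(W) + b_hid)    # by the old chain state
#       W += lr * (v_data.T.dot(h_data) - v_model.T.dot(h_model)) / len(v_data)
#       return h_model  # new chain state, carried across minibatches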
def plots(args):
    T = read_reports(args.dir)
    outdir = OutputDir(args.outputs, log=True)
    filename = lambda f: args.prefix + '-' + f if args.prefix is not None else f
    T = T[T['crit'] == args.criterion]
    T_init_tests = {
        n: T[T['init_tests'] == n]
        for n in np.unique(T['init_tests'])
    }
    for init_tests in T_init_tests:
        generated_tests = sum(run['report']['#tests'][-1] - init_tests
                              for run in T_init_tests[init_tests])
        n_runs = len(T_init_tests[init_tests])
        print(f'{generated_tests} tests generated for |X_0|={init_tests} '
              '(average = {} test{}/run).'.format(*s_(generated_tests * 1. / n_runs)))

    def tech_style(tech):
        return dict(color='blue' if tech == 'pca' else 'red')

    # Progress/PCA and Progress/ICA histograms
    def plot_progress(tech, T):
        P_tech = T[T['tech'] == tech]['progress']
        P_tech = [P for P in P_tech if len(P.shape) > 0]
        nDists = np.concatenate([P['new_dist'].astype(float) for P in P_tech])
        oDists = np.concatenate([P['old_dist'].astype(float) for P in P_tech])
        dDists = oDists - nDists
        fig, ax = plotting.subplots(1, 2, figsize_adjust=(1.0, 0.5),
                                    constrained_layout=True)
        ax[0].hist(nDists, bins=args.hist_bins, **tech_style(tech))
        ax[0].axvline(x=0, lw=1, color='black')
        ax[0].set_ylabel(r'\#steps where new distance is $d$')
        ax[0].set_xlabel(r'Distance ($d$ — ' + tech + r')')
        ax[1].hist(dDists, bins=args.hist_bins, **tech_style(tech))
        ax[1].axvline(x=0, lw=1, color='black')
        ax[1].set_ylabel(r'\#steps where progress is $\delta$')
        ax[1].set_xlabel(r'Progress ($\delta$ — ' + tech + r')')
        plotting.show(fig, outdir=outdir,
                      basefilename=filename(tech + '-dist-n-progress'),
                      w_pad=0.06)

    if not args.no_pca_progress:
        plot_progress('pca', T)

    # Progress/ICA
    if not args.no_ica_progress:
        plot_progress('ica', T)

    # Summary
    if not args.no_summary:

        def plot_style(report):
            return tech_style(report['tech'])

        def it_(ax):
            return ax if len(T_init_tests) > 1 else [ax]

        Nms = args.dnn_name  # r'\mathcal{N}_{\mathsf{ms}}'
        cov_label_ = lambda d, n, x: \
            r'\mathrm{' + d + r'}(\mathcal{B}_{' + n + r', ' + x + '})'
        cov_label = lambda n, x: \
            cov_label_('BFCov', n, x) if args.criterion == 'bfc' else \
            cov_label_('BFdCov', n, x)

        fig, ax = plotting.subplots(3, len(T_init_tests), sharex='col',
                                    sharey='row', constrained_layout=True)

        for axi in it_(ax[-1]):  # unshare x axes for the bottom row:
            g = axi.get_shared_x_axes()
            g.remove(axi)
            for a in g.get_siblings(axi):
                g.remove(a)

        for init_tests, axi in zip(T_init_tests, it_(ax[0])):
            for run in T_init_tests[init_tests]:
                axi.plot(run['report']['#tests'] - init_tests,
                         **plot_style(run))

        from matplotlib.ticker import StrMethodFormatter
        for init_tests, axi in zip(T_init_tests, it_(ax[1])):
            for run in T_init_tests[init_tests]:
                if len(run['report']) == 0:
                    continue
                axi.plot(run['report']['coverage'] - run['report']['coverage'][0],
                         **plot_style(run))
            axi.yaxis.set_major_formatter(StrMethodFormatter('{x:2.1f}'))
            axi.yaxis.set_ticks(
                np.arange(0, np.amax(axi.get_yticks()), step=0.1))

        for init_tests, axi in zip(T_init_tests, it_(ax[2])):
            init_covs = [run['report']['coverage'][0]
                         for run in T_init_tests[init_tests]
                         if len(run['report']) > 0]
            final_covs = [run['report']['coverage'][-1]
                          for run in T_init_tests[init_tests]
                          if len(run['report']) > 0]
            bp = axi.boxplot([init_covs, final_covs],
                             positions=[0, 20], widths=6,
                             # labels = [r'initial ($i=0$)', 'final'],
                             flierprops=dict(marker='.', markersize=1),
                             bootstrap=1000, manage_ticks=False)
            axi.yaxis.set_major_formatter(StrMethodFormatter('{x:2.1f}'))
            for box in bp['boxes']:
                box.set(linewidth=.5)
            for box in bp['caps']:
                box.set(linewidth=.5)
            plt.setp(axi.get_xticklabels(), visible=False)

        for init_tests, axi in zip(T_init_tests, it_(ax[1])):
            axi.xaxis.set_tick_params(which='both', labelbottom=True)

        # Set labels and column titles:
        for init_tests, axi in zip(T_init_tests, it_(ax[0])):
            axi.set_title(f'$|X_0| = {init_tests}$')
        for axi in it_(ax[-1]):
            axi.set_xlabel(r'iteration ($i$)')
        it_(ax[0])[0].set_ylabel(r'$|X_i| - |X_0|$')
        it_(ax[1])[0].set_ylabel(r'$' + cov_label(Nms, r'X_i') + '-' +
                                 cov_label(Nms, r'X_0') + '$')
        it_(ax[2])[0].set_ylabel(r'$' + cov_label(Nms, r'X_i') + '$')
        # it_(ax[-1])[(len(T_init_tests) - 1) // 2 + 1].set_xlabel(r'iteration ($i$)')

        plotting.show(fig, basefilename=filename('summary-per-X0'),
                      outdir=outdir, rect=(.01, 0, 1, 1))
def main(argv):
    # defaults
    window_length = 50
    overlap = window_length / 2
    featdim = 10

    #data_115818,sgmdata_115818 = load_dataset(window_length,overlap)
    training_data, training_sgmdata = load_dataset(window_length, overlap)
    training_featdata, header = build_dataset_features(training_sgmdata)
    cl.rnn_test(training_featdata)
    return

    data_120250, sgmdata_120250 = load_dataset(
        window_length, overlap, median_filter=True,
        alldatafile='../../acquisizione20062014/acquisizione_20062014/Data_120250.txt')
    # these data are completely different from the other three
    # data_120611,sgmdata_120611 = load_dataset(window_length,overlap,median_filter=True,alldatafile='../../acquisizione20062014/acquisizione_20062014/Data_120611.txt')
    """
    data_120922,sgmdata_120922 = load_dataset(window_length,overlap,median_filter=True,alldatafile='../../acquisizione20062014/acquisizione_20062014/Data_120922.txt')
    all_data = [(data_115818,"115818"),(data_120250,"120250"),(data_120611,"120611"),(data_120922,"120922")]
    sgm_data = [sgmdata_115818,sgmdata_120250,sgmdata_120611,sgmdata_120922]
    cols = ['b','r','g','m']
    for (data,title),c in zip(all_data,cols):
        print "Acquisition", title
        plt.plot_in_subplots(data,0,1,c)
    return
    """
    return

    training_data, training_sgmdata = load_dataset(window_length, overlap)
    training_featdata, header = build_dataset_features(training_sgmdata)
    training_targets = fm.assign_target(training_featdata)
    """
    data1,sgmdata1 = load_dataset(window_length,overlap,alldatafile='/home/ilaria/Scrivania/marsupio/acquisizione20062014/acquisizione_20062014/Data_120250.txt')
    featdata1,_ = build_dataset_features(sgmdata1)
    targets1 = fm.assign_target(featdata1)
    """
    #write_feature_data_to_file(featdata,header)
    #print featdata[0,idxs]
    #plt.plot_in_subplots(featdata,idxs)
    #plt.plot_all(featdata1[:,idxs])

    #X_r = preprocessing.scale(featdata)
    #pca = PCA(n_components=featdim)
    #kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=0.1)
    #X_r = kpca.fit_transform(X_r)
    #X_r = pca.fit(X_r).transform(X_r)

    X_r = training_featdata
    targets = training_targets

    pca = PCA(n_components=2)
    X_r = preprocessing.scale(X_r)
    X_r = pca.fit(X_r).transform(X_r)
    kmeans = KMeans(n_clusters=10)
    kmeans.fit(X_r)
    plt.plot_clustering_and_targets(X_r, kmeans, 0, 1, targets)
    return

    pars = [{
        'clf__kernel': ['rbf'],
        'clf__gamma': [1e-3, 1e-5, 1e-2, 1e-1, 1e-4],
        'clf__C': [0.001, 0.01, 0.1, 1, 10, 100],
        'pca__n_components': [5, 10, 20, 50, 80]
    }, {
        'clf__kernel': ['linear'],
        'clf__C': [0.001, 0.01, 0.1, 0.5, 1, 10, 100],
        'pca__n_components': [5, 10, 20, 50, 80]
    }]

    #evaluation set
    cl.cross_model_selection(X_r, targets, pars, save=True)
    c = cl.load_model('model.pkl')
    print c
    return

    #print X_train.shape, X_test.shape
    clf = svm.SVC(kernel='rbf', gamma=0.7, C=0.8)
    pca = PCA(n_components=featdim)
    pca_svm = Pipeline([
        ('pca', pca),
        ('svm', clf),
    ])
    scores = cross_validation.cross_val_score(clf, X_r, targets, cv=5,
                                              scoring='accuracy')
    print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
    #pca_svm.fit(X_train, y_train)
    #print pca_svm.score(X_test,y_test)
    return

    #X_r = pca.fit(sint).transform(sint)
    #X_r = preprocessing
    pca = PCA(n_components=featdim)
    #kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=0.1)
    #X_r = kpca.fit_transform(X_r)
    X_r = pca.fit(X_r).transform(X_r)
    ncluster = 10
    """
    from sklearn.cluster import DBSCAN
    dbscan = DBSCAN()
    plt.plot_DBSCAN_clustering_result(X_r,dbscan,0,1)
    return
    """
    #X_r = preprocessing.scale(X_r)
    kmeans = KMeans(n_clusters=ncluster)
    #print X_r
    kmeans.fit(X_r)
    plt.plot_clustering_and_targets(X_r, kmeans, 0, 1, targets)
    return

    """
    test = open('./test.csv','w')
    for dt in sint:
        for ft in dt:
            test.write(str(ft)+',')
        test.write('\n')
    """
    #colors = np.array([x for x in 'bgrcmykbgrcmykbgrcmykbgrcmyk'])
    #colors = np.hstack([colors] * 20)

    featdim = 10
    Y = randomtargets(sint)
    clf = svm.SVC(kernel='rbf', gamma=0.7)
    pca = PCA(n_components=featdim)
    pca_svm = Pipeline([
        ('pca', pca),
        ('svm', clf),
    ])
    pca_svm.fit(sint, Y)
    X_r = pca.fit(sint).transform(sint)
    cX_r = pca.fit(sint).transform(cint)
    #th1 = [l[1] for l in sint]
    #accx1 = [l[2] for l in sint]
    #print(th1)
    #plt.scatter(th1, accx1, 50,c=Y)
    #plt.show()

    features = []
    for i in range(0, featdim):
        features.append([l[i] for l in cX_r])
    Yp = [int(i) for i in pca_svm.predict(cint)]
    print Yp

    s = 411
    for f in features[1:5]:
        # plt.subplot(s)
        # plt.scatter(features[0], f, 50,c=Yp)
        i += 1
        s += 1
    #plt.show()

    s = 511
    for f in features[5:10]:
        # plt.subplot(s)
        # plt.scatter(features[0], f, color=colors[Yp].tolist())
        i += 1
        s += 1
    #plt.show()

    print clf.support_vectors_
    # plt.scatter(clf.support_vectors_,range(0,3), color=colors[range(0,3)].tolist())

    # create a mesh to plot in
    sint = np.array(sint)
    Y = (np.array(Y))
    x_min, x_max = sint[:, 2].min() - 1, sint[:, 2].max() + 1
    y_min, y_max = Y.min() - 1, Y.max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, .02),
                         np.arange(y_min, y_max, .02))
    #print len(Y), yy.shape
    #Z = Y.reshape(yy.shape)
    pl.contourf(xx, yy, Y, cmap=pl.cm.Paired)
    pl.axis('off')

    # Plot also the training points
    pl.scatter(X[:, 1], X[:, 2], c=Y, cmap=pl.cm.Paired)
    pl.show()
    return

    #intervalslist = scale(intervalslist)
    #print intervalslist
    featdim = 5
    ncluster = 8
    clusters = range(1, ncluster + 1)
    pca = PCA(n_components=featdim)
    X_r = pca.fit(intervalslist).transform(intervalslist)
    features = []
    for i in range(0, featdim):
        features.append([l[i] for l in X_r])
    #return
    kmeans = KMeans()
    #print X_r
    pca_clustering = Pipeline([('pca', pca),
                               ('minmaxnorm', preprocessing.Normalizer()),
                               ('kmeans', kmeans)])
    clustering = Pipeline([('kmeans', kmeans)])
    print pca_clustering.fit(intervalslist)
    #return
    pca_clusters = pca_clustering.predict(intervalslist)
    clustering.fit(intervalslist)
    nopca_clusters = clustering.predict(intervalslist)

    clustered = []
    i = 0
    s = 411
    for f in features[1:]:
        plt.subplot(s)
        plt.scatter(features[0], f, color=colors[pca_clusters].tolist())
        i += 1
        s += 1
    plt.show()
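# cl.cross_model_selection is opaque here; a sketch of the grid search it
# presumably performs over `pars`, using a plain sklearn pipeline (the step
# names 'pca' and 'clf' are dictated by the parameter-grid keys above;
# GridSearchCV lived in sklearn.grid_search in releases of this vintage and
# moved to sklearn.model_selection later):
#
#   from sklearn.grid_search import GridSearchCV
#
#   search = GridSearchCV(Pipeline([('pca', PCA()), ('clf', svm.SVC())]),
#                         pars, cv=5, scoring='accuracy')
#   search.fit(X_r, targets)
#   print search.best_params_, search.best_score_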
[grain_groups, event_list, event_groups, features] = grp.group_events(source_audio, params)

if params.debug > 0:
    stats.num_events = len(event_list)

if params.mode == 'loop':
    streams = gen.group_loop(sample_rate, params, grain_groups, features, stats)
elif params.mode == 'block':
    streams = gen.block_generator(sample_rate, params, grain_groups, features, stats)

print "Mixing down.."
output_audio = au.post_process(streams, params)
au.write_audio(params.outfile, sample_rate, output_audio)

if params.debug > 0:
    print "Run stats:"
    print " Number of events: %d" % stats.num_events
    print " Number of grains: %d" % stats.num_grains
    print " Number of effect convolutions: %d" % stats.convolutions
    print " Number of filter uses: %d" % stats.filterings

if params.debug > 1:
    import plotting as pl
    print "Plotting.."
    pl.plot_features(event_groups, features, params.num_groups)
    pl.plot_source_audio(source_audio, sample_rate, event_list, event_groups)
    pl.plot_generated_audio(output_audio, sample_rate)
    pl.show()