def get_snre(window_types, snre_record, reg, filename_creator):
    """Gather mean SNRe values and two-sigma error bars per window type.

    Reads one CSV (via ``filename_creator``) per window type and, for each
    strain level matching the requested regularization, averages the
    ``snre_record`` column.

    Note: the 'blackman' file is read up front only to size the output
    arrays; every window file (including blackman) is re-read in the loop.
    """
    # Size the result arrays from the blackman run.
    rec = pylab.csv2rec(filename_creator('blackman'))
    strains = sorted(set(rec['strain_percent']))
    index_is_reg = rec['regularization'] == reg
    shape = (len(window_types), len(strains))
    mean_snre = np.ones(shape) * 10**-5
    snre_error = np.zeros(shape)

    for ri, rad in enumerate(window_types):
        rec = pylab.csv2rec(filename_creator(rad))
        strains = sorted(set(rec['strain_percent']))
        index_is_reg = rec['regularization'] == reg
        for si, strain in enumerate(strains):
            index_is_strain = rec['strain_percent'] == strain
            strain_indices = np.arange(len(rec))[
                np.nonzero(np.logical_and(index_is_strain, index_is_reg))]
            # Lateral records report strain at half magnitude.
            if 'lateral' in snre_record:
                strains[si] = strain / 2.0
            else:
                strains[si] = strain
            trials = rec[snre_record][strain_indices]
            if len(trials) > 0:
                mean_snre[ri, si] = np.mean(trials)
                # Two-sigma standard error of the mean.
                snre_error[ri, si] = 2 * np.std(trials) / np.sqrt(len(trials))
    return strains, mean_snre, snre_error
def read_listfile(filename):
    """Read a data-sample list stored as a CSV file with optional meta-data.

    Header lines starting with '#' are stripped of the marker and parsed
    together as one YAML document; the remaining tab-separated table is
    loaded with csv2rec.

    Parameters:
    -----------
    filename: string
        Input filename. A '.gz' suffix selects transparent decompression.

    Returns:
    --------
    meta: dict
        Meta-data from the header (None if there is no '#' header).
    data: recarray
        Data from the CSV table; must contain a unique 'id' column.
    """
    # Open in text mode ('rt') so lines are str on Python 3 as well;
    # a bare gzip.GzipFile would yield bytes and break startswith('#').
    open_fn = gzip.open if filename.endswith('.gz') else open
    with open_fn(filename, 'rt') as f:
        meta = [line[1:] for line in f if line.startswith('#')]
    # safe_load: the header is data, not code -- yaml.load without a Loader
    # is unsafe on untrusted input and rejected by PyYAML >= 6.
    meta = yaml.safe_load(''.join(meta))
    data = csv2rec(filename, delimiter='\t', comments='#')
    assert 'id' in data.dtype.names
    # Sample ids must be unique.
    assert len(data) == len(np.unique(data['id']))
    return meta, data
def draw3d(filename):
    """Render the numeric contents of a CSV file as a 3-D surface.

    The surface is plotted over a fixed 20..170 meshgrid, so the file is
    assumed to hold a 150x150 table -- TODO confirm against the input files.
    """
    im_rec = pylab.csv2rec(filename)
    im_array = np.array(im_rec.tolist())
    fig = plt.figure()
    # Figure.gca(projection=...) was deprecated in matplotlib 3.4 and
    # removed in 3.7; add_subplot is the supported way to get 3-D axes.
    ax = fig.add_subplot(projection='3d')
    X = np.arange(20, 170, 1)
    Y = np.arange(20, 170, 1)
    X, Y = np.meshgrid(X, Y)
    ax.plot_surface(X, Y, im_array, cmap=cm.coolwarm)
    ax.set_xlabel('X')
    ax.set_xlim(20, 170)
    ax.set_ylabel('Y')
    ax.set_ylim(20, 170)
markeredgewidth=1.0, markevery=1, ms=9.0, alpha=0.5, ) plt.xlabel("Strain Percent Magnitude") plt.xlim(0.0, 8.0) plt.ylabel(ylabel) # plt.title( str( variable_name )) plt.legend(loc="best") if logplot: plt.gca().set_yscale("log") plt.ylim((10 ** -2, 10 ** 2)) r = pylab.csv2rec(sys.argv[1]) variable_name = r.dtype.names[0] # I don't think this is actually necessary the way I ended up doing it. sorted_indices = np.lexsort((r["strain_percent"], r[variable_name], r["regularization"])) regs = set(r["regularization"]) for reg in regs: print(reg) index_is_reg = r["regularization"][sorted_indices] == reg plot_results = PlotResults(r, sorted_indices) print("axial strain SNRe") plot_results.plot_snre("axial_strain_snre", "Axial $SNR_e$ [dB]", index_is_reg) print("lateral strain SNRe") plot_results.plot_snre("lateral_strain_snre", "lateral $SNR_e$ " + str(reg), index_is_reg)
# coding: utf-8 # In[19]: import numpy as np import matplotlib.pylab as plt import matplotlib.dates as mdates # import the csv to a numpy array datas = plt.csv2rec("programmingLanguageComparaison.csv") # Set the figure size big to see it clearly plt.figure(figsize=(20, 10)) dates = datas["week"] # Init the trendsNames for the legend trendsNames = ["python", "javascript", "C++", "SQL"] # Init the trends datas to plot them trendsDatas = [datas["python"], datas["javascript"], datas["c"], datas["sql"]] # Init the colors trendsColors = ["#71D4FF", "white", "#FCCF4D", "#FC5757"] # print(datas["sql"]) # For each trends : [trendName, maxValue, maxValueDate, minValue, minValueDate] peaks = [] for i in range(len(trendsDatas)): # create a temporary list
def is_touching(coarse_segmentation, dirname, ids):
    """For each metasys id, report whether any component of its object is cut.

    Looks up each object's bbox in objects.csv, then checks whether any row
    in components.csv sharing that bbox has its 'cut' flag set.
    """
    base = os.path.join(coarse_segmentation, dirname)
    objects = csv2rec(os.path.join(base, 'objects.csv'), delimiter=';')
    components = csv2rec(os.path.join(base, 'components.csv'), delimiter=';')

    def touches(metasys_id):
        # bbox of the first object row matching this id.
        bbox = objects[objects['obj'] == int(metasys_id)][0]['bbox']
        return components[components['bbox'] == bbox]['cut'].any()

    return [touches(metasys_id) for metasys_id in ids]
for metasys_id in ids ] def is_touching(coarse_segmentation, dirname, ids): d = os.path.join(coarse_segmentation, dirname) objects = csv2rec(os.path.join(d, 'objects.csv'), delimiter=';') components = csv2rec(os.path.join(d, 'components.csv'), delimiter=';') return [ components[components['bbox'] == objects[objects['obj'] == int(metasys_id)][0]['bbox']]['cut'].any() for metasys_id in ids ] fine_segmentation_dir = '/groups/stark/projects/vt/Segmentation/Fine/MetaSys/' coarse_segmentation_dir = '/groups/stark/projects/vt/Segmentation/Coarse/MetaSys/' if __name__ == '__main__': import sys filename = sys.argv[1] data = csv2rec(filename, delimiter=',') dirname = data['dirname'][0] ids = data['metasys_id'] blurred = is_blurred(fine_segmentation_dir, dirname, ids) touching = is_touching(coarse_segmentation_dir, dirname, ids) import pandas df = pandas.DataFrame(data) df['blurred'] = blurred df['touching'] = touching df['bad_outline'] = df['blurred'] + df['touching'] outname = os.path.splitext(filename) outname = outname[0] + '-wb.csv' rec2csv(df.to_records(), outname, delimiter=',')
#plt.title( str( variable_name )) plt.legend( loc='best' ) if self.logplot: plt.gca().set_yscale( 'log' ) plt.ylim( (10**-1.2, 10**1.5 ) ) plt.xlim( (0.0, s[-1]+0.5) ) #plt.ylim( (0.0, 6.0 ) ) plot_results = PlotResults() curves = [ ('noInterp_sim', 'No Interpolation'), ('cosine_sim', 'Cosine'), ('parabolic_sim', 'Parabolic'), ('sinc_sim', 'Sinc - Amoeba'), ('gradientDescent_sim', 'Sinc - Gradient Descent') ] for curve, curve_name in curves: r = pylab.csv2rec( 'SNRe_' + curve + '.csv' ) plot_results.plot_snre_curves( curve, curve_name, r ) plt.figure( 1 ) plt.savefig( '../../../../doc/images/interp_method_simulation_regularization_axial.png' ) plt.savefig( '../../../../doc/images/interp_method_simulation_regularization_axial.eps' ) plt.figure( 2 ) plt.savefig( '../../../../doc/images/interp_method_simulation_regularization_lateral.png' ) plt.savefig( '../../../../doc/images/interp_method_simulation_regularization_lateral.eps' ) plt.figure( 3 ) plt.savefig( '../../../../doc/images/interp_method_simulation_no_regularization_axial.png' ) plt.savefig( '../../../../doc/images/interp_method_simulation_no_regularization_axial.eps' ) plt.figure( 4 ) plt.savefig( '../../../../doc/images/interp_method_simulation_no_regularization_lateral.png' ) plt.savefig( '../../../../doc/images/interp_method_simulation_no_regularization_lateral.eps' )
#! /usr/bin/env python
"""Replay annotated conflict segments from the videos listed in a CSV file."""
import sys, argparse, datetime
import storage, cvutils, utils
import matplotlib.pylab as pylab
import matplotlib.pyplot as plt
import numpy as np

annotations = pylab.csv2rec(sys.argv[1])

frameRate = 30
dirname = "/home/nicolas/Research/Data/montreal/infractions-pietons/"
videoDirnames = {'amherst': '2011-06-22-sherbrooke-amherst/',
                 'iberville': '2011-06-28-sherbrooke-iberville/'}

# for amherst, subtract 40 seconds: add a delta
for annotation in annotations:
    video = annotation['video_name'].lower()
    users = annotation['road_user_1'] + ' ' + annotation['road_user_2'] + ' ' + annotation['conflict_quality']
    print('{} {}'.format(annotation['conflict_start_time'], annotation['conflict_end_time']))
    print(users)
    print(annotation['comments'])
    videoFilename = (dirname + videoDirnames[video] + video
                     + '-{}.avi'.format(annotation['video_start_time']))
    firstFrame = utils.timeToFrames(
        annotation['conflict_start_time'] + datetime.timedelta(seconds=-40),
        frameRate)
    cvutils.playVideo(videoFilename, firstFrame, frameRate, True, False, users)
labels = truth_labels del truth_labels[-1] dtype += [(opts.truth, int)] rescan_map = {'rescan': 1, 'rescan2': 2, 'rescan3': 3} dst = np.array([], dtype=dtype) t = -1 dirnames = [] csvnames = sorted(glob.glob(csv_pattern)) if len(csvnames) == 0: raise RuntimeError('No input files matching the pattern.') for csvname in csvnames: label = os.path.splitext(os.path.basename(csvname))[0] if label.find('#') != -1: continue t = max(t, os.path.getmtime(csvname)) src = csv2rec(csvname, delimiter=',', converterd=converterd) for sample in src: sample_dirname = sample['dirname'].split('/') sample_date = sample_dirname[-2].split('-') if len(sample_date) > 1: sample_date = int( sample_date[0]) * 10 + rescan_map[sample_date[1]] else: sample_date = int(sample_date[0]) * 10 sample_vt = sample_dirname[-1].split('~') if len(sample_vt) > 1: sample_suffix = ord(sample_vt[1]) - ord('A') + 1 sample_vt = int(sample_vt[0]) else: sample_suffix = 0 sample_vt = int(sample_vt[0])
#! /usr/bin/env python import sys, argparse, datetime import storage, cvutils, utils import matplotlib.pylab as pylab import matplotlib.pyplot as plt import numpy as np annotations = pylab.csv2rec(sys.argv[1]) frameRate = 30 dirname = "/home/nicolas/Research/Data/montreal/infractions-pietons/" videoDirnames = { 'amherst': '2011-06-22-sherbrooke-amherst/', 'iberville': '2011-06-28-sherbrooke-iberville/' } # for amherst, subtract 40 seconds: add a delta for annotation in annotations: video = annotation['video_name'].lower() print('{} {}'.format(annotation['conflict_start_time'], annotation['conflict_end_time'])) print(annotation['road_user_1'] + ' ' + annotation['road_user_2'] + ' ' + annotation['conflict_quality']) print(annotation['comments']) cvutils.playVideo( dirname + videoDirnames[video] + video + '-{}.avi'.format(annotation['video_start_time']),
labels = truth_labels del truth_labels[-1] dtype += [(opts.truth, int)] rescan_map = { 'rescan': 1, 'rescan2': 2, 'rescan3': 3 } dst = np.array([], dtype=dtype) t = -1 dirnames = [] csvnames = sorted(glob.glob(csv_pattern)) if len(csvnames) == 0: raise RuntimeError('No input files matching the pattern.') for csvname in csvnames: label = os.path.splitext(os.path.basename(csvname))[0] if label.find('#') != -1: continue t = max(t, os.path.getmtime(csvname)) src = csv2rec(csvname, delimiter=',', converterd=converterd) for sample in src: sample_dirname = sample['dirname'].split('/') sample_date = sample_dirname[-2].split('-') if len(sample_date) > 1: sample_date = int(sample_date[0]) * 10 + rescan_map[sample_date[1]] else: sample_date = int(sample_date[0]) * 10 sample_vt = sample_dirname[-1].split('~') if len(sample_vt) > 1: sample_suffix = ord(sample_vt[1]) - ord('A') + 1 sample_vt = int(sample_vt[0]) else: sample_suffix = 0 sample_vt = int(sample_vt[0]) sample_id = int(sample['metasys_id']) + 10000 * (
def draw2d(filename):
    """Show the numeric contents of a CSV file as a 2-D image (no interpolation)."""
    table = pylab.csv2rec(filename)
    pixels = np.array(table.tolist())
    figure = plt.figure()
    image = plt.imshow(pixels, interpolation="none")