def filter_buffer(cls, raw_buffer, data_interval, bema_wing=6, gmt_offset=0):
    '''
    Return a bema-filtered version of the buffer, with an optional time-zone offset.

    The bema filter uses the minimum value found within a window (bema_window)
    to represent each data point.
    bema_wing = 6 => window = 13 (bema_wing + evaluating point + bema_wing)
    '''
    length = len(raw_buffer)
    # Extend two wings onto the raw data buffer before taking min and average
    dstack = numpy.hstack((raw_buffer[length - bema_wing:length],
                           raw_buffer[0:length],
                           raw_buffer[0:bema_wing]))
    # Fill the two wings with the values at the edges
    dstack[0:bema_wing] = raw_buffer[0]
    dstack[length + bema_wing:length + bema_wing * 2] = raw_buffer[-1]
    # Use the lowest point found in the window to represent its value
    dmin = numpy.zeros(len(dstack))
    for i in range(bema_wing, length + bema_wing):
        # window covers both wings plus the evaluating point (2*bema_wing + 1 points)
        dmin[i] = min(dstack[i - bema_wing:i + bema_wing + 1])
    # Points beyond the left edge: set to the starting point value
    dmin[0:bema_wing] = dmin[bema_wing]
    # Points beyond the right edge: set to the ending point value
    dmin[length + bema_wing:length + bema_wing * 2] = dmin[length + bema_wing - 1]
    # Moving average; this truncates the array back to its original size
    daverage = movavg(dmin, bema_wing * 2 + 1)
    if gmt_offset == 0:
        return daverage
    # Rotate the buffer by the GMT offset (slice indices must be integers)
    gmt_mark = int(gmt_offset * (60 // data_interval) * 60)
    return numpy.hstack((daverage[gmt_mark:length], daverage[0:gmt_mark]))
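# NOTE: every snippet in this file relies on matplotlib.mlab.movavg, which was
# deprecated in Matplotlib 2.2 and removed in the 3.x series. A minimal
# drop-in replacement, assuming the original semantics (a length-n boxcar that
# keeps only the fully-overlapping len(x) - n + 1 points), is:

import numpy as np

def movavg(x, n):
    """Moving average of x over a window of length n.

    Returns an array of length len(x) - n + 1, matching the behavior of the
    removed matplotlib.mlab.movavg.
    """
    w = np.ones(n, dtype=float) / n
    return np.convolve(np.asarray(x, dtype=float), w, mode='valid')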
def eliminateJumps(eps, sigma, numSteep=10, gapWidth=5, movWd=40):
    from matplotlib.mlab import movavg
    import numpy
    from numpy import diff, abs
    # Histogram of the 'derivatives' (absolute increments of sigma)
    ds = abs(diff(sigma))
    n, bins = numpy.histogram(ds)
    # The numSteep steepest pieces will be discarded
    i, count = 1, 0
    while count < numSteep:
        count += n[-i]
        i += 1
    threshold = bins[-i]
    # (An older version replaced the jumps with NaNs instead of removing them,
    # but that did not work with older numpy.)
    indices1 = [i for i in range(len(ds)) if ds[i] > threshold]
    indices = []
    for i in indices1:
        for ii in range(i - gapWidth, i + gapWidth + 1):
            indices.append(ii)
    rEps = [eps[i] for i in range(len(eps)) if i not in indices]
    rSigma = [sigma[i] for i in range(len(sigma)) if i not in indices]
    # Apply a moving average to the result; trim eps to match the shortened sigma
    rSigma = movavg(rSigma, movWd)
    rEps = rEps[movWd // 2:-(movWd // 2) + 1]
    return rEps, rSigma.flatten().tolist()
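# A usage sketch for eliminateJumps on synthetic data (an assumption, not part
# of the original project; note that eliminateJumps itself imports movavg from
# matplotlib.mlab, so it needs a Matplotlib old enough to provide it). Two
# artificial step jumps are injected into a smooth noisy curve and removed:
if __name__ == '__main__':
    import numpy
    eps = numpy.linspace(0, 1, 500)
    sigma = numpy.sin(2 * eps) + 0.01 * numpy.random.randn(500)
    sigma[100:] += 4.0   # inject a step jump ...
    sigma[300:] -= 4.0   # ... and a second one
    rEps, rSigma = eliminateJumps(eps, sigma, numSteep=2)
    print(len(rEps), len(rSigma))  # the two outputs have matching lengths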
def feature(self, name='range'):
    [datesA, wattsA] = self.building.dailyData
    dayMeans = self.building.dailyStats['mean'] / 1000
    dayMaxes = self.building.dailyStats['max'] / 1000
    dayMins = self.building.dailyStats['min'] / 1000
    dayRatio = self.building.dailyStats['mxmn']
    dayRange = dayMaxes - dayMins
    plots = [
        (dayMaxes, 'max', 'daily max', 'kW', '', 0),
        (dayMins, 'min', 'daily min', 'kW', '', 0),
        (dayMeans, 'avg', 'daily average', 'kW', '', 0),
        (dayRatio, 'max / min ratio', 'max / min', 'ratio', '', 1),
        (dayRange, 'range', 'range: max-min', 'kW', '%m-%y', 0),
    ]
    n = len(plots)
    window = 7
    fig = Figure(figsize=(3, 6), facecolor='white', edgecolor='none')
    for i, attr in enumerate(plots):
        mn = attr[0].mean()
        ax = fig.add_subplot(n, 1, i + 1)
        ax.plot(datesA[(window - 1):, 0], mlab.movavg(attr[0], window),
                '-', color='#000000', alpha=1, label=attr[1])
        ax.plot(ax.get_xlim(), [mn, mn], '--', color='b')
        # set the title inside the plot
        ax.text(.5, 0.85, attr[2], weight='bold',
                horizontalalignment='center', fontsize=10,
                transform=ax.transAxes)  # transAxes makes the location 0-1 for both axes
        # print the mean
        ax.text(.5, 0.07, 'avg=%0.2f' % mn,
                horizontalalignment='center', color='b', fontsize=10,
                transform=ax.transAxes)
        ax.set_ylabel(attr[3], fontsize=9)
        ax.yaxis.set_major_formatter(mplt.FormatStrFormatter('%0.2f'))
        ax.xaxis.set_major_locator(mpld.MonthLocator(interval=1))
        ax.xaxis.set_major_formatter(mpld.DateFormatter(attr[4]))
        ax.xaxis.grid(True)
        ax.set_ylim(bottom=attr[5], top=ax.get_ylim()[1] * 1.2)  # anchor max/min at 1:1
        # rotate and shrink the x tick labels
        for label in ax.xaxis.get_majorticklabels():
            label.set_fontsize(8)
            label.set_rotation(70)
        # shrink the y tick labels
        for label in ax.yaxis.get_majorticklabels():
            label.set_fontsize(8)
    fig.subplots_adjust(left=0.2)
    return fig
def dateplot_data(axis, dataset, linestyle='', marker='AUTO', movavg=0,
                  movavg_style='-'):
    mymarker = marker
    if not isinstance(dataset, list):
        dataset = [dataset]
    for d in dataset:
        if marker == 'AUTO':
            mymarker = next(auto_markers)
        if linestyle not in ['', ' ', 'bar']:
            # if data points are connected, they need to be sorted by date
            # (otherwise the line can go left and right)
            d.sort_by_date()
        if linestyle == 'bar':
            # bar width is in days (1 = one day)
            barcolor = next(colors)
            axis.bar(d.dates(), d.values(), width=0.005, label=d.name(),
                     color=barcolor, edgecolor=barcolor)
            axis.xaxis_date()
        else:
            axis.plot_date(d.dates(), d.values(), label=d.name(),
                           linestyle=linestyle, marker=mymarker,
                           color=next(colors))
        if movavg != 0 and len(d.values()) > 2:
            if movavg >= 1:
                # just a number of data points
                weight = int(movavg)
            elif movavg > 0:
                # a ratio; 0.05 means 5% of the data points
                weight = int(len(d.values()) * movavg)
            else:
                # if negative, use 1/|movavg| of the data points:
                # -20 means 1/20 = 5%
                weight = int(len(d.values()) / abs(movavg))
            if weight < 2:
                weight = 2
            d.sort_by_date()
            ma = mlab.movavg(d.values(), weight)
            label = "%s (moving avg for %d datapoints)" % (d.name(), weight)
            axis.plot_date(d.dates()[weight - 1:], ma, label=label,
                           linestyle=movavg_style, marker=' ',
                           color=next(colors))
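# The movavg parameter of dateplot_data above encodes the smoothing window in
# three ways; a summary with hypothetical values (ax and series are assumed to
# exist, series being an object with the dates()/values()/name()/sort_by_date()
# interface used above):
#
#   dateplot_data(ax, series, movavg=30)     # window of 30 data points
#   dateplot_data(ax, series, movavg=0.05)   # window of 5% of the points
#   dateplot_data(ax, series, movavg=-20)    # window of 1/20 = 5% of the points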
def retrieve_error(self):
    err_Martin = []
    movavg_taps = 1000
    for i_mode in range(self.nmodes):
        err_Martin.append(mlab.movavg(abs(self.error[i_mode]), movavg_taps))
    return err_Martin
def like2d(datx, daty, weights=None, nbins=15, which=[.68, .95], range=None,
           filled=True, color=None, cmap=None, smooth=None, ax=None, **kwargs):
    from matplotlib.pyplot import gca, get_cmap
    from matplotlib.mlab import movavg
    from matplotlib.colors import LinearSegmentedColormap
    from scipy.ndimage import gaussian_filter

    if ax is None:
        ax = gca()
    if weights is None:
        weights = ones(len(datx))
    if color is None:
        color = kwargs.pop('c') if 'c' in kwargs else 'b'

    H, xe, ye = histogram2d(datx, daty, nbins, weights=weights, range=range)
    xem, yem = movavg(xe, 2), movavg(ye, 2)  # bin centers
    kwargs = dict(levels=confint2d(H, sorted(which)[::-1] + [0]), **kwargs)
    if smooth:
        H = gaussian_filter(H, smooth)
    args = (xem, yem, transpose(H))

    if cmap is None:
        cmap = {'b': 'Blues', 'g': 'Greens', 'r': 'Reds',
                'orange': 'Oranges', 'grey': 'Greys'}.get(color)
        if cmap is None:
            cmap = LinearSegmentedColormap.from_list(None, ['w', color])
        else:
            cmap = get_cmap(cmap)

    if filled:
        ax.contourf(*args, cmap=cmap, **kwargs)
    ax.contour(*args, colors=color, **kwargs)
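# like2d above calls confint2d, which is not shown in this file. A minimal
# sketch of what such a helper typically computes (an assumption, not the
# project's actual code): for each requested confidence fraction, the
# histogram level that encloses that fraction of the total mass, found by
# sorting the bin contents and walking the cumulative sum. Called with
# decreasing fractions (e.g. [.95, .68, 0]) it yields increasing contour
# levels, as like2d expects.
import numpy as np

def confint2d(H, which):
    flat = np.sort(np.asarray(H, dtype=float).ravel())[::-1]  # largest first
    cum = np.cumsum(flat) / flat.sum()                        # enclosed mass
    return [flat[min(np.searchsorted(cum, frac), len(flat) - 1)]
            for frac in which]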
def like1dandy(dat, weights=None, nbins=30, range=None, maxed=True, ax=None, **kw):
    from matplotlib.pyplot import gca
    from matplotlib.mlab import movavg
    if ax is None:
        ax = gca()
    if weights is None:
        weights = ones(len(dat))
    H, xe = histogram(dat, bins=nbins, weights=weights, normed=True, range=range)
    if maxed:
        H = H / max(H)
    xem = movavg(xe, 2)  # bin centers
    ax.plot(xem, H, **kw)
def retrieve_error(self):
    # Element-wise maximum of |error| across all error calculators
    # (note: this modifies errorcalcs[0].error in place)
    error = self.errorcalcs[0].error
    for ecalc in self.errorcalcs:
        for i_mode in range(error.shape[0]):
            error[i_mode] = np.maximum(abs(ecalc.error[i_mode]),
                                       abs(error[i_mode]))
    err_Averaged = []
    movavg_taps = 1000
    for i_mode in range(error.shape[0]):
        err_Averaged.append(mlab.movavg(error[i_mode], movavg_taps))
    return err_Averaged
def like1d(dat, weights=None, nbins=30, range=None, maxed=True, ax=None, **kw):
    from matplotlib.pyplot import gca
    from matplotlib.mlab import movavg
    if ax is None:
        ax = gca()
    if weights is None:
        weights = ones(len(dat))
    H, xe = histogram(dat, bins=nbins, weights=weights, normed=True, range=range)
    if maxed:
        H = H / max(H)
    xem = movavg(xe, 2)  # bin centers
    ax.plot(xem, H, **kw)
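# Quick usage sketch for like1d/like1dandy (assumes the legacy APIs used above
# are available: star-imported numpy names such as ones/histogram, numpy's old
# normed= keyword, and matplotlib.mlab.movavg):
if __name__ == '__main__':
    import numpy as np
    import matplotlib.pyplot as plt
    # plot the peak-normalized marginal density of a standard-normal sample
    like1d(np.random.randn(10000), nbins=40)
    plt.show()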
def get_buffer(self, gmt_offset=0):
    '''
    Return a bema-filtered version of the buffer, with an optional time-zone offset.

    The bema filter uses the minimum value found within a window (bema_window)
    to represent each data point.
    bema_wing = 6 => window = 13 (bema_wing + evaluating point + bema_wing)
    '''
    length = self.buffer_size  # len(self.raw_buffer)
    # Extend two wings onto the raw data buffer before taking min and average
    dstack = hstack((self.raw_buffer[length - self.bema_wing:length],
                     self.raw_buffer[0:length],
                     self.raw_buffer[0:self.bema_wing]))
    # Fill the two wings with the values at the edges
    for i in range(0, self.bema_wing):
        dstack[i] = dstack[self.bema_wing]
    for i in range(length + self.bema_wing, length + self.bema_wing * 2):
        dstack[i] = dstack[length + self.bema_wing - 1]
    dmin = zeros(len(dstack))
    # Use the lowest point found in the window to represent its value
    for i in range(self.bema_wing, length + self.bema_wing):
        # window covers both wings plus the evaluating point (2*bema_wing + 1 points)
        dmin[i] = min(dstack[i - self.bema_wing:i + self.bema_wing + 1])
    # Points beyond the left edge: set to the starting point value
    for i in range(0, self.bema_wing):
        dmin[i] = dmin[self.bema_wing]
    # Points beyond the right edge: set to the ending point value
    for i in range(length + self.bema_wing, length + self.bema_wing * 2):
        dmin[i] = dmin[length + self.bema_wing - 1]
    # Moving average; this truncates the array back to its original size
    daverage = movavg(dmin, self.bema_wing * 2 + 1)
    if gmt_offset == 0:
        return daverage
    # Rotate the buffer by the GMT offset (slice indices must be integers)
    gmt_mark = int(gmt_offset * (60 // self.data_interval) * 60)
    return hstack((daverage[gmt_mark:length], daverage[0:gmt_mark]))
def getSpksSortedBounded(
        g, spk_chan, unit_num, sort_by, t_start, t_stop, t_align,
        pre_offset, post_offset, cutoff=.5, boxcar_width=50, gauss_sigma=25,
        trial_order='rt', ret_spk_bins=True, limit=None):
    depth = lambda L: isinstance(L, list) and max(map(depth, L)) + 1
    if depth(sort_by) != 1:
        raise RuntimeError("Depth of sort_by list has to be 1")
    # Check length of sort_by; this function only supports 2 or fewer sort variables
    if len(sort_by) > 2:
        raise RuntimeError("Number of sort variables can only be 1 or 2")
    # Check that t_start and t_stop are feasible
    if (g[t_stop] - g[t_start])[0] < 0:
        raise RuntimeError("t_stop needs to be a timestamp after t_start")
    # Select a subset of trials based on a user-provided boolean selector
    if limit is not None:
        g = selectDataStruct(g, limit)
    g[t_start] = np.int64(np.round(np.float64(g[t_start])))
    g[t_stop] = np.int64(np.round(np.float64(g[t_stop])))
    # Get rid of trials that do not "fit" into the requested time window
    durations = g[t_stop] - g[t_start]
    keep_boolean = np.repeat(True, len(durations))
    for i in range(len(durations)):
        if (g[t_stop][i] + post_offset) - (g[t_start][i] + pre_offset) <= 0:
            keep_boolean[i] = False
    g = selectDataStruct(g, keep_boolean)
    n_trials = len(g['spk_times'])
    trial_ids = np.arange(n_trials)
    if len(sort_by) == 1:
        insert_dummy = True
        label1_name = 'dummy variable'
        label2_name = sort_by[0]
        sort_by_list = [g[sort_by[0]]]
    elif len(sort_by) == 2:
        insert_dummy = False
        label1_name = sort_by[0]
        label2_name = sort_by[1]
        sort_by_list = [g[sort_by[0]], g[sort_by[1]]]
    if trial_order == 'rt':
        g['t_trial_order'] = g['t_response'] - g['t_dotson']
    elif trial_order == 'duration':
        g['t_trial_order'] = g['t_response'] - g['t_targetson']
    elif trial_order == 'chronological':
        g['t_trial_order'] = g['t_response']
    trialorder_sorted = sortByLists(g['t_trial_order'], sort_by_list)
    trialids_sorted = sortByLists(trial_ids, sort_by_list)
    trial_durations = [(g[t_stop][i] + post_offset) -
                       (g[t_start][i] + pre_offset) for i in range(n_trials)]
    trial_durations_sorted = sortByLists(np.array(trial_durations), sort_by_list)
    # To ensure that each trial has at least one NaN, we add 1 ms to
    # max_trial_duration, as that simplifies the calculations below
    max_trial_duration = np.int64(np.max(trial_durations)) + 1
    # Timestamps
    if t_align == t_start:
        timestamps = [g[t_stop][i] - g[t_align][i] + post_offset
                      for i in range(n_trials)]
    elif t_align == t_stop:
        timestamps = [g[t_start][i] - g[t_align][i] + pre_offset
                      for i in range(n_trials)]
    timestamps_sorted = sortByLists(np.array(timestamps), sort_by_list)
    # PSTHs will be computed with attrition
    spk_times_bounded = getSpkTimesBounded(
        g, spk_chan, unit_num, t_start, t_stop, t_align,
        pre_offset, post_offset)
    spk_times_bounded_sorted = sortByLists(np.array(spk_times_bounded),
                                           sort_by_list)
    labels = spk_times_bounded_sorted['labels']
    if insert_dummy:
        labels = np.column_stack([np.repeat(1, labels.shape[0]), labels])
    u_labels = np.unique(labels[:, 0])
    n_u_labels = len(u_labels)
    all_sorted_timestamps = list()
    all_sorted_trialids = list()
    all_sorted_trial_counts = list()
    all_sorted_spk_times = list()
    all_sorted_spk_counts = list()
    all_sorted_spk_bins = list()
    all_sorted_times = list()
    all_sorted_chopped_times = list()
    all_sorted_frs = list()
    all_sorted_psths = list()
    all_sorted_psths_gauss = list()
    all_sorted_chopped_times_gauss = list()
    all_sorted_psths_alpha = list()
    all_sorted_chopped_times_alpha = list()
    all_sorted_chopped_trial_counts = list()
    for i in range(n_u_labels):
        u_sublabels = labels[:, 1][labels[:, 0] == u_labels[i]]
        for j in range(len(u_sublabels)):
            sel = np.logical_and(labels[:, 0] == u_labels[i],
                                 labels[:, 1] == u_sublabels[j])
            n_trials = np.array(trial_durations_sorted['sorted'])[sel][0].shape[0]
            if n_trials == 0:
                all_sorted_trial_counts.append(0)
                all_sorted_trialids.append([])
                all_sorted_timestamps.append([])
                all_sorted_spk_times.append([])
                all_sorted_spk_counts.append([])
                all_sorted_spk_bins.append([])
                all_sorted_times.append([])
                all_sorted_chopped_times.append([])
                all_sorted_chopped_times_gauss.append([])
                all_sorted_chopped_times_alpha.append([])
                all_sorted_chopped_trial_counts.append([])
                all_sorted_frs.append([])
                all_sorted_psths.append([])
                all_sorted_psths_gauss.append([])
                all_sorted_psths_alpha.append([])
                continue
            cur_sorted_trialorder = np.array(trialorder_sorted['sorted'])[sel]
            cur_sorted_trialids = np.array(trialids_sorted['sorted'])[sel]
            cur_sorted_trial_durations = \
                np.array(trial_durations_sorted['sorted'])[sel]
            cur_sorted_spk_times = \
                np.array(spk_times_bounded_sorted['sorted'])[sel]
            cur_sorted_timestamps = np.array(timestamps_sorted['sorted'])[sel]
            # Re-order the trials by the desired trial order
            order = np.argsort(cur_sorted_trialorder[0])
            cur_sorted_trialids = cur_sorted_trialids[0][order]
            cur_sorted_trial_durations = cur_sorted_trial_durations[0][order]
            cur_sorted_spk_times = cur_sorted_spk_times[0][order]
            cur_sorted_spk_counts = [len(k) for k in cur_sorted_spk_times]
            cur_sorted_frs = [
                cur_sorted_spk_counts[k] / cur_sorted_trial_durations[k] * 1000
                for k in range(len(cur_sorted_spk_counts))]
            cur_sorted_timestamps = cur_sorted_timestamps[0][order]
            # We now create a spk_bin array where the rows are trials, each
            # column is 1 ms, and 0's and 1's indicate the absence and presence
            # of spikes. NaNs indicate that the trial does not extend that far
            # relative to the user-specified align events and offset times.
            # Working in ms!
            cur_sorted_spk_bin = np.empty([len(cur_sorted_spk_times),
                                           max_trial_duration])
            cur_sorted_spk_bin[:] = np.nan
            if t_align == t_start:
                times = np.int64(np.arange(0, max_trial_duration, 1)) + pre_offset
                for k in range(len(cur_sorted_spk_times)):
                    cur_sorted_spk_bin[
                        k, 0:np.int64(cur_sorted_trial_durations[k])] = 0
                    cur_sorted_spk_bin[
                        k, np.int64(cur_sorted_spk_times[k] - pre_offset)] = 1
                try:
                    median_cutoff = np.where(
                        np.sum(np.isnan(cur_sorted_spk_bin), axis=0) /
                        n_trials > cutoff)[0][0]
                except IndexError:
                    raise RuntimeError(
                        "cutoff handling failed: no column exceeds the NaN cutoff")
                chopped_spk_bin = cur_sorted_spk_bin[:, 0:median_cutoff]
                chopped_times = times[0:median_cutoff]
            elif t_align == t_stop:
                times = np.int64(np.arange(post_offset - max_trial_duration,
                                           post_offset))
                for k in range(len(cur_sorted_spk_times)):
                    cur_sorted_spk_bin[
                        k, -np.int64(cur_sorted_trial_durations[k]):] = 0
                    cur_sorted_spk_bin[
                        k, np.int64(cur_sorted_spk_times[k] - post_offset)] = 1
                try:
                    median_cutoff = np.where(
                        np.sum(np.isnan(cur_sorted_spk_bin), axis=0) /
                        n_trials > cutoff)[0][-1] + 1
                except IndexError:
                    raise RuntimeError(
                        "cutoff handling failed: no column exceeds the NaN cutoff")
                chopped_spk_bin = cur_sorted_spk_bin[:, median_cutoff:]
                chopped_times = times[median_cutoff:]
            chopped_trial_counts = np.sum(~np.isnan(chopped_spk_bin), axis=0)
            # Regular boxcar smoothing
            smoothed_psth = movavg(
                sp.stats.nanmean(chopped_spk_bin, axis=0) / .001, boxcar_width)
            # Gaussian smoothing with gauss_sigma
            x = np.arange(0, gauss_sigma * 6 + 1)
            gaussian_exponent = -0.5 * np.power((x - np.mean(x)) / gauss_sigma, 2)
            gaussian_filter = np.exp(gaussian_exponent) / np.sum(
                np.exp(gaussian_exponent))
            smoothed_psth_gauss = np.convolve(
                sp.stats.nanmean(chopped_spk_bin, axis=0) / .001,
                gaussian_filter, mode='valid')
            # Alpha-like function smoothing
            t = np.arange(250)
            h = (1 - np.exp(-t)) * np.exp(-t / 25.)
            h = h / np.sum(h)
            smoothed_psth_alpha = np.convolve(
                sp.stats.nanmean(chopped_spk_bin, axis=0) / .001, h, mode='full')
            smoothed_psth_alpha = smoothed_psth_alpha[
                0:chopped_spk_bin.shape[1]][25:]
            psth_times_alpha = chopped_times[0:chopped_spk_bin.shape[1]][25:]
            all_sorted_timestamps.append(cur_sorted_timestamps)
            all_sorted_trial_counts.append(len(cur_sorted_spk_times))
            all_sorted_trialids.append(cur_sorted_trialids)
            all_sorted_spk_times.append(cur_sorted_spk_times)
            all_sorted_spk_counts.append(cur_sorted_spk_counts)
            all_sorted_spk_bins.append(cur_sorted_spk_bin)
            all_sorted_times.append(times)
            all_sorted_chopped_times.append(
                movavg(chopped_times, boxcar_width) + .5)
            all_sorted_chopped_times_gauss.append(chopped_times[
                int(gauss_sigma * 6 / 2):int(-gauss_sigma * 6 / 2)])
            all_sorted_chopped_times_alpha.append(psth_times_alpha)
            all_sorted_chopped_trial_counts.append(chopped_trial_counts)
            all_sorted_frs.append(cur_sorted_frs)
            all_sorted_psths.append(smoothed_psth)
            all_sorted_psths_gauss.append(smoothed_psth_gauss)
            all_sorted_psths_alpha.append(smoothed_psth_alpha)
    out = {}
    out['label1_name'] = label1_name
    out['label2_name'] = label2_name
    out['label1'] = labels[:, 0]
    out['label2'] = labels[:, 1]
    out['timestamps'] = all_sorted_timestamps
    out['trial_counts'] = all_sorted_trial_counts
    out['trial_ids'] = all_sorted_trialids
    out['spk_times'] = all_sorted_spk_times
    out['spk_counts'] = all_sorted_spk_counts
    out['chopped_times'] = all_sorted_chopped_times
    out['chopped_trial_counts'] = all_sorted_chopped_trial_counts
    out['frs'] = all_sorted_frs
    out['psths'] = all_sorted_psths
    out['gauss_psths'] = all_sorted_psths_gauss
    out['gauss_chopped_times'] = all_sorted_chopped_times_gauss
    out['alpha_psths'] = all_sorted_psths_alpha
    out['alpha_chopped_times'] = all_sorted_chopped_times_alpha
    if trial_order == 'rt':
        out['rts_sorted'] = trialorder_sorted
    if ret_spk_bins:
        out['spk_bins'] = all_sorted_spk_bins
        out['times'] = all_sorted_times
    return out
import matplotlib
matplotlib.rc('text', usetex=True)
matplotlib.rc('text.latex', preamble=r'\usepackage{euler}')
from numpy import *
from matplotlib.mlab import movavg

dta = genfromtxt('tension-test.data')
eps, sig = dta[:, 0], dta[:, 2]
# Smooth sigma with a 5-point moving average and prepend the (0,0) point;
# movavg shortens the array by 4, so trim eps accordingly
eps, sig = concatenate(([0], eps[2:-2])), concatenate(([0], movavg(sig, 5)))
# Tensile strength f_t and the strain at which it is reached
epsFt, ft = eps[sig == max(sig)][0], max(sig)
ft02 = .2 * ft
# Softening branch, cut off where sigma drops below 0.2*f_t
sig2 = array([(s if eps[i] > epsFt else (ft / epsFt) * eps[i])
              for i, s in enumerate(sig) if eps[i] < epsFt or s > ft02] + [0])
eps2 = concatenate((eps[0:len(sig2) - 1], [eps[len(sig2) - 2]]))
epsFt02 = max(eps2)
print(epsFt, epsFt02)
print(shape(eps2), shape(sig2))

import pylab
pylab.grid()
pylab.plot(eps, sig, label=r'$\sigma_N(\varepsilon_N)$')
pylab.fill(eps2, sig2, color='b', alpha=.3)
pylab.annotate(r'$f_t$', xytext=(epsFt, ft), xy=(0, ft),
               arrowprops=dict(arrowstyle='-', linestyle='dashed',
                               relpos=(0, 0), shrinkA=0, shrinkB=0))
pylab.annotate(r'$0.2\,f_t$', xytext=(epsFt02, ft02), xy=(0, ft02),
               arrowprops=dict(arrowstyle='-', linestyle='dashed',
                               relpos=(0, 0), shrinkA=0, shrinkB=0))
pylab.text(epsFt, .5 * ft, r'$G_f$', fontsize=20)
pylab.xlabel(r'$\varepsilon_N$')
pylab.ylabel(r'$\sigma_N(\varepsilon_N)$')
#pylab.show()
pylab.savefig('fracture-energy.pdf')
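# The script above shades the region labelled G_f but never prints its value.
# Under the usual definition (fracture energy = area under the softening
# curve, here cut off at 0.2*f_t), a trapezoidal estimate over the filled
# polygon is one line -- an addition assumed here, not part of the original:
Gf = trapz(sig2, eps2)  # numpy's trapz, available via the star import above
print('G_f =', Gf)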
def likegridandy(chains, params=None, lims=None, ticks=None, default_chain=0,
                 colors=None, filled=True, nbins1d=30, nbins2d=20, labels=None,
                 fig=None, size=2, legend_loc=None, param_name_mapping=None,
                 param_label_size=14):
    """
    Make a grid (aka "triangle plot") of 1- and 2-d likelihood contours.

    Parameters
    ----------
    chains : one or a list of `Chain` objects
    default_chain, optional :
        the chain used to get default parameter names, axes limits, and ticks;
        either an index into chains or a `Chain` object (default: chains[0])
    params, optional :
        list of parameter names to show
        (default: all parameters from default_chain)
    lims, optional :
        a dictionary mapping parameter names to (min, max) axes limits
        (default: +/- 4 sigma from default_chain)
    ticks, optional :
        a dictionary mapping parameter names to a list of [ticks]
        (default: [-2, 0, +2] sigma from default_chain)
    fig, optional :
        figure or figure number in which to plot (default: figure(0))
    size, optional : size in inches of one plot (default: 2)
    colors, optional : colors to cycle through for plotting
    filled, optional : whether to fill in the contours (default: True)
    labels, optional : list of names for a legend
    legend_loc, optional :
        (x, y) location of the legend (coordinates scaled to [0, 1])
    nbins1d, optional : number of bins for 1d plots (default: 30)
    nbins2d, optional : number of bins for 2d plots (default: 20)
    """
    from matplotlib.pyplot import figure, Line2D
    fig = figure(0) if fig is None else (figure(fig) if isinstance(fig, int)
                                         else fig)
    if not isinstance(chains, list):
        chains = [chains]
    if params is None:
        params = sorted(reduce(lambda x, y: set(x) & set(y),
                               [c.params() for c in chains]))
    if param_name_mapping is None:
        param_name_mapping = {}
    if size is not None:
        fig.set_size_inches(*([size * len(params)] * 2))
    if colors is None:
        colors = ['b', 'orange', 'k', 'm', 'cyan']
    fig.subplots_adjust(hspace=0, wspace=0)
    c = chains[default_chain] if isinstance(default_chain, int) else default_chain
    lims = dict({p: (max(min(c[p]), mean(c[p]) - 4 * std(c[p])),
                     min(max(c[p]), mean(c[p]) + 4 * std(c[p])))
                 for p in params}, **(lims if lims is not None else {}))
    # ANDY: five ticks at -2, -1, 0, +1, +2 sigma (the original used [-2, 0, 2])
    ticks = dict({p: [t for t in ts if lims[p][0] <= t <= lims[p][1]]
                  for (p, ts) in zip(params,
                                     (c.mean(params) + c.std(params) *
                                      transpose([[-2, -1, 0, 1, 2]])).T)},
                 **(ticks if ticks is not None else {}))
    n = len(params)
    for (i, p1) in enumerate(params):
        for (j, p2) in enumerate(params):
            if i <= j:
                ax = fig.add_subplot(n, n, j * n + i + 1)
                ax.set_xticks(ticks[p1])
                ax.set_xlim(*lims[p1])
                if i == j:
                    for (ch, col) in zip(chains, colors):
                        if p1 in ch:
                            # peak-normalized 1-d histogram of the chain
                            H, xe = histogram(ch[p1], bins=nbins1d,
                                              weights=ch['weight'],
                                              normed=True, range=None)
                            H = H / max(H)
                            xem = movavg(xe, 2)
                            ax.plot(xem, H)
                    ax.set_yticks([])
                elif i < j:
                    for (ch, col) in zip(chains, colors):
                        if p1 in ch and p2 in ch:
                            ch.like2d(p1, p2, filled=filled, nbins=nbins2d,
                                      color=col, ax=ax)
                    ax.set_yticks(ticks[p2])
                    ax.set_ylim(*lims[p2])
                if i == 0:
                    ax.set_ylabel(param_name_mapping.get(p2, p2),
                                  size=param_label_size)
                    # ANDY: tick label precision based on the parameter's std
                    ax.set_yticklabels(
                        ['{0:.{s}f}'.format(t, s=int(2 - log10(std(c[p2]))))
                         for t in ticks[p2]])
                else:
                    ax.set_yticklabels([])
                if j == n - 1:
                    ax.set_xlabel(param_name_mapping.get(p1, p1),
                                  size=param_label_size)
                    ax.set_xticklabels(
                        ['{0:.{s}f}'.format(t, s=int(2 - log10(std(c[p1]))))
                         for t in ticks[p1]])
                else:
                    ax.set_xticklabels([])
    fig.autofmt_xdate(rotation=90)
    if labels is not None:
        fig.legend([Line2D([0], [0], c=col) for col in colors], labels,
                   fancybox=True, shadow=True, loc=legend_loc)
    fig.savefig('likegridandy.pdf', format='pdf', dpi=400)
def like1d(dat, weights=None, nbins=30, ranges=None, maxed=False, ax=None,
           smooth=False, kde=True, zero_endpoints=False, filled=False, **kw):
    from matplotlib.pyplot import gca
    if ax is None:
        ax = gca()
    if kde:
        try:
            from getdist import MCSamples
        except ImportError as e:
            raise Exception(
                "Plotting with kde, kde1d, or kde2d set to True requires the "
                "package 'getdist'. Install this package or set to False."
            ) from e
        if ranges:
            i = bitwise_and(dat > (ranges[0] or -Inf), dat < (ranges[1] or Inf))
            dat = dat[i]
            weights = weights[i]
        d = MCSamples(samples=dat, weights=weights, names=['x'],
                      ranges={'x': ranges or (None, None)},
                      settings={'smooth_scale_1D': (smooth or -1)}).get1DDensity(0)
        d.normalize('max' if maxed else 'integral')
        xem, H = d.x, d.P * (maxed or 1)
    else:
        from matplotlib.mlab import movavg
        H, xe = histogram(dat, bins=nbins, weights=weights, normed=True,
                          range=ranges)
        xem = movavg(xe, 2)  # bin centers
        if smooth:
            from scipy.interpolate import PchipInterpolator
            itp = PchipInterpolator(xem, H)
            xem = linspace(xem.min(), xem.max(), 100)
            H = itp(xem)
        if maxed:
            H = H / max(H) * (maxed or 1)
    if zero_endpoints:
        xem = hstack([[xem[0]], xem, [xem[-1]]])
        H = hstack([[0], H, [0]])
    if filled:
        ax.fill_between(xem, H, alpha=(0.5 if filled is True else filled), **kw)
        kw.pop('label', None)  # avoid a duplicate legend entry for the line
    ax.plot(xem, H, **kw)
def cor_movavg(slopes, kreuz, win):
    # correlation between the two moving-average-smoothed series
    masl = mlab.movavg(slopes, win)
    makr = mlab.movavg(kreuz, win)
    return np.corrcoef(masl, makr)[1, 0]
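# Usage sketch for cor_movavg on synthetic data (an assumption; np and mlab
# must be imported as the function above requires):
if __name__ == '__main__':
    import numpy as np
    t = np.linspace(0, 10, 1000)
    # two noisy observations of the same underlying signal
    slopes = np.sin(t) + 0.5 * np.random.randn(1000)
    kreuz = np.sin(t) + 0.5 * np.random.randn(1000)
    # smoothing both series first raises the measured correlation
    print(cor_movavg(slopes, kreuz, 50))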
def getData(self):
    '''Finds and processes data, returning the plot'''
    if self.variable in ('pdsi', 'scpdsi', 'pzi'):
        filename = os.path.join(WWDTNETCDF_DIR, self.variable,
                                '%s_%s_PRISM.nc' % (self.variable, self.month))
    else:
        filename = os.path.join(WWDTNETCDF_DIR,
                                '%s%s' % (self.variable, self.span),
                                '%s%s_%s_PRISM.nc' % (self.variable, self.span,
                                                      self.month))
    # Open netcdf files for data and elevation
    dataFile = netcdf.netcdf_file(filename, 'r')
    elevationFile = netcdf.netcdf_file(ELEVATION_DATA, 'r')
    # Get closest Lat/Lon
    closestLat = self.Index(dataFile.variables['latitude'], self.lat)
    closestLon = self.Index(dataFile.variables['longitude'], self.lon)
    # Get closest Lat/Lon for elevation
    eclosestLat = self.Index(elevationFile.variables['lat'], self.lat)
    eclosestLon = self.Index(elevationFile.variables['lon'], self.lon)
    # Set elevation based on coordinates
    elevationData = elevationFile.variables['elevation']
    elevation = elevationData[eclosestLat, eclosestLon]
    elevationFile.close()
    # Set current date
    currentYear = datetime.now().year
    currentDay = datetime.now().day
    currentMonth = datetime.now().month
    # Select the length of the data array
    years = np.arange(self.startYear, self.endYear + 1, 1)
    data = np.array(dataFile.variables['data'][self.startYear - 1895:len(years),
                                               closestLat, closestLon])
    # Convert precipitation so any -9999.00 values can be excluded
    # when the data selection covers all years
    if self.variable == 'pon':
        data = data / 100.
    # Force data values of -9999.0 for nonexistent data
    if self.month - self.span < 0:
        noData = abs(self.month - self.span) // 12
        noYear = 0
        if noData == 0:
            data[0] = -9999.0
        else:
            while noYear < noData:
                data[noYear] = -9999.00
                noYear += 1
    if data.mean() == -9999.00:
        data = ''
    # Select the earliest possible year/value based on user input
    v = 0
    for value in data:
        if data[v] == -9999.0:
            v += 1
        else:
            years = np.arange(self.startYear + v, self.endYear + 1, 1)
            data = np.array(
                dataFile.variables['data'][(self.startYear - 1895) + v:
                                           len(years) + v,
                                           closestLat, closestLon])
            value += 1
    # Convert C to F
    if self.variable == 'mdn':
        data = ((data * 9.0 / 5) + 32)
    # Convert precipitation to inches
    if self.variable == 'pon':
        data = data / 100.
    # Set normal range 1981-2010
    normal_range = np.array(dataFile.variables['data'][86:116, closestLat,
                                                       closestLon])
    normal = normal_range.mean()
    # Convert precipitation to the correct format
    if self.variable == 'pon':
        normal = normal / 100.
    # Convert temperature from C to F
    if self.variable == 'mdn':
        normal = ((normal * 9.0 / 5) + 32)
    # Flag as missing if the data is all -9999.0
    if normal == -9999.0:
        normal = ''
    # No time scale is needed for drought indices
    if self.variable in ('pdsi', 'scpdsi', 'pzi', 'spi'):
        normal = 0
    # Distance from normal
    inv_data = data - normal
    # Create a figure for plots
    fig = plt.figure(figsize=(10, 7), facecolor='w')
    ax = fig.add_axes([0.08, 0.15, .90, 0.78])
    # Month names for plot titles, indexed by month
    monthList = ['January', 'February', 'March', 'April', 'May', 'June',
                 'July', 'August', 'September', 'October', 'November',
                 'December']
    # Set up the plot based on the variable
    if self.variable == 'pdsi':
        self.span = 1
        ax.set_title(u'Palmer Drought Severity Index, %s-Months Ending in %s \n'
                     u' %4.2f\u00b0N, %4.2f\u00b0W, Elevation: %4.2f Meters'
                     % (self.span, monthList[self.month - 1], self.lat,
                        abs(self.lon), elevation))
        ax.set_ylabel("PDSI")
        topColor, bottomColor = 'green', 'gold'
    if self.variable == 'scpdsi':
        self.span = 1
        ax.set_title(u'Self Calibrated Palmer Drought Severity Index, '
                     u'%s-Months Ending in %s \n %4.2f\u00b0N, %4.2f\u00b0W, '
                     u'Elevation: %4.2f Meters'
                     % (self.span, monthList[self.month - 1], self.lat,
                        abs(self.lon), elevation))
        ax.set_ylabel("SCPDSI")
        topColor, bottomColor = 'green', 'gold'
    if self.variable == 'pzi':
        self.span = 1
        ax.set_title(u'Palmer Z-Index, %s-Months Ending in %s \n '
                     u'%4.2f\u00b0N, %4.2f\u00b0W, Elevation: %4.2f Meters'
                     % (self.span, monthList[self.month - 1], self.lat,
                        abs(self.lon), elevation))
        ax.set_ylabel("PZI")
        topColor, bottomColor = 'green', 'gold'
    if self.variable == 'mdn':
        if self.span == 1:
            ax.set_title(u'Mean Temperature, %s \n %4.2f\u00b0N, %4.2f\u00b0W, '
                         u'Elevation: %4.2f Meters'
                         % (monthList[self.month - 1], self.lat, abs(self.lon),
                            elevation))
        else:
            ax.set_title(u'Mean Temperature, %s-Months Ending in %s \n '
                         u'%4.2f\u00b0N, %4.2f\u00b0W, Elevation: %4.2f Meters'
                         % (self.span, monthList[self.month - 1], self.lat,
                            abs(self.lon), elevation))
        ax.set_ylabel(u"Temperature \u00b0F")
        topColor, bottomColor = 'red', 'blue'
    if self.variable == 'spi':
        if self.span == 1:
            ax.set_title(u'Standardized Precipitation Index, %s \n '
                         u'%4.2f\u00b0N, %4.2f\u00b0W, Elevation: %4.2f Meters'
                         % (monthList[self.month - 1], self.lat, abs(self.lon),
                            elevation))
        else:
            ax.set_title(u'Standardized Precipitation Index, %s-Months Ending '
                         u'in %s \n %4.2f\u00b0N, %4.2f\u00b0W, '
                         u'Elevation: %4.2f Meters'
                         % (self.span, monthList[self.month - 1], self.lat,
                            abs(self.lon), elevation))
        ax.set_ylabel(u"SPI")
        topColor, bottomColor = 'blue', 'red'
    if self.variable == 'pon':
        if self.span == 1:
            ax.set_title(u'Precipitation, %s \n %4.2f\u00b0N, %4.2f\u00b0W, '
                         u'Elevation: %4.2f Meters'
                         % (monthList[self.month - 1], self.lat, abs(self.lon),
                            elevation))
        else:
            ax.set_title(u'Precipitation, %s-Months Ending in %s \n '
                         u'%4.2f\u00b0N, %4.2f\u00b0W, Elevation: %4.2f Meters'
                         % (self.span, monthList[self.month - 1], self.lat,
                            abs(self.lon), elevation))
        ax.set_ylabel("Inches")
        ax.set_ybound(max(data))
        ax.axhline(y=normal, color="black", label='Normal Period: 1981-2010')
        if max(data) <= 10:
            ax.yaxis.set_major_locator(MultipleLocator(1))
            ax.yaxis.set_minor_locator(MultipleLocator(.1))
        else:
            ax.yaxis.set_major_locator(MultipleLocator(10))
            ax.yaxis.set_minor_locator(MultipleLocator(1))
        ax.xaxis.set_major_locator(MultipleLocator(10))
        ax.xaxis.set_minor_locator(MultipleLocator(1))
        ax.bar(years, data, color='green', align="center")
        if self.runavg == False:
            pass
        elif self.runavg % 2 == 0:
            ma = movavg(data, self.runavg)
            ax.plot(years[(self.runavg // 2 - 1):-(self.runavg // 2)], ma,
                    color='#FF9900', linewidth=2,
                    label="%d Year Average" % self.runavg)
        else:
            ma = movavg(data, self.runavg)
            ax.plot(years[self.runavg // 2:-(self.runavg // 2)], ma,
                    color='#FF9900', linewidth=2,
                    label="%d Year Average" % self.runavg)
    # Set up plots other than Percent of Normal
    if not self.variable == 'pon':
        # Determine whether the normal period should be added to the plot;
        # drought indices are exempt.
        if self.variable in ('pdsi', 'scpdsi', 'pzi', 'spi'):
            ax.axhline(y=normal, color="black")
        else:
            ax.axhline(y=normal, color="black",
                       label='Normal Period: 1981-2010')
        ax.yaxis.set_major_locator(MultipleLocator(1))
        ax.yaxis.set_minor_locator(MultipleLocator(.1))
        ax.xaxis.set_major_locator(MultipleLocator(10))
        ax.xaxis.set_minor_locator(MultipleLocator(1))
        # Plot stacked graphs
        ax.bar(years[data < normal], inv_data[data < normal],
               color=bottomColor, align="center", bottom=normal)
        ax.bar(years[data >= normal], inv_data[data >= normal],
               color=topColor, align="center", bottom=normal)
        # Set the running average
        if not self.variable == 'mdn':
            if self.runavg == False:
                pass
            elif self.runavg % 2 == 0:
                ma = movavg(inv_data, self.runavg)
                ax.plot(years[(self.runavg // 2 - 1):-(self.runavg // 2)], ma,
                        color='black', linewidth=2,
                        label="%d Year Average" % self.runavg)
            else:
                ma = movavg(inv_data, self.runavg)
                ax.plot(years[self.runavg // 2:-(self.runavg // 2)], ma,
                        color='black', linewidth=2,
                        label="%d Year Average" % self.runavg)
        # Add normal so the running average is placed correctly for temperature
        else:
            if self.runavg == False:
                pass
            elif self.runavg % 2 == 0:
                ma = movavg(inv_data + normal, self.runavg)
                ax.plot(years[(self.runavg // 2 - 1):-(self.runavg // 2)], ma,
                        color='black', linewidth=2,
                        label="%d Year Average" % self.runavg)
            else:
                ma = movavg(inv_data + normal, self.runavg)
                ax.plot(years[self.runavg // 2:-(self.runavg // 2)], ma,
                        color='black', linewidth=2,
                        label="%d Year Average" % self.runavg)
        # Determine the y-axis limit
        if abs(max(inv_data[:])) >= abs(max(data)):
            y_limit = max(abs(inv_data[:]))
        else:
            y_limit = max(abs(data[:]))
        # Complete setting of the y-axis
        if not self.variable == 'mdn':
            y_min, y_max = (-y_limit, y_limit)
        else:
            y_min, y_max = (normal - max(abs(inv_data)),
                            normal + max(abs(inv_data)))
        # Uncrowd the y-axis if the span is 13 or more (applies to all
        # variables; indent 4 spaces to make it apply only to mdn)
        if y_max - y_min >= 13:
            ax.yaxis.set_major_locator(MultipleLocator(2))
            ax.yaxis.set_minor_locator(MultipleLocator(.5))
        ax.set_ybound(y_min, y_max)
    # Rescale the x-axis for spans of less than ten years
    if self.endYear - self.startYear <= 10:
        ax.xaxis.set_major_formatter(ScalarFormatter(useOffset=False))
        ax.xaxis.set_major_locator(MultipleLocator(1))
    else:
        ax.xaxis.grid()
    # Set font properties for the legend(s)
    fontProperties = FontProperties()
    fontProperties.set_size('small')
    ax.legend(loc=(0, -0.150), fancybox=True, prop=fontProperties)
    # Set branding
    if currentDay < 10:
        currentDay = "0" + str(currentDay)
    ax.set_xlabel("Data Source: WRCC/UI, Created: %s-%s-%s"
                  % (currentMonth, currentDay, currentYear))
    ax.xaxis.set_label_coords(0.78, -.122, transform=None)
    # Set scale for the x-axis
    ax.set_autoscale_on(False)
    ax.set_xbound((years[0] - 1, years[-1] + 1))
    # Add figure to canvas
    canvas = FigureCanvas(plt.figure(1))
    dataFile.close()
    return canvas
mpl.use("agg") import pylab as pl from matplotlib import mlab mn = s.mean(1) vr = s.var(1) firstI = 1000 firstY = vr[0 : firstI * 2].mean() firstX = x.lenSamp[firstI] # allX = n.arange(firstX,max(x.lenSamp),firstX) allX = x.lenSamp allY = allX.astype(float) / firstX * firstY # allY = (n.arange(len(allX))+1)*firstY pl.axes() pl.plot(x.lenSamp, mn, label="Observed") pl.title("Score Mean(Sample Length)") pl.legend() pl.savefig("samp_len_mean.png", format="png") pl.axes().clear() # pl.plot(x.lenSamp,s) # pl.savefig("samp_len.png", format='png') pl.plot(x.lenSamp, vr, "g.", label="Observed") wind = 400 mavgVr = mlab.movavg(vr, wind + 1) pl.plot(x.lenSamp[wind / 2 : -wind / 2], mavgVr, "b-", label="Movavg(Observed,400)", linewidth=3) pl.plot(allX, allY, "r-", linewidth=2, label="Expected as sum of i.i.d.") pl.title("Score Variance(Sample Length)") pl.legend() pl.savefig("samp_len_var.png", format="png")
def updatePlots(self):
    # Save the empty backgrounds for quicker refreshes
    # (this has to happen here instead of in __init__)
    if self.BACKGROUND_UPDATE_REQUIRED:
        self.initializePlots()
        self.backgrounds = list()
        for i in range(NPLOTS):
            self.backgrounds.append(
                self.canvases[i].copy_from_bbox(self.axes[i].bbox))
        self.BACKGROUND_UPDATE_REQUIRED = 0
    # Restore the empty backgrounds
    for i in range(NPLOTS):
        self.canvases[i].restore_region(self.backgrounds[i])
    for i in range(NPLOTS):
        # the different rows of plots have different align events
        if (i == 0) | (i == 1) | (i == 2):
            t_align = self.t_targetson
        elif (i == 3) | (i == 4) | (i == 5):
            t_align = self.t_mixture
        elif (i == 6) | (i == 7) | (i == 8):
            t_align = self.t_response
        timestamps = list()
        if i % 3 == 1:
            timestamps = [self.timestamps[j] for j in range(len(self.side))
                          if self.side[j] == 1.]
            t_align = t_align[self.side == 1.]
        elif i % 3 == 0:
            timestamps = [self.timestamps[j] for j in range(len(self.side))
                          if self.side[j] == -1.]
            t_align = t_align[self.side == -1.]
        if i % 3 < 2:  # means it's a raster plot
            pixels_per_yunit = (
                self.axes[i].transData.transform_point((1, 1)) -
                self.axes[i].transData.transform_point((0, 0)))[1]
            # How many total spikes are we dealing with?
            # Get the most recent n_trials
            n_trials = np.min([self.nTrialsToPlot.get(), len(timestamps)])
            timestamps = timestamps[-1 * n_trials:]
            t_align = t_align[-1 * n_trials:]
            n_total_spikes = np.sum([len(ts) for ts in timestamps])
            # Pre-allocate... we make one long array of all spikes and then
            # vertically offset the individual trials
            xdataRasters = np.zeros(n_total_spikes)
            ydataRasters = np.zeros(n_total_spikes)
            xdataTrialmarkers = np.zeros(n_trials)
            ydataTrialmarkers = np.zeros(n_trials)
            offset = 0
            for j in range(n_trials):
                xdataRasters[offset:(offset + len(timestamps[j]))] = \
                    timestamps[j] - t_align[j]
                ydataRasters[offset:(offset + len(timestamps[j]))] = \
                    np.repeat(self.axes[i].get_ylim()[1] - .025 -
                              j * np.min([1. / n_trials, .1]),
                              len(timestamps[j]))
                xdataTrialmarkers[j] = self.axes[i].get_xlim()[0] + 10
                ydataTrialmarkers[j] = (self.axes[i].get_ylim()[1] - .025 -
                                        j * np.min([1. / n_trials, .1]))
                offset += len(timestamps[j])
            # look up the x-limits for this row of plots
            xmin = getattr(self, 'xmin' + repr(int(i / 3))).get()
            xmax = getattr(self, 'xmax' + repr(int(i / 3))).get()
            psthBinwidth = self.psthBinwidth.get()
            bincounts, binedges = np.histogram(xdataRasters,
                                               np.arange(xmin, xmax, 1))
            if n_trials > 0:
                psth = movavg(bincounts / n_trials * 1000, psthBinwidth)
            else:
                psth = movavg(bincounts * 1000, psthBinwidth)
            bincenters = movavg(binedges, psthBinwidth)[0:-1]
            # the following leaves the psths behind to be picked up by
            # plots 3, 6, and 9
            if i % 3 == 1:
                self.psth_right = psth
                self.bincenters_right = bincenters
            elif i % 3 == 0:
                self.psth_left = psth
                self.bincenters_left = bincenters
            # update the actual plots
            self.raster_lines[i].set_data(xdataRasters, ydataRasters)
            self.raster_lines[i].set_linestyle('None')
            self.raster_lines[i].set_markersize(
                np.min([pixels_per_yunit / n_trials, 10]))
            self.axes[i].draw_artist(self.raster_lines[i])
            self.trial_lines[i].set_data(xdataTrialmarkers, ydataTrialmarkers)
            self.trial_lines[i].set_linestyle('None')
            self.raster_lines[i].set_markersize(
                np.min([pixels_per_yunit / n_trials, 5]))
            self.axes[i].draw_artist(self.trial_lines[i])
            # blit the updated axes region to the screen
            self.canvases[i].blit(self.axes[i].bbox)
        else:  # means we're doing a histogram plot
            # pick up the self.psth and self.bincenters left behind above
            self.psth_left_lines[i].set_data(self.bincenters_left,
                                             self.psth_left)
            self.axes[i].draw_artist(self.psth_left_lines[i])
            self.psth_right_lines[i].set_data(self.bincenters_right,
                                              self.psth_right)
            self.axes[i].draw_artist(self.psth_right_lines[i])
            maxFR = np.max([10, np.max(np.concatenate(
                [self.psth_left, self.psth_right])) * 1.25])
            # blit the updated axes region to the screen
            self.canvases[i].blit(self.axes[i].bbox)
def gotQuotes(quotes):
    ((_, quotesX), (_, quotesY)) = quotes
    window = quote.ComparisonWindow(quotesX, quotesY)
    returnsX = isixtyeight.IReturns(window.xQuotes)
    returnsY = isixtyeight.IReturns(window.yQuotes)
    minReturn = min([returnsX.minReturn(), returnsY.minReturn()])
    maxReturn = max([returnsX.maxReturn(), returnsY.maxReturn()])

    print('mean daily logarithmic returns')
    print('%s: %g' % (returnsX.symbol, returnsX.meanReturn()))
    print('%s: %g' % (returnsY.symbol, returnsY.meanReturn()))

    fig = pyplot.figure(1, figsize=(5.5, 5.5))
    x = returnsX.returns
    y = returnsY.returns
    axScatter = fig.add_subplot(1, 2, 2)
    axScatter.set_title('daily return correlation')
    axScatter.scatter(x, y, s=1)
    axScatter.set_xlabel(window.xQuotes.symbol)
    axScatter.set_ylabel(window.yQuotes.symbol)
    m, b, r, p, e = linregress(x, y)
    print("y = %g * x + %g" % (m, b))
    print("r^2 = %g" % (r ** 2,))
    print("p = %g" % (p,))
    print("standard error = %g" % (e,))
    axScatter.plot([minReturn, maxReturn], [minReturn, maxReturn], 'g')
    axScatter.plot([min(x), max(x)], [m * min(x) + b, m * max(x) + b], 'r')
    axScatter.set_aspect(1.)

    axValue = fig.add_subplot(4, 2, 1)
    axValue.set_title('relative value')
    axValue.set_yscale('log', basey=2)
    axValue.plot(window.xQuotes.dates,
                 [v / window.xQuotes.values[0] for v in window.xQuotes.values],
                 label=window.xQuotes.symbol)
    axValue.plot(window.yQuotes.dates,
                 [v / window.yQuotes.values[0] for v in window.yQuotes.values],
                 label=window.yQuotes.symbol)
    axValue.legend(loc=0, ncol=2)

    returnDiffs = [y - x for (x, y) in zip(returnsX.returns, returnsY.returns)]
    axExcess = fig.add_subplot(2, 2, 3)
    axExcess.set_title('rolling excess return (%s over %s)'
                       % (window.yQuotes.symbol, window.xQuotes.symbol))
    axExcess.set_ylabel('excess force of interest (%)')
    axExcess.set_xlabel('period ending')
    # 250 trading days ~ one year; x25000 = x250 (annualize) x100 (percent)
    oneYear = [i * 25000 for i in mlab.movavg(returnDiffs, 250)]
    axExcess.plot(returnsX.dates[250 - 1:], oneYear, label="250 day")
    axExcess.axhline(color="black")
    axExcess.axhline(y=sum(oneYear) / len(oneYear), color="blue")
    axExcess.set_xlim((window.dates[0], window.dates[-1]))
    axExcess.legend(loc='upper left', ncol=4)

    axAccum = fig.add_subplot(4, 2, 3)
    axAccum.set_title('cumulative excess return (%s over %s)'
                      % (window.yQuotes.symbol, window.xQuotes.symbol))
    acc = 0
    accList = []
    for x, y in zip(returnsX.returns, returnsY.returns):
        acc += y - x
        accList.append(acc)
    axAccum.plot(returnsX.dates, accList)
    axAccum.set_xlim((window.dates[0], window.dates[-1]))
    x = [d.toordinal() for d in returnsX.dates]
    m, b, r, p, e = linregress(x, accList)
    axAccum.plot([min(returnsX.dates), max(returnsX.dates)],
                 [m * min(x) + b, m * max(x) + b], 'r')

    pyplot.draw()
    pyplot.show()
def getSpksSortedAligned(
        g, spk_chan, unit_num, sort_by, t_align, pre_align, post_align,
        boxcar_width=50, gauss_sigma=25, trial_order='rt',
        ret_spk_bins=True, limit=None):
    depth = lambda L: isinstance(L, list) and max(map(depth, L)) + 1
    if depth(sort_by) != 1:
        raise RuntimeError("Depth of sort_by list has to be 1")
    # Check length of sort_by; this function only supports 2 or fewer sort variables
    if len(sort_by) > 2:
        raise RuntimeError("Number of sort variables can only be 1 or 2")
    # Select a subset of trials based on a user-provided boolean selector
    if limit is not None:
        g = selectDataStruct(g, limit)
    g[t_align] = np.int64(np.round(np.float64(g[t_align])))
    n_trials = len(g['spk_times'])
    if len(sort_by) == 1:
        insert_dummy = True
        label1_name = 'dummy variable'
        label2_name = sort_by[0]
        sort_by_list = [g[sort_by[0]]]
    elif len(sort_by) == 2:
        insert_dummy = False
        label1_name = sort_by[0]
        label2_name = sort_by[1]
        sort_by_list = [g[sort_by[0]], g[sort_by[1]]]
    if trial_order == 'rt':
        g['t_trial_order'] = g['t_response'] - g['t_dotson']
    elif trial_order == 'duration':
        g['t_trial_order'] = g['t_response'] - g['t_targetson']
    elif trial_order == 'chronological':
        g['t_trial_order'] = g['t_response']
    trialorder_sorted = sortByLists(g['t_trial_order'], sort_by_list)
    trial_duration = post_align - pre_align
    # the psth will be computed aligned on the relevant event
    spk_times_aligned = getSpkTimesAligned(
        g, spk_chan, unit_num, t_align, pre_align, post_align)
    spk_times_aligned_sorted = sortByLists(np.array(spk_times_aligned),
                                           sort_by_list)
    labels = spk_times_aligned_sorted['labels']
    if insert_dummy:
        labels = np.c_[np.repeat(1, labels.shape[0]), labels]
    u_labels = np.unique(labels[:, 0])
    n_u_labels = len(u_labels)
    all_sorted_trial_counts = list()
    all_sorted_timestamps = list()
    all_sorted_spk_times = list()
    all_sorted_spk_counts = list()
    all_sorted_spk_bins = list()
    all_sorted_times = list()
    all_sorted_frs = list()
    all_sorted_psths = list()
    all_sorted_psth_times = list()
    all_sorted_psths_gauss = list()
    all_sorted_psth_times_gauss = list()
    all_sorted_psths_alpha = list()
    all_sorted_psth_times_alpha = list()
    for i in range(n_u_labels):
        u_sublabels = labels[:, 1][labels[:, 0] == u_labels[i]]
        for j in range(len(u_sublabels)):
            sel = np.logical_and(labels[:, 0] == u_labels[i],
                                 labels[:, 1] == u_sublabels[j])
            n_trials = np.array(trialorder_sorted['sorted'])[sel][0].shape[0]
            if n_trials == 0:
                all_sorted_trial_counts.append(0)
                all_sorted_timestamps.append([])
                all_sorted_spk_times.append([])
                all_sorted_spk_counts.append([])
                all_sorted_spk_bins.append([])
                all_sorted_times.append([])
                all_sorted_psth_times.append([])
                all_sorted_psth_times_gauss.append([])
                all_sorted_psth_times_alpha.append([])
                all_sorted_frs.append([])
                all_sorted_psths.append([])
                all_sorted_psths_gauss.append([])
                all_sorted_psths_alpha.append([])
                continue
            cur_sorted_trialorder = np.array(trialorder_sorted['sorted'])[sel]
            cur_sorted_spk_times = np.array(
                spk_times_aligned_sorted['sorted'])[sel]
            # Sort by the desired trial order
            cur_sorted_spk_times = cur_sorted_spk_times[0][
                np.argsort(cur_sorted_trialorder[0])]
            cur_sorted_spk_counts = [len(k) for k in cur_sorted_spk_times]
            cur_sorted_frs = [k / (post_align - pre_align) * 1000
                              for k in cur_sorted_spk_counts]
            # We now create a spk_bin array where the rows are trials,
            # each column is a ms, and 0's and 1's indicate the absence
            # and presence of spikes.
            cur_sorted_spk_bin = np.empty([len(cur_sorted_spk_times),
                                           trial_duration])
            cur_sorted_spk_bin[:] = np.nan
            times = np.int64(np.arange(0, trial_duration, 1) + pre_align)
            for k in range(len(cur_sorted_spk_times)):
                cur_sorted_spk_bin[k, 0:trial_duration] = 0
                cur_sorted_spk_bin[
                    k, np.int64(cur_sorted_spk_times[k] - pre_align)] = 1
            # Regular boxcar smoothing
            smoothed_psth = movavg(sp.stats.nanmean(
                cur_sorted_spk_bin, axis=0) / .001, boxcar_width)
            psth_times = movavg(times, boxcar_width) + .5
            # Compute psth smoothed with a gaussian with gauss_sigma
            x = np.arange(0, gauss_sigma * 6 + 1)
            gaussian_exponent = -0.5 * np.power(
                (x - np.mean(x)) / gauss_sigma, 2)
            gaussian_filter = np.exp(gaussian_exponent) / np.sum(
                np.exp(gaussian_exponent))
            smoothed_psth_gauss = np.correlate(
                sp.stats.nanmean(cur_sorted_spk_bin, axis=0) / .001,
                gaussian_filter, mode='valid')
            psth_times_gauss = times[
                int(gauss_sigma * 6 / 2):int(-gauss_sigma * 6 / 2)]
            # Compute psth smoothed with an alpha-like function
            t = np.arange(250)
            h = (1 - np.exp(-t)) * np.exp(-t / 25.)
            h = h / np.sum(h)
            smoothed_psth_alpha = np.convolve(
                sp.stats.nanmean(cur_sorted_spk_bin, axis=0) / .001,
                h, mode='full')
            smoothed_psth_alpha = smoothed_psth_alpha[
                0:cur_sorted_spk_bin.shape[1]][25:]
            psth_times_alpha = times[0:cur_sorted_spk_bin.shape[1]][25:]
            all_sorted_trial_counts.append(len(cur_sorted_spk_times))
            all_sorted_spk_times.append(cur_sorted_spk_times)
            all_sorted_spk_counts.append(cur_sorted_spk_counts)
            all_sorted_spk_bins.append(cur_sorted_spk_bin)
            all_sorted_times.append(times)
            all_sorted_frs.append(cur_sorted_frs)
            all_sorted_psths.append(smoothed_psth)
            all_sorted_psth_times.append(psth_times)
            all_sorted_psths_gauss.append(smoothed_psth_gauss)
            all_sorted_psth_times_gauss.append(psth_times_gauss)
            all_sorted_psths_alpha.append(smoothed_psth_alpha)
            all_sorted_psth_times_alpha.append(psth_times_alpha)
    out = {}
    out['label1_name'] = label1_name
    out['label2_name'] = label2_name
    out['label1'] = labels[:, 0]
    out['label2'] = labels[:, 1]
    out['trial_counts'] = all_sorted_trial_counts
    out['spk_times'] = all_sorted_spk_times
    out['spk_counts'] = all_sorted_spk_counts
    out['frs'] = all_sorted_frs
    out['psths'] = all_sorted_psths
    out['psth_times'] = all_sorted_psth_times
    out['gauss_psths'] = all_sorted_psths_gauss
    out['gauss_psth_times'] = all_sorted_psth_times_gauss
    out['alpha_psths'] = all_sorted_psths_alpha
    out['alpha_psth_times'] = all_sorted_psth_times_alpha
    if ret_spk_bins:
        out['spk_bins'] = all_sorted_spk_bins
        out['times'] = all_sorted_times
    return out
import matplotlib
matplotlib.rc('text', usetex=True)
matplotlib.rc('text.latex', preamble=r'\usepackage{euler}')
from numpy import *
from matplotlib.mlab import movavg

dta = genfromtxt('tension-test.data')
eps, sig = dta[:, 0], dta[:, 2]
# Smooth sigma with a 5-point moving average and prepend the (0,0) point;
# movavg shortens the array by 4, so trim eps accordingly
eps, sig = concatenate(([0], eps[2:-2])), concatenate(([0], movavg(sig, 5)))

import pylab
pylab.grid()
pylab.plot(eps, sig, label=r'reference')
pylab.plot(eps, .5 * sig, label=r'vertically scaled')
pylab.plot(.5 * eps, .5 * sig, label=r'radially scaled')
pylab.legend()
pylab.xlabel(r'$\varepsilon_N$')
pylab.ylabel(r'$\sigma_N$')
#pylab.show()
pylab.savefig('cpm-scaling.pdf')
def getData(self): '''Finds and processes data returning plot''' #print 'opening data...' if self.variable == 'pdsi' or self.variable == 'scpdsi' or self.variable == 'pzi': filename = os.path.join( WWDTNETCDF_DIR, self.variable, '%s_%s_PRISM.nc' % (self.variable, self.month)) else: filename = os.path.join( WWDTNETCDF_DIR, '%s%s' % (self.variable, self.span), '%s%s_%s_PRISM.nc' % (self.variable, self.span, self.month)) # Open netcdf for data and elevation dataFile = netcdf.netcdf_file(filename, 'r') elevationFile = netcdf.netcdf_file(ELEVATION_DATA, 'r') # Get closest Lat/Lon closestLat = self.Index(dataFile.variables['latitude'], self.lat) closestLon = self.Index(dataFile.variables['longitude'], self.lon) # Get closest Lat/Lon for elevation eclosestLat = self.Index(elevationFile.variables['lat'], self.lat) eclosestLon = self.Index(elevationFile.variables['lon'], self.lon) # Set elevation based on coordinates elevationData = elevationFile.variables['elevation'] elevation = elevationData[eclosestLat, eclosestLon] elevationFile.close() # Set Current dates currentYear = datetime.now().year #currentYear = 2011 currentDay = datetime.now().day currentMonth = datetime.now().month # Open data years = np.arange(self.startYear, self.endYear + 1, 1) data = np.array(dataFile.variables['data'][self.startYear - 1895:(self.endYear - 1894), closestLat, closestLon]) # Force - 9999 to nan for i in range(0, data.size): #print data[i] if data[i] == -9999.0: #print i data[i] = np.nan # Convert Precip to if there are any -9999.00 values to exclude if data selection is for all years if self.variable == 'pon': data = data / 25.4 # Force data values of -9999.0 for nonexistent data if self.month - self.span < 0: noData = (abs(self.month - self.span) / 12) noYear = 0 if noData == 0: data[0] = -9999.0 else: while noYear < noData: data[noYear] = -9999.00 noYear += 1 #if data.mean() == -9999.00: # data = '' # Select earliest possible year/value based on user input v = 0 for value in data: if data[v] == -9999.0: v += 1 else: years = np.arange(self.startYear + v, self.endYear + 1, 1) data = np.array( dataFile.variables['data'][(self.startYear - 1895) + v:(self.endYear - 1894), closestLat, closestLon]) value += 1 # Force - 9999 to nan for i in range(0, data.size): #print data[i] if data[i] == -9999.0: #print i data[i] = np.nan # Convert C to F if self.variable == 'mdn': data = ((data * 9.0 / 5) + 32) # Convert Precip to inches if self.variable == 'pon': data = data / 25.4 # Set normal range 1981-2010 normal_range = np.array(dataFile.variables['data'][86:116, closestLat, closestLon]) normal = normal_range.mean() #print normal # Convert precip to correct format if self.variable == 'pon': normal = normal / 25.4 # Convert temperature from C to F if self.variable == 'mdn': normal = ((normal * 9.0 / 5) + 32) # Raise error if data is all -9999.0 if normal == -9999.0: normal = np.nan # No time scale is needed for drought indices if self.variable == 'pdsi' or self.variable == 'scpdsi' or self.variable == 'pzi' or self.variable == 'spi' or self.variable == 'spei': normal = 0 # Set distance from normal variable inv_data = data - normal # Create a figure for plots fig = Figure(figsize=(11, 8), facecolor='w') ax = fig.add_axes([0.08, 0.15, .90, 0.78]) # Used to set month name in plots based on month index monthList = [ 'January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December' ] # Setup plots based on Variable if self.variable == 'pdsi': self.span = 1 ax.set_title( u'Palmer 
    if self.variable == 'pdsi':
        self.span = 1
        ax.set_title(u'Palmer Drought Severity Index, %s \n %4.2f\u00b0N, %4.2f\u00b0W, Elevation: %4.2f Meters'
                     % (monthList[self.month - 1], self.lat, abs(self.lon), elevation))
        ax.set_ylabel("PDSI")
        topColor, bottomColor = 'green', 'gold'
    if self.variable == 'scpdsi':
        self.span = 1
        ax.set_title(u'Self Calibrated Palmer Drought Severity Index, %s \n %4.2f\u00b0N, %4.2f\u00b0W, Elevation: %4.2f Meters'
                     % (monthList[self.month - 1], self.lat, abs(self.lon), elevation))
        ax.set_ylabel("SCPDSI")
        topColor, bottomColor = 'green', 'gold'
    if self.variable == 'pzi':
        self.span = 1
        ax.set_title(u'Palmer Z-Index, %s-Months Ending in %s \n %4.2f\u00b0N, %4.2f\u00b0W, Elevation: %4.2f Meters'
                     % (self.span, monthList[self.month - 1], self.lat, abs(self.lon), elevation))
        ax.set_ylabel("PZI")
        topColor, bottomColor = 'green', 'gold'
    if self.variable == 'mdn':
        if self.span == 1:
            ax.set_title(u'Mean Temperature, %s \n %4.2f\u00b0N, %4.2f\u00b0W, Elevation: %4.2f Meters'
                         % (monthList[self.month - 1], self.lat, abs(self.lon), elevation))
        else:
            ax.set_title(u'Mean Temperature, %s-Months Ending in %s \n %4.2f\u00b0N, %4.2f\u00b0W, Elevation: %4.2f Meters'
                         % (self.span, monthList[self.month - 1], self.lat, abs(self.lon), elevation))
        ax.set_ylabel(u"Temperature \u00b0F")
        topColor, bottomColor = 'red', 'blue'
    if self.variable == 'spi':
        if self.span == 1:
            ax.set_title(u'Standardized Precipitation Index, %s \n %4.2f\u00b0N, %4.2f\u00b0W, Elevation: %4.2f Meters'
                         % (monthList[self.month - 1], self.lat, abs(self.lon), elevation))
        else:
            ax.set_title(u'Standardized Precipitation Index, %s-Months Ending in %s \n %4.2f\u00b0N, %4.2f\u00b0W, Elevation: %4.2f Meters'
                         % (self.span, monthList[self.month - 1], self.lat, abs(self.lon), elevation))
        ax.set_ylabel(u"SPI")
        topColor, bottomColor = 'blue', 'red'
    if self.variable == 'spei':
        if self.span == 1:
            ax.set_title(u'Standardized Precipitation-Evapotranspiration Index, %s \n %4.2f\u00b0N, %4.2f\u00b0W, Elevation: %4.2f Meters'
                         % (monthList[self.month - 1], self.lat, abs(self.lon), elevation),
                         fontsize=15)
        else:
            ax.set_title(u'Standardized Precipitation-Evapotranspiration Index, %s-Months Ending in %s \n %4.2f\u00b0N, %4.2f\u00b0W, Elevation: %4.2f Meters'
                         % (self.span, monthList[self.month - 1], self.lat, abs(self.lon), elevation),
                         fontsize=15)
        ax.set_ylabel(u"SPEI")
        topColor, bottomColor = 'blue', 'red'
    if self.variable == 'pon':
        if self.span == 1:
            ax.set_title(u'Precipitation, %s \n %4.2f\u00b0N, %4.2f\u00b0W, Elevation: %4.2f Meters'
                         % (monthList[self.month - 1], self.lat, abs(self.lon), elevation))
        else:
            ax.set_title(u'Precipitation, %s-Months Ending in %s \n %4.2f\u00b0N, %4.2f\u00b0W, Elevation: %4.2f Meters'
                         % (self.span, monthList[self.month - 1], self.lat, abs(self.lon), elevation))
        ax.set_ylabel("Inches")
        ax.set_ybound(np.nanmax(data))  # nanmax so missing values cannot poison the bound
        ax.axhline(y=normal, color="black", label='Normal Period: 1981-2010')
        if np.nanmax(data) <= 10:
            ax.yaxis.set_major_locator(MultipleLocator(1))
            ax.yaxis.set_minor_locator(MultipleLocator(.1))
        else:
            ax.yaxis.set_major_locator(MultipleLocator(10))
            ax.yaxis.set_minor_locator(MultipleLocator(1))
        ax.xaxis.set_major_locator(MultipleLocator(10))
        ax.xaxis.set_minor_locator(MultipleLocator(1))
        ax.bar(years, data, color='green', align="center")
        # Optional centered running average; the slices align the shortened
        # movavg output with the year axis (see the sketch after this function)
        if not self.runavg:
            pass
        elif self.runavg % 2 == 0:
            ma = movavg(data, self.runavg)
            ax.plot(years[(self.runavg // 2 - 1):-(self.runavg // 2)], ma,
                    color='#FF9900', linewidth=2,
                    label="%d Year Average" % self.runavg)
        else:
            ma = movavg(data, self.runavg)
            ax.plot(years[self.runavg // 2:-(self.runavg // 2)], ma,
                    color='#FF9900', linewidth=2,
                    label="%d Year Average" % self.runavg)

    # Set up the plot for non percent-of-normal variables
    if self.variable != 'pon':
        # Drought indices are plotted about zero, so the normal-period
        # label is omitted for them
        if self.variable in ('pdsi', 'scpdsi', 'pzi', 'spi'):
            ax.axhline(y=normal, color="black")
        else:
            ax.axhline(y=normal, color="black", label='Normal Period: 1981-2010')
        ax.yaxis.set_major_locator(MultipleLocator(1))
        ax.yaxis.set_minor_locator(MultipleLocator(.1))
        ax.xaxis.set_major_locator(MultipleLocator(10))
        ax.xaxis.set_minor_locator(MultipleLocator(1))

        # Plot stacked bars above and below the normal line
        ax.bar(years[data < normal], inv_data[data < normal],
               color=bottomColor, align="center", bottom=normal)
        ax.bar(years[data >= normal], inv_data[data >= normal],
               color=topColor, align="center", bottom=normal)

        # Optional centered running average
        if self.variable != 'mdn':
            if not self.runavg:
                pass
            elif self.runavg % 2 == 0:
                ma = movavg(inv_data, self.runavg)
                ax.plot(years[(self.runavg // 2 - 1):-(self.runavg // 2)], ma,
                        color='black', linewidth=2,
                        label="%d Year Average" % self.runavg)
            else:
                ma = movavg(inv_data, self.runavg)
                ax.plot(years[self.runavg // 2:-(self.runavg // 2)], ma,
                        color='black', linewidth=2,
                        label="%d Year Average" % self.runavg)
        else:
            # Add the normal back in to place the running average
            # correctly for temperature data
            if not self.runavg:
                pass
            elif self.runavg % 2 == 0:
                ma = movavg(inv_data + normal, self.runavg)
                ax.plot(years[(self.runavg // 2 - 1):-(self.runavg // 2)], ma,
                        color='black', linewidth=2,
                        label="%d Year Average" % self.runavg)
            else:
                ma = movavg(inv_data + normal, self.runavg)
                ax.plot(years[self.runavg // 2:-(self.runavg // 2)], ma,
                        color='black', linewidth=2,
                        label="%d Year Average" % self.runavg)

        # Determine the y-axis limit: the larger of the absolute extremes
        # (nanmax guards against missing values)
        if np.nanmax(np.abs(inv_data)) >= np.nanmax(np.abs(data)):
            y_limit = np.nanmax(np.abs(inv_data))
        else:
            y_limit = np.nanmax(np.abs(data))

        # Complete the y-axis: symmetric about zero, or about the normal
        # for temperature
        if self.variable != 'mdn':
            y_min, y_max = -y_limit, y_limit
        else:
            y_min, y_max = (normal - np.nanmax(np.abs(inv_data)),
                            normal + np.nanmax(np.abs(inv_data)))

        # Uncrowd the y-axis if it spans 13 units or more; applies to all
        # variables (indent one level to apply only to mdn)
        if y_max - y_min >= 13:
            ax.yaxis.set_major_locator(MultipleLocator(2))
            ax.yaxis.set_minor_locator(MultipleLocator(.5))
        ax.set_ybound(y_min, y_max)

    # Rescale the x-axis when the request covers ten years or fewer
    if self.endYear - self.startYear <= 10:
        ax.xaxis.set_major_formatter(ScalarFormatter(useOffset=False))
        ax.xaxis.set_major_locator(MultipleLocator(1))
    else:
        ax.xaxis.grid()

    # Set font properties for the legend
    fontProperties = FontProperties()
    fontProperties.set_size('small')
    ax.legend(loc=(0, -0.150), fancybox=True, prop=fontProperties)

    # Branding: data source and zero-padded creation date
    ax.set_xlabel("Data Source: WRCC/UI, Created: %s-%02d-%s"
                  % (currentMonth, currentDay, currentYear))
    ax.xaxis.set_label_coords(0.78, -.122, transform=None)

    # Fix the x-axis bounds
    ax.set_autoscale_on(False)
    ax.set_xbound(years[0] - 1, years[-1] + 1)

    dataFile.close()
    return fig
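# A quick alignment check for the centered running averages above (a minimal
# sketch, not from the original source). movavg(x, w) returns
# len(x) - w + 1 points, so getData() drops w//2 years from each end for an
# odd window, and w//2 - 1 from the left plus w//2 from the right for an
# even one. Note movavg was removed from modern matplotlib;
# np.convolve(x, np.ones(w)/w, 'valid') is the drop-in equivalent.
import numpy as np
from matplotlib.mlab import movavg

years = np.arange(1990, 2000)       # 10 plotting positions
x = np.arange(10, dtype=float)      # toy data
for w in (4, 5):
    ma = movavg(x, w)               # length 10 - w + 1
    if w % 2 == 0:
        aligned = years[w // 2 - 1:-(w // 2)]
    else:
        aligned = years[w // 2:-(w // 2)]
    assert len(aligned) == len(ma)  # both branches line up year-for-year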
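# Usage sketch, under stated assumptions (not from the original source):
# getData() relies on a self.Index helper that is not shown, and it returns
# a bare Figure that needs a canvas before it can be rendered. The
# nearest_index helper and the 'timeseries.png' path below are hypothetical
# stand-ins.
import numpy as np
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg

def nearest_index(grid, target):
    '''Index of the grid value closest to target; a plausible
    implementation of the self.Index helper used in getData().'''
    return int(np.abs(np.asarray(grid) - target).argmin())

lats = np.arange(24.0, 50.0, 0.5)             # hypothetical 0.5-degree grid
row = nearest_index(lats, 40.75)              # grid row nearest 40.75 N

fig = Figure(figsize=(11, 8), facecolor='w')  # stands in for getData()'s figure
FigureCanvasAgg(fig)                          # attach an Agg canvas so it can draw
fig.savefig('timeseries.png')                 # hypothetical output path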