def stratBallot(cls, voter):
    # frontId, targId, frontResult, targResult, toVote and self are assumed to be
    # provided by the enclosing scope.
    frontUtils = [voter[frontId], voter[targId]]  # utils of frontrunners
    stratGap = frontUtils[1] - frontUtils[0]
    if stratGap == 0:
        strat = extraStrat = [(4 if (util >= frontUtils[0]) else 0)
                              for util in voter]
        isStrat = True
    else:
        if stratGap < 0:  # winner is preferred; be complacent.
            isStrat = False
        else:  # runner-up is preferred; be strategic in this run
            isStrat = True
            # sort cuts high to low
            frontUtils = (frontUtils[1], frontUtils[0])
        top = max(voter)
        # print("lll312")
        # print(self.baseCuts, front)
        cutoffs = [((min(frontUtils[0], self.baseCuts[i]))
                    if (i < floor(targResult)) else
                    ((frontUtils[1])
                     if (i < floor(frontResult) + 1) else
                     min(top, self.baseCuts[i])))
                   for i in range(len(self.baseCuts))]
        strat = [toVote(cutoffs, util) for util in voter]
        extraStrat = [max(0, min(10, floor(
            4.99 * (util - frontUtils[1]) / (frontUtils[0] - frontUtils[1]))))
            for util in voter]
    return dict(strat=strat, extraStrat=extraStrat,
                isStrat=isStrat, stratGap=stratGap)
def count_histogram_for_bin(positions, assignments, im_width, im_height,
                            num_bins, i, j, num_words):
    xmin = floor(im_width / float(num_bins) * i)
    xmax = floor(im_width / float(num_bins) * (i + 1))
    ymin = floor(im_height / float(num_bins) * j)
    ymax = floor(im_height / float(num_bins) * (j + 1))
    indices = get_indices_for_pos(positions, xmin, xmax, ymin, ymax)
    return count_histogram(indices, assignments, num_words)
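# A minimal standalone sketch (not part of the original code) showing how the
# floor-based bounds in count_histogram_for_bin partition an image into a grid
# of spatial bins; the 100x60 image size and 4x4 grid are hypothetical.
from math import floor

im_width, im_height, num_bins = 100, 60, 4
x_bounds = [floor(im_width / float(num_bins) * i) for i in range(num_bins + 1)]
y_bounds = [floor(im_height / float(num_bins) * j) for j in range(num_bins + 1)]
print(x_bounds)  # [0, 25, 50, 75, 100]
print(y_bounds)  # [0, 15, 30, 45, 60]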
def __call__(self, X, n=None, x0=None):
    '''
    Generates n seed points for the lpc algorithm.
    X, 2-dimensional [#points, #dimension of points] array containing the data
      for which the local density modes are to be calculated
    n, required number of seed points; if n = None, returns exactly the local
      density modes, otherwise lpcRandomStartPoints is called with x0 equal to
      the local density modes (local density modes are the cluster centers)
    x0, 2-dimensional array with #rows equal to the number of explicitly defined
      mean shift seed points and #columns equal to the dimension of the
      individual data points (called number of features in the MeanShift docs)
    Returns the lpc seed points as a 2-dimensional
      [#seed points, #dimension of seed points] array
    '''
    self._Xi = X
    if x0 is None:
        N = self._Xi.shape[0]
        ms_sub = float(self._lpcParameters['ms_sub'])
        # guarantees ms_sub <= ms_sub% of N <= 10 * ms_sub seed points
        # (could give the option of using seed point binning in MeanShift)
        Nsub = int(min(max(ms_sub, floor(ms_sub * N / 100)), 10 * ms_sub))
        ms_seeds = self._Xi[sample(xrange(0, N), Nsub), :]
    else:
        ms_seeds = x0
    self._meanShift.seeds = ms_seeds
    self._meanShift.fit(self._Xi)
    cluster_respresentatives = self._removeNonTracklikeClusterCenters()
    if len(cluster_respresentatives) == 0:
        cluster_respresentatives = None
    lpcRSP = lpcRandomStartPoints()
    if n is None:
        return lpcRSP(self._Xi, n=2, x0=cluster_respresentatives)
    else:
        return lpcRSP(self._Xi, n=n, x0=cluster_respresentatives)
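# A minimal sketch (separate from the original class; clamp_num_seeds is a
# hypothetical helper name) illustrating the clamping rule used above for the
# number of mean-shift seed points: ms_sub <= Nsub <= 10 * ms_sub, with Nsub
# roughly ms_sub percent of N in between.
from math import floor

def clamp_num_seeds(N, ms_sub=30):
    return int(min(max(ms_sub, floor(ms_sub * N / 100.0)), 10 * ms_sub))

print(clamp_num_seeds(50))      # 30     (small N: lower bound ms_sub)
print(clamp_num_seeds(1000))    # 300    (mid range: ms_sub percent of N)
print(clamp_num_seeds(10**6))   # 300    (large N: capped at 10 * ms_sub)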
def plot_fscores(labels, series):
    length = max(list(map(len, series)))
    fig = plt.figure()
    ax = fig.gca()
    ax.set_xticks(np.arange(0, float(length), 1))
    ymin = min(list(map(min, series)))
    ymax = max(list(map(max, series)))
    ymin = floor(ymin * 10) / 10
    ymax = ceil(ymax * 10) / 10
    ax.set_yticks(np.arange(ymin, ymax, 0.1))
    plt.axis([0, length - 1, ymin, ymax])
    fontProperties = {'family': 'sans-serif',
                      'sans-serif': ['Helvetica'],
                      'weight': 'normal', 'size': 20}
    rc('text', usetex=True)
    rc('font', **fontProperties)
    ax.set_xticklabels(
        [r'$\frac{%d}{%d}$' % (i + 1, length - i) for i in range(length)],
        fontProperties)
    plt.grid()
    for i, [l, s] in enumerate(zip(labels, series)):
        c = CVALUE[COLORS[i]]
        plt.plot(list(range(len(s))), s, '-', marker=MARKERS[i], color=c,
                 linewidth=2.5, markersize=12, fillstyle='full', label=l)
    plt.legend(loc="best")
    plt.ylabel(r'$F_1$')
    plt.xlabel(r'$k$')
def plot_KLDiv_with_logscale(series):
    length = len(series)
    fig = plt.figure()
    ax = fig.gca()
    ax.set_xticks(np.arange(0, float(length), 1))
    ymin = min(series)
    ymax = max(series)
    ymin = floor(ymin * 10) / 10
    ymax = ceil(ymax * 10) / 10
    ax.set_yticks(np.arange(ymin, ymax, 0.1))
    plt.axis([0, length - 1, ymin, ymax])
    fontProperties = {'family': 'sans-serif',
                      'sans-serif': ['Helvetica'],
                      'weight': 'normal', 'size': 20}
    rc('text', usetex=True)
    rc('font', **fontProperties)
    # ax.set_xticklabels([r'$\frac{%d}{%d}$' % (i + 1, length - i)
    #                     for i in range(length)], fontProperties)
    plt.grid()
    a = plt.axes()
    # plt.axis([0, length - 1, ymin, ymax])
    plt.yscale('log')
    c = CVALUE[COLORS[0]]
    m = MARKERS[0]
    plt.plot(list(range(len(series))), series, '-', marker=m, color=c,
             linewidth=2.5, markersize=12, fillstyle='full', label='Label')
    c = CVALUE[COLORS[1]]
    m = MARKERS[1]
    plt.plot(list(range(len(series))), series, '-', marker=m, color=c,
             linewidth=2.5, markersize=12, fillstyle='full', label='Label')
    plt.legend(loc="best")
    plt.ylabel(r'$F_1$')
    plt.xlabel(r'$k$')
def generateInt(self, k):
    '''generate k numbers'''
    T = [0] * k
    for i in range(k):
        U = uniform(low=0, high=1)
        X = log(U) / log(1 - self.p)
        T[int(floor(X))] = T[int(floor(X))] + 1
    return T

#A = Geometric(0.1)
#C = A.showIntGenAndCount(A.generateInt(100))
#P = A.probabilityChart(C)
#A = PlotHist()
#A.plotHistgram("Generator dwumianowy", C, P, 1, 'ilosc', 'liczby')
#A.showHistogram()
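# A minimal standalone sketch (separate from the class above; geometric_counts is
# a hypothetical name) of the same inverse-CDF idea: for U ~ Uniform(0, 1),
# floor(log(U) / log(1 - p)) is geometrically distributed. A bounds check is
# added so values beyond the first k bins are ignored rather than raising an
# IndexError, which the class above does not guard against.
from math import floor, log
from random import random

def geometric_counts(p, k, n_samples):
    counts = [0] * k
    for _ in range(n_samples):
        u = 1.0 - random()          # in (0, 1], avoids log(0)
        x = int(floor(log(u) / log(1.0 - p)))
        if x < k:                   # guard: the class above indexes T[x] directly
            counts[x] += 1
    return counts

print(geometric_counts(0.3, 10, 1000))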
def honBallot(cls, utils):
    """Takes utilities and returns an honest ballot (on 0..10)

    honest ballots work as expected
        >>> Score().honBallot(Score, Voter([5,6,7]))
        [0.0, 5.0, 10.0]
        >>> Score().resultsFor(DeterministicModel(3)(5,3), Score().honBallot)["results"]
        [4.0, 6.0, 5.0]
    """
    bot = min(utils)
    scale = max(utils) - bot
    return [floor((cls.topRank + .99) * (util - bot) / scale)
            for util in utils]
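# A minimal standalone check (assumption: topRank = 10, matching the 0..10 scale
# in the docstring; hon_scores is a hypothetical name) of the normalization used
# by honBallot above: utilities are shifted to start at 0, scaled to the top
# rank, and floored.
from math import floor

def hon_scores(utils, top_rank=10):
    bot = min(utils)
    scale = max(utils) - bot
    return [floor((top_rank + .99) * (u - bot) / scale) for u in utils]

print(hon_scores([5, 6, 7]))  # [0, 5, 10], the values in the doctest above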
def fillStratBallot(cls, voter, polls, places, n, stratGap, ballot,
                    frontId, frontResult, targId, targResult):
    """Fills `ballot` in place with a strategic ballot for the given
    "polling" info."""
    cuts = [voter[frontId], voter[targId]]
    if stratGap > 0:
        # sort cuts high to low
        cuts = (cuts[1], cuts[0])
    if cuts[0] == cuts[1]:
        strat = [(cls.topRank if (util >= cuts[0]) else 0) for util in voter]
    else:
        strat = [max(0, min(cls.topRank, floor(
            (cls.topRank + .99) * (util - cuts[1]) / (cuts[0] - cuts[1]))))
            for util in voter]
    for i in range(n):
        ballot[i] = strat[i]
def stratBallot(cls, voter):
    # places is assumed to be provided by the enclosing scope
    # (poll results sorted by rank).
    cuts = [voter[places[0][0]], voter[places[1][0]]]
    stratGap = cuts[1] - cuts[0]
    if stratGap <= 0:  # winner is preferred; be complacent.
        isStrat = False
    else:  # runner-up is preferred; be strategic in this run
        isStrat = True
        # sort cuts high to low
        cuts = (cuts[1], cuts[0])
    if cuts[0] == cuts[1]:
        strat = [(cls.topRank if (util >= cuts[0]) else 0) for util in voter]
    else:
        strat = [max(0, min(cls.topRank, floor(
            (cls.topRank + .99) * (util - cuts[1]) / (cuts[0] - cuts[1]))))
            for util in voter]
    return dict(strat=strat, isStrat=isStrat, stratGap=stratGap)
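# A minimal standalone illustration (assumption: topRank = 10; strat_scores and
# the utilities below are hypothetical) of the strategic rescaling above:
# utilities are mapped linearly so the less-preferred frontrunner lands at 0 and
# the preferred one at topRank, then clipped to the 0..10 range.
from math import floor

def strat_scores(utils, hi, lo, top_rank=10):
    return [max(0, min(top_rank,
                       floor((top_rank + .99) * (u - lo) / (hi - lo))))
            for u in utils]

# hypothetical utilities; frontrunner utilities hi=6 (preferred), lo=2
print(strat_scores([1, 2, 4, 6, 9], hi=6, lo=2))  # [0, 0, 5, 10, 10]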
def test_changing_the_shape_of_an_array(self):
    a = floor(10 * random.random((3, 4)))
    a = array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
    numpy.testing.assert_array_equal(
        a.ravel(), array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]))
    a.shape = (6, 2)
    numpy.testing.assert_array_equal(
        a, array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]]))
    numpy.testing.assert_array_equal(
        a.transpose(),
        array([[1, 3, 5, 7, 9, 11], [2, 4, 6, 8, 10, 12]]))
    a = a.reshape(2, 6)
    numpy.testing.assert_array_equal(
        a, array([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]))
    a = a.reshape(3, -1)
    numpy.testing.assert_array_equal(
        a, array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]))
from matplotlib import pyplot
from scipy import ndimage
import numpy
from numpy.ma.core import floor
from string import replace
from scipy.misc.pilutil import imsave
import os

print('********************************************************************************')
print('* Changing the intensity levels of an image using SciPy, NumPy and MatPlotLib *')
print('********************************************************************************')

import sys
sys.path.append('../utils')
import userinput

fpath = userinput.get_img_path()
img_array = userinput.get_gray_img(fpath)

# builds new images...
intensities = [2, 4, 8, 16, 32, 64, 128, 255]
for intensity in intensities:
    print('building new image using an intensity level of \'' + str(intensity) + '\'...')
    new_img_array = floor(numpy.array(img_array) / intensity) * intensity
    new_fpath = '%s/%i-intensity-%s' % (os.path.dirname(fpath), intensity,
                                        os.path.basename(fpath))
    print('saving new image to \'' + new_fpath + '\'...')
    imsave(new_fpath, new_img_array)

print('********************************************************************************')
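# A minimal sketch (standalone, not tied to the script's file I/O; the pixel
# values are hypothetical) of the quantization step above: floor-dividing by the
# intensity level and multiplying back snaps every pixel to a multiple of that
# level.
import numpy as np

pixels = np.array([[3, 17, 64], [100, 129, 255]])
level = 32
print(np.floor(pixels / level) * level)
# [[  0.   0.  64.]
#  [ 96. 128. 224.]]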
import os
import logging
from math import floor

import tierAliases  # project-local module (assumed) providing tier level constants


class ParametersAlgo(object):
    ALPHA = 0.97

    FOR_JINGJU = 0
    FOR_MAKAM = 0

    OBS_MODEL = 'GMM'
    OBS_MODEL = 'MLP'
    OBS_MODEL = 'MLP_fuzzy'

    EVAL_LEVEL = tierAliases.words  # eval level phonemes does not work
    # EVAL_LEVEL = tierAliases.pinyin  # in Jingju the only level is syllable

    # use duration-based decoding (HMMDuration package) or just plain viterbi (HMM package)
    # if false, use transition probabilities from htkModels
    WITH_DURATIONS = 1

    USE_PERSISTENT_PPGs = 0

    # level into which to segment the decoded result stateNetwork
    # DETECTION_TOKEN_LEVEL = 'syllables'
    DETECTION_TOKEN_LEVEL = 'words'
    # DETECTION_TOKEN_LEVEL = 'phonemes'

    Q_WEIGHT_TRANSITION = 3.5

    DECODE_WITH_HTK = 0

    GLOBAL_WAIT_PROB = 0.9

    THRESHOLD_PEAKS = -70

    DEVIATION_IN_SEC = 0.1

    # unit: num frames
    NUMFRAMESPERSECOND = 100

    # same as WINDOWSIZE in wavconfig singing. unit: seconds. TODO: read from there automatically
    WINDOW_SIZE = 0.025

    # in frames
    ONLY_MIDDLE_STATE = 1

    WITH_SHORT_PAUSES = 0

    # padded: a short pause state at beginning and end of sequence
    WITH_PADDED_SILENCE = 0

    # no feature vectors at all. all observation probs. set to 1
    # WITH_ORACLE_PHONEMES = -1
    WITH_ORACLE_PHONEMES = 0

    PATH_TO_HCOPY = '/usr/local/bin/HCopy'
    PATH_TO_HVITE = '/usr/local/bin/HVite'
    # On kora.s.upf.edu
    # PATH_TO_HCOPY = '/homedtic/georgid/htkBuilt/bin/HCopy'

    projDir = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                           os.path.pardir))
    PATH_TO_CONFIG_FILES = projDir + '/models_makam/input_files/'

    parentDir = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                             os.path.pardir))
    MODELS_DIR = os.path.join(parentDir, 'models_jingju/' + '3' + 'folds/')

    POLYPHONIC = 1

    WITH_ORACLE_ONSETS = -1  ### no onsets at all.
    # WITH_ORACLE_ONSETS = -1

    # Sigma of onset smoothing function g: normal distribution
    ONSET_SIGMA = 0.075
    # ONSET_SIGMA = 0.15

    ONSET_SIGMA_IN_FRAMES = int(floor(ONSET_SIGMA * NUMFRAMESPERSECOND))
    if ONSET_SIGMA_IN_FRAMES % 2 == 0:
        ONSET_SIGMA_IN_FRAMES += 1

    # ONSET_TOLERANCE_WINDOW = 0.02  # seconds. to work, implement decoding with one onset only
    ONSET_TOLERANCE_WINDOW = 0  # seconds

    # in _ContinousHMM.b_map cut probabilities
    CUTOFF_BIN_OBS_PROBS = 30

    # for for_jingju
    CONSONANT_DURATION_IN_SEC = 0.3
    # for for_makam
    # CONSONANT_DURATION_IN_SEC = 0.1

    CONSONANT_DURATION = NUMFRAMESPERSECOND * CONSONANT_DURATION_IN_SEC
    CONSONANT_DURATION_DEVIATION = 0.7

    #####
    LOGGING_LEVEL = logging.INFO

    VISUALIZE = 0

    ANNOTATION_RULES_ONSETS_EXT = 'annotationOnsets.txt'
    # use this one to get a better impression of recall, compared to annotationOnsets.txt,
    # which contains only note onsets with rules of interest
    ANNOTATION_SCORE_ONSETS_EXT = 'alignedNotes.txt'

    WRITE_TO_FILE = True
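# A minimal standalone sketch (sigma_in_frames is a hypothetical name) of the
# onset-sigma rounding above: the smoothing window length in frames is floored
# and then bumped to the next odd number so the window has a well-defined
# centre frame.
from math import floor

def sigma_in_frames(onset_sigma, frames_per_second=100):
    n = int(floor(onset_sigma * frames_per_second))
    if n % 2 == 0:
        n += 1
    return n

print(sigma_in_frames(0.075))  # 7
print(sigma_in_frames(0.15))   # 15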
image_data_raw = image_generator.GetBGR24ImageMapRaw()
depth_data_raw = depth_generator.GetGrayscale16DepthMapRaw()

cv.SetData(current_depth_frame, depth_data_raw)
cv.Convert(current_depth_frame, for_thresh)
cv.SetData(current_image_frame, image_data_raw)

# initialize matrices for drawing and start timing
t0 = time.time()
cv.SetZero(hist_img)
cv.SetZero(out)
cv.SetZero(contours)

# compute and smooth histogram
depth = np.asarray(current_depth_frame)
hist, bins = np.histogram(depth, n_bins, range=(min_range, max_range),
                          normed=False)
hist_half = floor(n_bins / 2)
hist[:hist_half] = np.convolve(hist[:hist_half],
                               np.ones(k_width) / k_width, 'same')
hist[hist_half:] = np.convolve(hist[hist_half:],
                               np.ones(k_width2) / k_width2, 'same')
max_hist = np.max(hist)
timing['t_histo'] += time.time() - t0

# histogram clustering
start, end = 0, 0
c = 1
conts_list = []
for i in range(len(hist) - 1):
    cur_value = hist[i]
    next_value = hist[i + 1]
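# A minimal sketch (hypothetical histogram and kernel widths) of the two-kernel
# smoothing above: the near half of the depth histogram is box-filtered with a
# narrow kernel and the far half with a wider one, using 'same' so lengths are
# preserved.
import numpy as np

hist = np.random.randint(0, 50, size=64).astype(float)
k_width, k_width2 = 3, 7
half = len(hist) // 2
hist[:half] = np.convolve(hist[:half], np.ones(k_width) / k_width, 'same')
hist[half:] = np.convolve(hist[half:], np.ones(k_width2) / k_width2, 'same')
print(hist.max())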
def find_bursts(duration, dt, transient, N, M_t, M_i, max_freq):
    base = 2  # round lgbinwidth to nearest 2 so it will always divide into durations
    expnum = 2.0264 * exp(-0.2656 * max_freq + 2.9288) + 5.7907
    lgbinwidth = (int(base * round((-max_freq + 33) / base))) * ms  # 23 is good for higher-freq stuff
    #lgbinwidth=(int(base*round((expnum)/base)))/1000  #use exptl based on some fit of choice binwidths
    #lgbinwidth=10*ms

    numlgbins = int(ceil(duration / lgbinwidth))
    #totspkhist=zeros((numlgbins,1))
    totspkhist = zeros(numlgbins)
    #totspkdist_smooth=zeros((numlgbins,1))
    skiptime = transient * ms
    skipbin = int(ceil(skiptime / lgbinwidth))

    inc_past_thresh = []
    dec_past_thresh = []

    #Create histogram given the bins calculated
    for i in xrange(numlgbins):
        step_start = (i) * lgbinwidth
        step_end = (i + 1) * lgbinwidth
        totspkhist[i] = len(M_i[logical_and(M_t > step_start, M_t < step_end)])

    ###smooth plot first so thresholds work better
    #totspkhist_1D=reshape(totspkhist,len(totspkhist))  #first just reshape so single row not single colm
    #b,a=butter(3,0.4,'low')
    #totspkhist_smooth=filtfilt(b,a,totspkhist_1D)
    #totspkhist_smooth=reshape(totspkhist,len(totspkhist))  #here we took out the actual smoothing and left it as raw distn. here just reshape so single row not single colm
    totspkdist_smooth = totspkhist / max(totspkhist[skipbin:])  #create distn based on hist, but skip first skiptime to cut out transient excessive spiking

    # ####### FOR MOVING THRESHOLD #################
    ## find points where increases and decreases over some threshold
    dist_thresh = []
    thresh_plot = []

    mul_fac = 0.35
    switch = 0  #keeps track of whether inc or dec last
    elim_noise = 1 / (max_freq * 2.5 * Hz)
    #For line 95, somehow not required in previous version?
    #elim_noise_units = 1/(max_freq*Hz*2.5)

    thresh_time = 5 / (max_freq)  #capture 5 cycles
    thresh_ind = int(floor((thresh_time / lgbinwidth) / 2))  #the number of indices on each side of the window

    #dist_thresh moves with the window, capturing approx 5 cycles (need special cases for borders).
    #Find where it increases and decreases past threshold (as long as crossings are a certain
    #distance apart, based on "elim_noise", which is based on the avg freq of bursts).
    dist_thresh.append(totspkdist_smooth[skipbin:skipbin + thresh_ind].mean(0) +
                       mul_fac * totspkdist_smooth[skipbin:skipbin + thresh_ind].std(0))

    for i in xrange(1, numlgbins):
        step_start = (i) * lgbinwidth
        step_end = (i + 1) * lgbinwidth

        #moving threshold
        if i > (skipbin + thresh_ind) and (i + thresh_ind) < len(totspkdist_smooth):
            #print(totspkdist_smooth[i-thresh_ind:i+thresh_ind])
            dist_thresh.append(totspkdist_smooth[i - thresh_ind:i + thresh_ind].mean(0) +
                               mul_fac * totspkdist_smooth[i - thresh_ind:i + thresh_ind].std(0))
        elif (i + thresh_ind) >= len(totspkdist_smooth):
            dist_thresh.append(totspkdist_smooth[-thresh_ind:].mean(0) +
                               mul_fac * totspkdist_smooth[-thresh_ind:].std(0))
        else:
            dist_thresh.append(totspkdist_smooth[skipbin:skipbin + thresh_ind].mean(0) +
                               mul_fac * totspkdist_smooth[skipbin:skipbin + thresh_ind].std(0))

        if (totspkdist_smooth[i - 1] < dist_thresh[i]) and (totspkdist_smooth[i] >= dist_thresh[i]):
            #inc_past_thresh.append(step_start-0.5*lgbinwidth)
            if (inc_past_thresh):  #there has already been at least one inc
                if (abs(inc_past_thresh[-1] - (step_start - 0.5 * lgbinwidth)) > elim_noise) and switch == 0:
                    #must be at least x ms apart (y Hz), and it was dec last
                    inc_past_thresh.append(step_start - 0.5 * lgbinwidth)
                    #take lower point (therefore first) when increasing. Need to subtract 0.5*lgbinwidth
                    #to adjust for the shift between the index of the bin width and the index of the bin distn
                    #print (['incr=%f'%inc_past_thresh[-1]])
                    thresh_plot.append(dist_thresh[i])
                    switch = 1
            else:
                inc_past_thresh.append(step_start - 0.5 * lgbinwidth)
                #take lower point (therefore first) when increasing. Need to subtract 0.5*lgbinwidth
                #to adjust for the shift between the index of the bin width and the index of the bin distn
                thresh_plot.append(dist_thresh[i])
                switch = 1  #keeps track of that it was inc. last
        elif (totspkdist_smooth[i - 1] >= dist_thresh[i]) and (totspkdist_smooth[i] < dist_thresh[i]):
            # dec_past_thresh.append(step_end-0.5*lgbinwidth)  #take lower point (therefore second) when decreasing
            if (dec_past_thresh):  #there has already been at least one dec
                if (abs(dec_past_thresh[-1] - (step_end - 0.5 * lgbinwidth)) > elim_noise) and switch == 1:
                    #must be at least x ms apart (y Hz), and it was inc last
                    dec_past_thresh.append(step_end - 0.5 * lgbinwidth)  #take lower point (therefore second) when decreasing
                    #print (['decr=%f'%dec_past_thresh[-1]])
                    switch = 0
            else:
                dec_past_thresh.append(step_end - 0.5 * lgbinwidth)  #take lower point (therefore second) when decreasing
                switch = 0  #keeps track of that it was dec last

    if totspkdist_smooth[0] < dist_thresh[0]:
        #if you are starting below thresh, then pop first inc; otherwise, don't (since it will decrease first)
        if inc_past_thresh:  #if list is not empty
            inc_past_thresh.pop(0)

    # #####################################################################
    # ######### TO DEFINE A STATIC THRESHOLD AND FIND CROSSING POINTS
    # dist_thresh=0.15  #static threshold
    # switch=0  #keeps track of whether inc or dec last
    # overall_freq=3.6  #0.9
    # elim_noise=1/(overall_freq*5)#2.5)
    #
    # for i in xrange(1,numlgbins):
    #     step_start=(i)*lgbinwidth
    #     step_end=(i+1)*lgbinwidth
    #
    #     if (totspkdist_smooth[i-1]<dist_thresh) and (totspkdist_smooth[i]>=dist_thresh):  #if cross threshold (increasing)
    #         if (inc_past_thresh):  #there has already been at least one inc,
    #             if (abs(dec_past_thresh[-1]-(step_start-0.5*lgbinwidth))>elim_noise) and switch==0:  #must be at least x ms apart (y Hz) from the previous dec, and it was dec last
    #                 inc_past_thresh.append(step_start-0.5*lgbinwidth)  #take lower point (therefore first) when increasing. Need to -0.5binwidth to adjust for shift between index of bin width and index of bin distn
    #                 #print (['incr=%f'%inc_past_thresh[-1]])  #-0.5*lgbinwidth
    #                 switch=1
    #         else:
    #             inc_past_thresh.append(step_start-0.5*lgbinwidth)  #take lower point (therefore first) when increasing. Need to -0.5binwidth to adjust for shift between index of bin width and index of bin distn
    #             switch=1  #keeps track of that it was inc. last
    #     elif (totspkdist_smooth[i-1]>=dist_thresh) and (totspkdist_smooth[i]<dist_thresh):
    #         if (dec_past_thresh):  #there has already been at least one dec
    #             if (abs(inc_past_thresh[-1]-(step_end-0.5*lgbinwidth))>elim_noise) and switch==1:  #must be at least x ms apart (y Hz) from the previous incr, and it was inc last
    #                 dec_past_thresh.append(step_end-0.5*lgbinwidth)  #take lower point (therefore second) when decreasing
    #                 #print (['decr=%f'%dec_past_thresh[-1]])
    #                 switch=0
    #         else:
    #             dec_past_thresh.append(step_end-0.5*lgbinwidth)  #take lower point (therefore second) when decreasing
    #             switch=0  #keeps track of that it was dec last
    #
    # if totspkdist_smooth[0]<dist_thresh:  #if you are starting below thresh, then pop first inc; otherwise, don't (since it will decrease first)
    #     if inc_past_thresh:  #if list is not empty
    #         inc_past_thresh.pop(0)
    ################################################################
    ###############################################################

    ######## DEFINE INTER AND INTRA BURSTS ########
    #since always start with dec, intraburst=time points from 1st inc:2nd dec, from 2nd inc:3rd dec, etc.
    #interburst=time points from 1st dec:1st inc, from 2nd dec:2nd inc, etc.

    intraburst_time_ms_compound_list = []
    interburst_time_ms_compound_list = []
    intraburst_bins = []  #in seconds
    interburst_bins = []

    #print(inc_past_thresh)
    if len(inc_past_thresh) < len(dec_past_thresh):  #if you end on a decrease
        for i in xrange(len(inc_past_thresh)):
            intraburst_time_ms_compound_list.append(
                arange(inc_past_thresh[i] / ms, dec_past_thresh[i + 1] / ms, 1))  #10 is timestep
            interburst_time_ms_compound_list.append(
                arange((dec_past_thresh[i] + dt) / ms, (inc_past_thresh[i] - dt) / ms, 1))  #10 is timestep
            intraburst_bins.append(inc_past_thresh[i])
            intraburst_bins.append(dec_past_thresh[i + 1])
            interburst_bins.append(dec_past_thresh[i])
            interburst_bins.append(inc_past_thresh[i])
    else:  #if you end on an increase
        for i in xrange(len(inc_past_thresh) - 1):
            intraburst_time_ms_compound_list.append(
                arange(inc_past_thresh[i] / ms, dec_past_thresh[i + 1] / ms, 1))  #10 is timestep
            interburst_time_ms_compound_list.append(
                arange((dec_past_thresh[i] + dt) / ms, (inc_past_thresh[i] - dt) / ms, 1))  #10 is timestep
            intraburst_bins.append(inc_past_thresh[i])
            intraburst_bins.append(dec_past_thresh[i + 1])
            interburst_bins.append(dec_past_thresh[i] + dt)
            interburst_bins.append(inc_past_thresh[i] - dt)

    if dec_past_thresh and inc_past_thresh:  #if neither dec_past_thresh nor inc_past_thresh is empty
        interburst_bins.append(dec_past_thresh[-1] + dt)  #will have one more inter than intra
        interburst_bins.append(inc_past_thresh[-1] + dt)

    interburst_bins = interburst_bins / second
    intraburst_bins = intraburst_bins / second

    intraburst_time_ms = [num for elem in intraburst_time_ms_compound_list for num in elem]  #flatten list
    interburst_time_ms = [num for elem in interburst_time_ms_compound_list for num in elem]  #flatten list

    num_intraburst_bins = len(intraburst_bins) / 2  #/2 since have both start and end points for each bin
    num_interburst_bins = len(interburst_bins) / 2

    intraburst_bins_ms = [x * 1000 for x in intraburst_bins]
    interburst_bins_ms = [x * 1000 for x in interburst_bins]

    ######################################
    #bin_s=[((inc_past_thresh-dec_past_thresh)/2+dec_past_thresh) for inc_past_thresh, dec_past_thresh in zip(inc_past_thresh,dec_past_thresh)]
    bin_s = [((x - y) / 2 + y) for x, y in zip(inc_past_thresh, dec_past_thresh)] / second
    binpt_ind = [int(floor(x / lgbinwidth)) for x in bin_s]

    ########## FIND PEAK TO TROUGH AND SAVE VALUES ###################
    ########## CATEGORIZE BURSTING BASED ON PEAK TO TROUGH VALUES ###################
    ########## DISCARD BINPTS IF PEAK TO TROUGH IS TOO SMALL ###################

    peaks = []
    trough = []
    peak_to_trough_diff = []
    min_burst_size = 0.2  #defines a burst as 0.2 or larger

    for i in xrange(len(binpt_ind) - 1):
        peaks.append(max(totspkdist_smooth[binpt_ind[i]:binpt_ind[i + 1]]))
        trough.append(min(totspkdist_smooth[binpt_ind[i]:binpt_ind[i + 1]]))

    peak_to_trough_diff = [max_dist - min_dist for max_dist, min_dist in zip(peaks, trough)]

    #to delete all bins following any <min_burst_size
    first_ind_not_burst = next((x[0] for x in enumerate(peak_to_trough_diff) if x[1] < 0.2), None)
    # if first_ind_not_burst:
    #     del bin_s[first_ind_not_burst+1:]  #needs +1 since bin_s has one additional value (since counts edges)

    #to keep track of any bins <0.2 so can ignore in stats later
    all_ind_not_burst = [x[0] for x in enumerate(peak_to_trough_diff) if x[1] < 0.2]  #defines a burst as 0.2 or larger

    bin_ms = [x * 1000 for x in bin_s]
    binpt_ind = [int(floor(x / lgbinwidth)) for x in bin_s]

    #for moving threshold only
    thresh_plot = []
    thresh_plot = [dist_thresh[x] for x in binpt_ind]

    #for static threshold
    #thresh_plot=[dist_thresh]*len(bin_ms)

    # bin_s=[((inc_past_thresh-dec_past_thresh)/2+dec_past_thresh) for inc_past_thresh, dec_past_thresh in zip(inc_past_thresh,dec_past_thresh)]
    # bin_ms=[x*1000 for x in bin_s]
    # thresh_plot=[]
    # binpt_ind=[int(floor(x/lgbinwidth)) for x in bin_s]
    # thresh_plot=[dist_thresh[x] for x in binpt_ind]
    #
    binpts = xrange(int(lgbinwidth * 1000 / 2), int(numlgbins * lgbinwidth * 1000),
                    int(lgbinwidth * 1000))
    totspkhist_list = totspkhist.tolist()  #[val for subl in totspkhist for val in subl]

    #find first index after transient to see if have enough bins to do stats
    bin_ind_no_trans = bisect.bisect(bin_ms, transient)

    intrabin_ind_no_trans = bisect.bisect(intraburst_bins, transient / 1000)  #transient to seconds
    if intrabin_ind_no_trans % 2 != 0:  #index must be even since format is ind0=start_bin, ind1=end_bin, ind2=start_bin, ...
        intrabin_ind_no_trans += 1
    interbin_ind_no_trans = bisect.bisect(interburst_bins, transient / 1000)
    if interbin_ind_no_trans % 2 != 0:
        interbin_ind_no_trans += 1

    return [bin_s, bin_ms, binpts, totspkhist, totspkdist_smooth, dist_thresh,
            totspkhist_list, thresh_plot, binpt_ind, lgbinwidth, numlgbins,
            intraburst_bins, interburst_bins, intraburst_bins_ms, interburst_bins_ms,
            intraburst_time_ms, interburst_time_ms, num_intraburst_bins,
            num_interburst_bins, bin_ind_no_trans, intrabin_ind_no_trans,
            interbin_ind_no_trans]
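# A minimal standalone sketch (toy data and a fixed threshold, unlike the moving
# threshold above) of the crossing detection used in find_bursts: record an
# "increase" index when the smoothed distribution rises through the threshold
# and a "decrease" index when it falls back below it.
import numpy as np

dist = np.array([0.05, 0.1, 0.4, 0.8, 0.5, 0.1, 0.05, 0.3, 0.9, 0.2, 0.05])
thresh = 0.25
inc_past_thresh, dec_past_thresh = [], []
for i in range(1, len(dist)):
    if dist[i - 1] < thresh <= dist[i]:
        inc_past_thresh.append(i)   # rising crossing: burst starts
    elif dist[i - 1] >= thresh > dist[i]:
        dec_past_thresh.append(i)   # falling crossing: burst ends
print(inc_past_thresh, dec_past_thresh)  # [2, 7] [5, 9]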