Example #1
def MaybeMergeChildren(parent_node):
    children = parent_node.child_nodes()
    assert len(children) == 2
    if not AreLeaves(children):
        logging.debug('Not both children are leaves. Bailing.')
        return False

    # Make the new dictionaries and edge lengths
    child_pathways = [c.pathways for c in children]
    child_lengths = [c.edge.length for c in children]
    virtual_count = sum(c.count for c in children)
    max_length_idx = pylab.argmax(child_lengths)
    label = children[max_length_idx].taxon.label
    merged_pathways = set.union(*child_pathways)

    logging.debug('Merging 2 children with edge lengths %s', child_lengths)

    # Remove children and update the parent
    for child in children:
        parent_node.remove_child(child)
    parent_node.edge.length += child_lengths[max_length_idx]
    parent_node.pathways = merged_pathways
    parent_node.count = virtual_count
    parent_node.annotate('count')
    for pname in parent_node.pathways:
        setattr(parent_node, pname, True)
        parent_node.annotate(pname)

    # Set up a taxon for the parent according to the
    # most distinct child.
    # TODO(flamholz): indicate somehow that this was merged.
    taxon = dendropy.Taxon()
    taxon.label = label
    parent_node.taxon = taxon

    return True
Example #2
def _removeTimeShift(self, tdDatas):
    # not sure if needed; maybe we want to correct the raw data by shifting the maxima on top of each other
    # the indices of the maxima
    # takes at the moment only the X channel data and corrects it (safer!)
    peak_pos = []
    for tdData in tdDatas:
        time_max_raw = tdData[py.argmax(tdData[:, 1]), 0]
        thisPeakData = self.getShorterData(tdData, time_max_raw - 0.5e-12, time_max_raw + 0.5e-12)
        thisPeakData = self.getInterData(thisPeakData, len(thisPeakData[:, 0]) * 20,
                                         thisPeakData[0, 0], thisPeakData[-1, 0], 'cubic')
        peak_pos.append(thisPeakData[py.argmax(thisPeakData[:, 1]), 0])

    peak_pos = py.asarray(peak_pos)
    mp = py.mean(peak_pos)
    for i in range(len(tdDatas)):
        tdDatas[i][:, 0] -= (peak_pos[i] - mp)
    return tdDatas, py.std(peak_pos)
Example #3
    def computeMomentumSpreading(self):
        spreading = []
        meanPosMomentum = []
        for i in range(len(self.variableArray)):
            profile = self.profileArray[i] / max(self.profileArray[i])
            maxLocation = []
            maxVal = []
            for x0x1 in self.coords:
                x0 = x0x1[0]
                x1 = x0x1[1]
                maxLocation.append(x0 + py.argmax(profile[x0:x1]))
                maxVal.append(max(profile[x0:x1]))

            maxLocation = py.array(maxLocation)
            maxVal = py.array(maxVal)
            zeroIndex = int(len(maxLocation) / 2)
            momentum = (py.array(range(len(profile))) -
                        maxLocation[zeroIndex]) / self.h_d_ratio
            maxLocationMomentum = (py.array(maxLocation) -
                                   maxLocation[zeroIndex]) / self.h_d_ratio
            meanPosMomentum.append(
                py.sum(maxLocationMomentum * maxVal) / py.sum(maxVal))
            spreading.append(
                py.sum(maxVal *
                       (maxLocationMomentum - meanPosMomentum[-1])**2))
        spreading = py.array(spreading)
        spreading = spreading / max(spreading)
        self.spreading = spreading
        self.meanPosMomentum = meanPosMomentum
Example #4
def average_q(self, state):
    q = self.get_q(state)
    average_q = float64(0)
    for action in range(0, self.env.get_num_actions()):
        p = self.optimal_p if action == argmax(q) else (self.epsilon / self.env.get_num_actions())
        average_q += p * q[action]
    return average_q
Example #5
def psp_parameter_estimate_fixmem(time, value):
    smoothing_kernel = 10
    smoothed_value = p.convolve(
        value,
        p.ones(smoothing_kernel) / float(smoothing_kernel),
        "same")

    mean_est_part = int(len(value) * .1)
    mean_estimate = p.mean(smoothed_value[-mean_est_part:])
    noise_estimate = p.std(value[-mean_est_part:])

    integral = p.sum(smoothed_value - mean_estimate) * (time[1] - time[0])

    f = 1.

    A_estimate = (max(smoothed_value) - mean_estimate) / (1. / 4.)

    min_A = noise_estimate

    if A_estimate < min_A:
        A_estimate = min_A

    t1_est = integral / A_estimate * f
    t2_est = 2 * t1_est

    tmax_est = time[p.argmax(smoothed_value)] + p.log(t2_est / t1_est) * (t1_est * t2_est) / (t1_est - t2_est)

    return p.array([
        tmax_est,
        A_estimate,
        t1_est,
        mean_estimate])
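A usage sketch on a synthetic noisy pulse; as in the snippet itself, p is assumed to be the pylab module:

import pylab as p

time = p.linspace(0.0, 1.0, 1000)
value = p.exp(-(time - 0.2) / 0.1) - p.exp(-(time - 0.2) / 0.05)
value[time < 0.2] = 0.0               # pulse starts at t = 0.2
value += 0.01 * p.randn(len(time))    # baseline noise
print(psp_parameter_estimate_fixmem(time, value))
# -> array([tmax_est, A_estimate, t1_est, mean_estimate])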
Example #6
def alpha_psp_parameter_estimate(time, value, smoothing_samples=10):
    t1_est_min = time[1] - time[0]

    mean_est_part = int(len(value) * .1)  # int() so it can be used as a slice index
    mean_estimate = p.mean(value[-mean_est_part:])
    noise_estimate = p.std(value[-mean_est_part:])

    smoothed_value = p.convolve(
        value - mean_estimate,
        p.ones(smoothing_samples) / float(smoothing_samples),
        "same") + mean_estimate

    integral = p.sum(smoothed_value - mean_estimate) * (time[1] - time[0])

    f = 1.

    height_estimate = (max(smoothed_value) - mean_estimate)

    min_height = noise_estimate

    if height_estimate < min_height:
        height_estimate = min_height

    t1_est = integral / height_estimate * f
    if t1_est < t1_est_min:
        t1_est = t1_est_min
    t2_est = 2 * t1_est

    tmax_est = time[p.argmax(smoothed_value)]
    tstart_est = tmax_est + p.log(t2_est / t1_est) \
        * (t1_est * t2_est) / (t1_est - t2_est)

    return p.array(
        [height_estimate, t1_est, t2_est, tstart_est, mean_estimate])
Example #7
File: fasta.py  Project: sequana/sequana
    def summary(self, max_contigs=-1):
        from pylab import mean, argmax
        # used by sequana summary fasta
        summary = {"number_of_contigs": len(self.sequences)}
        summary["total_contigs_length"] = sum(self.lengths)
        summary["mean_contig_length"] = mean(self.lengths)
        summary["max_contig_length"] = max(self.lengths)
        summary["min_contig_length"] = min(self.lengths)
        N = 0
        lengths = self.lengths[:]
        positions = list(range(len(lengths)))
        stats = self.get_stats()
        print("#sample_name: {}".format(self.filename))
        print("#total length: {}".format(stats['total_length']))
        print("#N50: {}".format(stats['N50']))
        print("#Ncontig: {}".format(stats['N']))
        print("#L50: {}".format(stats['L50']))
        print("#max_contig_length: {}".format(stats['max_length']))
        print("#min_contig_length: {}".format(stats['min_length']))
        print("#mean_contig_length: {}".format(stats['mean_length']))

        print("contig name,length,count A,C,G,T,N")
        if max_contigs == -1:
            max_contigs = len(lengths) + 1
        while lengths and N < max_contigs:
            N += 1
            index = argmax(lengths)
            length = lengths.pop(index)
            position = positions.pop(index)
            sequence = self.sequences[position]
            name = self.names[position]
            print("{},{},{},{},{},{},{}".format(name, length, sequence.count('A'), sequence.count('C'),
                sequence.count('G'), sequence.count('T'), sequence.count('N')))
Example #8
    def getPreceedingNoise(self, tdData, timePreceedingSignal=-1):
        # returns the preceding noise in the X and Y channels of tdData
        # it uses timePreceedingSignal to evaluate the noise; if not given,
        # it autodetects a "good" time

        # we should crop the data at least 2.5 ps before the main peak
        nearestdistancetopeak = 2.5e-12
        if min(tdData[:, 0]) + timePreceedingSignal > -nearestdistancetopeak:
            timePreceedingSignal = -min(tdData[:, 0]) - nearestdistancetopeak
            print("Time interval of preceding noise too long, reset done")
            # don't use user input if it reaches past the peak
        
        #get the first time
        starttime=min(tdData[:,0])
        if timePreceedingSignal<0:
            #determine length automatically            
            ratio=2
            ix_max=py.argmax(tdData[:,1])
            ix_min=py.argmin(tdData[:,1])
            earlier = min(ix_max, ix_min)
            # take only the part before the earlier extremum
            endtime = tdData[int(earlier / ratio), 0]
        else:
            endtime=starttime+timePreceedingSignal

        #cut the data from starttime to endtime            
        noise=self.getShorterData(tdData,starttime,endtime)
        #detrend the noise
        noise=signal.detrend(noise[:,1:3],axis=0)
        return noise
Example #9
def _calculate_spectra_sussix(sx, sy, Q_x, Q_y, Q_s, n_lines):

    n_turns, n_files = sx.shape

    # Allocate memory for output.        
    oxx, axx = plt.zeros((n_lines, n_files)), plt.zeros((n_lines, n_files))
    oyy, ayy = plt.zeros((n_lines, n_files)), plt.zeros((n_lines, n_files))

    # Initialise Sussix object.
    SX = PySussix.Sussix()
    
    x, xp, y, yp = sx.real, sx.imag, sy.real, sy.imag
    for file_i in range(n_files):
        SX.sussix_inp(nt1=1, nt2=n_turns, idam=2, ir=0, tunex=Q_x[file_i] % 1, tuney=Q_y[file_i] % 1)
        SX.sussix(x[:,file_i], xp[:,file_i], y[:,file_i], yp[:,file_i], sx[:,file_i], sx[:,file_i])

        # Amplitude normalisation
        SX.ax /= plt.amax(SX.ax)
        SX.ay /= plt.amax(SX.ay)

        # Tunes
        SX.ox = plt.absolute(SX.ox)
        SX.oy = plt.absolute(SX.oy)
        if file_i == 0:
            tunexsx = SX.ox[plt.argmax(SX.ax)]
            tuneysx = SX.oy[plt.argmax(SX.ay)]
            print("\n*** Tunes from Sussix")
            print("    tunex", tunexsx, ", tuney", tuneysx, "\n")

        # Tune normalisation
        SX.ox = (SX.ox - (Q_x[file_i] % 1)) / Q_s[file_i]
        SX.oy = (SX.oy - (Q_y[file_i] % 1)) / Q_s[file_i]
    
        # Sort
        CX = plt.rec.fromarrays([SX.ox, SX.ax], names='ox, ax')
        CX.sort(order='ax')
        CY = plt.rec.fromarrays([SX.oy, SX.ay], names='oy, ay')
        CY.sort(order='ay')
        ox, ax, oy, ay = CX.ox, CX.ax, CY.oy, CY.ay
        oxx[:,file_i], axx[:,file_i], oyy[:,file_i], ayy[:,file_i] = ox, ax, oy, ay

    spectra = {}
    spectra['horizontal'] = (oxx, axx)
    spectra['vertical']   = (oyy, ayy)
        
    return spectra
Example #10
File: map.py  Project: TomPeerdeman/AEC2013
def classify(self, x):
    y1 = multivariate_pdf(x, self.mu[0], self.cov[0]) * self.P[0]
    y2 = multivariate_pdf(x, self.mu[1], self.cov[1]) * self.P[1]
    y3 = multivariate_pdf(x, self.mu[2], self.cov[2]) * self.P[2]
    px = y1 + y2 + y3
    P1x = y1 / px
    P2x = y2 / px
    P3x = y3 / px
    return argmax([P1x, P2x, P3x]) + 1.0
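A self-contained sketch of the same MAP rule, substituting scipy.stats.multivariate_normal for the unshown multivariate_pdf helper; the means, covariances and priors are made up. Dividing by px cannot change the argmax, so the normalisation step is skipped here:

from pylab import argmax
from scipy.stats import multivariate_normal

mu = [[0, 0], [2, 2], [4, 0]]    # hypothetical class means
cov = [[[1, 0], [0, 1]]] * 3     # shared identity covariance
P = [1 / 3, 1 / 3, 1 / 3]        # equal priors

def classify(x):
    joint = [multivariate_normal.pdf(x, mu[k], cov[k]) * P[k] for k in range(3)]
    return argmax(joint) + 1.0   # classes are labelled 1..3

print(classify([1.9, 2.1]))      # -> 2.0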
Example #11
def computeSpectrum(signal):
    #gen = VectorInput(signal)
    #fc = FrameCutter(startFromZero=False, frameSize=48, hopSize=1)
    #w = Windowing(zeroPhase=False)
    #spec = Spectrum()

    #p = essentia.Pool()
    #gen.data >> fc.signal
    #fc.frame >> w.frame >> spec.frame
    #spec.spectrum >> (p,'spectrum')
    #essentia.run(gen)

    #pyplot.imshow(p['spectrum'], cmap=pyplot.cm.hot, aspect='auto', origin='lower')

    corr = std.AutoCorrelation()(signal)
    pyplot.plot(corr)
    pyplot.show()
    print(argmax(corr[2:]) + 2)
Example #12
def _determineLockinPhase(self, rawtdData):
    # determine the phase difference from X and Y channel (at maximum signal)

    ix_max = py.argmax(rawtdData[:, 1])
    # take a few data points around the maximum to evaluate theta
    no = 4
    XCs = rawtdData[max(0, ix_max - no):min(rawtdData.shape[0], ix_max + no), 1]
    YCs = rawtdData[max(0, ix_max - no):min(rawtdData.shape[0], ix_max + no), 2]
    return py.arctan(py.mean(YCs / XCs))
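A quick check on synthetic data: the method never uses self, so it can be called with None for the purpose of the demo. The X channel is a Gaussian envelope and the Y channel the same envelope scaled by tan(0.3), so the recovered phase should come out at about 0.3 rad:

import pylab as py

t = py.linspace(0.0, 100e-12, 1000)                 # 100 ps trace
env = py.exp(-((t - 50e-12) / 10e-12) ** 2)         # peaked X-channel envelope
raw = py.column_stack([t, env, env * py.tan(0.3)])  # columns: time, X, Y
print(_determineLockinPhase(None, raw))             # ~0.3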
Example #13
def computeSpectrum(signal):
    #gen = VectorInput(signal)
    #fc = FrameCutter(startFromZero=False, frameSize=48, hopSize=1)
    #w = Windowing(zeroPhase=False)
    #spec = Spectrum()

    #p = essentia.Pool()
    #gen.data >> fc.signal
    #fc.frame >> w.frame >> spec.frame
    #spec.spectrum >> (p,'spectrum')
    #essentia.run(gen)

    #pyplot.imshow(p['spectrum'], cmap=pyplot.cm.hot, aspect='auto', origin='lower')

    corr = std.AutoCorrelation()(signal)
    pyplot.plot(corr)
    pyplot.show()
    print(argmax(corr[2:]) + 2)
Example #14
def translate_back0(outputs, threshold=0.25):
    ms = amax(outputs, axis=1)
    cs = argmax(outputs, axis=1)
    cs[ms < threshold * amax(outputs)] = 0
    result = []
    for i in range(1, len(cs)):
        if cs[i] != cs[i - 1]:
            if cs[i] != 0:
                result.append(cs[i])
    return result
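A usage sketch decoding a toy 5-frame, 3-class output matrix (class 0 acts as the blank/background class); amax and argmax inside translate_back0 are assumed to come from a pylab star import:

from pylab import array

outputs = array([[0.90, 0.05, 0.05],
                 [0.10, 0.85, 0.05],
                 [0.10, 0.80, 0.10],
                 [0.80, 0.10, 0.10],
                 [0.10, 0.05, 0.85]])
print(translate_back0(outputs))  # -> [1, 2]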
Example #15
def fit_modes_full(o, a, v, ixrange):
    ax = [plt.argmax(a[:,i]) for i in range(a.shape[1])]
    o = plt.array([o[ax[i],i] for i in range(o.shape[1])])
    a = plt.array([a[ax[i],i] for i in range(a.shape[1])])

    x, y = v[ixrange], o[ixrange]
    p = plt.polyfit(x, y, 1)
    z = v*p[0] + p[1]

    return x, y, z, p
Example #16
def OutToClass(outputs, targets):
    outs = []
    trgs = []
    for out, trg in zip(outputs, targets):
        # this is to have each output neuron responsible for a class
        # if False and len(out) == 3 and \
        if len(out) == 1:
            outs.append(out[0])
            trgs.append(trg[0])
            # print out[0]
        elif len(out) == 3 and \
          array([where(x in [1, 0], True, False) for x in out]).all() and \
          bincount(out.astype('int'))[0] != 2:
            outs.append(-1)
            trgs.append(argmax(trg))
        else:
            outs.append(argmax(out))
            trgs.append(argmax(trg))
    return outs, trgs
Example #17
def OutToClass(outputs, targets):
    outs = []
    trgs = []
    for out, trg in zip(outputs, targets):
        # this is to have each output neuron responsible for a class
        # if False and len(out) == 3 and \
        if len(out) == 1:
            outs.append(out[0])
            trgs.append(trg[0])
            # print out[0]
        elif len(out) == 3 and \
          array([where(x in [1, 0], True, False) for x in out]).all() and \
          bincount(out.astype('int'))[0] != 2:
            outs.append(-1)
            trgs.append(argmax(trg))
        else:
            outs.append(argmax(out))
            trgs.append(argmax(trg))
    return outs, trgs
Example #18
    def _calculate_sussix_spectrum(self, turn, window_width):
    
        # Initialise Sussix object
        SX = PySussix.Sussix()
        SX.sussix_inp(nt1=1, nt2=window_width, idam=2, ir=0, tunex=self.q_x, tuney=self.q_y)

        tunes_x = plt.zeros(self.n_particles_to_analyse)
        tunes_y = plt.zeros(self.n_particles_to_analyse)

        n_particles_to_analyse_10th = int(self.n_particles_to_analyse / 10)
        print('Running SUSSIX analysis ...')
        for i in range(self.n_particles_to_analyse):
            if not i % n_particles_to_analyse_10th:
                print('  Particle', i)
            SX.sussix(self.x[i,turn:turn+window_width], self.xp[i,turn:turn+window_width],
                      self.y[i,turn:turn+window_width], self.yp[i,turn:turn+window_width],
                      self.x[i,turn:turn+window_width], self.xp[i,turn:turn+window_width]) # this line is not used by sussix!
            tunes_x[i] = plt.absolute(SX.ox[plt.argmax(SX.ax)])
            tunes_y[i] = plt.absolute(SX.oy[plt.argmax(SX.ay)])
                
        return tunes_x, tunes_y
Example #19
def _calculate_spectra_fft(sx, sy, Q_x, Q_y, Q_s, n_lines):

    n_turns, n_files = sx.shape
        
    # Allocate memory for output.
    oxx, axx = plt.zeros((n_lines, n_files)), plt.zeros((n_lines, n_files))
    oyy, ayy = plt.zeros((n_lines, n_files)), plt.zeros((n_lines, n_files))

    for file_i in range(n_files):
        t = plt.linspace(0, 1, n_turns)
        ax = plt.absolute(plt.fft(sx[:, file_i]))
        ay = plt.absolute(plt.fft(sy[:, file_i]))

        # Amplitude normalisation
        ax /= plt.amax(ax, axis=0)
        ay /= plt.amax(ay, axis=0)
    
        # Tunes
        if file_i == 0:
            tunexfft = t[plt.argmax(ax[:n_turns // 2], axis=0)]
            tuneyfft = t[plt.argmax(ay[:n_turns // 2], axis=0)]
            print("\n*** Tunes from FFT")
            print("    tunex:", tunexfft, ", tuney:", tuneyfft, "\n")

        # Tune normalisation
        ox = (t - (Q_x[file_i] % 1)) / Q_s[file_i]
        oy = (t - (Q_y[file_i] % 1)) / Q_s[file_i]
    
        # Sort
        CX = plt.rec.fromarrays([ox, ax], names='ox, ax')
        CX.sort(order='ax')
        CY = plt.rec.fromarrays([oy, ay], names='oy, ay')
        CY.sort(order='ay')
        ox, ax, oy, ay = CX.ox[-n_lines:], CX.ax[-n_lines:], CY.oy[-n_lines:], CY.ay[-n_lines:]
        oxx[:,file_i], axx[:,file_i], oyy[:,file_i], ayy[:,file_i] = ox, ax, oy, ay

    spectra = {}
    spectra['horizontal'] = (oxx, axx)
    spectra['vertical']   = (oyy, ayy)
        
    return spectra
Example #20
def get_margins(x, y):
    vx = vxinterp(x, y)
    vy = vyinterp(x, y)
    n = array([-vx, vy]) / sqrt(vx**2 + vy**2)  # wrap in array so the division broadcasts
    d = 50e3
    nsamp = 500
    line = array([
        x + d * linspace(-n[0], n[0], nsamp),
        y + d * linspace(-n[1], n[1], nsamp)
    ])
    vxd = vxinterp(line[0], line[1])
    vyd = vyinterp(line[0], line[1])
    v = sqrt(vxd**2 + vyd**2)
    dv = v[1:] - v[:-1]
    il = argmax(dv[:nsamp // 2])
    ir = argmax(dv[nsamp // 2:])
    print(il, ir)

    return (line[0, il], line[1, il],
            line[0, nsamp // 2 + ir], line[1, nsamp // 2 + ir])
Example #21
def get_maximum(data, sigmas=None, betas=None):
    if sigmas is None: sigmas = [0, 0.25, 0.5, 0.75, 1]
    if betas is None: betas = [0.95, 1]

    file_names = []
    for beta in betas:
        for sigma in sigmas:
            temp_data = data[(data['sigma'] == sigma) * (data['beta'] == beta)]
            if size(temp_data) != 0:
                max_indx = argmax(temp_data['Average_Return'])
                file_names.append(temp_data['File_Name'][max_indx])

    return file_names
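A usage sketch with a small numpy structured array standing in for data; the field names follow those the function indexes, the values are made up, and size/argmax inside get_maximum are assumed to come from a pylab star import:

from pylab import array

data = array([(0.0, 0.95, 10.0, 'run_a'),
              (0.0, 0.95, 12.5, 'run_b'),
              (0.5, 1.00, 8.0, 'run_c')],
             dtype=[('sigma', 'f8'), ('beta', 'f8'),
                    ('Average_Return', 'f8'), ('File_Name', 'U16')])
print(get_maximum(data))  # -> ['run_b', 'run_c']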
Example #22
def fit_modes_0(o, a, v, ixrange):
    ix = [plt.where(plt.logical_and(-1<o, o<1)[:,i]) for i in range(o.shape[1])]
    o = [o[ix[i],i][0] for i in range(o.shape[1])]
    a = [a[ix[i],i][0] for i in range(a.shape[1])]
    ax = [plt.argmax(a[i][:]) for i in range(len(a))]
    o = plt.array([o[i][ax[i]] for i in range(len(o))])
    a = plt.array([a[i][ax[i]] for i in range(len(a))])

    x, y = v[list(ixrange)], o[list(ixrange)]
    p = plt.polyfit(x, y, 1)
    z = v*p[0] + p[1]

    return x, y, z, p
Example #23
def translate_back0(outputs,threshold=0.7):
    """Simple code for translating output from a classifier
    back into a list of classes. TODO/ATTENTION: this can
    probably be improved."""
    ms = amax(outputs,axis=1)
    cs = argmax(outputs,axis=1)
    cs[ms<threshold] = 0
    result = []
    for i in range(1,len(cs)):
        if cs[i]!=cs[i-1]:
            if cs[i]!=0:
                result.append(cs[i])
    return result
Example #24
def translate_back0(outputs, threshold=0.7):
    """Simple code for translating output from a classifier
    back into a list of classes. TODO/ATTENTION: this can
    probably be improved."""
    ms = amax(outputs, axis=1)
    cs = argmax(outputs, axis=1)
    cs[ms < threshold] = 0
    result = []
    for i in range(1, len(cs)):
        if cs[i] != cs[i - 1]:
            if cs[i] != 0:
                result.append(cs[i])
    return result
Example #25
def measure(self, line):
    h, w = line.shape
    smoothed = filters.gaussian_filter(line,
                                       (h * 0.5, h * self.smoothness),
                                       mode='constant')
    smoothed += 0.001 * filters.uniform_filter(smoothed, (h * 0.5, w),
                                               mode='constant')
    self.shape = (h, w)
    a = argmax(smoothed, axis=0)
    a = filters.gaussian_filter(a, h * self.extra)
    self.center = array(a, 'i')
    deltas = abs(arange(h)[:, newaxis] - self.center[newaxis, :])
    self.mad = mean(deltas[line != 0])
    self.r = int(1 + self.range * self.mad)
Example #26
    def get_features(self,positions):
        wMin = 5
        wMax = 18

        track_length = pl.shape(positions)[0]
        
        steps = self._get_steps(positions,track_length)
        angles = self._get_angles(steps,track_length)
        
        feats = pl.zeros([track_length,self.many_features])
        manyTimes = pl.zeros(track_length)

        msd = self._get_msd(positions,track_length)
        # following code is to _get diffusion coefficient
        xi = pl.arange(4)
        A = pl.array([xi, pl.ones(4)]).T
        diff_coeff = pl.lstsq(A,msd[:4])[0][0]

        for i in range(track_length-wMax+1):
            for j in range(wMin,wMax+1):
                feats[i:i+j,0] += self._get_straight(angles[i:i+j-2],j-1)
                feats[i:i+j,1] += self._get_bend(angles[i:i+j-2],j-1)
                feats[i:i+j,2] += self._get_eff(positions[i:i+j,:],steps[i:i+j-1,:],j-1)

                gyrationTensor = self._get_gyration_tensor(positions[i:i+j,:])
                [eig_vals, eig_vecs] = pl.eig(gyrationTensor)
                eig_vals = pl.array([eig_vals[0],eig_vals[1]])

                feats[i:i+j,3] += self._get_asymm(eig_vals[0],eig_vals[1])

                dom_index = pl.argmax(eig_vals)
                dom_vec = eig_vecs[:,dom_index]
                pos_proj = self._get_projection(positions[i:i+j,:],dom_vec,j-1)
                proj_mean = pl.mean(pos_proj)

                feats[i:i+j,4] += self._get_skew(pos_proj,proj_mean,j-1)
                feats[i:i+j,5] += self._get_kurt(pos_proj,proj_mean,j-1)
                feats[i:i+j,6] += self._get_disp(positions[i:i+j,:])
                feats[i:i+j,7] += self._get_conf(positions[i:i+j,:],j-1,diff_coeff)

                manyTimes[i:i+j] += 1

        for i in range(self.many_features):
            feats[:,i] /= manyTimes

        return feats
Example #27
def epsilon_greedy_probability(self, state, action):
    q = self.get_q(state)
    if size(unique(q)) < self.env.get_num_actions():
        max_q = max(q)
        max_observations = 0
        for value in q:
            if value == max_q:
                max_observations += 1
        probabilities = zeros(size(q))
        for i in range(size(q)):
            if q[i] == max_q:
                probabilities[i] = ((1 - self.epsilon) / max_observations) + \
                                   (self.epsilon / self.env.get_num_actions())
            else:
                probabilities[i] = self.epsilon / self.env.get_num_actions()
        return probabilities[action]
    else:
        if action == argmax(q):
            return self.optimal_p
        else:
            return self.epsilon / self.env.get_num_actions()
Example #28
def calculate_1d_sussix_spectrum(turn, window_width, x, xp, qx):
    macroparticlenumber = len(x)
    tunes = plt.zeros(macroparticlenumber)

    # Initialise Sussix object
    SX = PySussix.Sussix()
    SX.sussix_inp(nt1=1, nt2=window_width, idam=1, ir=0, tunex=qx)

    n_particles_to_analyse_10th = int(macroparticlenumber / 10)
    print('Running SUSSIX analysis ...')
    for i in range(macroparticlenumber):
        if not i % n_particles_to_analyse_10th:
            print('  Particle', i)
        SX.sussix(x[i,turn:turn+window_width], xp[i,turn:turn+window_width],
                  x[i,turn:turn+window_width], xp[i,turn:turn+window_width],
                  x[i,turn:turn+window_width], xp[i,turn:turn+window_width]) # this line is not used by sussix!
        tunes[i] = plt.absolute(SX.ox[plt.argmax(SX.ax)])

    return tunes
Example #29
    def setglimits_speed(current_data):
        from pylab import xlim, ylim, title, argmax, show, array, ylabel
        gaugeno = current_data.gaugeno
        s = speed(current_data)
        t = current_data.t
        g = current_data.plotdata.getgauge(gaugeno)
        level = g.level
        maxlevel = max(level)

        #find first occurrence of the max of levels used by
        #this gauge and set the limits based on that time
        argmax_level = argmax(level)  #first occurrence of it
        xlim(time_scale * array([t[argmax_level], t[-1]]))
        ylabel('meters/sec')
        min_speed = s[argmax_level:].min()
        max_speed = s[argmax_level:].max()
        ylim(min_speed - 0.5, max_speed + 0.5)
        title('Gauge %i : Speed (s)\n' % gaugeno + \
              'max(s) = %7.3f,    max(level) = %i' %(max_speed,maxlevel))
Example #30
    def setglimits_eta(current_data):
        from pylab import xlim, ylim, title, argmax, show, array, ylabel
        gaugeno = current_data.gaugeno
        q = current_data.q
        eta = q[3, :]
        t = current_data.t
        g = current_data.plotdata.getgauge(gaugeno)
        level = g.level
        maxlevel = max(level)

        #find first occurrence of the max of levels used by
        #this gauge and set the limits based on that time
        argmax_level = argmax(level)  #first occurrence of it
        xlim(time_scale * array([t[argmax_level], t[-1]]))
        ylabel('meters')
        min_eta = eta[argmax_level:].min()
        max_eta = eta[argmax_level:].max()
        ylim(min_eta - 0.5, max_eta + 0.5)
        title('Gauge %i : Surface Elevation (eta)\n' % gaugeno + \
              'max(eta) = %7.3f,    max(level) = %i' %(max_eta,maxlevel))
Example #31
    def setglimits_depth(current_data):
        from pylab import xlim, ylim, title, argmax, show, array, ylabel
        gaugeno = current_data.gaugeno
        q = current_data.q
        depth = q[0, :]
        t = current_data.t
        g = current_data.plotdata.getgauge(gaugeno)
        level = g.level
        maxlevel = max(level)

        #find first occurrence of the max of levels used by
        #this gauge and set the limits based on that time
        argmax_level = argmax(level)
        xlim(time_scale * array([t[argmax_level], t[-1]]))
        ylabel('meters')
        min_depth = depth[argmax_level:].min()
        max_depth = depth[argmax_level:].max()
        ylim(min_depth - 0.5, max_depth + 0.5)
        title('Gauge %i : Flow Depth (h)\n' % gaugeno + \
              'max(h) = %7.3f,    max(level) = %i' %(max_depth,maxlevel))
Example #32
def MaybeMergeChildren(parent_node):
    children = parent_node.child_nodes()
    assert len(children) == 2
    if not AreLeaves(children):
        logging.debug('Not both children are leaves. Bailing.')
        return False
    
    # Make the new dictionaries and edge lengths
    child_pathways = [c.pathways for c in children]
    child_oxy_reqs = [c.oxygen_req for c in children]
    child_lengths = [c.edge.length for c in children]
    virtual_count = sum(c.count for c in children)
    max_length_idx = pylab.argmax(child_lengths)
    label = children[max_length_idx].taxon.label
    merged_pathways = set.union(*child_pathways)
    merged_oxygen = DictSum(child_oxy_reqs)
    
    logging.debug('Merging 2 children with edge lengths %s',
                  child_lengths)
    
    # Remove children and update the parent
    for child in children:
        parent_node.remove_child(child)
    parent_node.edge.length += child_lengths[max_length_idx]
    parent_node.pathways = merged_pathways
    parent_node.count = virtual_count
    parent_node.annotate('count')
    parent_node.oxygen_req = merged_oxygen
    parent_node.annotate('oxygen_req')
    for pname in parent_node.pathways:
        setattr(parent_node, pname, True)
        parent_node.annotate(pname)
    
    # Set up a taxon for the parent according to the
    # most distinct child.
    # TODO(flamholz): indicate somehow that this was merged.
    taxon = dendropy.Taxon()
    taxon.label = label
    parent_node.taxon = taxon
    
    return True
Example #33
def getMostStableTickLength(ticks):
    nticks = len(ticks)
    dticks = zeros(nticks-1)
    for i in range(nticks-1):
        dticks[i] = (ticks[i+1] - ticks[i])
    hist, distx = np.histogram(dticks, bins=int(50 * (1 + (max(dticks) - min(dticks)))))
    bestPeriod = distx[argmax(hist)]  # there may be more than one candidate!!
    bestBpm = 60. / bestPeriod
    print('best period', bestPeriod)
    print('best bpm:', bestBpm)

    #print 'hist:', hist, distx
    maxLength = 0
    idx = 0
    for startpos in range(nticks-1):
        l = longestChain(dticks, startpos, bestPeriod, 0.1)
        if l > maxLength:
            maxLength = l
            idx = startpos

    print('max stable length:', idx, maxLength)
    return idx, maxLength, bestBpm
Example #34
def getMostStableTickLength(ticks):
    nticks = len(ticks)
    dticks = zeros(nticks-1)
    for i in range(nticks-1):
        dticks[i] = (ticks[i+1] - ticks[i])
    hist, distx = np.histogram(dticks, bins=int(50 * (1 + (max(dticks) - min(dticks)))))
    bestPeriod = distx[argmax(hist)]  # there may be more than one candidate!!
    bestBpm = 60. / bestPeriod
    print('best period', bestPeriod)
    print('best bpm:', bestBpm)

    #print 'hist:', hist, distx
    maxLength = 0
    idx = 0
    for startpos in range(nticks-1):
        l = longestChain(dticks, startpos, bestPeriod, 0.1)
        if l > maxLength:
            maxLength = l
            idx = startpos

    print('max stable length:', idx, maxLength)
    return idx, maxLength, bestBpm
Example #35
def FourD():
    # collects data from file and plots
    L = 20
    mc = int(1e5)
    temps = [100, 240]
    spinconfigs = ["up", "random"]
    most_often = {}

    for spin in spinconfigs:

        pl.figure()
        for temp in temps:

            Enername = "Energyprob_L" + str(L) + "_mc" + str(mc) + "_T" + str(
                temp) + "_spin" + str(spin)
            energies, variance = pl.loadtxt('../data/4c/' + Enername + ".dat",
                                            usecols=(0, 1),
                                            unpack=True)
            pl.hist(energies,
                    normed=0,
                    bins=100,
                    histtype="step",
                    label="Temp=%s" % temp)
            hist, bins = pl.histogram(energies, bins=len(pl.unique(energies)))
            E = (bins[:-1])[pl.argmax(hist)] + 0.5 * (bins[1] - bins[0])
            most_often[spin + " " + str(temp)] = E, max(hist), variance[-1]

        pl.title("Energy occurrence histogram for spin %s" % spin)
        pl.xlabel("Occurring energies")
        pl.ylabel("Count of energy")
        pl.xlim([-820, -350])
        pl.legend(loc="best")
        pl.savefig("../figs/4d/probabilityhistogram_%s.png" % spin)
    for i, j in most_often.items():
        print(i, " energy:", j[0], "\n---          count:", j[1])
        print("        Prob of state: %g " % (j[1] / 87000.))
        print("             Variance: %g " % (j[2]))
Example #36
def alignTicks(sine, novelty, frameRate, bpm, size):
        ''' Aligns the sine function with the novelty function. Parameters:
            @sine: the sinusoid from bpmHistogram,
            @novelty: the novelty curve
            @frameRate: the frameRate
            @size: the audio size, so as not to have more ticks than the audio length
            @bpm: the estimated bpm'''

        #pyplot.plot(novelty, 'k')
        #pyplot.plot(sine, 'r')
        #for i in range(len(novelty)-1):
        #    diff = novelty[i+1]-novelty[i]
        #    if diff > 0: novelty[i] = diff
        #    else: novelty[i] = 0
        #pyplot.plot(novelty, 'r')

        noveltySize = len(novelty)
        prodPulse = zeros(noveltySize, dtype='f4')
        i = 0
        while i < noveltySize:
            if sine[i] <= 0:
                i += 1
                continue
            window = []
            while i < noveltySize and sine[i] != 0:
                window.append(novelty[i] * sine[i])
                i += 1
            peakPos = argmax(window)
            peakPos = i - len(window) + peakPos
            prodPulse[peakPos] = novelty[peakPos]

        #pyplot.plot(prodPulse, 'g')
        #pyplot.show()
        ticks = []
        ticksAmp = []
        tatum = 60./bpm
        diffTick = 2*tatum
        prevTick = -1
        prevAmp = -1
        for i, x in enumerate(prodPulse):
            if x != 0:
                newTick = float(i) / frameRate
                if newTick < 0 or newTick >= size:
                    continue
                ticks.append(newTick)
                ticksAmp.append(x)
            #if x != 0:
            #    newTick = float(i)/frameRate
            #    if newTick < 0 or newTick >= size: continue
            #    if prevTick < 0:
            #       ticks.append(newTick)
            #       ticksAmp.append(x)
            #       prevTick = newTick
            #       prevAmp = x
            #    else:
            #        print 'ok'
            #        diff = newTick-prevTick
            #        if (diff >= 0.9*tatum) :
            #           ticks.append(newTick)
            #           ticksAmp.append(x)
            #           prevTick = newTick
            #           prevAmp = x
            #        else: #(newTick-prevTick) < 0.75*tatum:
            #            print 'newTick:', newTick, 'prevTick', prevTick, 'diff:', newTick-prevTick, 'tatum', tatum, 0.9*tatum
            #            newTick = (newTick*x+prevTick*prevAmp)/(x+prevAmp)
            #            ticks[-1] = newTick
            #            ticksAmp[-1] = (x+prevAmp)/2.
            #            prevTick = newTick
            #            prevAmp = (x+prevAmp)/2.
        return ticks, ticksAmp
Example #37
def clustering2(patts=None,
                corr=None,
                setPatts=None,
                sim_coef=0.9,
                sim_func=fPearsonCorrelation,
                acc=True,
                L=150,
                miea='max',
                one=False):
    """From patts with or without correlations / sets, or from correlation w/wout sets, without patterns.
    NB: Similarity can be calculated from correlations or distances."""

    if one:
        f_combi = lambda L: list(combinations(list(range(L)), 2))[slice(L - 1)]
        acc = False
    else:
        f_combi = lambda L: list(combinations(list(range(L)), 2))

    if corr is None:
        corr = sim_func(patts.T)
        corr -= diag(diag(corr))

    if setPatts is None:
        try:
            setP = [[k] for k in range(len(patts))]
        except:
            setP = [[k] for k in range(len(corr))]
    else:
        setP = newList(setPatts)

    if len(setP) > L and acc:
        kwa = {
            'patts': patts,
            'corr': corr,
            'sim_coef': sim_coef,
            'sim_func': sim_func,
            'L': L,
            'miea': miea,
            'one': one
        }
        set1 = clustering2(setPatts=setPatts[:L], **kwa)
        set2 = clustering2(setPatts=setPatts[L:], **kwa)
        return clustering2(setPatts=set1 + set2, acc=False, **kwa)

    elif len(setP) == 1:
        return setP

    if miea == 'min': miea = type(array(int())).min
    elif miea == 'mean': miea = type(array(int())).mean
    elif miea == 'max': miea = type(array(int())).max

    sim_final = 1
    while len(setP) > 1 and sim_final > sim_coef:

        combi = f_combi(len(setP))
        sims = []
        for i, j in combi:
            sims.append(miea(corr[setP[i]][:, setP[j]]))
        sim_final = max(sims)

        if sim_final > sim_coef:
            i, j = combi[argmax(sims)]
            setP[i] += setP[j]
            del setP[j]

    return setP
Example #38
File: psp_shapes.py  Project: cpehle/halbe
    def initial_fit_values(self, time, value, smoothing_samples=10,
                           integral_factor=.25, tau_fraction=2):
        """
        Estimate the initial fit values for the given sample data in
        (time, value).

        time : numpy.ndarray
            array of times at which the values are measured

        value : numpy.ndarray
            array of voltage values

        smoothing_samples : int
            width of the box filter used for the convolution

        integral_factor : float
            The time constants are estimated using the integral and
            the height of the given psp. integral_factor is the
            quotient of the maximum of a psp and the integral under
            it, which is 0.25 for tau_fraction = 2 for an ideal psp.

        tau_fraction : float
            The ratio tau_2 / tau_1, which is constant for this
            estimate.
        """
        mean_est_part = int(len(value) * .1)
        mean_estimate = p.mean(value[-mean_est_part:])
        noise_estimate = p.std(value[-mean_est_part:])

        smoothed_value = p.convolve(
            value - mean_estimate,
            p.ones(smoothing_samples) / float(smoothing_samples),
            "same") + mean_estimate

        integral = p.sum(smoothed_value - mean_estimate) * (time[1] - time[0])

        height_estimate = (max(smoothed_value) - mean_estimate)

        min_height = noise_estimate

        if height_estimate < min_height:
            height_estimate = min_height

        t1_est = integral / height_estimate * integral_factor

        # prevent t1 from being smaller than a time step
        t1_est_min = time[1] - time[0]
        if t1_est < t1_est_min:
            t1_est = t1_est_min
        t2_est = tau_fraction * t1_est

        tmax_est = time[p.argmax(smoothed_value)]
        tstart_est = tmax_est + p.log(t2_est / t1_est) \
            * (t1_est * t2_est) / (t1_est - t2_est)

        return dict(
            height=height_estimate,
            tau_1=t1_est,
            tau_2=t2_est,
            start=tstart_est,
            offset=mean_estimate)
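A sketch exercising the estimator on a synthetic PSP trace; the method never touches self, so passing None works for the demo. The trace is an ideal double-exponential PSP with tau_2 / tau_1 = 2 plus a little noise:

import pylab as p

time = p.linspace(0.0, 0.1, 2000)                 # 100 ms, hypothetical grid
tau_1, tau_2, start, offset = 5e-3, 10e-3, 0.02, -0.07
t = p.clip(time - start, 0, None)
value = offset + p.exp(-t / tau_2) - p.exp(-t / tau_1)
value += 1e-3 * p.randn(len(time))                # measurement noise

est = initial_fit_values(None, time, value)       # self is unused
print(est['tau_1'], est['tau_2'], est['start'])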
Example #39
        args = sc.objdict({'i': i, 'j': j, 'r': r, 'incub': incub})
        arglist.append(args)

tmp_results = sc.parallelize(run_sim, iterarg=arglist)
for tmp in tmp_results:
    results[tmp.i, tmp.j] = tmp.loglike

sc.toc()

#%% Plotting
pl.figure(figsize=(12, 8))
delta_r = (r_vec[1] - r_vec[0]) / 2
delta_i = (i_vec[1] - i_vec[0]) / 2
plot_r_vec = pl.hstack([r_vec - delta_r, r_vec[-1] + delta_r
                        ]) * 30 * 3  # TODO: estimate better from sim
plot_i_vec = pl.hstack([i_vec - delta_i, i_vec[-1] + delta_i])
pl.pcolormesh(plot_i_vec, plot_r_vec, results, cmap=sc.parulacolormap())
# pl.imshow(results)
pl.colorbar()
pl.title('Log-likelihood')
pl.xlabel('Days from exposure to infectiousness')
pl.ylabel('R0')

max_like_ind = pl.argmax(results)
indices = pl.unravel_index(max_like_ind, results.shape)
pl.scatter(indices[0], indices[1], marker='*', s=100, c='black', label='MLE')
pl.legend()
pl.savefig('log-likelihood-example.png')

print('Done.')
Example #40
def epsilon_greedy_action(self, state):
    p = uniform()
    if p < self.epsilon:
        return randint(0, self.env.get_num_actions())
    else:
        return argmax(self.get_q(state))
Example #41
        # Huhn

        index = pl.argmin(abs(means[k]["Time"] - (level / massflows[k])))

        means_index = means[k].loc[index][1:]

        Tmin = means_index.min()
        Tmax = means_index.max()

        Tlb = Tmin + 0.1 * (Tmax - Tmin)
        Tub = Tmin + 0.9 * (Tmax - Tmin)

        pTlb = pl.argmin(means_index[means_index >= Tlb])

        pTub = pl.argmax(means_index[means_index <= Tub])

        eTlb = means_index[pTlb]
        eTub = means_index[pTub]

        gradT = (eTub - eTlb) / (float(pTub) - float(pTlb))

        pl.figure()

        pl.plot(means_index[0:].values, heights, \
            label = "Mean temperature per stratum" )

        xlim = [58.0, 86.0]

        pl.plot(xlim, [pTlb] * 2, label="Lower bound = " + str(pTlb) + " m",
                linestyle="--")
Example #42
def _animation(fig):
    # Number of particles
    Num = 50
    # Step size
    stepSize = 0.02
    # Sensor Noise
    sNoi = 0.001
    # Movement Noise
    mNoi = 0.01
    # Min percentile for keeping particle
    minP = 25
    # Locations
    pts = rand(Num) + 1j * rand(Num)
    # Weights
    wgt = ones(Num)
    # Line
    abLine = asarray([.2 + 0.5j, .8 + 0.7j])
    # Actual position
    pos = 0.5 + 0.5j
    while True:
        ### "robot" simulation
        # Move "robot"
        step = (randn() + randn() * 1j) * stepSize
        pos += step
        # Get "robot" sensor measurement
        dpos = lineDist([pos], abLine[0], abLine[1])
        spos = lineSensorResponse(dpos, sNoi)
        ### Particle filter
        # Move particles
        pts += step + (randn(Num) + 1j * randn(Num)) * mNoi
        # Get particle sensor measurements
        d = lineDist(pts, abLine[0], abLine[1])
        s = lineSensorResponse(d, sNoi)
        # Adjust weights, keeping matching sensors with higher weight
        #   We penalize sensor mismatch
        #   We penalize all high weights. Because average weight is reset to 1.0,
        #     this implies that in absence of
        #wgt = 1.0+abs(spos-s)*0.5+wgt*0.1
        wgt = wgt * 0.6 + 0.4 / (1.0 + abs(spos - s) * 0.5)
        wgt = Num * wgt / sum(wgt)
        # Find best particle
        best = argmax(wgt)
        # Replace low weight particles
        idx = find(wgt < prctile(wgt, minP))
        if any(idx):
            pts[idx] = pts[best]
            wgt[idx] = 1.0
        fig.clf()
        a = fig.add_subplot(121)
        a.set_title('weights')
        a.plot(abLine.real, abLine.imag, 'o-k', lw=3)
        a.scatter(pts.real,
                  pts.imag,
                  s=10 + wgt * wgt * 4,
                  c=linspace(0, 1, Num),
                  alpha=0.5)
        a.plot(pos.real, pos.imag, 'r+', ms=15, mew=3)
        a.plot(pts[best].real, pts[best].imag, 'bx', ms=15, mew=2)
        a.axis('equal')
        a.axis([-0.5, 1.5, -0.5, 1.5])
        a = fig.add_subplot(122)
        a.bar(arange(Num), wgt)
        yield
Example #43
def classify(self, x):
    d = self.X - tile(x.reshape(self.n, 1), self.N)
    dsq = sum(d * d, 0)
    neighbours = self.c[argpartition(dsq, self.k)[:self.k]]
    most_common = argmax(bincount(neighbours.astype(int)))
    return most_common
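A self-contained re-creation of the same k-NN majority vote on toy 2-D data; the training matrix X (one point per column), the labels c, and k are made up:

from pylab import argmax, argpartition, array, bincount, tile

X = array([[0.0, 0.1, 0.2, 5.0, 5.1],
           [0.0, 0.1, 0.2, 5.0, 5.1]])     # n x N matrix of training points
c = array([0, 0, 0, 1, 1])                 # class label per column
n, N, k = 2, 5, 3

x = array([0.15, 0.15])
d = X - tile(x.reshape(n, 1), N)
dsq = (d * d).sum(0)
neighbours = c[argpartition(dsq, k)[:k]]   # the k nearest training points
print(argmax(bincount(neighbours.astype(int))))  # -> 0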
Example #44
def computeBeats(filename, pool):
    computeNoveltyCurve(filename, pool)
    recompute = True
    novelty = pool['novelty_curve']
    count = 0
    bpmTolerance = 5
    while recompute:
        gen = VectorInput(novelty)
        bpmHist = BpmHistogram(frameRate=pool['framerate'],
                               frameSize=pool['tempo_framesize'],
                               overlap=int(pool['tempo_overlap']),
                               maxPeaks=50,
                               windowType='hann',
                               minBpm=40.0,
                               maxBpm=1000.0,
                               normalize=False,
                               constantTempo=False,
                               tempoChange=5,
                               weightByMagnitude=True)

        gen.data >> bpmHist.novelty
        bpmHist.bpm >> (pool, 'peaksBpm')
        bpmHist.bpmMagnitude >> (pool, 'peaksMagnitude')
        bpmHist.harmonicBpm >> (pool, 'harmonicBpm')
        bpmHist.harmonicBpm >> (pool, 'harmonicBpm')
        bpmHist.confidence >> (pool, 'confidence')
        bpmHist.ticks >> (pool, 'ticks')
        bpmHist.ticksMagnitude >> (pool, 'ticksMagnitude')
        bpmHist.sinusoid >> (pool, 'sinusoid')
        essentia.run(gen)

        ## get rid of beats of beats > audio.length
        #ticks = []
        #ticksAmp = []
        #for t, amp in zip(pool['ticks'], pool['ticksMagnitude']):
        #    if t < 0 or t > pool['length']: continue
        #    ticks.append(float(t))
        #    ticksAmp.append(float(amp))

        #step = pool['step']
        #ticks = essentia.postProcessTicks(ticks, ticksAmp, 60./pool['harmonicBpm'][0]);
        sine = pool['sinusoid']
        #pyplot.plot(novelty, 'k')
        #pyplot.plot(sine, 'r')
        #for i in range(len(novelty)-1):
        #    diff = novelty[i+1]-novelty[i]
        #    if diff > 0: novelty[i] = diff
        #    else: novelty[i] = 0
        #pyplot.plot(novelty, 'r')

        prodPulse = zeros(len(novelty))
        i = 0
        while i < len(novelty):
            if sine[i] <= 0.1:
                i += 1
                continue
            window = []
            while i < len(novelty) and sine[i] != 0:  # bounds check before indexing
                window.append(novelty[i] * sine[i])
                i += 1
            peakPos = argmax(window)
            peakPos = i - len(window) + peakPos
            prodPulse[peakPos] = novelty[peakPos]

        #pyplot.plot(prodPulse, 'g')
        #pyplot.show()
        ticks = []
        ticksAmp = []
        frameRate = pool['framerate']
        bpms = pool['harmonicBpm']
        print('estimated bpm:', bpms)
        tatum = 60. / bpms[0]
        diffTick = 2 * tatum
        prevTick = -1
        prevAmp = -1
        for i, x in enumerate(prodPulse):
            if x != 0:
                newTick = float(i) / frameRate
                if newTick < 0 or newTick > pool['length']: continue
                ticks.append(newTick)
                ticksAmp.append(x)
        #    if x != 0:
        #        newTick = float(i)/frameRate
        #        if prevTick < 0:
        #           ticks.append(newTick)
        #           ticksAmp.append(x)
        #           prevTick = newTick
        #           prevAmp = x
        #        else:
        #            diff = newTick-prevTick
        #            ratio = max( round(tatum/diff), round(diff/tatum))
        #            if (diff >= 0.9*tatum*ratio) and (diff <= 1.1*tatum*ratio):
        #               ticks.append(newTick)
        #               ticksAmp.append(x)
        #               prevTick = newTick
        #               prevAmp = x
        #            else: #(newTick-prevTick) < 0.75*tatum:
        #                newTick = (newTick*x+prevTick*prevAmp)/(x+prevAmp)
        #                ticks[-1] = newTick
        #                ticksAmp[-1] = (x+prevAmp)/2.
        #                prevTick = newTick
        #                prevAmp = (x+prevAmp)/2.
        _, _, bestBpm = getMostStableTickLength(ticks)
        #pool.set('bestTicksStart', bestTicks[0])
        #pool.set('bestTicksEnd', bestTicks[0] + bestTicks[1])
        #ticks = essentia.postProcessTicks(ticks, ticksAmp, 60./pool['harmonicBpm'][0]);
        #ticks = essentia.postProcessTicks(ticks)
        if fabs(bestBpm - bpms[0]) < bpmTolerance: recompute = False
        else:
            count += 1
            if count >= 5:
                bpmTolerance += 1
                count = 0
            print "recomputing!!!!"
            novelty = copy.deepcopy(pool['sinusoid'])
            pool.remove('sinusoid')
            pool.remove('novelty_curve')
            pool.remove('peaksBpm')
            pool.remove('peaksMagnitude')
            pool.remove('harmonicBpm')
            pool.remove('harmonicBpm')
            pool.remove('confidence')
            pool.remove('ticks')
            pool.remove('ticksMagnitude')
    #pyplot.plot(prodPulse, 'g')
    #pyplot.show()

    print('estimated bpm:', bpms)
    print('bpms:', pool['peaksBpm'])
    #ticks = postProcessTicks(filename, pool)
    #print 'bpm mags:', pool['peaksMagnitude']
    bpmRatios = []
    #for i, bpm1 in enumerate(bpms):
    #    bpmRatios.append([float(bpm1)/float(bpm2) for bpm2 in bpms[i:]])
    #print 'bpmRatios:', bpmRatios
    #print 'original nticks:', len(ticks)
    #print 'step:', step
    if step > 1:
        ticks = essentia.array(
            map(lambda i: ticks[i],
                filter(lambda i: i % step == 0, range(len(ticks)))))

    #print 'nticks:', len(ticks)
    pool.remove('ticks')
    pool.set('ticks', ticks)
Example #45
def alignTicks(sine, novelty, frameRate, bpm, size):
    ''' Aligns the sine function with the novelty function. Parameters:
            @sine: the sinusoid from bpmHistogram,
            @novelty: the novelty curve
            @frameRate: the frameRate
            @size: the audio size, so as not to have more ticks than the audio length
            @bpm: the estimated bpm'''

    #pyplot.plot(novelty, 'k')
    #pyplot.plot(sine, 'r')
    #for i in range(len(novelty)-1):
    #    diff = novelty[i+1]-novelty[i]
    #    if diff > 0: novelty[i] = diff
    #    else: novelty[i] = 0
    #pyplot.plot(novelty, 'r')

    noveltySize = len(novelty)
    prodPulse = zeros(noveltySize, dtype='f4')
    i = 0
    while i < noveltySize:
        if sine[i] <= 0:
            i += 1
            continue
        window = []
        while i < noveltySize and sine[i] != 0:
            window.append(novelty[i] * sine[i])
            i += 1
        peakPos = argmax(window)
        peakPos = i - len(window) + peakPos
        prodPulse[peakPos] = novelty[peakPos]

    #pyplot.plot(prodPulse, 'g')
    #pyplot.show()
    ticks = []
    ticksAmp = []
    tatum = 60. / bpm
    diffTick = 2 * tatum
    prevTick = -1
    prevAmp = -1
    for i, x in enumerate(prodPulse):
        if x != 0:
            newTick = float(i) / frameRate
            if newTick < 0 or newTick >= size:
                continue
            ticks.append(newTick)
            ticksAmp.append(x)
        #if x != 0:
        #    newTick = float(i)/frameRate
        #    if newTick < 0 or newTick >= size: continue
        #    if prevTick < 0:
        #       ticks.append(newTick)
        #       ticksAmp.append(x)
        #       prevTick = newTick
        #       prevAmp = x
        #    else:
        #        print 'ok'
        #        diff = newTick-prevTick
        #        if (diff >= 0.9*tatum) :
        #           ticks.append(newTick)
        #           ticksAmp.append(x)
        #           prevTick = newTick
        #           prevAmp = x
        #        else: #(newTick-prevTick) < 0.75*tatum:
        #            print 'newTick:', newTick, 'prevTick', prevTick, 'diff:', newTick-prevTick, 'tatum', tatum, 0.9*tatum
        #            newTick = (newTick*x+prevTick*prevAmp)/(x+prevAmp)
        #            ticks[-1] = newTick
        #            ticksAmp[-1] = (x+prevAmp)/2.
        #            prevTick = newTick
        #            prevAmp = (x+prevAmp)/2.
    return ticks, ticksAmp
Example #46
def computeBeats(filename, pool):
    computeNoveltyCurve(filename, pool)
    recompute = True
    novelty = pool['novelty_curve']
    count = 0
    bpmTolerance = 5
    while recompute:
        gen     = VectorInput(novelty)
        bpmHist = BpmHistogram(frameRate=pool['framerate'],
                               frameSize=pool['tempo_framesize'],
                               overlap=int(pool['tempo_overlap']),
                               maxPeaks=50,
                               windowType='hann',
                               minBpm=40.0,
                               maxBpm=1000.0,
                               normalize=False,
                               constantTempo=False,
                               tempoChange=5,
                               weightByMagnitude=True)

        gen.data >> bpmHist.novelty
        bpmHist.bpm            >> (pool, 'peaksBpm')
        bpmHist.bpmMagnitude   >> (pool, 'peaksMagnitude')
        bpmHist.harmonicBpm    >> (pool, 'harmonicBpm')
        bpmHist.harmonicBpm    >> (pool, 'harmonicBpm')
        bpmHist.confidence     >> (pool, 'confidence')
        bpmHist.ticks          >> (pool, 'ticks')
        bpmHist.ticksMagnitude >> (pool, 'ticksMagnitude')
        bpmHist.sinusoid       >> (pool, 'sinusoid')
        essentia.run(gen)

        ## get rid of beats of beats > audio.length
        #ticks = []
        #ticksAmp = []
        #for t, amp in zip(pool['ticks'], pool['ticksMagnitude']):
        #    if t < 0 or t > pool['length']: continue
        #    ticks.append(float(t))
        #    ticksAmp.append(float(amp))

        #step = pool['step']
        #ticks = essentia.postProcessTicks(ticks, ticksAmp, 60./pool['harmonicBpm'][0]);
        sine = pool['sinusoid']
        #pyplot.plot(novelty, 'k')
        #pyplot.plot(sine, 'r')
        #for i in range(len(novelty)-1):
        #    diff = novelty[i+1]-novelty[i]
        #    if diff > 0: novelty[i] = diff
        #    else: novelty[i] = 0
        #pyplot.plot(novelty, 'r')

        prodPulse = zeros(len(novelty))
        i = 0
        while i < len(novelty):
            if sine[i] <= 0.1:
                i += 1
                continue
            window = []
            while i < len(novelty) and sine[i] != 0:  # bounds check before indexing
                window.append(novelty[i] * sine[i])
                i += 1
            peakPos = argmax(window)
            peakPos = i - len(window) + peakPos
            prodPulse[peakPos] = novelty[peakPos]

        #pyplot.plot(prodPulse, 'g')
        #pyplot.show()
        ticks = []
        ticksAmp = []
        frameRate = pool['framerate']
        bpms = pool['harmonicBpm']
        print('estimated bpm:', bpms)
        tatum = 60./bpms[0]
        diffTick = 2*tatum
        prevTick = -1
        prevAmp = -1
        for i, x in enumerate(prodPulse):
            if x != 0:
                newTick = float(i) / frameRate
                if newTick < 0 or newTick > pool['length']: continue
                ticks.append(newTick)
                ticksAmp.append(x)
        #    if x != 0:
        #        newTick = float(i)/frameRate
        #        if prevTick < 0:
        #           ticks.append(newTick)
        #           ticksAmp.append(x)
        #           prevTick = newTick
        #           prevAmp = x
        #        else:
        #            diff = newTick-prevTick
        #            ratio = max( round(tatum/diff), round(diff/tatum))
        #            if (diff >= 0.9*tatum*ratio) and (diff <= 1.1*tatum*ratio):
        #               ticks.append(newTick)
        #               ticksAmp.append(x)
        #               prevTick = newTick
        #               prevAmp = x
        #            else: #(newTick-prevTick) < 0.75*tatum:
        #                newTick = (newTick*x+prevTick*prevAmp)/(x+prevAmp)
        #                ticks[-1] = newTick
        #                ticksAmp[-1] = (x+prevAmp)/2.
        #                prevTick = newTick
        #                prevAmp = (x+prevAmp)/2.
        _, _, bestBpm = getMostStableTickLength(ticks)
        #pool.set('bestTicksStart', bestTicks[0])
        #pool.set('bestTicksEnd', bestTicks[0] + bestTicks[1])
        #ticks = essentia.postProcessTicks(ticks, ticksAmp, 60./pool['harmonicBpm'][0]);
        #ticks = essentia.postProcessTicks(ticks)
        if fabs(bestBpm - bpms[0]) < bpmTolerance: recompute = False
        else:
            count += 1
            if count >= 5:
                bpmTolerance += 1
                count = 0
            print "recomputing!!!!"
            novelty = copy.deepcopy(pool['sinusoid'])
            pool.remove('sinusoid')
            pool.remove('novelty_curve')
            pool.remove('peaksBpm')
            pool.remove('peaksMagnitude')
            pool.remove('harmonicBpm')
            pool.remove('harmonicBpm')
            pool.remove('confidence')
            pool.remove('ticks')
            pool.remove('ticksMagnitude')
    #pyplot.plot(prodPulse, 'g')
    #pyplot.show()

    print('estimated bpm:', bpms)
    print('bpms:', pool['peaksBpm'])
    #ticks = postProcessTicks(filename, pool)
    #print 'bpm mags:', pool['peaksMagnitude']
    bpmRatios = []
    #for i, bpm1 in enumerate(bpms):
    #    bpmRatios.append([float(bpm1)/float(bpm2) for bpm2 in bpms[i:]])
    #print 'bpmRatios:', bpmRatios
    #print 'original nticks:', len(ticks)
    #print 'step:', step
    if step > 1:
        ticks = essentia.array(map(lambda i: ticks[i],
                                   filter(lambda i: i % step == 0, range(len(ticks)))))

    #print 'nticks:', len(ticks)
    pool.remove('ticks')
    pool.set('ticks', ticks)
Example #47
def plot(pool, title, outputfile='out.svg', subplot=111):
    ''' plots bars for each beat'''

    #computeSpectrum(pool['loudness'])

    ticks = pool['ticks']
    #barSize = min([ticks[i+1] - ticks[i] for i in range(len(ticks[:-1]))])/2.
    barSize = 0.8
    offset = barSize / 2.

    loudness = pool['loudness']
    loudnessBand = pool['loudnessBandRatio']  # ticks x bands

    medianRatiosPerTick = []
    meanRatiosPerTick = []
    for tick, energy in enumerate(loudnessBand):
        medianRatiosPerTick.append(median(energy))
        meanRatiosPerTick.append(mean(energy))

    loudnessBand = copy.deepcopy(loudnessBand.transpose())  # bands x ticks

    #xcorr = std.CrossCorrelation(minLag=0, maxLag=16)
    #acorr = std.AutoCorrelation()
    #bandCorr = []
    #for iBand, band in enumerate(loudnessBand):
    #    bandCorr.append(acorr(essentia.array(band)))

    nBands = len(loudnessBand)
    nticks = len(loudness)
    maxRatiosPerBand = []
    medianRatiosPerBand = []
    meanRatiosPerBand = []
    for idxBand, band in enumerate(loudnessBand):
        maxRatiosPerBand.append([0] * nticks)
        medianRatiosPerBand.append([0] * nticks)
        meanRatiosPerBand.append([0] * nticks)
        for idxTick in range(nticks):
            start = idxTick
            end = start + BEATWINDOW
            if (end > nticks):
                howmuch = end - nticks
                end = nticks - 1
                start = end - howmuch
                if start < 0: start = 0
            medianRatiosPerBand[idxBand][idxTick] = median(band[start:end])
            maxRatiosPerBand[idxBand][idxTick] = max(band[start:end])
            meanRatiosPerBand[idxBand][idxTick] = mean(band[start:end])

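    # Keep a band's ratio at a tick only if it clears both the band-wise and
    # the tick-wise max(median, mean) thresholds; survivors are weighted by
    # the tick's overall loudness, everything else is zeroed (loop below).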
    for iBand, band in enumerate(loudnessBand):
        for tick, ratio in enumerate(band):
            #if ratio < medianRatiosPerBand[iBand][tick] and\
            #   ratio <= medianRatiosPerTick[tick]: loudnessBand[iBand][tick]=0
            bandThreshold = max(medianRatiosPerBand[iBand][tick],
                                meanRatiosPerBand[iBand][tick])
            tickThreshold = max(medianRatiosPerTick[tick],
                                meanRatiosPerTick[tick])
            if ratio < bandThreshold and ratio <= tickThreshold:
                loudnessBand[iBand][tick] = 0
            else:
                loudnessBand[iBand][tick] *= loudness[tick]
                #if loudnessBand[iBand][tick] > 1 : loudnessBand[iBand][tick] = 1

    acorr = std.AutoCorrelation()
    bandCorr = []
    maxCorr = []
    for iBand, band in enumerate(loudnessBand):
        bandCorr.append(acorr(essentia.array(band)))
        maxCorr.append(argmax(bandCorr[-1][2:]) + 2)

    # use as much window space as possible:
    pyplot.subplots_adjust(left=0.05, right=0.95, bottom=0.05, top=0.95)

    pyplot.subplot(511)
    pyplot.imshow(bandCorr,
                  cmap=pyplot.cm.hot,
                  aspect='auto',
                  origin='lower',
                  interpolation='nearest')
    print 'max correlation', maxCorr

    sumCorr = []
    for tick in range(nticks):
        total = 0
        for band in bandCorr:
            total += band[tick]
        sumCorr.append(total)

    sumCorr[0] = 0
    sumCorr[1] = 0
    pyplot.subplot(512)
    maxAlpha = max(sumCorr)
    for i, val in enumerate(sumCorr):
        alpha = max(0, min(val / maxAlpha, 1))
        pyplot.bar(i,
                   1,
                   barSize,
                   align='edge',
                   bottom=0,
                   alpha=alpha,
                   color='r',
                   edgecolor='w',
                   linewidth=.3)

    print 'max sum correlation', argmax(sumCorr[2:]) + 2

    hist = getHarmonics(sumCorr)
    maxHist = argmax(hist)
    print 'max histogram', maxHist
    #for idx,val in enumerate(hist):
    #    if val < maxHist: hist[idx] = 0

    pyplot.subplot(513)
    for i, val in enumerate(hist):
        pyplot.bar(i,
                   val,
                   barSize,
                   align='edge',
                   bottom=0,
                   color='r',
                   edgecolor='w',
                   linewidth=.3)

    peakDetect = std.PeakDetection(maxPeaks=5,
                                   orderBy='amplitude',
                                   minPosition=0,
                                   maxPosition=len(sumCorr) - 1,
                                   range=len(sumCorr) - 1)
    peaks = peakDetect(sumCorr)[0]
    peaks = [round(x + 1e-15) for x in peaks]
    print 'Peaks:', peaks

    pyplot.subplot(514)
    maxAlpha = max(sumCorr)
    for i, val in enumerate(sumCorr):
        alpha = max(0, min(val / maxAlpha, 1))
        pyplot.bar(i,
                   val,
                   barSize,
                   align='edge',
                   bottom=0,
                   alpha=alpha,
                   color='r',
                   edgecolor='w',
                   linewidth=.3)

    # multiply both histogram and sum corr to have a weighted histogram:
    wHist = essentia.array(hist) * sumCorr * acorr(loudness)
    maxHist = argmax(wHist)
    print 'max weighted histogram', maxHist
    pyplot.subplot(515)

    maxAlpha = max(wHist)
    for i, val in enumerate(wHist):
        alpha = max(0, min(val / maxAlpha, 1))
        pyplot.bar(i,
                   val,
                   barSize,
                   align='edge',
                   bottom=0,
                   alpha=alpha,
                   color='r',
                   edgecolor='w',
                   linewidth=.3)

    pyplot.savefig(outputfile, dpi=300)
    #pyplot.show()
    return
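
The per-band period estimate above relies on one idiom: autocorrelate a band, then take the argmax over lags >= 2 so the trivially large lags 0 and 1 are skipped. The same idiom as a self-contained sketch in plain numpy (no essentia required):

import numpy as np

def dominant_lag(band, min_lag=2):
    # Full autocorrelation, keep non-negative lags only, then pick the
    # strongest lag at or beyond min_lag (lag 0 always dominates).
    acorr = np.correlate(band, band, mode='full')[len(band) - 1:]
    return np.argmax(acorr[min_lag:]) + min_lag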
Example #48
0
# NOTE: this snippet also uses argparse, sys, pylab and a project-local
# Timer context manager; the imports below are assumed so that it runs
# standalone (Timer's origin is not shown in the source).
import argparse
import sys

import pylab as pl

from CNRecognizer import CNRecognizer

# Parse command line arguments.
parser = argparse.ArgumentParser(description='Example of how to run trained CNRecognizer.')
parser.add_argument('datadir', metavar='datadir', type=str,
                    help='dataset directory')
args = parser.parse_args()

# Read images. NOTE WELL: image channels *MUST* be between 0 and 255.
# pylab scales PNGs to [0, 1] but leaves JPEGs at 0-255, so rescale
# PNG reads accordingly.
image = pl.imread('rubiks_asus_test_image.png') * 255.0
mask = pl.imread('rubiks_asus_test_mask.png')

# Instantiate (and deserialize, if already trained) recognizer.
clf = CNRecognizer(args.datadir)
if not clf._trained:
    print 'CNRecognizer in {} not trained! See train_recognizer.py script.'.format(args.datadir)
    sys.exit(1)

# And classify.
with Timer('time to extract and classify'):
    pred = clf.predict(image, mask)

# Use the '_dataset' attribute of the recognizer to access classes.
print 'Class probabilities:'
for (label, p) in enumerate(pred):
    print '  {0:.2f}: {1:s}'.format(p, clf._dataset.label2class(label))
print '\nPrediction: {}'.format(clf._dataset.label2class(pl.argmax(pred)))
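
Given the scaling quirk flagged in the NOTE above, image loading can be wrapped in a small guard so channels are always in 0-255; imread_0_255 is a hypothetical helper, not part of CNRecognizer:

import pylab as pl

def imread_0_255(path):
    # pl.imread yields floats in [0, 1] for PNGs but raw 0-255 for JPEGs;
    # rescale the float case so downstream code always sees 0-255 channels.
    img = pl.imread(path)
    if img.dtype.kind == 'f' and img.max() <= 1.0:
        img = img * 255.0
    return img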

Example #50
0
File: colplot.py  Project: villflakken/p5
def plotVirial(posdat, endat, bodycount, dt, eps, tot_time):
    """
    Makes the virial comparison plot plus the energy, ejecta,
    and radial-density figures.
    """

    bodycount, pos_length, postype = posdat.shape
    pos_timelen = pl.linspace(0, tot_time, pos_length)

    # component wise energies
    kin_en_comp = posdat[:, :, 4]  # (bodies, timesteps) kinetic energies
    pot_en_comp = posdat[:, :, 5]  # (bodies, timesteps) potential energies
    kin_en_ejec_comp = pl.zeros((bodycount, pos_length))
    pot_en_ejec_comp = pl.zeros((bodycount, pos_length))
    masses = posdat[:, 0, 3]

    # now to exclude ejected bodies
    # flags which bodies have been ejected
    ejecta_body_array = pl.zeros(bodycount)
    # number of ejected bodies at each time increment
    ejecta_time_array = pl.zeros(pos_length)

    # body summed energies, for use in en. consv. and virial test
    kin_en_sum = pl.zeros(pos_length)
    pot_en_sum = pl.zeros(pos_length)
    kin_en_ejec_sum = pl.zeros(pos_length)
    pot_en_ejec_sum = pl.zeros(pos_length)

    # task f): snapshot of the bound bodies at the equilibrium time
    eq_time = pl.array([4.52])
    eq_pos = []
    # index of the first timestep at or after eq_time
    eq_arg = int(pl.where(pos_timelen >= eq_time[0])[0][0])

    # running through the lists
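    # A body is flagged as ejected once its total mechanical energy turns
    # positive (K + V > 0, the unbound condition); its energies then move to
    # the *_ejec arrays and its bound entries are zeroed.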
    for step in pl.arange(0, pos_length):
        for body in pl.arange(0, bodycount):
            if kin_en_comp[body, step] + pot_en_comp[body, step] > 0:
                # move to ejected lists
                kin_en_ejec_comp[body, step] = kin_en_comp[body, step]
                pot_en_ejec_comp[body, step] = pot_en_comp[body, step]

                ejecta_body_array[body] = 1  # identification
                ejecta_time_array[step] = sum(ejecta_body_array)
                # stores no. of ejecta at time incr.
                kin_en_comp[body, step] = 0.  # necessary for elimination
                pot_en_comp[body, step] = 0.

        kin_en_sum[step] = sum(kin_en_comp[:, step])
        pot_en_sum[step] = sum(pot_en_comp[:, step]) / 2.
        # factor of 1/2: pairwise potentials are double-counted in the body sum

        kin_en_ejec_sum[step] = sum(kin_en_ejec_comp[:, step])
        pot_en_ejec_sum[step] = sum(pot_en_ejec_comp[:, step]) / 2.
        """print type(pos_timelen[step]), pos_timelen[step]
        print type(eq_time[0]), eq_time[0]
        print """
        if step == eq_arg:
            for i in range(len(ejecta_body_array)):
                if int(ejecta_body_array[i]) == 0:
                    eq_pos.append(posdat[i, eq_arg, :3])
                    # equilibrium positions, (bodies, positions).shape
    eq_pos = pl.array(eq_pos)
    eq_radia = pl.zeros(len(eq_pos))
    for i in range(len(eq_pos)):
        eq_radia[i] = (eq_pos[i, 0]**2 + eq_pos[i, 1]**2 + eq_pos[i, 2]**2)**.5

    consv_bound_ejec_sum = kin_en_sum + pot_en_sum \
                            + kin_en_ejec_sum + pot_en_ejec_sum

    # --- tasks b) through e) --- #

    pl.figure()
    pl.subplot(2, 1, 1)
    pl.plot(pos_timelen, kin_en_sum, label="Kinetic")
    pl.legend(loc='best')
    pl.ylabel("Kinetic energy")
    pl.xlim([0.0, tot_time])
    pl.title(r"Bound energy over time, %dbody %gdt %g$\varepsilon$" %
             (bodycount, dt, eps))
    pl.grid("on")

    pl.subplot(2, 1, 2)
    pl.plot(pos_timelen, pot_en_sum, label="Potential")
    pl.legend(loc='best')
    pl.xlabel(r"Time $\tau_c$")
    pl.ylabel("Potential energy")
    pl.xlim([0.0, tot_time])
    pl.grid("on")
    pl.savefig("../figs/ClusterEnergiesComp_" + str(bodycount) + "body_dt" +
               str(int(dt * 1000)) + "_eps" + str(int(eps * 100)) + "_dur" +
               str(int(tot_time)) + ".png")

    ### --- ###

    pl.figure()
    pl.subplot(2, 1, 1)
    pl.title(r"No. of ejecta, %dbody %gdt %g$\varepsilon$" %
             (bodycount, dt, eps))
    pl.ylabel(r"Ejection fraction")
    pl.xlim([0.0, tot_time])
    pl.plot(pos_timelen, ejecta_time_array / bodycount, label="Ejecta/Tot")
    pl.legend(loc='best')
    pl.grid("on")

    pl.subplot(2, 1, 2)

    pl.plot(pos_timelen,
            kin_en_ejec_sum - pot_en_ejec_sum,
            label=r"$K_e - V_e$")
    pl.legend(loc='best')
    pl.xlabel(r"Time $\tau_c$")
    pl.ylabel("Energy")
    pl.xlim([0., tot_time])
    pl.title(r"Ejected bodies' energy, %dbody %gdt %g$\varepsilon$" %
             (bodycount, dt, eps))
    pl.grid("on")
    pl.savefig("../figs/ClusterEnergiesEjecEn_" + str(bodycount) + "body_dt" +
               str(int(dt * 1000)) + "_eps" + str(int(eps * 100)) + "_dur" +
               str(int(tot_time)) + ".png")

    ### --- ###

    pl.figure()
    pl.subplot(2, 1, 1)
    pl.plot(pos_timelen,
            consv_bound_ejec_sum,
            label=r"$K_b + V_b + K_e + V_e$")
    pl.plot(pos_timelen,
            pl.ones(pos_length) * pot_en_sum[0],
            linestyle="dashed",
            color="black",
            label="Conserved ideal")
    pl.legend(loc='best')
    pl.ylabel("Energy sum")
    pl.xlim([0., tot_time])
    # pl.ylim([pot_en_sum[0] - 0.1*max(consv_bound_ejec_sum), max(consv_bound_ejec_sum) + 0.1*max(consv_bound_ejec_sum)])
    pl.title(r"Energy conservation test, %dbody %gdt %g$\varepsilon$" %
             (bodycount, dt, eps))
    pl.grid("on")

    pl.subplot(2, 1, 2)
    pl.plot(pos_timelen,
            2 * kin_en_sum / (bodycount - ejecta_time_array) + pot_en_sum /
            (bodycount - ejecta_time_array),
            label=r"$2K_b + V_b$")
    pl.plot(pos_timelen,
            pl.zeros(pos_length),
            linestyle="dashed",
            color="black",
            label="Virial ideal")
    pl.legend(loc='best')
    pl.xlabel(r"Time $\tau_c$")
    pl.ylabel("Virial energy comparison")
    pl.xlim([0.0, tot_time])
    pl.title(r"Virial comparison fit, %dbody %gdt %g$\varepsilon$" %
             (bodycount, dt, eps))
    pl.grid("on")
    pl.savefig("../figs/ClusterEnConsvVirial_" + str(bodycount) + "body_dt" +
               str(int(dt * 1000)) + "_eps" + str(int(eps * 100)) + "_dur" +
               str(int(tot_time)) + ".png")

    ################################
    # --- beginning of task f) --- #
    ################################

    #

    colorlist = []
    for i in range(bodycount):
        colorlist.append(pl.rand(3))  # one random RGB color per body

    fig3D = pl.figure()
    ax3D = fig3D.add_subplot(111, projection='3d')

    for body in range(len(eq_pos)):
        ax3D.plot([eq_pos[body, 0]], [eq_pos[body, 1]], [eq_pos[body, 2]],
                  marker="o",
                  color=colorlist[body])

    ax3D.set_title(
        r"Star cluster 3D %dbody %gdt %g$\varepsilon$, t=%g$\tau_c$" %
        (bodycount, dt, eps, eq_time[0]))
    ax3D.set_xlabel("X-axis [ly]")
    ax3D.set_ylabel("Y-axis [ly]")
    ax3D.set_zlabel("Z-axis [ly]")
    ax3D.set_xlim([-25, 25])
    ax3D.set_ylim([-25, 25])
    ax3D.set_zlim([-25, 25])
    fig3D.savefig("../moviefigs/eps" + str(int(eps * 100)) + "/ClusterPos_" +
                  str(bodycount) + "body_dt" + str(int(dt * 1000)) + "_eps" +
                  str(int(eps * 100)) + "_dur" + str(int(tot_time)) + ".png")

    print "mean eq. radius:", pl.mean(eq_radia)
    print "std dev. radius:", pl.std(eq_radia)

    bincount = 60
    weights, edges = pl.histogram(eq_radia, bins=bincount, normed=False)
    radia = edges + 0.5 * (edges[1] - edges[0])  # bin centers (unused below)

    # least-squares scan to find a good r0 (density model n() defined elsewhere)
    lengthnumber = 1000
    alphalower = 0.01
    alphaupper = 2.
    alpha = pl.linspace(alphalower, alphaupper, lengthnumber)
    r0lower = 0.0001
    r0upper = 10.
    r0 = pl.linspace(r0lower, r0upper, lengthnumber)  # unused; the fit scales r0final by alpha

    n0 = max(weights)
    n0arg = pl.argmax(weights)
    # assume r0 scales with the total number of bodies in the volume
    r0final = bodycount**(1. / 3)
    nsums = pl.zeros(lengthnumber)

    for alphacount in range(lengthnumber):
        nset = n(edges[:-1] - edges[n0arg], alpha[alphacount] * r0final, n0)
        nsums[alphacount] = sum((nset - weights)**2)

    minarg = pl.argmin(nsums)
    r0final *= alpha[minarg]
    print "n0", n0
    print "r0", r0final
    """
    pl.figure()
    pl.subplot(2,1,1)
    pl.hist(eq_radia, label="Histogram of bodies", bins=bincount)
    pl.legend(loc='best')
    pl.ylabel(r"Bodies in the data")
    pl.title(r"Radial density of bound bodies, %dbody %gdt %g$\varepsilon$" % (bodycount, dt, eps) )
    pl.grid('on')
    
    pl.subplot(2,1,2)
    pl.plot(edges + edges[pl.argmax(weights)], n(edges, r0final, n0), label=r"$n(r)$", color='blue', linewidth=2)
    pl.xlabel(r"Radius $R_0$")
    pl.ylabel(r"Radial distribution model")
    pl.legend(loc='best')
    pl.grid('on')
    pl.savefig("../figs/ClusterRadDens_"+str(bodycount)+"body_dt"+str(int(dt*1000))+"_eps"+str(int(eps*100))+"_dur"+str(int(tot_time))+".png")
    """

    pl.figure()
    pl.hist(eq_radia, label="Histogram of bodies", color='cyan', bins=bincount)
    pl.title(r"Radial density of bound bodies, %dbody %gdt %g$\varepsilon$" %
             (bodycount, dt, eps))

    pl.plot(edges + edges[pl.argmax(weights)],
            n(edges, r0final, n0),
            label=r"$n(r)$",
            color='magenta',
            linewidth=2)
    pl.xlabel(r"Radius $R_0$")
    pl.ylabel(r"Radial distribution")
    pl.legend(loc='best')
    pl.grid('on')
    pl.savefig("../figs/ClusterRadDens_" + str(bodycount) + "body_dt" +
               str(int(dt * 1000)) + "_eps" + str(int(eps * 100)) + "_dur" +
               str(int(tot_time)) + ".png")
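
The r0 fit above is a brute-force one-dimensional least-squares scan: the project-defined density model n() is evaluated over a grid of scale factors alpha and the factor with the smallest squared-residual sum wins. The same pattern as a generic standalone sketch, with the model passed in as a hypothetical argument:

import pylab as pl

def lsq_scan(xs, ys, model, candidates):
    # Evaluate model(xs, c) for each candidate parameter c and return the
    # candidate whose sum of squared residuals against ys is smallest.
    residuals = [sum((model(xs, c) - ys) ** 2) for c in candidates]
    return candidates[pl.argmin(residuals)]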
Example #51
0
import pylab as p

p.argmax(p.array([1, 3, 2]))  # -> 1, the index of the largest element
Example #52
0
def getPeakPosition(self):
    # gives the time at which the signal is maximal
    return self.tdData[py.argmax(self.getEX()), 0]
Example #53
0
def getmaxfreq(self):
    # find the frequency of maximum amplitude; crop below 150 GHz to avoid
    # the constant offset (the best lower bound is an open question)
    cutted = self.getcroppedData(self.fdData, 150e9, FdData.FMAX)
    fmax = cutted[py.argmax(cutted[:, 3]), 0]
    return fmax
Example #54
0
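    # Fragment from a larger plotting routine: time, psp_voltage, offset,
    # height, sliding_average_len and insert_params() are assumed to be
    # defined upstream, with pylab imported as p.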
    p.ylabel("voltage / AU")
    p.plot(time, psp_voltage[0])
    insert_params()

    mean = p.mean(psp_voltage, axis=0)
    std = p.std(psp_voltage, axis=0)
    p.figure()
    p.title("mean and standard deviation, ideal trigger")
    p.plot(time, mean, 'r-')
    p.fill_between(time, mean - std, mean + std, alpha=.3)
    p.xlabel("time / AU")
    p.ylabel("voltage / AU")
    insert_params()

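    # Max-trigger realignment: boxcar-smooth each trace, then circularly
    # shift it so its smoothed maximum lines up with the maximum of the
    # ideal-trigger mean computed above.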
    kernel = p.ones(sliding_average_len) / float(sliding_average_len)
    mean_max_index = p.argmax(mean)
    for i in range(len(psp_voltage)):
        smoothed = p.convolve(psp_voltage[i], kernel, "same")
        shift = mean_max_index - p.argmax(smoothed)
        psp_voltage[i] = p.roll(smoothed, shift)

    p.figure()
    p.title("mean and standard deviation, max trigger")
    mean_shifted = p.mean(psp_voltage, axis=0)
    std = p.std(psp_voltage, axis=0)
    p.plot(time, mean_shifted, 'r-')
    p.plot(time, mean, 'k--', alpha=.7)
    p.ylim(offset - height * .2, None)
    p.fill_between(time, mean - std, mean + std, alpha=.3)
    p.xlabel("time / AU")
    p.ylabel("voltage / AU")
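
The smooth-and-align step above generalizes; a minimal standalone sketch, assuming equal-length 1-D traces and pylab imported as p:

import pylab as p

def align_to_reference(reference, trace, kernel_len=5):
    # Boxcar-smooth the trace, then circularly shift it so its peak lines
    # up with the reference peak (the same max-trigger idea as above).
    kernel = p.ones(kernel_len) / float(kernel_len)
    smoothed = p.convolve(trace, kernel, 'same')
    return p.roll(smoothed, p.argmax(reference) - p.argmax(smoothed))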