from numpy import exp, size, zeros  # assumed imports; the scraped snippet omits them
def gaussians(params, x, numgaussians=1):
    """Defines multiple Gaussians with characteristics defined by 'params':
    params = yoffset,ymax,stddev,x0
    For multiple Gaussians, string together parameters into one long list (so we can fit using leastsq).
    Important: for multiple Gaussians, there is only one yoffset, and this will be the first element of the list;
    each individual Gaussian then has three parameters."""
    freeparams = 3  # number free parameters per gaussian, not counting yoffset
    if numgaussians == 1:
        if size(params) != freeparams + 1:
            raise ValueError(  # ValueError, not NameError, for a bad argument
                'Incorrect number of parameters supplied to function gaussians.'
            )
        yoffset, ymax, stddev, x0 = params
        return yoffset + ymax * exp(-((x - x0) / stddev)**2 / 2.0)
    else:
        if numgaussians != (size(params) - 1) // freeparams:
            raise ValueError(
                'Incorrect number of parameters supplied to function gaussians.'
            )
        yoffset = params[0]
        total = zeros(len(x))
        for ii in range(numgaussians):
            ymax = params[freeparams * ii + 1]
            stddev = params[freeparams * ii + 2]
            x0 = params[freeparams * ii + 3]
            total = total + ymax * exp(-((x - x0) / stddev)**2 / 2.0)
        return total + yoffset
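A minimal fitting sketch for the function above, with made-up data and start values (hypothetical throughout; it only assumes the gaussians definition above plus numpy/scipy):

import numpy as np
from scipy.optimize import leastsq

x = np.linspace(-5, 5, 200)
true = [0.1, 1.0, 0.5, -1.0, 0.7, 0.8, 1.5]   # one yoffset + two Gaussians
y = gaussians(true, x, numgaussians=2) + 0.01 * np.random.randn(len(x))

residuals = lambda p: gaussians(p, x, numgaussians=2) - y
fit, _ = leastsq(residuals, [0.0, 1.0, 1.0, -1.0, 1.0, 1.0, 1.0])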
Example n. 2
 def create_contour(self, var, zero_cntr, skip_pts):  
   """
   Create a contour of the data field with index <var> of <dd> provided at 
   initialization.  <zero_cntr> is the value of <var> to contour, <skip_pts>
   is the number of points to skip in the contour, needed to prevent overlap. 
   """
   # create contour :
   field  = self.dd.data[var]
   fig = figure()
   self.ax = fig.add_subplot(111)
   self.ax.set_aspect('equal') 
   self.c = self.ax.contour(self.x, self.y, field, [zero_cntr])
   
   # Get longest contour:
   cl       = self.c.allsegs[0]
   ind      = 0
   amax     = 0
   amax_ind = 0
   
   for a in cl:
     if size(a) > amax:
       amax = size(a)
       amax_ind = ind
     ind += 1
   
   # remove skip points and last point to avoid overlap :
   longest_cont      = cl[amax_ind]
   self.longest_cont = longest_cont[::skip_pts,:][:-1,:]
Example n. 3
def ssc(signal,samplerate=16000,winlen=0.025,winstep=0.01,
          nfilt=26,nfft=512,lowfreq=0,highfreq=None,preemph=0.97):
    """Compute Spectral Subband Centroid features from an audio signal.

    :param signal: the audio signal from which to compute features. Should be an N*1 array
    :param samplerate: the samplerate of the signal we are working with.
    :param winlen: the length of the analysis window in seconds. Default is 0.025s (25 milliseconds)    
    :param winstep: the step between successive windows in seconds. Default is 0.01s (10 milliseconds)    
    :param nfilt: the number of filters in the filterbank, default 26.
    :param nfft: the FFT size. Default is 512.
    :param lowfreq: lowest band edge of mel filters. In Hz, default is 0.
    :param highfreq: highest band edge of mel filters. In Hz, default is samplerate/2
    :param preemph: apply preemphasis filter with preemph as coefficient. 0 is no filter. Default is 0.97. 
    :returns: A numpy array of size (NUMFRAMES by nfilt) containing features. Each row holds 1 feature vector. 
    """          
    highfreq= highfreq or samplerate/2
    signal = sigproc.preemphasis(signal,preemph)
    frames = sigproc.framesig(signal, winlen*samplerate, winstep*samplerate)
    pspec = sigproc.powspec(frames,nfft)
    pspec = pylab.where(pspec == 0,pylab.finfo(float).eps,pspec) # if things are all zeros we get problems
    
    fb = get_filterbanks(nfilt,nfft,samplerate,lowfreq,highfreq)
    feat = pylab.dot(pspec,fb.T) # compute the filterbank energies
    R = pylab.tile(pylab.linspace(1,samplerate/2,pylab.size(pspec,1)),(pylab.size(pspec,0),1))
    
    return pylab.dot(pspec*R,fb.T) / feat
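A hypothetical call for the function above; ssc depends on the module's own sigproc helpers and get_filterbanks, so this only runs where those are importable alongside it:

import numpy as np
fs = 16000
sig = np.random.randn(fs)            # one second of synthetic noise
centroids = ssc(sig, samplerate=fs)  # -> array of shape (NUMFRAMES, 26)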
Example n. 4
def plot_running_average(file_names,
                         agents_dir,
                         window=10,
                         nlabel=True,
                         alpha_label=True):
    running_average = []
    labels = []
    total_episodes = []
    for onefile in file_names:
        agents = pickle.load(open(agents_dir + onefile, 'rb'))
        number_of_episodes = agents[0].episode_number
        temp_returns = zeros(number_of_episodes)
        for agent in agents:
            temp_returns += -asarray(agent.return_per_episode)
        temp_returns /= size(agents)
        temp_running_average = []
        for i in range(int(number_of_episodes / window)):
            temp_running_average.append(
                sum(temp_returns[0:((i + 1) * window)]) / ((i + 1) * window))
        total_episodes.append([
            (i + 1) * window for i in range(int(number_of_episodes / window))
        ])
        running_average.append(temp_running_average)

        sigma = 0
        beta = 0
        alpha = 0
        n = 0

        for k in range(len(onefile)):
            if onefile[k] == 'S':
                sigma = round(
                    float(read_letter_until_character(onefile, k + 1, '_')), 2)
            elif onefile[k] == 'A':
                alpha = int(read_letter_until_character(onefile, k + 1, '_'))
            elif onefile[k] == 'N':
                n = round(
                    int(read_letter_until_character(onefile, k + 1, '_')), 1)
            elif onefile[k] == 'B':
                beta = round(
                    float(read_letter_until_character(onefile, k + 1, '.p')),
                    2)

        temp_label = r'$\sigma$ = ' + str(sigma)
        if beta != 1: temp_label += r', $\beta$ = ' + str(beta)
        if alpha_label: temp_label += r', $\alpha$ = ' + str(alpha)
        if nlabel: temp_label += ', n = ' + str(n)

        labels.append(temp_label)

    colors = ['b', 'g', 'c', 'k', 'y', 'm', 'b']
    for i in range(size(file_names)):
        plt.plot(total_episodes[i],
                 running_average[i],
                 color=colors[i],
                 label=labels[i])

    plt.legend(loc=1)
    plt.xlabel('Episode Number', fontsize=14)
    plt.ylabel('Average Return at Episode Number', fontsize=14)
Example n. 5
    def prec_rec(ranks):
        """
        ::

            Return precision and recall arrays for ranks array data    
        """
        P = (1.0 + pylab.arange(pylab.size(ranks))) / ( 1.0 + pylab.sort(ranks))
        R = (1.0 + pylab.arange(pylab.size(ranks))) / pylab.size(ranks)
        return P, R
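A worked example of the arithmetic above (hypothetical ranks): if the relevant items were retrieved at 0-based positions 0, 2 and 5, the i-th precision is (i+1)/(sorted_rank_i+1) and recall climbs in steps of 1/len(ranks):

import pylab
P, R = prec_rec(pylab.array([0, 2, 5]))
# P -> [1.0, 0.667, 0.5]    R -> [0.333, 0.667, 1.0]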
Example n. 6
    def prec_rec(ranks):
        """
        ::

            Return precision and recall arrays for ranks array data    
        """
        P = (1.0 + pylab.arange(pylab.size(ranks))) / ( 1.0 + pylab.sort(ranks))
        R = (1.0 + pylab.arange(pylab.size(ranks))) / pylab.size(ranks)
        return P, R
Example n. 7
def fast_conv_vect(input_signal, impulse):
    # Find the number of points needed for the FFT and vectorize.
    length = size(impulse) + size(input_signal) - 1  # linear convolution length
    next_pow = nextpow2(length)
    # The IDFT of the product is a circular convolution, so N >= L must hold
    # for it to match the linear convolution
    # (here L = N1+N2-1; N1 = length(input_signal); N2 = length(impulse)).
    # fft(x, n) is an n-point FFT: x is zero-padded if shorter than n and truncated if longer.
    spectral_m = fft(impulse, next_pow) * fft(input_signal, next_pow)  # FFT the impulse and the input signal, then multiply
    return inverse_fft(spectral_m)  # back to the time domain
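The same idea, checked against direct convolution with plain numpy (np.fft.ifft standing in for inverse_fft and nextpow2 written inline, so the sketch is self-contained):

import numpy as np
x = np.random.randn(100)
h = np.random.randn(16)
L = len(x) + len(h) - 1
n = int(2 ** np.ceil(np.log2(L)))
y = np.fft.ifft(np.fft.fft(h, n) * np.fft.fft(x, n)).real[:L]
assert np.allclose(y, np.convolve(x, h))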
Example n. 8
def splitparams(params, freeparams=3):
    """Say you've got a long parameter list specifying several Gaussians. This function splits it into
    a separate list for each Gaussian and then returns a list of those lists.
    yoffset (first element of parameter list) is not returned with any of the Gaussians."""
    numgaussians = (size(params) - 1) // freeparams  # integer division; a float breaks range()
    splitapart = [[0.0] * freeparams for _ in range(numgaussians)]  # independent inner lists
    for ii in range(numgaussians):
        ymax = params[freeparams * ii + 1]
        stddev = params[freeparams * ii + 2]
        x0 = params[freeparams * ii + 3]
        splitapart[ii] = [ymax, stddev, x0]
    return splitapart
Example n. 9
def splitparams(params,freeparams=3):
    """Say you've got a long parameter list specifying several Gaussians. This function splits it into
    a separate list for each Gaussian and then returns a list of those lists.
    yoffset (first element of parameter list) is not returned with any of the Gaussians."""
    numgaussians = (size(params)-1)//freeparams  # integer division for Python 3
    splitapart = [ [0.0]*freeparams for _ in range(numgaussians) ]
    for ii in range(numgaussians):
        ymax=params[freeparams*ii+1]
        stddev=params[freeparams*ii+2]
        x0=params[freeparams*ii+3]
        splitapart[ii]=[ymax,stddev,x0]
    return splitapart
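A hypothetical round trip with the parameter layout used by gaussians() above (one shared yoffset followed by ymax, stddev, x0 per Gaussian; numpy's size is assumed in scope, as in the snippets):

params = [0.1, 1.0, 0.5, -1.0, 0.7, 0.8, 1.5]
print(splitparams(params))   # -> [[1.0, 0.5, -1.0], [0.7, 0.8, 1.5]]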
Example n. 10
def approximate_x(dt=0.1):
   # time
   t = p.arange(0, 5+dt, dt)
   # initialize trajectory
   x = p.zeros(p.size(t), dtype='float')
   # set initial condition
   x[0] = 1.

   for i in range(p.size(t)-1):
      x[i+1] = x[i] - x[i]*dt

   return t, x
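This is forward Euler applied to dx/dt = -x with x(0) = 1, so the result can be checked against the exact solution exp(-t); the global error shrinks linearly with dt:

import pylab as p
t, x = approximate_x(dt=0.001)
print(abs(x - p.exp(-t)).max())   # O(dt) global error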
Example n. 11
def plot_average_return_per_episode(data,
                                    n,
                                    sigmas=None,
                                    betas=None,
                                    nlabel=False,
                                    lnstyle=None):
    if sigmas is None: sigmas = [0, 0.25, 0.5, 0.75, 1]
    lines = []
    if betas is None: betas = [0.95, 1]
    number_of_lines = 0
    labels = []
    min_return = inf
    for beta in betas:
        for sigma in sigmas:
            temp_lines = [-inf for _ in range(10)]
            temp_data = sort(data[(data['sigma'] == sigma) * (data['n'] == n) *
                                  (data['beta'] == beta)],
                             order='alpha')
            if size(temp_data) > 0:
                offset = size(temp_lines) - size(temp_data['Average_Return'])
                for i in range(size(temp_data)):
                    temp_lines[i + offset] = temp_data['Average_Return'][i]
                    if temp_data['Average_Return'][i] < min_return:
                        min_return = temp_data['Average_Return'][i]
                lines.append(temp_lines)
                temp_label = r'$\sigma$ = ' + str(sigma)
                if beta != 1: temp_label += r', $\beta$ = ' + str(beta)
                if nlabel: temp_label += ", n = " + str(n)
                labels.append(temp_label)
                number_of_lines += 1

    colors = ['b', 'g', 'c', 'k', 'y', 'm', 'b']
    alphas = [i + 1 for i in range(10)]
    if lnstyle is None:
        linestyle = (['solid'] * 5)
        linestyle.append('dashed')
    else:
        linestyle = [lnstyle] * 5

    for i in range(number_of_lines):
        plt.plot(alphas,
                 lines[i],
                 color=colors[i],
                 linestyle=linestyle[i],
                 label=labels[i])

    plt.legend(loc=4)
    plt.xlabel(r'$1/\alpha$', fontsize=14)
    plt.ylabel('Average Return per Episode', fontsize=14)
    plt.ylim(bottom=min_return + 100)  # matplotlib 3.x renamed the ymin kwarg to bottom
Example n. 12
def calcaV(W,method = "ratio"):
    """Calculate aV"""
    if method == "ratio":
        return pl.log(pl.absolute(W/pl.roll(W,-1,axis=1)))
    else:
        aVs = pl.zeros(pl.shape(W))
        n = pl.arange(1,pl.size(W,axis=1)+1)
        f = lambda b,t,W: W - b[0] * pl.exp(-b[1] * t)
        
        for i in range(pl.size(W,axis=0)):
            params,result = optimize.leastsq(f,[1.,1.],args=(n,W[i]))
            aVs[i] = params[1] * pl.ones(pl.shape(W[i]))
            
        return aVs
Example n. 13
def MM_E_step(x, K, opts, tmp_mu, tmp_v, tmp_PI, xpos, xneg):
    PS = np.zeros([K, size(x)])
    D = np.zeros([K, size(x)])  # stores probability of samples wrt distributions
    tmp_a = np.zeros(K)  # it will remain zero for non-gamma or inv-gamma distributions
    tmp_b = np.zeros(K)  # it will remain zero for non-gamma or inv-gamma distributions
    for k in range(K):
        if opts['Components_Model'][k] == 'Gauss':
            Nobj = scipy.stats.norm(tmp_mu[k], np.power(tmp_v[k], 0.5))
            PS[k, :] = Nobj.pdf(x)
        if opts['Components_Model'][k] == 'Gamma':
            tmp_a[k] = alb.alphaGm(tmp_mu[k], tmp_v[k])
            tmp_b[k] = alb.betaGm(tmp_mu[k], tmp_v[k])
            PS[k, :] = alb.gam(x, tmp_a[k], tmp_b[k])
            PS[k, xneg] = 0
        if opts['Components_Model'][k] == '-Gamma':
            tmp_a[k] = alb.alphaGm(-1 * tmp_mu[k], tmp_v[k])
            tmp_b[k] = alb.betaGm(-1 * tmp_mu[k], tmp_v[k])
            PS[k, :] = alb.gam(-1 * x, tmp_a[k], tmp_b[k])
            PS[k, xpos] = 0
        if opts['Components_Model'][k] == 'InvGamma':
            tmp_a[k] = alb.alphaIG(tmp_mu[k], tmp_v[k])
            tmp_b[k] = alb.betaIG(tmp_mu[k], tmp_v[k])
            PS[k, :] = alb.invgam(x, tmp_a[k], tmp_b[k])
            PS[k, xneg] = 0
        if opts['Components_Model'][k] == '-InvGamma':
            tmp_a[k] = alb.alphaIG(-1 * tmp_mu[k], tmp_v[k])
            tmp_b[k] = alb.betaIG(-1 * tmp_mu[k], tmp_v[k])
            PS[k, :] = alb.invgam(-1 * x, tmp_a[k], tmp_b[k])
            PS[k, xpos] = 0
        if opts['Components_Model'][k] == 'Beta':
            tmp_a[k] = alb.a_beta_distr(tmp_mu[k], tmp_v[k])
            tmp_b[k] = alb.b_beta_distr(tmp_mu[k], tmp_v[k])
            PS[k, :] = scipy.stats.beta.pdf(x, tmp_a[k], tmp_b[k])

    PS[np.isnan(PS)] = 0
    PS[np.isinf(PS)] = 0
    D = np.multiply(PS, np.matrix(tmp_PI).T)
    resp = np.divide(D, np.matrix(np.sum(D, 0)))
    N = np.sum(resp, 1)
    tmp_PI = np.divide(N, np.sum(resp)).T
    dum = np.add(np.log(PS), np.log(tmp_PI).T)
    dum[np.isinf(dum)] = 0
    Exp_lik = np.sum(np.multiply(resp, dum))

    return PS, resp, tmp_PI, N, Exp_lik
Example n. 14
def bin(Ws,binsize=1):
    """Split Ws into bins and return the average of each bin"""
    if binsize == 1:
        return Ws
    else:
        extra = 0 if pl.size(Ws,axis=0) % binsize == 0 else 1
        dims = [i for i in pl.shape(Ws)]
        dims[0] = dims[0] // binsize + extra  # integer division; pl.zeros needs an int
        dims = tuple(dims)
        W_binned = pl.zeros(dims)

        for i in range(pl.size(W_binned,axis=0)):
            W_binned[i] = pl.mean(Ws[i*binsize:(i+1)*binsize],axis=0)

        return W_binned
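A hypothetical usage: averaging a (10, 3) array in bins of 4 rows gives 3 bins, the last one averaging only the 2 leftover rows:

import pylab as pl
Ws = pl.arange(30.).reshape(10, 3)
print(bin(Ws, binsize=4).shape)   # -> (3, 3)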
Example n. 15
def read_all_csc(data_folder, dtype='int16', assume_same_fs=True, memmap=False, memmap_folder=None, save_for_spikedetekt=False, channels_to_save=None, return_sliced_data=False):
    if sys.version_info[0] > 2:
        mode = 'br'
    else:
        mode = 'r'

    os_name = platform.system()
    if os_name == 'Windows':
        sep = '\\'
    else:  # Linux, macOS, etc.; the original left sep undefined off Linux
        sep = '/'

    files = [os.path.join(data_folder, f) for f in os.listdir(data_folder) if f.endswith('.ncs')]
    order = [int(file.split('.')[0].split('CSC')[1]) for file in files]
    sort_order =  sorted(range(len(order)),key=order.__getitem__)
    ordered_files = [files[i] for i in sort_order]

    if memmap:
        if not memmap_folder:
            raise NameError("A memmap_folder should be defined for memmapped data")
        out_filename = data_folder.split(sep)[-1]+'.dat'
        out_full_filename = os.path.join(memmap_folder, out_filename)

    data = None
    i = 0
    for file in ordered_files:
        fin = open(file, mode=mode)
        x = read_single_csc(fin, assume_same_fs=assume_same_fs, memmap=memmap)
        if not assume_same_fs or memmap:
            channel_data = x['packets']['samp'].ravel()
            if data is None:
                data = pylab.memmap(out_full_filename, dtype=dtype, mode='w+', shape=(pylab.size(files), channel_data.size))
            # store every channel, including the first one (the original `else`
            # silently dropped the first file's samples)
            data[i,:] = channel_data
            data.flush()
            i = i+1
            print(i)
        else:
            channel_data = x['trace']
            if data is None:
                data = pylab.zeros(shape=(pylab.size(files), channel_data.size), dtype=dtype)
            data[i,:] = channel_data
            i = i+1
            print(i)

    data_to_return = data
    if save_for_spikedetekt:
        if channels_to_save:
            data2 = data[channels_to_save,:]
            if return_sliced_data:
                data_to_return = data2
        else:
            data2 = data
        data2 = pylab.transpose(data2)
        data2.reshape(data2.size)
        filename = os.path.join(memmap_folder, 'spikedetekt_'+out_filename)
        data2.astype(dtype).tofile(filename)

    return data_to_return
Example n. 16
def plot_phases(in_file, plot_type, plot_log):

    def no_log(x):
        return x

    fig = pylab.figure(1)
    ax = fig.add_subplot(111)

    try:
        img = spimage.sp_image_read(in_file, 0)
    except IOError:
        raise IOError("Can't read %s." % in_file)

    values = img.image.reshape(pylab.size(img.image))

    if plot_log:
        log_function = pylab.log
    else:
        log_function = no_log

    if plot_type == PHASES:
        hist = pylab.histogram(pylab.angle(values), bins=500)
        ax.plot((hist[1][:-1] + hist[1][1:]) / 2, log_function(hist[0]))
    elif plot_type == HISTOGRAM:  # the original compared an unused plot_flag that was always 0
        hist = pylab.histogram2d(pylab.real(values),
                                 pylab.imag(values),
                                 bins=500)
        ax.imshow(log_function(hist[0]),
                  extent=(hist[2][0], hist[2][-1], -hist[1][-1], -hist[1][0]),
                  interpolation='nearest')
    else:
        ax.plot(pylab.real(values), pylab.imag(values), '.')
    return fig
Example n. 17
def plot_phases(in_file, plot_type, plot_log):

    def no_log(x):
        return x

    fig = pylab.figure(1)
    ax = fig.add_subplot(111)

    try:
        img = spimage.sp_image_read(in_file,0)
    except IOError:  # a bare except would also swallow unrelated errors
        raise IOError("Can't read %s." % in_file)

    values = img.image.reshape(pylab.size(img.image))

    if plot_log:
        log_function = pylab.log
    else:
        log_function = no_log

    if plot_type == PHASES:
        hist = pylab.histogram(pylab.angle(values),bins=500)
        ax.plot((hist[1][:-1]+hist[1][1:])/2.0,log_function(hist[0]))
    elif plot_type == HISTOGRAM:
        hist = pylab.histogram2d(pylab.real(values),pylab.imag(values),bins=500)
        ax.imshow(log_function(hist[0]),extent=(hist[2][0],hist[2][-1],-hist[1][-1],-hist[1][0]),interpolation='nearest')
    else:
        ax.plot(pylab.real(values),pylab.imag(values),'.')
    return fig
Example n. 18
def wavwrite(snd, Fs, sndfile):
    """
    This function implements the wawwritefunction of Octave or Matlab to write a wav sound file from a vector snd with 
    sampling rate Fs, with: 
    import sound 
    sound.wavwrite(snd,Fs,'sound.wav')
    """
    import wave
    import pylab

    WIDTH = 2  #2 bytes per sample
    #FORMAT = pyaudio.paInt16
    CHANNELS = 1
    #RATE = 22050
    RATE = Fs  #32000

    length = pylab.size(snd)

    wf = wave.open(sndfile, 'wb')
    wf.setnchannels(CHANNELS)
    wf.setsampwidth(WIDTH)
    wf.setframerate(RATE)
    data = struct.pack('h' * length, *snd)
    wf.writeframes(data)
    wf.close()
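A hypothetical usage writing one second of a 440 Hz tone; struct.pack('h', ...) expects 16-bit integer samples, hence the scaling and cast:

import numpy as np
Fs = 22050
t = np.arange(Fs) / Fs
snd = (0.5 * 32767 * np.sin(2 * np.pi * 440 * t)).astype(np.int16)
wavwrite(snd, Fs, 'tone.wav')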
Example n. 19
def plotAutoCor(Ps):
    """Calculates and plots the autocorrelation function"""

    Cs = pl.zeros(pl.size(Ps)//2)   # integer division: pl.zeros needs an int
    t = pl.arange(pl.size(Ps)//2)

    style = input("Please enter a line style: ")  # raw_input in the original Python 2

    for i in range(pl.size(Ps)//2):
        Cs[i] = autoCor(Ps,t[i])

    pl.plot(t,Cs,style)
    pl.xlabel("$t$")
    pl.ylabel("$C(t)$")

    return Cs
Example n. 20
 def adjustFieldValue(self, index, new_val, all_val):
     """adjust the value of the parameter from the spinboxes to existing values in parameter space"""
     if (new_val < all_val[index] and index > 0):
         index -= 1
     elif (new_val > all_val[index] and index < pb.size(all_val) - 1):  # keep the next lookup in bounds
         index += 1
     return index
Example n. 21
    def Global_Stiffness(self):
        '''
        Generates Global Stiffness Matrix for the plane structure
        '''
        elem = self.element;
        B = py.zeros((6,6))
        for i in range (0,py.size(elem,0)): 
            #for each element find the stifness matrix
            K = py.zeros((self.n_nodes*2,self.n_nodes*2))            
            el = elem[i]
            
            #nodes formatted for input            
            [node1, node2, node3] = el;
            node1x = 2*(node1-1);node2x = 2*(node2-1);node3x = 2*(node3-1);
            node1y = 2*(node1-1)+1;node2y = 2*(node2-1)+1;node3y = 2*(node3-1)+1;
            
            #Area, Strain Matrix and E Matrix multiplied to get element stiffness            
            [J,B] = self.B(el)
            local_k = 0.5*abs(J)*py.dot(py.transpose(B),py.dot(self.E_matrix,B))
            if self.debug:
                print('K for elem', el, '\n', local_k)
                
            #Element K-Matrix converted into Global K-Matrix format 
            K[py.ix_([node1x,node1y,node2x,node2y,node3x,node3y],[node1x,node1y,node2x,node2y,node3x,node3y])] = K[py.ix_([node1x,node1y,node2x,node2y,node3x,node3y],[node1x,node1y,node2x,node2y,node3x,node3y])]+local_k

            #Adding contribution into Global Stiffness
            self.k_global = self.k_global + K

        if self.debug:
            print('Global Stiffness','\n', self.k_global)
Example n. 22
 def epsilon_greedy_probability(self, state, action):
     q = self.get_q(state)
     if size(unique(q)) < self.env.get_num_actions():
         max_q = max(q)
         max_observations = 0
         for value in q:
             if value == max_q: max_observations += 1
         probabilities = zeros(size(q))
         for i in range(size(q)):
             if q[i] == max_q: probabilities[i] = ((1-self.epsilon) / max_observations) + \
                                                  (self.epsilon / self.env.get_num_actions())
             else: probabilities[i] = self.epsilon / self.env.get_num_actions()
         return probabilities[action]
     else:
         if action == argmax(q):
             return self.optimal_p
         else:
             return self.epsilon / self.env.get_num_actions()
Example n. 23
def find_multi_values(y_array, y_want, dy):
    """
    find values' index for a function value y
    [email protected] 2016/09/15    
    """
    i = 0
    while i <= pylab.size(y_want) - 1:
        if i == 0:
            i_want = numpy.where(
                (y_array >= y_want[0] - dy) * (y_array <= y_want[0] + dy))
        else:
            want = numpy.where(
                (y_array >= y_want[i] - dy) * (y_array <= y_want[i] + dy))
            i_want = numpy.concatenate((i_want, want), 1)
        i = i + 1
    if pylab.size(i_want) <= 0:
        print("points not found!")
    return i_want
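A hypothetical usage: indices where a sampled sine passes within dy of the requested level:

import numpy, pylab
y = numpy.sin(numpy.linspace(0, 2 * numpy.pi, 1000))
idx = find_multi_values(y, [0.5], dy=0.01)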
Example n. 24
 def pvec(self,xarr,t):
 #==============================
     """Find solution to 1D acoustics by characteristics"""
     q=pl.zeros([pl.size(xarr)])
     i=0
     for x in xarr:
         imat=self.getmat(x)
         p1=self.w1x(x,t)
         p2=self.w2x(x,t)
         q[i] = p1+p2 #Pressure
         i+=1
     return q
Example n. 25
 def uvec(self,xarr,t):
 #==============================
     """Find solution to 1D acoustics by characteristics"""
     q=pl.zeros([pl.size(xarr)])
     i=0
     for x in xarr:
         imat=self.getmat(x)
         p1=self.w1x(x,t)
         p2=self.w2x(x,t)
         q[i] = (p2-p1)/self.z[imat] #Velocity
         i+=1
     return q
Example n. 26
 def pvec(self, xarr, t):
     #==============================
     """Find solution to 1D acoustics by characteristics"""
     q = pl.zeros([pl.size(xarr)])
     i = 0
     for x in xarr:
         imat = self.getmat(x)
         p1 = self.w1x(x, t)
         p2 = self.w2x(x, t)
         q[i] = p1 + p2  #Pressure
         i += 1
     return q
Example n. 27
 def uvec(self, xarr, t):
     #==============================
     """Find solution to 1D acoustics by characteristics"""
     q = pl.zeros([pl.size(xarr)])
     i = 0
     for x in xarr:
         imat = self.getmat(x)
         p1 = self.w1x(x, t)
         p2 = self.w2x(x, t)
         q[i] = (p2 - p1) / self.z[imat]  #Velocity
         i += 1
     return q
Example n. 28
    def write_gmsh_contour(self, lc=100000, boundary_extend=True):
        """
    write the contour created with create_contour to the .geo file with mesh
    spacing <lc>.  If <boundary_extend> is true, the spacing in the interior
    of the domain will be the same as the distance between nodes on the contour.
    """
        #FIXME: sporadic results when used with ipython, does not stops writing the
        #       file after a certain point.  calling restart() then write again
        #       results in correct .geo file written.  However, running the script
        #       outside of ipython works.
        s = "::: writing gmsh contour to \"%s%s.geo\" :::"
        print_text(s % (self.direc, self.fn), self.color)
        c = self.longest_cont
        f = self.f

        pts = size(c[:, 0])

        # write the file to .geo file :
        f.write("// Mesh spacing\n")
        f.write("lc = " + str(lc) + ";\n\n")

        f.write("// Points\n")
        for i in range(pts):
            f.write("Point(" + str(i) + ") = {" + str(c[i,0]) + "," \
                    + str(c[i,1]) + ",0,lc};\n")

        f.write("\n// Lines\n")
        for i in range(pts - 1):
            f.write("Line(" + str(i) + ") = {" + str(i) + "," + str(i + 1) +
                    "};\n")
        f.write("Line(" + str(pts-1) + ") = {" + str(pts-1) + "," \
                + str(0) + "};\n\n")

        f.write("// Line loop\n")
        loop = ""
        loop += "{"
        for i in range(pts - 1):
            loop += str(i) + ","
        loop += str(pts - 1) + "}"
        f.write("Line Loop(" + str(pts + 1) + ") = " + loop + ";\n\n")

        f.write("// Surface\n")
        surf_num = pts + 2
        f.write("Plane Surface(" + str(surf_num) + ") = {" + str(pts + 1) +
                "};\n\n")

        if not boundary_extend:
            f.write("Mesh.CharacteristicLengthExtendFromBoundary = 0;\n\n")

        self.surf_num = surf_num
        self.pts = pts
        self.loop = loop
Example n. 29
def plotAutoCor(Ps):
    """Calculates and plots the autocorrelation function"""

    Ncor = int(input("Please enter the number of configurations between measurements: "))

    Cs = pl.zeros(pl.size(Ps)//2)   # integer division for Python 3
    t = Ncor * pl.arange(pl.size(Ps)//2)

    style = input("Please enter a line style: ")  # raw_input in the original Python 2

    for i in range(pl.size(Ps)//2):
        Cs[i] = autoCor(Ps,i)

    tau = 0.5 + pl.sum(Cs / Cs[0])

    print("Integral autocorrelation time: %f" % tau)

    pl.plot(t,Cs,style)
    pl.xlabel("$t$")
    pl.ylabel("$C(t)$")

    return Cs
Example n. 30
def get_maximum(data, sigmas=None, betas=None):
    if sigmas is None: sigmas = [0, 0.25, 0.5, 0.75, 1]
    if betas is None: betas = [0.95, 1]

    file_names = []
    for beta in betas:
        for sigma in sigmas:
            temp_data = data[(data['sigma'] == sigma) * (data['beta'] == beta)]
            if size(temp_data) != 0:
                max_indx = argmax(temp_data['Average_Return'])
                file_names.append(temp_data['File_Name'][max_indx])

    return file_names
Example n. 31
def gaussians(params,x,numgaussians=1):
    """Defines multiple Gaussians with characteristics defined by 'params':
    params = yoffset,ymax,stddev,x0
    For multiple Gaussians, string together parameters into one long list (so we can fit using leastsq).
    Important: for multiple Gaussians, there is only one yoffset, and this will be the first element of the list;
    each individual Gaussian then has three parameters."""
    freeparams = 3 # number free parameters per gaussian, not counting yoffset
    if numgaussians==1:
        if size(params) != freeparams+1: raise ValueError('Incorrect number of parameters supplied to function gaussians.')
        yoffset,ymax,stddev,x0=params
        return yoffset+ymax*exp(-((x-x0)/stddev)**2/2.0)
    else:
        if numgaussians != (size(params)-1)//freeparams:
            raise ValueError('Incorrect number of parameters supplied to function gaussians.')
        yoffset=params[0]
        total=zeros(len(x))
        for ii in range(numgaussians):
            ymax=params[freeparams*ii+1]
            stddev=params[freeparams*ii+2]
            x0=params[freeparams*ii+3]
            total = total + ymax*exp(-((x-x0)/stddev)**2/2.0)
        return total+yoffset
Example n. 32
  def write_gmsh_contour(self, lc=100000, boundary_extend=True):  
    """
    write the contour created with create_contour to the .geo file with mesh
    spacing <lc>.  If <boundary_extend> is true, the spacing in the interior 
    of the domain will be the same as the distance between nodes on the contour.
    """ 
    #FIXME: sporadic results when used with ipython, does not stops writing the
    #       file after a certain point.  calling restart() then write again 
    #       results in correct .geo file written.  However, running the script 
    #       outside of ipython works.
    print "::: writing gmsh contour :::"
    c   = self.longest_cont
    f   = self.f
    x   = self.x
    y   = self.y

    pts = size(c[:,0])

    # write the file to .geo file :
    f.write("// Mesh spacing\n")
    f.write("lc = " + str(lc) + ";\n\n")
    
    f.write("// Points\n")
    for i in range(pts):
      f.write("Point(" + str(i) + ") = {" + str(c[i,0]) + "," \
              + str(c[i,1]) + ",0,lc};\n")
    
    f.write("\n// Lines\n")
    for i in range(pts-1):
      f.write("Line(" + str(i) + ") = {" + str(i) + "," + str(i+1) + "};\n")
    f.write("Line(" + str(pts-1) + ") = {" + str(pts-1) + "," \
            + str(0) + "};\n\n")
    
    f.write("// Line loop\n")
    loop = ""
    loop += "{"
    for i in range(pts-1):
      loop += str(i) + ","
    loop += str(pts-1) + "}"
    f.write("Line Loop(" + str(pts+1) + ") = " + loop + ";\n\n")
    
    f.write("// Surface\n")
    surf_num = pts+2
    f.write("Plane Surface(" + str(surf_num) + ") = {" + str(pts+1) + "};\n\n")

    if not boundary_extend:
      f.write("Mesh.CharacteristicLengthExtendFromBoundary = 0;\n\n")

    self.surf_num = surf_num
    self.pts      = pts
    self.loop     = loop
Example n. 33
 def __init__(self,xy,connectivity,displacement):
     '''
     member variables of the class BarElementFEM
     '''
     self._debug=False
     self._xy = py.array(xy) # copying xy values of the nodes
     self._connectivity = py.array(connectivity) # copying connectivity
     self._displacement = py.array(displacement)
     self.n_element = py.size(self._connectivity,0)
     self.n_nodes = py.shape(xy)[0] #number of node
     #global stiffness matrix intialized with zeros
     self.k_global = py.zeros((self.n_nodes*2,self.n_nodes*2))
     self.L = py.zeros(self.n_element)
     self.strain_global=[]
Example n. 34
 def __calculate__(self):
     global USE_IDENTITY_LINE
     if USE_IDENTITY_LINE:
         sd1 = (self.signal_plus - self.signal_minus) / pl.sqrt(2)
     else:
         mean_plus = MeanStatistic(signal=self.signal_plus,
                                   buffer=self.buffer,
                                   buffer_name='plus').compute()
         mean_minus = MeanStatistic(signal=self.signal_minus,
                                    buffer=self.buffer,
                                    buffer_name='minus').compute()
         sd1 = (self.signal_plus - mean_plus - self.signal_minus +
                mean_minus) / pl.sqrt(2)
     return pl.sqrt(pl.sum(sd1[self.indexes(sd1)]**2) / pl.size(sd1))
Example n. 35
 def __calculate__(self):
     global USE_IDENTITY_LINE
     if USE_IDENTITY_LINE:
         sd1 = (self.signal_plus - self.signal_minus) / pl.sqrt(2)
     else:
         mean_plus = MeanStatistic(signal=self.signal_plus,
                                   buffer=self.buffer,
                                   buffer_name='plus').compute()
         mean_minus = MeanStatistic(signal=self.signal_minus,
                                    buffer=self.buffer,
                                    buffer_name='minus').compute()
         sd1 = (self.signal_plus - mean_plus
                - self.signal_minus + mean_minus) / pl.sqrt(2)
     return pl.sqrt(pl.sum(sd1[self.indexes(sd1)] ** 2) / pl.size(sd1))
Example n. 36
    def draw_rand_gaussian_pos(self, min_r=pl.array([])):
        '''optional min_r, array or tuple of arrays on the form
        array([[r0,r1,...,rn],[z0,z1,...,zn]])'''

        x = pl.normal(0, self.radius, self.n)
        y = pl.normal(0, self.radius, self.n)
        z = pl.normal(0, self.radius, self.n)

        min_r_z = {}
        if pl.size(min_r) > 0:  # != False:
            if type(min_r) == type(()):
                for j in range(pl.shape(min_r)[0]):
                    min_r_z[j] = pl.interp(z, min_r[j][0, ], min_r[j][1, ])
                    if j > 0:
                        [w] = pl.where(min_r_z[j] < min_r_z[j - 1])
                        min_r_z[j][w] = min_r_z[j - 1][w]
                    minrz = min_r_z[j]

            else:
                minrz = pl.interp(z, min_r[0], min_r[1])

            R_z = pl.sqrt(x**2 + y**2)
            [u] = pl.where(R_z < minrz)

            while len(u) > 0:
                for i in range(len(u)):
                    x[u[i]] = pl.normal(0, self.radius, 1)
                    y[u[i]] = pl.normal(0, self.radius, 1)
                    z[u[i]] = pl.normal(0, self.radius, 1)
                    if type(min_r) == type(()):
                        for j in range(pl.shape(min_r)[0]):
                            min_r_z[j][u[i]] = pl.interp(
                                z[u[i]], min_r[j][0, ], min_r[j][1, ])
                            if j > 0:
                                [w] = pl.where(min_r_z[j] < min_r_z[j - 1])
                                min_r_z[j][w] = min_r_z[j - 1][w]
                            minrz = min_r_z[j]
                    else:
                        minrz[u[i]] = pl.interp(z[u[i]], min_r[0, ],
                                                min_r[1, ])
                R_z = pl.sqrt(x**2 + y**2)
                [u] = pl.where(R_z < minrz)

        soma_pos = {
            'xpos': x,
            'ypos': y,
            'zpos': z,
        }
        return soma_pos
Example n. 37
    def create_contour(self, var, zero_cntr, skip_pts):
        """
    Create a contour of the data field with index <var> of <dd> provided at
    initialization.  <zero_cntr> is the value of <var> to contour, <skip_pts>
    is the number of points to skip in the contour, needed to prevent overlap.
    """
        s    = "::: creating contour from %s's \"%s\" field with skipping %i " + \
               "point(s) :::"
        print_text(s % (self.dd.name, var, skip_pts), self.color)

        skip_pts = skip_pts + 1

        # create contour :
        field = self.dd.data[var]
        fig = figure()
        self.ax = fig.add_subplot(111)
        self.ax.set_aspect('equal')
        self.c = self.ax.contour(self.x, self.y, field, [zero_cntr])

        # Get longest contour:
        cl = self.c.allsegs[0]
        ind = 0
        amax = 0
        amax_ind = 0

        for a in cl:
            if size(a) > amax:
                amax = size(a)
                amax_ind = ind
            ind += 1

        # remove skip points and last point to avoid overlap :
        self.longest_cont = cl[amax_ind]
        s = "::: contour created, length %s nodes :::"
        print_text(s % shape(self.longest_cont)[0], self.color)
        self.remove_skip_points(skip_pts)
Example n. 38
 def __init__(self,E,v,iflag,xy,element,displacement, debug_status): 
     '''
     member variables of the class BarElementFEM
     '''
     self.debug = debug_status #true if in debugging mode
     self.E = E #Youngs modulus
     self.v = v #Poisson Ratio
     self.iflag = iflag #Plane stress-strain identifier
     self.xy = py.array(xy) # copying xy values of the sorted nodes
     self.element = py.array(element) # copying connectivity
     self.displacement = py.array(displacement) # copying displacement
     self.n_element = py.size(self.element,0) #number of elements
     self.n_nodes = py.shape(xy)[0] #number of nodes      
     self.k_global = py.zeros((self.n_nodes*2,self.n_nodes*2)) #global stiffness matrix
     self.E_matrix = self.Constitutive_matrix() #Stores constitutive matrix
Example n. 39
def accumulate_integerate(x, y):
    """
    Accumulate integration function
    [email protected] 2016/09/15
    """
    length_x = pylab.size(x)
    z = numpy.zeros(length_x)
    dx = x[2] - x[1]
    s = 0
    i = 0
    while i < length_x:
        s = s + y[i] * dx
        z[i] = s
        i = i + 1
    return z
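On a uniform grid the loop above is a cumulative left-Riemann sum, so (assuming pylab is imported, as the function requires) it matches a cumsum scaled by the step:

import numpy
x = numpy.linspace(0., 1., 101)
y = x**2
z = accumulate_integerate(x, y)
print(numpy.allclose(z, numpy.cumsum(y) * (x[1] - x[0])))   # -> True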
Example n. 40
def lg_angle_corr (fpower, alpha, beta, r, fout, f=0.485,n=1000,h=0.001):
    k = fpower[:,0]
    pk = fpower[:,1]
    # interpolate/extrapolate P(k) (power law extrapolation)
    pki = utils.LogInterp(k,pk)
    pkii = utils.LogInterp(k,pk/k)
    pkiii = utils.LogInterp(k,pk/(k**2))
    # init hankel transform
    h3d0 = hankel.Hankel3D(0.5,n)
    h3d2 = hankel.Hankel3D(2.5,n)
    h3d4 = hankel.Hankel3D(4.5,n)
    h3d1 = hankel.Hankel3D(1.5,n)
    h3d3 = hankel.Hankel3D(3.5,n)
    # hankel transform 
    xi0 = h3d0.transform(pki.f,r,n,h)
    xi2 = h3d2.transform(pki.f,r,n,h)   
    xi4 = h3d4.transform(pki.f,r,n,h)
    xii1 = h3d1.transform(pkii.f,r,n,h)
    xii3 = h3d3.transform(pkii.f,r,n,h)
    xiii0 = h3d0.transform(pkiii.f,r,n,h)
    xiii2 = h3d2.transform(pkiii.f,r,n,h)
    # calculation of the 'a'-s and 'b'-s
    a0 = (1+2*f/3+2*f*f/15)*xi0-(f/3+2*f*f/21)*xi2+(3*f*f/140)*xi4
    a02 = -(f/2+3*f*f/14)*xi2+(f*f/28)*xi4
    a22 = (f*f/15)*xi0-(f*f/21)*xi2+(19*f*f/140)*xi4
    b22 = (f*f/15)*xi0-(f*f/21)*xi2-(4*f*f/35)*xi4
    a10 = ((2*f+4.*f*f/5.)*xii1/r-f*f*xii3/(5.*r))  # this is a10 tilde in the paper
    a11 = (4*f*f*xiii0/(3.*r*r)-8*f*f*xiii2/(3.*r*r))
    a21 = (3*f*f*xii3/(5.*r)-2*f*f*xii1/(5.*r))
    b11 = (4*f*f*xiii0/(3.*r*r)+4*f*f*xiii2/(3.*r*r))
    b21 = -(2*f*f*xii1/(5.*r)+2*f*f*xii3/(5.*r))
    # this is the output file
    fi = open(fout,'w')
    for i in alpha:
        for j in beta:
            g1 = gg1(j,i)
            g2 = gg2(j,i)
            for k in range(0,M.size(r)):
                th1 = (2.*j-i)/2.
                th2 = (i+2.*j)/2.
                original = a0[k]+a02[k]*M.cos(2.*th1)+a02[k]*M.cos(2.*th2)+a22[k]*M.cos(2.*th1)*M.cos(2.*th2)+b22[k]*M.sin(2.*th1)*M.sin(2.*th2)
                new = a10[k]*(M.cos(th1)/g1-M.cos(th2)/g2)+a11[k]*M.cos(th1)*M.cos(th2)/(g1*g2)+a21[k]*(M.cos(2.*th1)*M.cos(th2)/g2-M.cos(th1)*M.cos(2.*th2)/g1)+b11[k]*M.sin(th1)*M.sin(th2)/(g1*g2)+b21[k]*(M.sin(2.*th1)*M.sin(th2)/g2-M.sin(th1)*M.sin(2.*th2)/g1)
                if (2.*j>i):
                    fi.write(str(i)+" "+str(j)+" "+str(r[k])+" "+str(new + original)+" "+"\n")
    fi.close()
Example n. 41
 def start(self):
     self.format_data()
     self.y_data = gen(self.shape_cb.currentText()) * pl.absolute(
         self.real_high - self.real_low) / 2 + self.real_off
     self.x_data = pl.linspace(0, 1 / self.real_freq, pl.size(self.y_data))
     self.ax.clear()
     self.ax.plot(self.x_data * 1000, self.y_data)
     print(len(self.x_data))
     print(len(self.y_data))
     self.canvas.draw()
     if mode == 'has_visa':
         self.inst.write('source1:function:shape ' +
                         self.shape_cb.currentText())
         self.inst.write('source1:frequency:fixed ' +
                         self.freq_text.text() + self.freq_cb.currentText())
         self.inst.write('source1:voltage:level:immediate:high ' +
                         self.high_text.text() + self.high_cb.currentText())
         self.inst.write('source1:voltage:level:immediate:low ' +
                         self.low_text.text() + self.low_cb.currentText())
         self.inst.write('source1:voltage:level:immediate:offset ' +
                         self.off_text.text() + self.off_cb.currentText())
Example n. 42
def numModel(ti, te, mu1, mu2, n, dl):
    l = dl * pl.cos(ti)
    s = mu1 * dl
    
    u = pl.exp(-s)
    v = 1.0 - u

    a = mu2 * dl * pl.cos(ti) / pl.cos(te)

    I = 1.0
    S = pl.zeros(pl.size(te))  # float64 by default; pl.Float64 came from the old Numeric API

    try:
        for i in range(1,n+1):
            A  = I * v
            S += A * pl.exp(-a * i)
            I *= u
    except Exception:
        print("Halt dammit: ", i)

    return HS * S  # HS is assumed to be a module-level constant
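The loop accumulates a finite geometric series, term_i = v * u**(i-1) * exp(-a*i); for a scalar attenuation a it has the closed form v*exp(-a)*(1 - q**n)/(1 - q) with q = u*exp(-a). A quick numerical check with hypothetical values:

import pylab as pl
u, v, a, n = 0.9, 0.1, 0.2, 50
q = u * pl.exp(-a)
S_loop = sum(v * u**(i - 1) * pl.exp(-a * i) for i in range(1, n + 1))
S_closed = v * pl.exp(-a) * (1 - q**n) / (1 - q)
assert abs(S_loop - S_closed) < 1e-12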
Example n. 43
def create_table_for_average_return(agents_directory):
    myFiles = listdir(agents_directory)
    arr = None
    sigma = 0
    alpha = 0
    n = 0
    beta = 0
    for onefile in myFiles:
        for k in range(len(onefile)):
            if onefile[k] == 'S':
                sigma = round(
                    float(read_letter_until_character(onefile, k + 1, '_')), 2)
            elif onefile[k] == 'A':
                alpha = int(read_letter_until_character(onefile, k + 1, '_'))
            elif onefile[k] == 'N':
                n = round(
                    int(read_letter_until_character(onefile, k + 1, '_')), 1)
            elif onefile[k] == 'B':
                beta = round(
                    float(read_letter_until_character(onefile, k + 1, '.p')),
                    2)

        agents = pickle.load(open(agents_directory + onefile, 'rb'))
        average = 0
        for agent in agents:
            average += (sum(agent.return_per_episode) /
                        agent.episode_number) / size(agents)

        dtype = [('File_Name', str_, 20), ('n', int), ('sigma', float),
                 ('beta', float), ('alpha', int), ('Average_Return', float)]
        if arr is None:
            arr = array((onefile, n, sigma, beta, alpha, average), dtype=dtype)
        else:
            arr = vstack([
                arr,
                array((onefile, n, sigma, beta, alpha, average), dtype=dtype)
            ])
    return arr
Example n. 44
    def __calculate__(self):

        mean_plus = MeanStatistic(signal=self.signal_plus,
                                  buffer=self.buffer,
                                  buffer_name='plus').compute()
        mean_minus = MeanStatistic(signal=self.signal_minus,
                                   buffer=self.buffer,
                                   buffer_name='minus').compute()

        #division for acceleration, deceleration or no change points
        #have to be done using sd1 vector
        global USE_IDENTITY_LINE
        if USE_IDENTITY_LINE:
            sd1 = (self.signal_plus - self.signal_minus) / pl.sqrt(2)
        else:
            sd1 = (self.signal_plus - self.signal_minus - mean_plus +
                   mean_minus) / pl.sqrt(2)
        nochange_indexes = pl.where(sd1 == 0)[0]  # pl.find was removed from matplotlib

        sd2 = (self.signal_plus - mean_plus + self.signal_minus -
               mean_minus) / pl.sqrt(2)
        return pl.sqrt((pl.sum(sd2[self.indexes()]**2) +
                        (pl.sum(sd2[nochange_indexes]**2)) / 2) / pl.size(sd2))
Example n. 45
    def __calculate__(self):

        mean_plus = MeanStatistic(signal=self.signal_plus,
                                  buffer=self.buffer,
                                  buffer_name='plus').compute()
        mean_minus = MeanStatistic(signal=self.signal_minus,
                                   buffer=self.buffer,
                                   buffer_name='minus').compute()

        #division for acceleration, deceleration or no change points
        #have to be done using sd1 vector
        global USE_IDENTITY_LINE
        if USE_IDENTITY_LINE:
            sd1 = (self.signal_plus - self.signal_minus) / pl.sqrt(2)
        else:
            sd1 = (self.signal_plus - self.signal_minus
                    - mean_plus + mean_minus) / pl.sqrt(2)
        nochange_indexes = pl.where(sd1 == 0)[0]  # pl.find was removed from matplotlib

        sd2 = (self.signal_plus - mean_plus
                   + self.signal_minus - mean_minus) / pl.sqrt(2)
        return pl.sqrt((pl.sum(sd2[self.indexes()] ** 2)
                + (pl.sum(sd2[nochange_indexes] ** 2)) / 2) / pl.size(sd2))
Example n. 46
def computeAreaInContour(field,value):
    field = -field
    value = -value
    valMin = field.min()
    [M,N] = py.shape(field)
    bufferedField = valMin*py.ones((M+2,N+2))
    #print bufferedField
    bufferedField[1:-1,1:-1] = field
    print('field', bufferedField)
    [c,d] = py.shape(bufferedField)
    [X,Y] = py.mgrid[0:c,0:d]
    cs = plt.contour(X,Y,bufferedField,[value])
    plt.show()
    p = cs.collections[0].get_paths()
    s = py.size(p)
    a = 0.0
    for i in range(s):
         p0 = cs.collections[0].get_paths()[i].vertices
         a = a + area(p0)

    #print 'p', p, 'p0', p0, 'size' , py.size(cs.collections[0].get_paths())
    #ar = area(p0);
    #print 'area', ar
    return a
Example n. 47
    def plot(self, cmap, filename=None,
             starttime=T1, endtime=T2,
             show_percentiles=False, percentiles=[10, 50, 90],
             show_class_models=True, grid=True, title_comment=False):
        """
        Plot the QC resume figure
        If a filename is specified the plot is saved to this file, otherwise
        a plot window is shown.

        :type filename: str (optional)
        :param filename: Name of output file
        :type show_percentiles: bool (optional)
        :param show_percentiles: Enable/disable plotting of approximated
                percentiles. These are calculated from the binned histogram and
                are not the exact percentiles.
        :type percentiles: list of ints
        :param percentiles: percentiles to show if plotting of percentiles is
                selected.
        :type show_class_models: bool (optional)
        :param show_class_models: Enable/disable plotting of class models.
        :type grid: bool (optional)
        :param grid: Enable/disable grid in histogram plot.
        :type cmap: cmap
        :param cmap: Colormap for PPSD.
        """

        # COMMON PARAMETERS
        psd_db_limits = (-180, -110)
        psdh_db_limits = (-200, -90)
        f_limits = (5e-3, 20)
        per_left = (10, 1, .1)
        per_right = (100, 10, 1)
        # -----------------

        # Select Time window
        # -----------
        times_used = array(self.times_used)
        starttime = max(min(times_used), starttime)
        endtime = min(max(times_used), endtime)
        bool_times_select = (times_used > starttime) & (times_used < endtime)
        times_used = times_used[bool_times_select]
        psd = self.psd[bool_times_select, :]
        spikes = self.spikes[bool_times_select]

        hist_stack = self._QC__get_ppsd(time_lim=(starttime, endtime))

        Hour = arange(0, 23, 1)
        HourUsed = array([t.hour for t in times_used])
        Day_span = (endtime - starttime) / 86400.
        # -----------

        # FIGURE and AXES
        fig = plt.figure(figsize=(9.62, 13.60), facecolor='w', edgecolor='k')
        ax_ppsd = fig.add_axes([0.1, 0.68, 0.9, 0.28])
        ax_coverage = fig.add_axes([0.1, 0.56, 0.64, 0.04])
        ax_spectrogram = fig.add_axes([0.1, 0.31, 0.64, 0.24])
        ax_spectrogramhour = fig.add_axes([0.76, 0.31, 0.20, 0.24])
        ax_freqpsd = fig.add_axes([0.1, 0.18, 0.64, 0.12])
        ax_freqpsdhour = fig.add_axes([0.76, 0.18, 0.20, 0.12])
        ax_spikes = fig.add_axes([0.1, 0.05, 0.64, 0.12])
        ax_spikeshour = fig.add_axes([0.76, 0.05, 0.20, 0.12])

        ax_col_spectrogram = fig.add_axes([0.76, 0.588, 0.20, 0.014])
        ax_col_spectrogramhour = fig.add_axes([0.76, 0.57, 0.20, 0.014])

        ########################### COVERAGE
        ax_coverage.xaxis_date()
        ax_coverage.set_yticks([])
        # plot data coverage
        starts = date2num([a.datetime for a in times_used])
        ends = date2num([a.datetime for a in times_used + PPSD_LENGTH])
        for start, end in zip(starts, ends):
            ax_coverage.axvspan(start, end, 0, 0.7, alpha=0.5, lw=0)
        # plot data really available
        aa = [(start, end) for start, end in self.times_data if (
            (end - start) > PPSD_LENGTH)]  # avoid very small gaps, otherwise very long to plot
        for start, end in aa:
            start = date2num(start.datetime)
            end = date2num(end.datetime)
            ax_coverage.axvspan(start, end, 0.7, 1, facecolor="g", lw=0)
        # plot gaps
        aa = [(start, end) for start, end in self.times_gaps if (
            (end - start) > PPSD_LENGTH)]  # avoid very small gaps, otherwise very long to plot
        for start, end in aa:
            start = date2num(start.datetime)
            end = date2num(end.datetime)
            ax_coverage.axvspan(start, end, 0.7, 1, facecolor="r", lw=0)
        # Compute uncovered periods
        starts_uncov = ends[:-1]
        ends_uncov = starts[1:]
        # Keep only major uncovered periods
        ga = (ends_uncov - starts_uncov) > (PPSD_LENGTH) / 86400
        starts_uncov = starts_uncov[ga]
        ends_uncov = ends_uncov[ga]

        ax_coverage.set_xlim(starttime.datetime, endtime.datetime)

        # labels
        ax_coverage.xaxis.set_ticks_position('top')
        ax_coverage.tick_params(direction='out')

        ax_coverage.xaxis.set_major_locator(mdates.AutoDateLocator())
        if Day_span > 5:
            ax_coverage.xaxis.set_major_formatter(DateFormatter('%D'))
        else:
            ax_coverage.xaxis.set_major_formatter(DateFormatter('%D-%Hh'))
            for label in ax_coverage.get_xticklabels():
                label.set_fontsize(10)


        for label in ax_coverage.get_xticklabels():
            label.set_ha("right")
            label.set_rotation(-25)

        ########################### SPECTROGRAM
        ax_spectrogram.xaxis_date()
        t = date2num([a.datetime for a in times_used])
        f = 1. / self.per_octaves
        T, F = np.meshgrid(t, f)
        spectro = ax_spectrogram.pcolormesh(
            T, F, transpose(psd), cmap=spectro_cmap)
        spectro.set_clim(*psd_db_limits)

        spectrogram_colorbar = colorbar(spectro, cax=ax_col_spectrogram,
                                        orientation='horizontal',
                                        ticks=linspace(psd_db_limits[0],
                                                       psd_db_limits[1], 5),
                                        format='%i')
        spectrogram_colorbar.set_label("dB")
        spectrogram_colorbar.set_clim(*psd_db_limits)
        spectrogram_colorbar.ax.xaxis.set_ticks_position('top')
        spectrogram_colorbar.ax.xaxis.label.set_position((1.1, .2))
        spectrogram_colorbar.ax.yaxis.label.set_horizontalalignment('left')
        spectrogram_colorbar.ax.yaxis.label.set_verticalalignment('bottom')

        ax_spectrogram.grid(which="major")
        ax_spectrogram.semilogy()
        ax_spectrogram.set_ylim(f_limits)
        ax_spectrogram.set_xlim(starttime.datetime, endtime.datetime)
        ax_spectrogram.set_xticks(ax_coverage.get_xticks())
        setp(ax_spectrogram.get_xticklabels(), visible=False)
        ax_spectrogram.yaxis.set_major_formatter(FormatStrFormatter("%.2f"))
        ax_spectrogram.set_ylabel('Frequency [Hz]')
        ax_spectrogram.yaxis.set_label_coords(-0.08, 0.5)

        ########################### SPECTROGRAM PER HOUR
        #psdH=array([array(psd[HourUsed==h,:]).mean(axis=0) for h in Hour])
        psdH = zeros((size(Hour), size(self.per_octaves)))
        for i, h in enumerate(Hour):
            a = array(psd[HourUsed == h, :])
            A = ma.masked_array(
                a, mask=~((a > psdh_db_limits[0]) & (a < psdh_db_limits[1])))
            psdH[i, :] = ma.getdata(A.mean(axis=0))
        psdH = array([psdH[:, i] - psdH[:, i].mean()
                      for i in arange(0, psdH.shape[1])])
        H24, F = np.meshgrid(Hour, f)
        spectroh = ax_spectrogramhour.pcolormesh(H24, F, psdH, cmap=cm.RdBu_r)
        spectroh.set_clim(-8, 8)

        spectrogram_per_hour_colorbar = colorbar(spectroh,
                                                 cax=ax_col_spectrogramhour,
                                                 orientation='horizontal',
                                                 ticks=linspace(-8, 8, 5),
                                                 format='%i')
        spectrogram_per_hour_colorbar.set_clim(-8, 8)

        ax_spectrogramhour.semilogy()
        ax_spectrogramhour.set_xlim((0, 23))
        ax_spectrogramhour.set_ylim(f_limits)
        ax_spectrogramhour.set_xticks(arange(0, 23, 4))
        ax_spectrogramhour.set_xticklabels(arange(0, 23, 4), visible=False)
        ax_spectrogramhour.yaxis.set_ticks_position('right')
        ax_spectrogramhour.yaxis.set_label_position('right')
        ax_spectrogramhour.yaxis.grid(True)
        ax_spectrogramhour.xaxis.grid(False)

        ########################### PSD BY PERIOD RANGE
        t = date2num([a.datetime for a in times_used])
        ax_freqpsd.xaxis_date()
        for pp in zip(per_left, per_right):
            mpsd = self._QC__get_psd(time_lim=(starttime, endtime), per_lim=pp)
            mpsdH = zeros(size(Hour)) + NaN
            for i, h in enumerate(Hour):
                a = array(mpsd[HourUsed == h])
                A = ma.masked_array(
                    a, mask=~((a > psdh_db_limits[0]) & (a < psdh_db_limits[1])))
                mpsdH[i] = ma.getdata(A.mean())
            ax_freqpsd.plot(t, mpsd)
            ax_freqpsdhour.plot(Hour, mpsdH - mpsdH.mean())
        ax_freqpsd.set_ylim(psd_db_limits)
        ax_freqpsd.set_xlim(starttime.datetime, endtime.datetime)
        ax_freqpsd.set_xticks(ax_coverage.get_xticks())
        setp(ax_freqpsd.get_xticklabels(), visible=False)
        ax_freqpsd.set_ylabel('Amplitude [dB]')
        ax_freqpsd.yaxis.set_label_coords(-0.08, 0.5)
        ax_freqpsd.yaxis.grid(False)
        ax_freqpsd.xaxis.grid(True)

        ########################### PSD BY PERIOD RANGE PER HOUR
        ax_freqpsdhour.set_xlim((0, 23))
        ax_freqpsdhour.set_ylim((-8, 8))
        ax_freqpsdhour.set_yticks(arange(-6, 7, 2))
        ax_freqpsdhour.set_xticks(arange(0, 23, 4))
        ax_freqpsdhour.set_xticklabels(arange(0, 23, 4), visible=False)
        ax_freqpsdhour.yaxis.set_ticks_position('right')
        ax_freqpsdhour.yaxis.set_label_position('right')

        ########################### SPIKES
        ax_spikes.xaxis_date()
        ax_spikes.bar(t, spikes, width=1. / 24)
        ax_spikes.set_ylim((0, 50))
        ax_spikes.set_xlim(starttime.datetime, endtime.datetime)
        ax_spikes.set_yticks(arange(10, 45, 10))
        ax_spikes.set_xticks(ax_coverage.get_xticks())
        #setp(ax_spikes.get_xticklabels(), visible=False)
        ax_spikes.set_ylabel("Detections [#/hour]")
        ax_spikes.yaxis.set_label_coords(-0.08, 0.5)
        ax_spikes.yaxis.grid(False)
        ax_spikes.xaxis.grid(True)

        # labels
        ax_spikes.xaxis.set_ticks_position('bottom')
        ax_spikes.tick_params(direction='out')
        ax_spikes.xaxis.set_major_locator(mdates.AutoDateLocator())
        if Day_span > 5:
            ax_spikes.xaxis.set_major_formatter(DateFormatter('%D'))
        else:
            ax_spikes.xaxis.set_major_formatter(DateFormatter('%D-%Hh'))
            for label in ax_spikes.get_xticklabels():
                label.set_fontsize(10)

        for label in ax_spikes.get_xticklabels():
            label.set_ha("right")
            label.set_rotation(25)

        ########################### SPIKES PER HOUR
        mspikesH = array([array(spikes[HourUsed == h]).mean() for h in Hour])  # boolean mask, not a nested list
        ax_spikeshour.bar(Hour, mspikesH - mspikesH.mean(), width=1.)
        ax_spikeshour.set_xlim((0, 23))
        ax_spikeshour.set_ylim((-8, 8))
        ax_spikeshour.set_xticks(arange(0, 23, 4))
        ax_spikeshour.set_yticks(arange(-6, 7, 2))
        ax_spikeshour.set_ylabel("Daily variation")
        ax_spikeshour.set_xlabel("Hour [UTC]")
        ax_spikeshour.yaxis.set_ticks_position('right')
        ax_spikeshour.yaxis.set_label_position('right')
        ax_spikeshour.yaxis.set_label_coords(1.3, 1)

        ########################### plot gaps
        for start, end in zip(starts_uncov, ends_uncov):
            ax_spectrogram.axvspan(
                start, end, 0, 1, facecolor="w", lw=0, zorder=100)
            ax_freqpsd.axvspan(
                start, end, 0, 1, facecolor="w", lw=0, zorder=100)
            ax_spikes.axvspan(start, end, 0, 1,
                              facecolor="w", lw=0, zorder=100)

        # LEGEND
        leg = [str(xx) + '-' + str(yy) + ' s' for xx,
               yy in zip(per_left, per_right)]
        hleg = ax_freqpsd.legend(
            leg, loc=3, bbox_to_anchor=(-0.015, 0.75), ncol=size(leg))
        for txt in hleg.get_texts():
            txt.set_fontsize(8)

        # PPSD
        X, Y = np.meshgrid(self.xedges, self.yedges)
        ppsd = ax_ppsd.pcolormesh(X, Y, hist_stack.T, cmap=cmap)
        ppsd_colorbar = plt.colorbar(ppsd, ax=ax_ppsd)
        ppsd_colorbar.set_label("PPSD [%]")
        color_limits = (0, 30)
        ppsd.set_clim(*color_limits)
        ppsd_colorbar.set_clim(*color_limits)
        ax_ppsd.grid(b=grid, which="major")

        if show_percentiles:
            hist_cum = self.__get_normalized_cumulative_histogram(
                time_lim=(starttime, endtime))
            # for every period look up the approximate place of the percentiles
            for percentile in percentiles:
                periods, percentile_values = self.get_percentile(
                    percentile=percentile, hist_cum=hist_cum, time_lim=(starttime, endtime))
                ax_ppsd.plot(periods, percentile_values, color="black")

        # Noise models
        model_periods, high_noise = get_nhnm()
        ax_ppsd.plot(model_periods, high_noise, '0.4', linewidth=2)
        model_periods, low_noise = get_nlnm()
        ax_ppsd.plot(model_periods, low_noise, '0.4', linewidth=2)
        if show_class_models:
            classA_periods, classA_noise, classB_periods, classB_noise = get_class()
            ax_ppsd.plot(classA_periods, classA_noise, 'r--', linewidth=3)
            ax_ppsd.plot(classB_periods, classB_noise, 'g--', linewidth=3)

        ax_ppsd.semilogx()
        ax_ppsd.set_xlim(1. / f_limits[1], 1. / f_limits[0])
        ax_ppsd.set_ylim((-200, -80))
        ax_ppsd.set_xlabel('Period [s]')
        ax_ppsd.get_xaxis().set_label_coords(0.5, -0.05)
        ax_ppsd.set_ylabel('Amplitude [dB]')
        ax_ppsd.xaxis.set_major_formatter(FormatStrFormatter("%.2f"))

        # TITLE
        title = "%s   %s -- %s  (%i segments)"
        title = title % (self.id, starttime.date, endtime.date,
                         len(times_used))
        if title_comment:
            fig.text(0.82, 0.978, title_comment, bbox=dict(
                facecolor='red', alpha=0.5), fontsize=15)

        ax_ppsd.set_title(title)
        # a=str(UTCDateTime().format_iris_web_service())
        plt.draw()

        if filename is not None:
            plt.savefig(filename)
            plt.close()
        else:
            plt.show()
    def fit_exponential( 
            self, 
            tstart       = 0.0, 
            tend         = None, 
            guess        = dict( l0=5.0, a0=1.0, b=0.0 ), 
            num_exp      = None,
            verbose      = True,
            deconvolve   = False,
            fixed_params = [None], 
            curve_num=0 ):
        """
        fit a function of exponentials to a single curve of the file
        (my files only have one curve at this point anyway,
        curve 0). 
        The parameter num_exp (default is 1, max is 3) defines the number of
        exponentials in the funtion to be fitted.
        num_exp=1 yields:
        f(t) = a0*exp(-t/l0) + b
        where l0 is the lifetime and a0 and b are constants,
        and we fit over the range from tstart to tend.
        You don't have to pass this parameter anymore; just pass an initial guess and
        the number of parameters passed will determine the type of model used.
        
        If tend==None, we fit until the end of the curve.
        
        If num_exp > 1, you will need to modify the initial
        parameters for the fit (i.e. pass the method an explicit `guess`
        parameter) because the default has only three parameters
        but you will need two additional parameters for each additional
        exponential (another lifetime and another amplitude) to describe
        a multi-exponential fit. 
        For num_exp=2:
        f(t) = a1*exp(-t/l1) + a0*exp(-t/l0) + b
        
        and for num_exp=3:
        f(t) = a2*exp(-t/l2) + a1*exp(-t/l1) + a0*exp(-t/l0) + b
        
        verbose=True (default) results in printing of fitting results to terminal.
        
        """
        self.fitstart = tstart
        self.deconvolved = deconvolve
        tpulse = 1.0e9/self.curveheaders[0]['InpRate0'] # avg. time between pulses, in ns

        if num_exp is None:
            num_exp = (1 + int(guess.has_key('l1')) +
                     int(guess.has_key('l2')) +
                     int(guess.has_key('l3')) +
                     int(guess.has_key('l4')))
            num_a   = (1 + int(guess.has_key('a1')) +
                     int(guess.has_key('a2')) +
                     int(guess.has_key('a3')) +
                     int(guess.has_key('a4')))
            if num_exp != num_a:
                raise ValueError("Missing a parameter! Unequal number of lifetimes and amplitudes.")

        keylist = [ "l0", "a0", "b" ]
        errlist = [ "l0_err", "a0_err" ]
        if num_exp == 2:
            keylist = [ "l1", "a1", "l0", "a0", "b" ]
            errlist = [ "l1_err", "a1_err", "l0_err", "a0_err" ]
        elif num_exp == 3 and not guess.has_key('t_ag') and not guess.has_key('t_d3'):
            keylist = [ "l2", "a2", "l1", "a1", "l0", "a0", "b" ]
            errlist = [ "l2_err", "a2_err", "l1_err", "a1_err", "l0_err", "a0_err" ]
        elif num_exp == 3 and guess.has_key('t_ag'):
            keylist = [ "l2", "a2", "l1", "a1", "l0", "a0", "t_ag" ]
            errlist = [ "l2_err", "a2_err", "l1_err", "a1_err", "l0_err", "a0_err" ]
        elif num_exp == 3 and guess.has_key('t_d3'):
            keylist = [ "l2", "a2", "l1", "a1", "l0", "a0", "t_d3" ]
            errlist = [ "l2_err", "a2_err", "l1_err", "a1_err", "l0_err", "a0_err" ]
        elif num_exp == 4:
            keylist = [ "l3", "a3", "l2", "a2", "l1", "a1", "l0", "a0", "b" ]
            errlist = [ "l3_err", "a3_err", "l2_err", "a2_err", "l1_err", "a1_err", "l0_err", "a0_err" ]
        elif num_exp == 5:
            keylist = [ "l4", "a4", "l3", "a3", "l2", "a2", "l1", "a1", "l0", "a0", "b" ]
            errlist = [ "l4_err", "a4_err", "l3_err", "a3_err", "l2_err", "a2_err", "l1_err", "a1_err", "l0_err", "a0_err" ]
                    
                    
        if not deconvolve:
            params = [ guess[key] for key in keylist ]
            free_params = [ i for i,key in enumerate(keylist) if not key in fixed_params ]
            initparams = [ guess[key] for key in keylist if not key in fixed_params ]
            def f(t, *args ):
                for i,arg in enumerate(args): params[ free_params[i] ] = arg
                local_params = params[:]
                b = local_params.pop(-1)  # constant offset is always last
                result = pylab.zeros(len(t))
                for l,a in zip(local_params[::2],local_params[1::2]):
                    result += abs(a)*pylab.exp(-(t-tstart)/abs(l))
                return result+b

        else:
            raise NameError("Deconvolution with this module is not kept current. Use FastFit module from fit directory instead.")
            if self.irf == None: raise AttributeError("No detector trace!!! Use self.set_detector() method.")
            t0 = tstart
            tstart = 0.0
            keylist.append( "tshift" )
            params = [ guess[key] for key in keylist ]
            free_params = [ i for i,key in enumerate(keylist) if not key in fixed_params ]
            initparams = [ guess[key] for key in keylist if not key in fixed_params ]
            def f( t, *args ):
                for i,arg in enumerate(args): params[ free_params[i] ] = arg
                tshift = params[-1]
                ideal = fmodel( t, *args )
                irf = cspline1d_eval( self.irf_generator, t-tshift, dx=self.irf_dt, x0=self.irf_t0 )
                convoluted = pylab.real(pylab.ifft( pylab.fft(ideal)*pylab.fft(irf) )) # very small imaginary anyway
                return convoluted

            def fmodel( t, *args ):
                for i,arg in enumerate(args): params[ free_params[i] ] = arg
                local_params = params[:]
                tshift = local_params.pop(-1)
                if guess.has_key('t_ag'):
                    t_ag = abs(local_params.pop(-1))
                elif guess.has_key('t_d3'):
                    t_d3 = abs(local_params.pop(-1))
                elif guess.has_key('a_fix'):
                    scale = local_params.pop(-1)
                else:
                    b = local_params.pop(-1)
                    
                result = pylab.zeros(len(t))
                for l,a in zip(local_params[::2],local_params[1::2]):
                    if guess.has_key('t_ag'): l = 1.0/(1.0/l + 1.0/t_ag)
                    if guess.has_key('t_d3'): l *= t_d3
                    if guess.has_key('a_fix'): a *= scale
                    result += abs(a)*pylab.exp(-t/abs(l))/(1.0-pylab.exp(-tpulse/abs(l)))
                return result
                

        istart = pylab.find( self.t[curve_num] >= tstart )[0]
        if tend is not None:
            iend = pylab.find( self.t[curve_num] <= tend )[-1]
        else:
            iend = len(self.t[curve_num])

        # sigma (std dev.) is equal to sqrt of intensity, see
        # Lakowicz, principles of fluorescence spectroscopy (2006)
        # sigma gets inverted to find a weight for leastsq, so avoid zero
        # and imaginary weight doesn't make sense.
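        # (curve_fit minimizes sum(((y - f(t))/sigma)**2), so each point's
        # weight is 1/sigma; for Poisson counting noise sigma = sqrt(N),
        # hence the sqrt below.)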
        trace_scaling = self.curves[0].max()/self.raw_curves[0].max()
        sigma = pylab.sqrt(self.raw_curves[curve_num][istart:iend]*trace_scaling) # use raw curves for actual noise, scale properly
        self.bestparams, self.pcov = curve_fit( f, self.t[curve_num][istart:iend],
                                        self.curves[curve_num][istart:iend],
                                        p0=initparams,
                                        sigma=sigma)

        if pylab.size(self.pcov) > 1 and len(pylab.find(self.pcov == pylab.inf))==0:
            self.stderr = pylab.sqrt( pylab.diag(self.pcov) ) # is this true?
        else:
            self.stderr = [pylab.inf]*len(guess)
            
        stderr = [numpy.NaN]*len(params)
        for i,p in enumerate(self.bestparams):
            params[ free_params[i] ] = p
            stderr[ free_params[i] ] = self.stderr[i]
        self.stderr = stderr

        self.fitresults = dict()
        keys = keylist[:]
        stderr = stderr[:]
        p = params[:]
        if deconvolve:
            tshift = p.pop(-1)
            self.fitresults['tshift'] = tshift
            tshift_err = stderr.pop(-1)
            self.fitresults['tshift_err'] = tshift_err
            keys.pop(-1)
            self.fitresults['irf_dispersion'] = self.irf_dispersion

        b = p.pop(-1)
        self.fitresults['b'] = b
        b_err = stderr.pop(-1)
        self.fitresults['b_err'] = b_err
        keys.pop(-1)
        self.lifetime = [ abs(l) for l in p[::2] ]
        for l,a,lkey,akey in zip(p[::2],p[1::2],keys[::2],keys[1::2]):
            if guess.has_key('t_ag'): l = 1.0/(1.0/l + 1.0/b)
            if guess.has_key('t_d3'): l *= b
            if guess.has_key('a_fix'): a *= b
            self.fitresults[lkey] = abs(l)
            self.fitresults[akey] = abs(a)
        for l,a,lkey,akey in zip(stderr[::2],stderr[1::2],errlist[::2],errlist[1::2]):
            self.fitresults[lkey] = l
            self.fitresults[akey] = a
        self.fitresults['l0_int'] = self.fitresults['l0']*self.fitresults['a0']

        if num_exp > 1: self.fitresults['l1_int'] = self.fitresults['l1']*self.fitresults['a1']
        if num_exp > 2: self.fitresults['l2_int'] = self.fitresults['l2']*self.fitresults['a2']
        if num_exp > 3: self.fitresults['l3_int'] = self.fitresults['l3']*self.fitresults['a3']
        if num_exp > 4: self.fitresults['l4_int'] = self.fitresults['l4']*self.fitresults['a4']

        self.bestfit = f( self.t[curve_num][istart:iend], *self.bestparams )
        if deconvolve: self.model = fmodel( self.t[curve_num][istart:iend], *self.bestparams )
        
        Chi2 = pylab.sum( (self.bestfit - self.curves[0][istart:iend])**2 / sigma**2 )
        #Chi2 *= self.raw_curves[0].max()/self.curves[0].max() # undo any scaling
        mean_squares = pylab.mean( (self.bestfit - self.curves[0][istart:iend])**2 )
        degrees_of_freedom = len(self.bestfit) - len(free_params)
        self.fitresults['MSE'] = mean_squares/degrees_of_freedom
        self.fitresults['ReducedChi2'] = Chi2/degrees_of_freedom

        if verbose:
            print "Fit results: (Reduced Chi2 = %.3E)" % (self.fitresults['ReducedChi2'])
            print "             (MSE = %.3E)" % (self.fitresults['MSE'])
            print "  Offset/t_ag/scale = %.3f +-%.3e" % (self.fitresults['b'], self.fitresults['b_err'])
            print "  l0=%.3f +-%.3f ns, a0=%.3e +-%.3e" % (self.fitresults['l0'],
                                                            self.fitresults['l0_err'],
                                                            self.fitresults['a0'],
                                                            self.fitresults['a0_err'])
            if num_exp > 1:
                print "  l1=%.3f +-%.3f ns, a1=%.3e +-%.3e" % (self.fitresults['l1'],
                                                            self.fitresults['l1_err'],
                                                            self.fitresults['a1'],
                                                            self.fitresults['a1_err'])
            if num_exp > 2:
                print "  l2=%.3f +-%.3f ns, a2=%.3e +-%.3e" % (self.fitresults['l2'],
                                                            self.fitresults['l2_err'],
                                                            self.fitresults['a2'],
                                                            self.fitresults['a2_err'])
            if num_exp > 3:
                print "  l3=%.3f +-%.3f ns, a3=%.3e +-%.3e" % (self.fitresults['l3'],
                                                            self.fitresults['l3_err'],
                                                            self.fitresults['a3'],
                                                            self.fitresults['a3_err'])
            if num_exp > 4:
                print "  l4=%.3f +-%.3f ns, a4=%.3e +-%.3e" % (self.fitresults['l4'],
                                                            self.fitresults['l4_err'],
                                                            self.fitresults['a4'],
                                                            self.fitresults['a4_err'])
            print " "

        self.has_fit = True
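
# A minimal usage sketch (hypothetical: assumes `d` is an instance of the
# class above with its curves already loaded; names follow the method's own
# docstring and fitresults keys):
#
#     d.fit_exponential(tstart=2.0, tend=50.0,
#                       guess=dict(l1=0.5, a1=0.2, l0=5.0, a0=1.0, b=0.0))
#     print d.fitresults['l0'], '+-', d.fitresults['l0_err']   # lifetime [ns]
#
# With two lifetime/amplitude pairs in `guess`, num_exp is inferred as 2 and
# the fitted model is f(t) = a1*exp(-t/l1) + a0*exp(-t/l0) + b.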
Esempio n. 49
0
def plot_data(plot_var, pl, pr, fl, fr, q, time, location, i_lo_cut, i_up_cut):
    global first_run, e_small, i_plot, par_plot_color, par_plot_linestyle, s, clean_plot
    global plot_p_min, plot_p_max, plot_var_min, plot_var_max, use_color_list, i_plot, handle_list, tightened, highlighted, plotted_init_slope

    f_lo_cut = fl[0]
    f_up_cut = fr[-1]
    p_lo_cut = pl[0]
    p_up_cut = pr[-1]

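    # The three plot variables differ only by powers of momentum p:
    # 'f' is the distribution function itself, 'n' the number spectrum
    # dn/dp = 4*pi*p**2*f, and 'e' the energy spectrum de/dp = 4*pi*c**2*p**3*f
    # (presumably ultra-relativistic particles, E ~ p*c, given the c**2 factor).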
    if plot_var == 'f':
        plot_var_l = fl
        plot_var_r = fr
        plot_var_lo_cut = f_lo_cut
        plot_var_up_cut = f_up_cut
    elif plot_var == 'n':
        plot_var_l = 4 * pi * fl * pl**2
        plot_var_r = 4 * pi * fr * pr**2
        plot_var_lo_cut = 4 * pi * f_lo_cut * p_lo_cut**2
        plot_var_up_cut = 4 * pi * f_up_cut * p_up_cut**2
    elif plot_var == 'e':
        plot_var_l = 4 * pi * c**2 * fl * pl**3
        plot_var_r = 4 * pi * c**2 * fr * pr**3
        plot_var_lo_cut = 4 * pi * c**2 * f_lo_cut * p_lo_cut**3
        plot_var_up_cut = 4 * pi * c**2 * f_up_cut * p_up_cut**3
    if (first_run):
        s = plt.subplot(122)

    if clean_plot:
        s.cla()

    s.set_xscale('log')
    s.set_yscale('log')

    plt.xlabel('$p/m_e c$', labelpad=0.2, fontsize=fontsize_axlabels)
    plt.ylabel('d$' + plot_var + ' / $d$p$',
               fontsize=fontsize_axlabels,
               labelpad=-0.)
    plt.tick_params(axis='both', which='major', labelsize=fontsize_axlabels)

    if first_run:
        plot_p_min = p_lo_cut
        plot_p_max = p_up_cut

        handle_list = []

        if (plot_var == "e"):
            plot_var_min = 0.1 * e_small
        elif (plot_var == "f"):
            plot_var_min = 0.1 * e_small / (4 * pi * (c**2) * p_max_fix**3)
        elif (plot_var == "n"):
            plot_var_min = 0.1 * e_small / (c * p_max_fix)

    if par_fixed_dims:  # overwrite
        if (plot_var != "e"):
            plt.ylim(9.5e-13, 1.e-3)
            plt.xlim(p_fix[1], p_fix[-2] * 0.5)
        else:
            plt.ylim(e_small, 1.e-2)
            plt.xlim(p_fix[1], p_fix[-2] * 0.5)

    if (par_plot_e3):
        plt.ylim(10. * plot_var_min,
                 10. * max(plot_var_r) * max(pr)**3)  # override

    if (par_vis_all_borders):
        plt.grid()
    else:
        s.spines['top'].set_visible(False)
        s.spines['right'].set_visible(False)
        s.spines['bottom'].set_linewidth(1.5)
        s.spines['left'].set_linewidth(1.5)

    if (par_visible_gridy):
        plt.grid(True, 'major', 'y', ls='--', lw=.5, c='k', alpha=.3)
    if (par_visible_gridx):
        plt.grid(True, 'major', 'x', ls='--', lw=.5, c='k', alpha=.3)

# plot floor value
    p_range = linspace(s.get_xlim()[0], s.get_xlim()[1])
    e_smalls = zeros(len(p_range))
    e_smalls[:] = e_small
    if (plot_var == "e"):
        plt.plot(p_range, e_smalls, color="xkcd:azure", label="$e_{small}$")
    elif (plot_var == "n"):
        plt.plot(p_range,
                 e_small / (c * p_range),
                 color="xkcd:azure",
                 label="$n_{small}$")

    par_plot_color = set_plot_color(par_plot_color, i_plot, colors)
    # par_plot_linestyle = set_plot_color(par_plot_linestyle, i_plot, linestyles)      ### WARNING temporary trick

    spectrum_label = ("d$%s$(p)/d$p$ %s, \n[%3.1f, %3.1f, %3.1f] kpc " %
                      (plot_var, par_test_name, location[0] / 1000.,
                       location[1] / 1000., location[2] / 1000.))
    spectrum_label = ("d$%s$(p)/d$p$ [%3.1f, %3.1f, %3.1f] kpc " %
                      (plot_var, location[0] / 1000., location[1] / 1000.,
                       location[2] / 1000.))
    # spectrum_label  = ("d$%s$/d$p$, %s (  )" % (plot_var, par_test_name) ) #
    # spectrum_label  = (" %s (z=%3.1fkpc)" % ( par_test_name , location[2]/1000.) )

    for i in range(0, size(fr)):
        if (par_plot_e3):  # multiply times gamma**3
            plt.plot([pl[i], pr[i]], [(pl[i]**3) * plot_var_l[i],
                                      (pr[i]**3) * plot_var_r[i]],
                     lw=par_plot_width,
                     color=par_plot_color,
                     alpha=par_alpha)
            plt.plot([pl[i], pl[i]],
                     [plot_var_min, (pl[i]**3) * plot_var_l[i]],
                     lw=par_plot_width,
                     color=par_plot_color,
                     alpha=par_alpha)
            plt.plot([pr[i], pr[i]],
                     [plot_var_min, (pr[i]**3) * plot_var_r[i]],
                     lw=par_plot_width,
                     color=par_plot_color,
                     alpha=par_alpha)
        else:
            plt.plot([pl[i], pr[i]], [plot_var_l[i], plot_var_r[i]],
                     lw=2 * par_plot_width,
                     solid_capstyle='round',
                     color=par_plot_color,
                     alpha=par_alpha,
                     linestyle=par_plot_linestyle)
            plt.plot([pl[i], pl[i]], [plot_var_r[i - 1], plot_var_l[i]],
                     lw=2 * par_plot_width,
                     solid_capstyle='round',
                     color=par_plot_color,
                     alpha=par_alpha,
                     linestyle=par_plot_linestyle)
            plt.plot([pl[i], pl[i]], [plot_var_min, plot_var_l[i]],
                     lw=par_plot_width,
                     solid_capstyle='round',
                     color="xkcd:gray",
                     alpha=par_alpha * 0.2)
            plt.plot([pr[i], pr[i]], [plot_var_min, plot_var_r[i]],
                     lw=par_plot_width,
                     solid_capstyle='round',
                     color="xkcd:gray",
                     alpha=par_alpha * 0.2)
    if (not par_plot_e3):
        plt.plot([pr[size(fr) - 1], pr[size(fr) - 1]],
                 [plot_var_r[size(fr) - 1], plot_var_min],
                 lw=2 * par_plot_width,
                 solid_capstyle='round',
                 color=par_plot_color,
                 alpha=par_alpha)  # rightmost edge
    spectrum = mlines.Line2D([], [],
                             color=par_plot_color,
                             solid_capstyle='round',
                             lw=par_plot_width,
                             alpha=par_alpha,
                             linestyle=par_plot_linestyle,
                             label=spectrum_label)

    if (not highlighted):
        if (len(highlight_bins) > 0):
            par_plot_color = set_plot_color(par_plot_color, i_plot,
                                            xkcd_colorsh)
            for ind in highlight_bins:
                i = ind
                i1 = i + 1
                plt.fill([p_fix[i], p_fix[i1], p_fix[i1], p_fix[i]],
                         [e_small, e_small, 10., 10.],
                         color="mediumseagreen",
                         alpha=0.20)
            if (not (clean_plot is True)):
                highlighted = True

    if ((par_plot_init_slope is True) and (plotted_init_slope is False)):
        if (plot_var == 'n'):
            init_spec = plt.plot(p_range, (1.0 + 2.e-1) * f_init * 4 * pi *
                                 p_range**(-(q_init - 2)),
                                 color='gray',
                                 linestyle=":",
                                 alpha=0.75,
                                 label=r"d$n(p,t)$/d$p$, $E<1/bt$",
                                 lw=3)  # initial spectrum
        if (plot_var == 'e'):
            init_spec = plt.plot(p_range, (1.0 + 2.e-1) * f_init * 4 * pi *
                                 p_range**(-(q_init - 3)),
                                 color='gray',
                                 linestyle=":",
                                 alpha=0.45,
                                 label=r"d$e(p,t)$/d$p$, $E<1/bt$",
                                 lw=3)  # initial spectrum
        if (not (clean_plot is True)):
            plotted_init_slope = True  # if cleaning plot is on, init slope must be replotted each iteration

    if (par_visible_title):
        if (par_simple_title):
            plt.title("Spectrum of %s(p), Time = %7.3f" % (plot_var, time))
        else:
            plt.title(
                "Spectrum of %s(p) \n Time = %7.3f | location: %7.2f %7.2f %7.2f "
                % (plot_var, time, location[0], location[1], location[2]))
    if (tightened is not True):
        plt.tight_layout()
        tightened = True

    if (par_plot_legend):
        handle_list.append(spectrum)
        plt.legend(handles=handle_list,
                   loc=default_legend_loc
                   if par_legend_loc == (-2, -2) else par_legend_loc,
                   edgecolor="gray",
                   facecolor="white",
                   framealpha=0.65,
                   fontsize=fontsize_legend)

    if (clean_plot):
        handle_list = []

    if (first_run is True):
        first_run = False

    if (hide_axes is True):
        s.axis('off')  # hides all axes for the plot, useful for combining multiple plots

    return s
Esempio n. 50
0
            varTrain,
            varForecast,
            forecastVar,
            forecastMonth,
            predMonth,
            startYr,
            numYrsRequired,
            region,
            hemStr,
            iceType,
            normalize=0,
            outWeights=outWeights,
            weight=weightBool)
        predForecastData = [1]
        predForecastData.append(predVarForecast)
        predTrainData = np.ones((size(yrsTrain)))
        predTrainData = np.column_stack((predTrainData, array(predVarTrain)))

        model = sm.OLS(extentDetrendTrain, predTrainData)
        fit = model.fit()
        extentDetrendForecast = fit.predict(predForecastData)[0]

        extentTrendPersist = (lineTrain[-1] + (lineTrain[-1] - lineTrain[-2]))
        extentForrAbs = extentDetrendForecast + extentTrendPersist

        if (weightBool == 0):
            worksheet.write(row, col + 3, extentForrAbs)
        else:
            worksheet.write(row, col + 4, extentForrAbs)

    weightBool = 0
Esempio n. 51
0
data = pl.loadtxt("thread_info.dat")

nthread = int(max(data[:,0])) + 1
print "Number of threads:", nthread

tasks = {}
tasks[-1] = []
for i in range(nthread):
    tasks[i] = []


start_t = min(data[:,4])
end_t = max(data[:,5])
data[:,4] -= start_t
data[:,5] -= start_t
num_lines = pl.size(data) / 8
for line in range(num_lines):
    thread = int(data[line,0])
    tasks[thread].append({})
    tasks[thread][-1]["type"] = types[ str(int(data[line,1])) ]
    tasks[thread][-1]["subtype"] = subtypes[str(int(data[line,2]))]
    tasks[thread][-1]["tic"] = int(data[line,4]) / CPU_CLOCK * 1000
    tasks[thread][-1]["toc"] = int(data[line,5]) / CPU_CLOCK * 1000
    tasks[thread][-1]["t"] = (tasks[thread][-1]["toc"]+tasks[thread][-1]["tic"]) / 2

combtasks = {}
combtasks[-1] = []
for i in range(nthread):
    combtasks[i] = []

for thread in range(nthread):
Esempio n. 52
0
File: view.py Project: jwblin/qtcm
from matplotlib.toolkits.basemap import Basemap
import copy
import os
import pylab as p
import Scientific.IO.NetCDF as S


#--- Get data (assuming lon goes from 0 to 360):  Make arrays cyclic
#    in longitude:

time_idx = 5   #@@@ USER ADJUSTABLE

f = S.NetCDFFile('qm_seasonal_1yr.nc', mode='r')
lat = f.variables['lat'].getValue()
lon = f.variables['lon'].getValue()
u1_all = f.variables['u1'].getValue()
if p.allclose(lon[0], 0):
    u1 = p.zeros( (p.size(lat), p.size(lon)+1) )
    u1[:,0:-1] = u1_all[time_idx,:,:]
    u1[:,-1] = u1_all[time_idx,:,0]

tmp = copy.copy(lon)
lon = p.zeros((p.size(tmp)+1,))
lon[0:-1] = tmp[:]
lon[-1] = tmp[0]+360
del tmp

f.close()


#--- Mapping information:

map = Basemap( projection='cyl', resolution='l'
Esempio n. 53
0
def plot_phases(in_file,*arguments):
    flags = ['histogram','phases']
    log_flags = ['log']  # flags that switch on logarithmic scaling
    plot_flag = 0
    log_flag = 0
    def no_log(x):
        return x

    fig = pylab.figure(1)
    ax = fig.add_subplot(111)
    try:
        img = spimage.sp_image_read(in_file,0)
    except:
        print "Error when reading %s.\n" % in_file
        return
    values = img.image.reshape(pylab.size(img.image))

    for flag in arguments:
        if flag in flags:
            plot_flag = flag
        elif flag in log_flags:
            log_flag = flag
        else:
            print "unknown flag %s" % flag

    if log_flag == 'log':
        log_function = pylab.log
    else:
        log_function = no_log

    if plot_flag == 'phases':
Esempio n. 54
0
shape_data_ss = (pl.shape(data_ecog)[0], pl.shape(data_ecog)[1]/int(f_sampling/f_subsample))
data_ecog_lp_ss = pl.memmap(os.path.join(memap_folder, 'data_ecog_lp_ss.dat'), dtype='int16', mode='w+', shape=shape_data_ss)
for i in pl.arange(0, pl.shape(data_ecog)[0]):
    data_ecog_lp_ss[i,:] = signal.decimate(filters.low_pass_filter(data_ecog[i,:], Fsampling=f_sampling, Fcutoff=f_lp_cutoff), int(f_sampling/f_subsample))
    data_ecog_lp_ss.flush()
    print(i)
pl.save(os.path.join(memap_folder, 'data_ecog_lp_ss.npy'), data_ecog_lp_ss)



spike_samples = tf.spikedetect(data_probe_hp, threshold_multiplier=6.5, bad_channels=probe_bad_channels)
pl.save(os.path.join(memap_folder, 'spike_samples.npy'), spike_samples)


spike_samples_clean = spike_samples
for i in pl.arange(pl.size(spike_samples_clean)-1,-1,-1):
    data = data_probe_hp[:, spike_samples[i]-60:spike_samples[i]+60]
    stdevs = sp.std(data,1)
    if np.max(data) > 3000 or pl.any(stdevs>600):
        spike_samples_clean = pl.delete(spike_samples_clean, i)
    if i%100==0:
        print(i)
spike_samples_clean = pl.delete(spike_samples_clean, 0)
pl.save(os.path.join(memap_folder, 'spike_samples_clean.npy'), spike_samples_clean)

channels = np.empty(0)
for i in pl.arange(0, pl.size(spike_samples_clean)):
    data = np.array(data_probe_hp[:, spike_samples_clean[i]].tolist())
    channels = np.append(channels, np.argmax(data))
    if i%100==0:
        print(i)
Esempio n. 55
0
def diff_xy(nc1,nc2,params,tms,lev=None, v1=None, v2=None):
    """
    DIFF plot crta
    RADI ZA WRF, WRFChem
    nc1,nc2 - nc file
    params='U10' - za sad radi samo sa jednim parametrom (problem kod returna)
    tms- vremena u kojem crtamo file [0,10,20...]    
    lev level na kojoj visini uzimamo
    v1,v2 - color range
    npr
    
    from collections import OrderedDict
    plot_xy(nc,OrderedDict([('T2','pcolor'),('UV10','quiver')]),2,1)

    """
    
    import matplotlib.pyplot as plt
    import ggWRFutils as gW
    from datetime import datetime
    import numpy as np   
    from pylab import size
    if size(params)>1:
        wvar1={}
        for p in params:
            if p=='WS10':
                wvar1[p]=np.sqrt(nc1.variables['U10'][:]**2+nc1.variables['V10'][:]**2)
            elif p=='UV10': 
                wvar1['U10']=nc1.variables['U10'][:,:,:]    
                wvar1['V10']=nc1.variables['V10'][:,:,:]    
            elif p=='UV':
                wvar1['U']=nc1.variables['U'][:,lev,:,:]     
                wvar1['V']=nc1.variables['V'][:,lev,:,:]     
            elif len(nc1.variables[p].shape) > 3:
                wvar1[p]=nc1.variables[p][:,lev,:,:]     
            else:                
                wvar1[p]=nc1.variables[p][:]  
        wvar2={}
        for p in params:
            if p=='WS10':
                wvar2[p]=np.sqrt(nc2.variables['U10'][:]**2+nc2.variables['V10'][:]**2)
            elif p=='UV10': 
                wvar2['U10']=nc2.variables['U10'][:,:,:]    
                wvar2['V10']=nc2.variables['V10'][:,:,:]    
            elif p=='UV':
                wvar2['U']=nc2.variables['U'][:,lev,:,:]     
                wvar2['V']=nc2.variables['V'][:,lev,:,:]     
            elif len(nc2.variables[p].shape) > 3:
                wvar2[p]=nc2.variables[p][:,lev,:,:]     
            else:                
                wvar2[p]=nc2.variables[p][:]  
    elif size(params)==1:
        p=params                
        wvar1={}
        if p=='WS10':
            wvar1[p]=np.sqrt(nc1.variables['U10'][:]**2+nc1.variables['V10'][:]**2)
        elif p=='UV10': 
            wvar1['U10']=nc1.variables['U10'][:,:,:]    
            wvar1['V10']=nc1.variables['V10'][:,:,:]    
        elif p=='UV':
            wvar1['U']=nc1.variables['U'][:,lev,:,:]     
            wvar1['V']=nc1.variables['V'][:,lev,:,:]     
        elif len(nc1.variables[p].shape) > 3:
            wvar1[p]=nc1.variables[p][:,lev,:,:]     
        else:                
            wvar1[p]=nc1.variables[p][:]  
        wvar2={}
        if p=='WS10':
            wvar2[p]=np.sqrt(nc2.variables['U10'][:]**2+nc2.variables['V10'][:]**2)
        elif p=='UV10': 
            wvar2['U10']=nc2.variables['U10'][:,:,:]    
            wvar2['V10']=nc2.variables['V10'][:,:,:]    
        elif p=='UV':
            wvar2['U']=nc2.variables['U'][:,lev,:,:]     
            wvar2['V']=nc2.variables['V'][:,lev,:,:]     
        elif len(nc2.variables[p].shape) > 3:
            wvar2[p]=nc2.variables[p][:,lev,:,:]     
        else:                
            wvar2[p]=nc2.variables[p][:]  
       
    Nx,Ny,Nz,lon,lat,dx,dy=gW.getDimensions(nc1)
#    fig_out=[]
#    for p in params:    
#        varIN=wvar1[p][tms,:,:] - wvar2[p][tms,:,:]      
#        fig=plt.figure()          
#        plt.pcolor(lon,lat,varIN, cmap='RdBu',vmin=varIN.min(),vmax=varIN.max(), shading='flat')
#        plt.colorbar()
#        plt.xlim(lon.min(),lon.max())
#        plt.ylim(lat.min(),lat.max())
#        fig_out.append(fig)
    varIN=wvar1[p][tms,:,:] - wvar2[p][tms,:,:]  
    if v1 is None:
        plt.pcolor(lon,lat,varIN, cmap='RdBu',vmin=varIN.min(),vmax=varIN.max(), shading='flat')
    else:
        plt.pcolor(lon,lat,varIN, cmap='RdBu',vmin=v1,vmax=v2, shading='flat')
    plt.colorbar()
    plt.xlim(lon.min(),lon.max())
    plt.ylim(lat.min(),lat.max())
    fig_out=plt.gcf()
    return fig_out
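
# A minimal usage sketch (hypothetical file names; assumes two WRF runs on
# matching grids, as the docstring above requires):
#
#     from netCDF4 import Dataset
#     nc1 = Dataset('wrfout_run1.nc')
#     nc2 = Dataset('wrfout_run2.nc')
#     fig = diff_xy(nc1, nc2, 'T2', 0)          # 2-m temperature difference
#     fig = diff_xy(nc1, nc2, 'U', 0, lev=10)   # wind component at level 10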
Esempio n. 56
0
def stereoplot(strike, dip, filename):    
    # Here is the stereonet plotting section
    
    bigr = 1.2
    phid = pylab.arange(2, 90, 2)  # Angular range for
    phir = phid * pylab.pi / 180
    omegad = 90 - phid 
    omegar = pylab.pi / 2 - phir
    
    # Set up for plotting great circles with centers along
    # positive x-axis
    
    x1 = bigr * pylab.tan(phir)
    y1 = pylab.zeros(pylab.size(x1))
    r1 = bigr / pylab.cos(phir)
    theta1ad = (180 - 80) * pylab.ones(pylab.size(x1))
    theta1ar = theta1ad * pylab.pi / 180
    theta1bd = (180 + 80) * pylab.ones(pylab.size(x1))
    theta1br = theta1bd * pylab.pi / 180
    
    # Set up for plotting great circles 
    # with centers along the negative x-axis
    x2 = -1 * x1
    y2 = y1
    r2 = r1
    theta2ad = -80 * pylab.ones(pylab.size(x2))
    theta2ar = theta2ad * pylab.pi / 180
    theta2bd = 80 * pylab.ones(pylab.size(x2))
    theta2br = theta2bd * pylab.pi / 180
    
    
    # Set up for plotting small circles
    # with centers along the positive y-axis
    y3 = bigr / pylab.sin(omegar)
    x3 = pylab.zeros(pylab.size(y3))
    r3 = bigr / pylab.tan(omegar)
    theta3ad = 3 * 90 - omegad
    theta3ar = 3 * pylab.pi / 2 - omegar
    theta3bd = 3 * 90 + omegad
    theta3br = 3 * pylab.pi / 2 + omegar
    
    # Set up for plotting small circles
    # with centers along the negative y-axis
    y4 = -1 * y3
    x4 = x3
    r4 = r3
    
    theta4ad = 90 - omegad
    theta4ar = pylab.pi / 2 - omegar
    theta4bd = 90 + omegad
    theta4br = pylab.pi / 2 + omegar
    
    
    # Group all x, y, r, and theta information for great cricles 
    phi = pylab.append(phid, phid, 0)
    x = pylab.append(x1, x2, 0)
    y = pylab.append(y1, y2, 0)
    r = pylab.append(r1, r2)
    
    thetaad = pylab.append(theta1ad, theta2ad, 0)
    thetaar = pylab.append(theta1ar, theta2ar, 0)
    thetabd = pylab.append(theta1bd, theta2bd, 0)
    thetabr = pylab.append(theta1br, theta2br, 0)
    
    # Plot portions of all great circles that lie inside the
    # primitive circle, with thick lines (1 pt.) at 10 degree increments
    
    for i in range(0, len(x)):
        thd = pylab.arange(thetaad[i], thetabd[i] + 1, 1)
        thr = pylab.arange(thetaar[i], thetabr[i] + pylab.pi / 180, pylab.pi / 180)
        xunit = x[i] + r[i] * pylab.cos(pylab.radians(thd))
        yunit = y[i] + r[i] * pylab.sin(pylab.radians(thd))
        # p = pylab.plot(xunit,yunit,'b',lw=.5) #commented out to remove small verticle lines
        pylab.hold(True)   
    
    
    # Now "blank out" the portions of the great circle cyclographic traces 
    # within 10 degrees of the poles of the primitive circle.
    rr = bigr / pylab.tan(80 * pylab.pi / 180)
    ang1 = pylab.arange(0, pylab.pi + pylab.pi / 180, pylab.pi / 180)
    xx = pylab.zeros(pylab.size(ang1)) + rr * pylab.cos(ang1)
    yy = bigr / pylab.cos(10 * pylab.pi / 180) * pylab.ones(pylab.size(ang1)) - rr * pylab.sin(ang1)
    p = pylab.fill(xx, yy, 'w')
    yy = -bigr / pylab.cos(10 * pylab.pi / 180) * pylab.ones(pylab.size(ang1)) + rr * pylab.sin(ang1)
    p = pylab.fill(xx, yy, 'w')
    
    for i in range(1, len(x)):
        thd = pylab.arange(thetaad[i], thetabd[i] + 1, 1)
        thr = pylab.arange(thetaar[i], thetabr[i] + pylab.pi / 180, pylab.pi / 180)
        xunit = x[i] + r[i] * pylab.cos(pylab.radians(thd))
        yunit = y[i] + r[i] * pylab.sin(pylab.radians(thd))
        
        if pylab.mod(phi[i], 10) == 0:
            p = pylab.plot(xunit, yunit, 'b', lw=1)
            angg = thetaad[i]
        pylab.hold(True)
    
    
    # Now "blank out" the portions of the great circle cyclographic traces 
    # within 2 degrees of the poles of the primitive circle.
    rr = bigr / pylab.tan(88 * pylab.pi / 180)
    ang1 = pylab.arange(0, pylab.pi + pylab.pi / 180, pylab.pi / 180)
    xx = pylab.zeros(pylab.size(ang1)) + rr * pylab.cos(ang1)
    yy = bigr / pylab.cos(2 * pylab.pi / 180) * pylab.ones(pylab.size(ang1)) - rr * pylab.sin(ang1)
    
    p = pylab.fill(xx, yy, 'w')
    yy = -bigr / pylab.cos(2 * pylab.pi / 180) * pylab.ones(pylab.size(ang1)) + rr * pylab.sin(ang1)
    p = pylab.fill(xx, yy, 'w')
    
    
    # Group all x, y, r, and theta information for small circles
    phi = pylab.append(phid, phid, 0)
    x = pylab.append(x3, x4, 0)
    y = pylab.append(y3, y4, 0)
    r = pylab.append(r3, r4)
    
    thetaad = pylab.append(theta3ad, theta4ad, 0)
    thetaar = pylab.append(theta3ar, theta4ar, 0)
    thetabd = pylab.append(theta3bd, theta4bd, 0)
    thetabr = pylab.append(theta3br, theta4br, 0)
    
    # Plot primitive circle
    thd = pylab.arange(0, 360 + 1, 1)
    thr = pylab.arange(0, 2 * pylab.pi + pylab.pi / 180, pylab.pi / 180)
    xunit = bigr * pylab.cos(pylab.radians(thd))
    
    yunit = bigr * pylab.sin(pylab.radians(thd))
    p = pylab.plot(xunit, yunit)
    pylab.hold(True)
        
    # Plot portions of all small circles that lie inside the
    # primitive circle, with thick lines (1 pt.) at 10 degree increments
    
    for i in range(0, len(x)):
        thd = pylab.arange(thetaad[i], thetabd[i] + 1, 1)
        thr = pylab.arange(thetaar[i], thetabr[i] + pylab.pi / 180, pylab.pi / 180)
        xunit = x[i] + r[i] * pylab.cos(pylab.radians(thd))
        yunit = y[i] + r[i] * pylab.sin(pylab.radians(thd))
        blug = pylab.mod(thetaad[i], 10)
        if pylab.mod(phi[i], 10) == 0:
            p = pylab.plot(xunit, yunit, 'b', lw=1)
            angg = thetaad[i]
        # else: #Commented out to remove the small horizontal lines
            # p = pylab.plot(xunit,yunit,'b',lw=0.5)
        pylab.hold(True)
    
    # Draw thick north-south and east-west diameters
    xunit = [-bigr, bigr]
    yunit = [0, 0]
    p = pylab.plot(xunit, yunit, 'b', lw=1)
    pylab.hold(True)
    xunit = [0, 0]
    yunit = [-bigr, bigr]
    p = pylab.plot(xunit, yunit, 'b', lw=1)
    pylab.hold(True)
    
    # This is the plotting part

    trend1 = strike
    plunge1 = pylab.absolute(dip)
    # num = leng(lines1(:,1));
    trendr1 = [foo * pylab.pi / 180 for foo in trend1]
    
    plunger1 = [foo * pylab.pi / 180 for foo in plunge1]
    rho1 = [bigr * pylab.tan(pylab.pi / 4 - ((foo) / 2)) for foo in plunger1]
        # polarb plots ccl from 3:00, so convert to cl from 12:00
    # pylab.polar(pylab.pi/2-trendr1,rho1,'o')
    pylab.plot(9000, 90000, 'o', markerfacecolor="b", label='Positive Dip')
    pylab.plot(9000, 90000, 'o', markerfacecolor="w", label='Negative Dip')      
    pylab.legend(loc=1) 
    for n in range(0, len(strike)):
        if dip[n] > 0:
            pylab.plot(rho1[n] * pylab.cos(pylab.pi / 2 - trendr1[n]), rho1[n] * pylab.sin(pylab.pi / 2 - trendr1[n]), 'o', markerfacecolor="b", label='Positive Dip')
        else:
            pylab.plot(rho1[n] * pylab.cos(pylab.pi / 2 - trendr1[n]), rho1[n] * pylab.sin(pylab.pi / 2 - trendr1[n]), 'o', markerfacecolor="w", label='Negative Dip')
         
    # above is self
    pylab.axis([-bigr, bigr, -bigr, bigr])
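
# A minimal usage sketch (hypothetical values; strike and dip in degrees,
# negative dips drawn as open symbols per the legend above):
#
#     strikes = [30.0, 120.0, 200.0]
#     dips    = [45.0, -10.0, 80.0]
#     stereoplot(strikes, dips, 'stereonet.png')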
Esempio n. 57
0
def device_raw_data_loading(**kwarg):

    # Extraction of inputs
    device_data_set_address = kwarg["device_data_set_address"]
    zero_conversion_threshold = array(kwarg["zero_conversion_threshold"])
    number_of_subregions = int(array(kwarg["number_of_subregions"]))
    number_of_symbols_per_preamble = array(
        kwarg["number_of_symbols_per_preamble"])
    number_of_chips_per_subregion = array(
        kwarg["number_of_chips_per_subregion"])
    time_length_of_a_single_chip_in_second = array(
        kwarg["time_length_of_a_single_chip_in_second"])
    sampling_frequency = array(kwarg["sampling_frequency"])
    communication_frequency = array(kwarg["communication_frequency"])
    characteristics_extractor_methods = kwarg[
        "characteristics_extractor_methods"]
    project_name = kwarg["project_name"]

    # Extracting the Address of all Records in the 'DataSet Folder' for a Single Device
    list_of_records = os.listdir(device_data_set_address)

    # Extracting of Essential Parameters
    number_of_subregions_per_preamble = int(number_of_subregions /
                                            number_of_symbols_per_preamble)

    # extraction of modules
    selected_methods = importer_methods_manager(
        project_name, characteristics_extractor_methods)

    # Extracting all Records of Current Device
    overall_burst_index = 0
    vertical_hashmap_of_all_bursts = {}
    for records_Index in [0, 1]:  #range(len(list_of_records)):
        name_of_current_record = list_of_records[records_Index]
        print("    Record:" + str(records_Index))
        address_of_current_record = device_data_set_address + "/" + name_of_current_record

        # Loading a Single Record
        record = loadmat(address_of_current_record)
        record = csc_matrix(record['sparse_matrix'])
        record = record.toarray()
        # Extracting the 'Bursts' and 'subRegions' of a Single Record
        threshold = zero_conversion_threshold * amax(record)
        bursts_indices_matrix = Burst_Index_Extractor(
            record, threshold)  # indices_of_Bursts = find (
        # content_of_Current_Record > .1 * max ( content_of_Current_Record ) )

        key_values = bursts_indices_matrix.keys()

        for burst_Index in [0, 1]:  #range(len(key_values)):
            print("        burst_Index:" + str(burst_Index))
            temp = bursts_indices_matrix[str(burst_Index)]

            starting_point = temp[0]
            ending_point = temp[1]

            current_burst = record[starting_point:ending_point, 0]

            # Extraction of Preamble
            length_of_a_single_preamble = int(
                number_of_symbols_per_preamble *
                number_of_subregions_per_preamble *
                number_of_chips_per_subregion *
                time_length_of_a_single_chip_in_second * sampling_frequency)

            if size(current_burst, 0) < length_of_a_single_preamble:
                # zero-pad the burst up to one full preamble length
                padding = zeros(
                    length_of_a_single_preamble - size(current_burst, 0))
                current_burst = hstack((current_burst, padding))
            else:
                current_burst = array(
                    current_burst[0:length_of_a_single_preamble])
            current_burst = array(current_burst)

            # burst phase compensation
            current_burst = phase_compensator(
                preamble=current_burst,
                sampling_frequency=sampling_frequency,
                communication_frequency=communication_frequency)
            # TODO: Omit these lines

            # subRegions of 'current_burst'
            length_of_a_single_subregion = int(
                size(current_burst) / number_of_subregions)

            pure_all_subregions = []
            vertical_hash_map_of_a_single_burst = {}
            fields = {}
            all_subregions = {}  # per-burst accumulators keyed by characteristic name
            for subRegion_Index in arange(number_of_subregions + 1):
                if subRegion_Index < number_of_subregions:
                    starting_index = subRegion_Index * length_of_a_single_subregion
                    ending_index = (subRegion_Index +
                                    1) * length_of_a_single_subregion
                    a_single_subregion = current_burst[
                        starting_index:ending_index]

                    # Selected Character Extraction Methods
                    subRegion_characteristics = characteristics_extractor(
                        a_single_subregion, selected_methods)

                    pure_all_subregions = hstack(
                        (pure_all_subregions, a_single_subregion))
                    # TODO: this assignment is still in vain

                    # getting all characteristics of a single subRegion
                    for key in subRegion_characteristics.keys():
                        vertical_hash_map_of_a_single_burst[
                            key + "_single_subRegion_" +
                            str(subRegion_Index)] = array(
                                subRegion_characteristics[key])

                        fields[key] = key

                        # saving the current subRegion in the per-burst
                        # accumulator dict (writing into vars()/locals() does
                        # not persist inside a function, so use a real dict)
                        if key not in all_subregions:
                            all_subregions[key] = array(
                                subRegion_characteristics[key])
                        else:
                            all_subregions[key] = hstack(
                                (all_subregions[key],
                                 array(subRegion_characteristics[key])))

                else:
                    # saving and deleting all subRegions together in the Nr+1 subRegion
                    for key in fields.keys():
                        vertical_hash_map_of_a_single_burst[
                            key + "_single_subRegion_" +
                            str(subRegion_Index)] = array(
                                all_subregions[key])
                        del all_subregions[key]

                    # saving the Single Burst in the Dictionary of all Bursts
                    vertical_hashmap_of_all_bursts[
                        str(overall_burst_index
                            )] = vertical_hash_map_of_a_single_burst
                    overall_burst_index += 1

    return vertical_hashmap_of_all_bursts
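
# A minimal usage sketch (hypothetical paths and parameter values; the keyword
# names mirror the keys unpacked at the top of the function):
#
#     bursts = device_raw_data_loading(
#         device_data_set_address="data/device_01",
#         zero_conversion_threshold=0.1,
#         number_of_subregions=16,
#         number_of_symbols_per_preamble=8,
#         number_of_chips_per_subregion=4,
#         time_length_of_a_single_chip_in_second=1e-6,
#         sampling_frequency=20e6,
#         communication_frequency=2.45e9,
#         characteristics_extractor_methods=["variance", "skewness"],
#         project_name="my_project")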
Esempio n. 58
0
#mu2   = 1.4

mu2   = 115121.238297125

muMin = 1.0
muMax = 1.35
muN   = 10

muArr = pl.load('FeMu.dat')
#muArr = pl.linspace(muMin, muMax, muN)

mu1   = muArr[5]

#print muArr

I  = pl.zeros(pl.size(th_e), pl.Float)

for mu in muArr:
    I += LS_HP2(th_i, th_e, mu, mu2) / pl.size(muArr)


print mu

I_XRR  = a[:,1]
I_ORI  = LS(th_i, th_e)
I_HP1  = LS_HP1(th_i, th_e, mu1, mu2)
I_HP2  = LS_HP2(th_i, th_e, mu1, mu2)
I_KM   = LS_KM(th_i, th_e)
I_INT  = numModel(th_i, th_e, mu1, mu2, 10000, 0.001)