Code example #1
File: mobility.py Project: ejetzer/hall
def main(data_files1, data_files2,
         err, B=0.5003991, l=2e-2, w=1e-2,
         outfile=None):
    def mu_H(V_5, V_1):
        # Hall mobility from the Hall voltage V_5 and longitudinal voltage V_1
        return V_5 * l / (V_1 * B * w)

    def mu_He(V_5, V_1, V_5e, V_1e):
        # propagate voltage errors and fixed instrument uncertainties in quadrature
        return pylab.sqrt(
            ((l / (V_1 * B * w)) * V_5e)**2 +
            ((V_5 / (V_1 * B * w)) * 1e-4)**2 +
            ((V_5 * l / (V_1 * B * w)) * 1e-4)**2 +
            ((V_5 * l / (V_1**2 * B * w)) * V_1e)**2 +
            ((V_5 * l / (V_1 * B**2 * w)) * 2e-8)**2 +
            ((V_5 * l / (V_1 * B * w**2)) * 1e-4)**2)
    x5, x1, V5, V1, N5, N1 = [], [], [], [], [], []
    for df in data_files1:
        databox = spinmob.data.load(df)
        x, V, N = interpolate(databox)
        x5 += x
        V5 += V
        N5 += N
    for df in data_files2:
        databox = spinmob.data.load(df)
        x, V, N = interpolate(databox)
        x1 += x
        V1 += V
        N1 += N
    min_len = min([len(x5), len(x1)])
    xs = pylab.array(x5[:min_len])
    V5, V1 = pylab.array(V5[:min_len]), pylab.array(V1[:min_len])
    N5, N1 = pylab.array(N5[:min_len]), pylab.array(N1[:min_len])
    e5, e1 = err / pylab.sqrt(N5), err / pylab.sqrt(N1)
    ys, es = mu_H(V5, V1), mu_He(V5, V1, e5, e1)
    make_fig(xs, ys, es, outfile)
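
As a quick sanity check, the mobility formula can be evaluated directly with hypothetical voltages (geometry and field are the defaults above):

# Hypothetical spot check of mu_H = V_5 * l / (V_1 * B * w)
B, l, w = 0.5003991, 2e-2, 1e-2      # field (T), length (m), width (m)
V_5, V_1 = 1e-3, 10e-3               # assumed Hall and longitudinal voltages (V)
print(V_5 * l / (V_1 * B * w))       # ~0.4 m^2/(V s)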
Code example #2
File: pop.py Project: NeuroArchive/netpyne
    def createCellsFixedNum (self):
        ''' Create population cells based on fixed number of cells'''
        cells = []
        seed(sim.id32('%d'%(sim.cfg.seeds['loc']+self.tags['numCells']+sim.net.lastGid)))
        randLocs = rand(self.tags['numCells'], 3)  # create random x,y,z locations

        if sim.net.params.shape == 'cylinder':
            # Use the x,z random values
            rho = randLocs[:,0] # use x rand value as the radius rho in the interval [0, 1)
            phi = 2 * pi * randLocs[:,2] # use z rand value as the angle phi in the interval [0, 2*pi) 
            x = (1 + sqrt(rho) * cos(phi))/2.0
            z = (1 + sqrt(rho) * sin(phi))/2.0
            randLocs[:,0] = x
            randLocs[:,2] = z
    
        elif sim.net.params.shape == 'ellipsoid':
            # Use the x,y,z random values
            rho = np.power(randLocs[:,0], 1.0/3.0) # use x rand value as the radius rho in the interval [0, 1); cuberoot
            phi = 2 * pi * randLocs[:,1] # use y rand value as the angle phi in the interval [0, 2*pi) 
            costheta = (2 * randLocs[:,2]) - 1 # use z rand value as cos(theta) in the interval [-1, 1); ensures uniform dist 
            theta = arccos(costheta)  # obtain theta from cos(theta)
            x = (1 + rho * cos(phi) * sin(theta))/2.0
            y = (1 + rho * sin(phi) * sin(theta))/2.0
            z = (1 + rho * cos(theta))/2.0 
            randLocs[:,0] = x
            randLocs[:,1] = y
            randLocs[:,2] = z
        
        for icoord, coord in enumerate(['x', 'y', 'z']):
            if coord+'Range' in self.tags:  # if user provided absolute range, convert to normalized
                self.tags[coord+'normRange'] = [float(point) / getattr(sim.net.params, 'size'+coord.upper()) for point in self.tags[coord+'Range']]
            # constrain to range set by user
            if coord+'normRange' in self.tags:  # if normalized range, rescale random locations
                minv = self.tags[coord+'normRange'][0] 
                maxv = self.tags[coord+'normRange'][1] 
                randLocs[:,icoord] = randLocs[:,icoord] * (maxv-minv) + minv

        for i in self._distributeCells(int(sim.net.params.scale * self.tags['numCells']))[sim.rank]:
            gid = sim.net.lastGid+i
            self.cellGids.append(gid)  # add gid list of cells belonging to this population - not needed?
            cellTags = {k: v for (k, v) in self.tags.items() if k in sim.net.params.popTagsCopiedToCells}  # copy the pop tags listed in popTagsCopiedToCells to the cell tags
            cellTags['popLabel'] = self.tags['popLabel']
            cellTags['xnorm'] = randLocs[i,0] # set x location (um)
            cellTags['ynorm'] = randLocs[i,1] # set y location (um)
            cellTags['znorm'] = randLocs[i,2] # set z location (um)
            cellTags['x'] = sim.net.params.sizeX * randLocs[i,0] # set x location (um)
            cellTags['y'] = sim.net.params.sizeY * randLocs[i,1] # set y location (um)
            cellTags['z'] = sim.net.params.sizeZ * randLocs[i,2] # set z location (um)
            cells.append(self.cellModelClass(gid, cellTags)) # instantiate Cell object
            if sim.cfg.verbose: print('Cell %d/%d (gid=%d) of pop %s, on node %d, '%(i, sim.net.params.scale * self.tags['numCells']-1, gid, self.tags['popLabel'], sim.rank))
        sim.net.lastGid = sim.net.lastGid + self.tags['numCells'] 
        return cells
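
The sqrt(rho) in the cylinder branch is what makes the points uniform over the disk's area rather than clustered at the center; a minimal standalone check with NumPy:

import numpy as np
u = np.random.rand(100000)
r = np.sqrt(u)                 # r = sqrt(u), u ~ U[0,1)
# For an area-uniform disk, the fraction inside radius 0.5 is 0.5**2 = 0.25
print(np.mean(r < 0.5))        # ~0.25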
Code example #3
 def _make_dct(self):
     """
     ::
         Construct the discrete cosine transform coefficients for the
         current size of constant-Q transform
     """
     DCT_OFFSET = self.lcoef
     nm = 1 / P.sqrt(self._cqtN / 2.0)
     self.DCT = P.empty((self._dctN, self._cqtN))
     for i in P.arange(self._dctN):
         for j in P.arange(self._cqtN):
             self.DCT[i, j] = nm * P.cos(i * (2 * j + 1) *
                                         (P.pi / 2.0) / self._cqtN)
     for j in P.arange(self._cqtN):
         self.DCT[0, j] *= P.sqrt(2.0) / 2.0
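
With this normalization (rows scaled by sqrt(2/N), the DC row by a further sqrt(2)/2) the matrix is the orthonormal DCT-II, which a short NumPy check confirms (sizes are hypothetical stand-ins for _dctN and _cqtN):

import numpy as np
M, N = 12, 48                          # assumed _dctN, _cqtN
j = np.arange(N)
D = np.sqrt(2.0 / N) * np.cos(np.outer(np.arange(M), 2 * j + 1) * np.pi / (2.0 * N))
D[0] *= np.sqrt(2.0) / 2.0
print(np.allclose(D.dot(D.T), np.eye(M)))  # True: rows are orthonormal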
Code example #4
def Flow_rate(dp, L, dz, liquid_type, pipe_standard, merterial, loss):
    if dp == '0.000':
        dp = '0'
    L = float(L)
    dz = float(dz)
    dp = float(dp)
    k = 0
    if len(loss) == 1:
        k = float(loss[0])
    else:
        for i in range(len(loss)):
            if loss[i] == 0:
                continue
            # skip entries whose count is empty or zero
            if loss[i][1] in ('', '0'):
                continue
            k += minor_loss[loss[i][0]] * int(loss[i][1])

    mu = table[1][liquid_type]
    rho = table[0][liquid_type]
    D = table[2][pipe_standard]
    ep = table[3][merterial]
    A = (pi * D**2) / 4
    jd = ep / D

    f = random.uniform(0.007, 0.05)  # random initial guess for the friction factor
    es = 1
    af = 10
    V = 1
    while es > 0.000001:
        v1 = 2 * dp / rho
        v2 = 2 * dz * 9.81
        v3 = (f * L / D) + k
        V = pl.sqrt((v1 + v2) / v3)
        Re = rho * V * D / mu
        f = chen_f(Re, jd)
        es = abs(af - f)
        af = f
    if Re <= 2100:  # laminar flow: use f = 64/Re
        f = 64 / Re
        v1 = 2 * dp / rho
        v2 = 2 * dz * 9.81
        v3 = (f * L / D) + k
        V = pl.sqrt((v1 + v2) / v3)
    Flow = (A * V)
    return [Flow, f]
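
chen_f is defined elsewhere in the project; a sketch of what it plausibly computes, Chen's (1979) explicit approximation of the Colebrook equation, consistent with how it is called above (Reynolds number and relative roughness in, Darcy friction factor out):

import math

def chen_f(Re, jd):
    # Chen (1979) explicit approximation of the Colebrook equation
    # Re: Reynolds number, jd: relative roughness eps/D
    a = (jd ** 1.1098) / 2.8257 + 5.8506 / (Re ** 0.8981)
    return (-2.0 * math.log10(jd / 3.7065 - (5.0452 / Re) * math.log10(a))) ** -2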
Code example #5
 def _pvoc(self, X_hat, Phi_hat=None, R=None):
     """
     ::
       a phase vocoder - time-stretch
       inputs:
         X_hat - estimate of signal magnitude
         [Phi_hat] - estimate of signal phase
         [R] - resynthesis hop ratio
       output:
         updates self.X_hat with modified complex spectrum
     """
     N = self.nfft
     W = self.wfft
     H = self.nhop
     R = 1.0 if R is None else R
     dphi = (2 * P.pi * H * P.arange(N / 2 + 1)) / N
     print("Phase Vocoder Resynthesis...", N, W, H, R)
     A = P.angle(self.STFT) if Phi_hat is None else Phi_hat
     phs = A[:, 0]
     self.X_hat = []
     n_cols = X_hat.shape[1]
     t = 0
     while P.floor(t) < n_cols:
         tf = t - P.floor(t)
         idx = P.arange(2) + int(P.floor(t))
         idx[1] = n_cols - 1 if t >= n_cols - 1 else idx[1]
         Xh = X_hat[:, idx]
         Xh = (1 - tf) * Xh[:, 0] + tf * Xh[:, 1]
         self.X_hat.append(Xh * P.exp(1j * phs))
         U = A[:, idx[1]] - A[:, idx[0]] - dphi
         U = U - P.np.round(U / (2 * P.pi)) * 2 * P.pi
         phs += (U + dphi)
         t += P.randn() * P.sqrt(PVOC_VAR * R) + R  # 10% variance
     self.X_hat = P.np.array(self.X_hat).T
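
The U update maps the phase deviation onto its principal value; the identity is easy to check in isolation:

import numpy as np
U = np.array([3.5 * np.pi, -2.1 * np.pi, 0.4])
print(U - np.round(U / (2 * np.pi)) * 2 * np.pi)  # all values now within [-pi, pi]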
Code example #6
File: phot.py Project: indebetouw/pyphot
    def setbgcoords(self):
        if self.type is None:
            raise Exception("region type=None - has it been set?")
        if self.type == "circle":
            if len(self.coords) != 3:
                raise Exception(
                    "region coords should be ctr_ra, ctr_dec, rad_arcsec - the coord array has unexpected length %d"
                    % len(self.coords))
            self.bg0coords = pl.array(self.coords)
            self.bg1coords = pl.array(self.coords)
            # set larger radii for annulus
            self.bg0coords[2] = self.coords[2] * self.bgfact[0]
            self.bg1coords[2] = self.coords[2] * self.bgfact[1]
        elif self.type == "polygon":
            n = self.coords.shape[1]
            self.coords = pl.array(self.coords)
            ctr = [self.coords[:, 0].mean(), self.coords[:, 1].mean()]
            x = self.coords[:, 0] - ctr[0]
            y = self.coords[:, 1] - ctr[1]
            r = pl.sqrt(x**2 + y**2)
            th = pl.arctan2(y, x)

            ct = pl.cos(th)
            st = pl.sin(th)
            # inner and outer background regions
            b = self.bgfact
            self.bg0coords = pl.array([r * b[0] * ct, r * b[0] * st]).T + ctr
            self.bg1coords = pl.array([r * b[1] * ct, r * b[1] * st]).T + ctr

        else:
            raise Exception("unknown region type %s" % self.type)
Code example #7
def se_over_slope(x, y, estimated, model):
    """
    For a linear regression model, calculate the ratio of the standard error of
    the fitted curve's slope to the slope itself. The larger the absolute value
    of this ratio, the more likely it is that the upward/downward trend of the
    fitted curve arose by chance.

    Args:
        x: a 1-d pylab array with length N, representing the x-coordinates of
            the N sample points
        y: a 1-d pylab array with length N, representing the y-coordinates of
            the N sample points
        estimated: a 1-d pylab array of values estimated by a linear
            regression model
        model: a pylab array storing the coefficients of a linear regression
            model

    Returns:
        a float for the ratio of standard error of slope to slope
    """
    assert len(y) == len(estimated)
    assert len(x) == len(estimated)
    EE = ((estimated - y)**2).sum()
    var_x = ((x - x.mean())**2).sum()
    SE = pylab.sqrt(EE / (len(x) - 2) / var_x)
    return SE / model[0]
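
A hypothetical usage with pylab.polyfit; model[0] is the slope because polyfit returns coefficients in descending order:

import pylab
x = pylab.array([1.0, 2.0, 3.0, 4.0, 5.0])
y = pylab.array([1.1, 1.9, 3.2, 3.9, 5.1])
model = pylab.polyfit(x, y, 1)                 # [slope, intercept]
estimated = pylab.polyval(model, x)
print(se_over_slope(x, y, estimated, model))   # small ratio -> trend unlikely to be chance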
Code example #8
File: rbf.py Project: keithwoj/SCAMR
def mq(d,**parms):
    # multiquadric, f(r) = sqrt(1 + (ep r)^2)
    c = parms.get('centers')
    #op = parms.get('operator','interp')
    ep = parms.get('shapeparm',1)
    DM = dmatrix(d, centers = c)
    # eps_r = epsilon*r where epsilon may be an array or scalar
    eps_r = dot(ep*eye(DM.shape[0]),DM)
    return sqrt(1+(eps_r)**2)
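
A sketch of how mq could drive a simple RBF interpolation, assuming the numpy namespace imports at the top of rbf.py and the dmatrix helper shown in code example #24 (data here is hypothetical):

from numpy import linspace, pi, sin
from numpy.linalg import solve
d = linspace(0.0, 1.0, 20)               # interpolation nodes
A = mq(d, centers=d, shapeparm=3.0)      # multiquadric system matrix
w = solve(A, sin(2 * pi * d))            # weights fitting f(x) = sin(2 pi x)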
Code example #9
File: GvsT.py Project: ejetzer/hall
def main(data_files, a, b, c, d, pguess, eguess, perr=1, eerr=1, outfile=None):
    xs, ys, es, Ns = [], [], [], []
    for df in data_files:
        databox = spinmob.data.load(df)
        x, y, e, N = databox[:4]
        xs += list(x)
        ys += [abs(i) for i in y]
        es += list(e)
        Ns += list(N)
    xs, ys, es, Ns = pylab.array(xs), pylab.array(ys), pylab.array(es), pylab.array(Ns)
    pes, ees = perr / pylab.sqrt(Ns), eerr / pylab.sqrt(Ns)
    if 'current' in databox.hkeys:
        current = databox.h('current')
    else:
        current = 0.001 # A
        databox.h(current=current)
    Rs = ys / current
    Rerrs = pes / current, ees / current
    fits = splitfit(xs, Rs, Rerrs, a, b, c, d, pguess, eguess, outfile)
    return fits
Code example #10
File: Hall_coefficient.py Project: ejetzer/hall
def main(data_files, a, b, c, d, pguess, eguess, perr=1, eerr=1,
         I=0.001, B=0.5003991, sample_thickness=1e-3,
         outfile=None):
    def R_H(V_H):
        # Hall coefficient, Vm/(AT)
        return V_H * sample_thickness / (I * B)

    def R_He(V_H, V_He):
        # propagate the voltage error and fixed thickness/field uncertainties in quadrature
        return pylab.sqrt(
            (sample_thickness / (I * B) * V_He)**2 +
            (V_H / (I * B) * 1e-4)**2 +
            (V_H * sample_thickness / (B**2 * I) * 2e-8)**2)
    xs, ys, es, Ns = [], [], [], []
    for df in data_files:
        databox = spinmob.data.load(df)
        x, y, e, N = databox[:4]
        xs += list(x)
        ys += [R_H(i) for i in y]
        es += [R_He(i, j) for i, j in zip(y, e)]
        Ns += list(N)
    xs, ys, es, Ns = pylab.array(xs), pylab.array(ys), pylab.array(es), pylab.array(Ns)
    pes, ees = perr / pylab.sqrt(Ns), eerr / pylab.sqrt(Ns)
    fits = splitfit(xs, ys, (pes, ees), a, b, c, d, pguess, eguess, outfile)
    return fits
Code example #11
def gauss_pdf(n, mu=0.0, sigma=1.0):
    """
    ::

        Generate a gaussian kernel
         n - number of points to generate
         mu - mean
         sigma - standard deviation
    """
    var = sigma**2
    return 1.0 / pylab.sqrt(2 * pylab.pi * var) * pylab.exp(
        -(pylab.r_[0:n] - mu)**2 / (2.0 * var))
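
Note that mu is in units of sample index (the kernel is evaluated at 0..n-1), so a centered smoothing kernel needs mu = n // 2, e.g.:

import pylab
n = 21
kern = gauss_pdf(n, mu=n // 2, sigma=3.0)  # Gaussian centered in the window
kern = kern / kern.sum()                   # normalize to unit area for smoothing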
Code example #12
    def rank_by_distance_bhatt(self, qkeys, ikeys, rkeys, dists):
        """
        ::

            Reduce timbre-channel distances to ranks list by ground-truth key indices
            Bhattacharyya distance on timbre-channel probabilities and Kullback distances
        """
        # timbre-channel search using pre-computed distances
        ranks_list = []
        t_keys, t_lens = self.get_adb_lists(0)
        rdists = pylab.ones(len(t_keys)) * float('inf')
        qk = self._get_probs_tc(qkeys)
        for i in range(len(ikeys[0])):  # number of include keys
            ikey = []
            dk = pylab.zeros(self.timbre_channels)
            for t_chan in range(self.timbre_channels):  # timbre channels
                ikey.append(ikeys[t_chan][i])
                try:
                    # find dist of key i for query
                    i_idx = rkeys[t_chan].index(
                        ikey[t_chan])  # dataset include-key match
                    # the reduced distance function in include_keys order
                    # distance is Bhattacharyya distance on probs and dists
                    dk[t_chan] = dists[t_chan][i_idx]
                except ValueError:  # .index() failed: key missing from result list
                    print("Key not found in result list: ", ikey, "for query:",
                          qkeys[t_chan])
                    raise error.BregmanError()
            rk = self._get_probs_tc(ikey)
            a_idx = t_keys.index(ikey[0])  # audiodb include-key index
            rdists[a_idx] = distance.bhatt(
                pylab.sqrt(pylab.absolute(dk)),
                pylab.sqrt(pylab.absolute(qk * rk)))
        #search for the index of the relevant keys
        rdists = pylab.absolute(rdists)
        sort_idx = pylab.argsort(rdists)  # Sort fields into database order
        for r in self.ground_truth:  # relevant keys
            ranks_list.append(pylab.where(
                sort_idx == r)[0][0])  # Rank of the relevant key
        return ranks_list, rdists
Code example #13
File: plot_gp.py Project: liziniu/CVAE
def plot_gpr(xs,
             ys,
             s2,
             axes=None,
             line_settings={
                 'color': 'black',
                 'lw': 2.0
             },
             shade_settings={
                 'facecolor': 'lightyellow',
                 'alpha': 0.5
             }):
    """
    Plot the mean predicted values and 95% confidence interval, two times
    the standard error, as a shaded area.

    Parameters
    ----------
    xs: array
        an N length np array of the x* data
    ys: array
        an N length np array of the y* data
    s2: array
        an N length np array of the variance, var(y*), data
    line_settings: dictionary, optional
        A dictionary with keywords and values to pass to the mean line of the
        plot. For example {'linewidth': 2}
    shade_settings: dictionary, optional
        A dictionary with keywords and values to pass to the fill_between
        function.
    axes: axes, optional
        The axes to put the plot.
        If axes is not specified then the current will be used (in pylab).

    Returns
    -------
    Returns a tuple with
    line: matplotlib.lines.Line2D
        the mean line
    poly: matplotlib.collections.PolyCollection
        shaded 95% confidence area
    """
    ax = axes
    if ax is None:
        ax = pl.gca()
    xsf = xs.flatten()
    ysf = ys.flatten()
    s2f = 2 * pl.sqrt(s2.flatten())  # two standard deviations
    poly = ax.fill_between(xsf, ysf - s2f, ysf + s2f, **shade_settings)
    line = ax.plot(xs, ys, **line_settings)
    return line, poly
Code example #14
def rmse(y, estimated):
    """
    Calculate the root mean square error term.

    Args:
        y: an 1-d pylab array with length N, representing the y-coordinates of
            the N sample points
        estimated: an 1-d pylab array of values estimated by the regression
            model

    Returns:
        a float for the root mean square error term
    """

    return pylab.sqrt(sum((y - estimated)**2) / len(y))
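
A quick worked example with hypothetical data:

import pylab
y = pylab.array([2.0, 4.1, 5.9, 8.2])
estimated = pylab.array([2.1, 4.0, 6.0, 8.0])
print(rmse(y, estimated))  # sqrt(0.07 / 4) ~ 0.132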
Code example #15
    def OpenPlot(self):

        def getspec(self):

            global x
            global y

            ftypes = [('Data files', '*.dat'), ('All files', '*')] 
            dlg = tkFileDialog.Open(self, filetypes = ftypes)

            fl = dlg.show()

            if fl != '':
                f = open(fl)
                data = np.loadtxt(f)
                f.close()

                x = data[:,0]
                y = data[:,1]

        getspec(self)

        minorLocator = AutoMinorLocator()
        golden = (plt.sqrt(5) + 1.)/2.
        figprops = dict(figsize = (6., 6./golden), dpi = 128)
        adjustprops = dict(left = 0.15, bottom = 0.20, right = 0.90, top = 0.93, wspace = 0.2, hspace = 0.2)

        MyPlot = plt.figure(1, **figprops)
        MyPlot.subplots_adjust(**adjustprops)
        plt.clf()

        MySubplot = MyPlot.add_subplot(1, 1, 1)
        
        MySubplot.plot(x,y)

        plt.xlabel(r'$\lambda$ ($\AA$)', fontsize = 18)
        plt.ylabel(r'$F_\lambda$ ($10^{-17}$ erg/$cm^2$/s/$\AA$)', fontsize = 18)

        axes = plt.gca()

        plt.tick_params(which = 'both', width = 2) 
        plt.tick_params(which = 'major', length = 4) 
        plt.tick_params(which = 'minor', length = 4, color = 'r')

        MySubplot.xaxis.grid(True, which = "both")
        plt.ticklabel_format(style = 'sci', axis = 'x', scilimits = (0,0))
        MySubplot.xaxis.set_minor_locator(minorLocator)

        print("Done!")
Code example #16
 def _stft_specgram(self):
     if not self._have_x:
         print(
             "Error: You need to load a sound file first: use self.load_audio('filename.wav')\n"
         )
         return False
     else:
         fp = self._check_feature_params()
         self.STFT = P.mlab.specgram(self.x,
                                     NFFT=self.nfft,
                                     noverlap=self.nfft - self.nhop)[0]
         self.STFT /= P.sqrt(self.nfft)
         self._have_stft = True
     if self.verbosity:
         print("Extracted STFT: nfft=%d, hop=%d" % (self.nfft, self.nhop))
     return True
Code example #17
def image_derivative(im, derivative, do_scale=True):
    assert (len(im.shape) == 2 or len(im.shape) == 3)
    assert (im.dtype == 'float')
    assert (derivative in ['x', 'y', 'grad'])
    # obtain a gray image
    if len(im.shape) == 3:
        gray_im = pylab.mean(im, 2)
    else:
        gray_im = im
    # either x,y derivative or full gradient
    if derivative == 'grad':
        dy = ndimage.sobel(gray_im, 0)
        dx = ndimage.sobel(gray_im, 1)
        deriv_im = pylab.sqrt(dy * dy + dx * dx)
    else:
        deriv_im = ndimage.sobel(gray_im, {'x': 1, 'y': 0}[derivative])
    return imscale(deriv_im) if do_scale else deriv_im
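
A usage sketch (do_scale=False avoids the imscale dependency from the same module; the image is hypothetical):

import pylab
from scipy import ndimage
im = pylab.rand(64, 64)                           # random grayscale float image
g = image_derivative(im, 'grad', do_scale=False)  # Sobel gradient magnitude
print(g.shape, float(g.max()))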
Code example #18
    def _make_cqt(self):
        """
        ::

            Build a constant-Q transform (CQT) from lists of
            linear center frequencies, logarithmic center frequencies, and
            constant-Q bandwidths.
        """
        fftfrqs = self._fftfrqs
        logfrqs = self._logfrqs
        logfbws = self._logfbws
        fp = self.feature_params
        ovfctr = 0.5475  # Norm constant so CQT'*CQT close to 1.0
        tmp2 = 1.0 / (ovfctr * logfbws)
        tmp = (logfrqs.reshape(1, -1) - fftfrqs.reshape(-1, 1)) * tmp2
        self.Q = P.exp(-0.5 * tmp * tmp)
        self.Q *= 1.0 / (2.0 * P.sqrt((self.Q * self.Q).sum(0)))
        self.Q = self.Q.T
Code example #19
File: plot_gp.py Project: GreenSteam/pypr
def plot_gpr(xs, ys, s2, axes=None,
        line_settings={'color':'black', 'lw':2.0}, 
        shade_settings={'facecolor':'lightyellow', 'alpha':0.5}):
    """
    Plot the mean predicted values and 95% confidence interval, two times
    the standard error, as a shaded area.

    Parameters
    ----------
    xs: array
        an N length np array of the x* data
    ys: array
        an N length np array of the y* data
    s2: array
        an N length np array of the variance, var(y*), data
    line_settings: dictionary, optional
        A dictionary with keywords and values to pass to the mean line of the
        plot. For example {'linewidth': 2}
    shade_settings: dictionary, optional
        A dictionary with keywords and values to pass to the fill_between
        function.
    axes: axes, optional
        The axes to put the plot.
        If axes is not specified then the current will be used (in pylab).

    Returns
    -------
    Returns a tuple with
    line: matplotlib.lines.Line2D
        the mean line
    poly: matplotlib.collections.PolyCollection
        shaded 95% confidence area
    """
    ax = axes
    if ax is None:
        ax = pl.gca()
    xsf = xs.flatten()
    ysf = ys.flatten()
    s2f = 2*pl.sqrt(s2.flatten())  # two standard deviations
    poly = ax.fill_between(xsf, ysf-s2f, ysf+s2f, **shade_settings)
    line = ax.plot(xs, ys, **line_settings)
    return line, poly
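
A hypothetical call, standing in for a GP posterior with synthetic arrays:

import pylab as pl
xs = pl.linspace(0.0, 1.0, 50)
ys = pl.sin(2 * pl.pi * xs)        # stand-in for the predicted mean y*
s2 = 0.01 * pl.ones_like(xs)       # stand-in for var(y*)
line, poly = plot_gpr(xs, ys, s2)
pl.show()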
Code example #20
    def __init__(self, parent, controller):

        tk.Frame.__init__(self, parent)
        label = ttk.Label(self, text = "Spectra App", font = LARGE_FONT)
        label.pack(pady = 10, padx = 10)

        button1 = ttk.Button(self, text = "Back to Start", command = lambda: controller.show_frame(StartPage))
        button1.pack()

        self.minorLocator = AutoMinorLocator()
        golden = (plt.sqrt(5) + 1.)/2.
        figprops = dict(figsize = (6., 6./golden), dpi = 128)
        adjustprops = dict(left = 0.15, bottom = 0.20, right = 0.90, top = 0.93, wspace = 0.2, hspace = 0.2)

        self.plot = plt.figure(1, **figprops)
        self.plot.subplots_adjust(**adjustprops)
        plt.clf()

        self.subplot = self.plot.add_subplot(1, 1, 1)

        self.subplot.plot([1,2,3,4,5],[3,4,5,6,7])

        plt.xlabel(r'$\lambda$ ($\AA$)', fontsize=18)
        plt.ylabel(r'$F_\lambda$ ($10^{-17}$ erg/$cm^2$/s/$\AA$)', fontsize=18)

        axes = plt.gca()

        plt.tick_params(which = 'both', width = 2)
        plt.tick_params(which = 'major', length = 4)
        plt.tick_params(which = 'minor', length = 4, color = 'r')

        plt.close(self.plot)

        canvas = FigureCanvasTkAgg(self.plot, self)
        canvas.show()
        canvas.get_tk_widget().pack(side = tk.BOTTOM, fill = tk.BOTH, expand = True)

        toolbar = NavigationToolbar2TkAgg(canvas, self)
        toolbar.update()
        canvas._tkcanvas.pack(side = tk.TOP, fill = tk.BOTH, expand = True)
Code example #21
def feature_scale(M, normalize=False, dbscale=False, norm=False, bels=False):
    """
    ::

        Perform mutually-orthogonal scaling operations, otherwise return identity:
          normalize [False]
          dbscale   [False]
          norm      [False]
          bels      [False]
    if not (normalize or dbscale or norm or bels):
        return M
    else:
        X = M.copy()  # don't alter the original
        if norm:
            X = X / P.tile(P.sqrt((X * X).sum(0)), (X.shape[0], 1))
        if normalize:
            X = _normalize(X)
        if dbscale or bels:
            X = P.log10(P.clip(X, 0.0001, X.max()))
            if dbscale:
                X = 20 * X
    return X
Code example #22
    def _cqft(self):
        """
        ::

            Constant-Q Fourier transform.
        """

        if not self._power():
            return False
        fp = self._check_feature_params()
        if self.intensify:
            self._cqft_intensified()
        else:
            self._make_log_freq_map()
            self.CQFT = P.sqrt(
                P.array(P.mat(self.Q) * P.mat(P.absolute(self.STFT)**2)))
            self._is_intensified = False
        self._have_cqft = True
        if self.verbosity:
            print("Extracted CQFT: intensified=%d" % self._is_intensified)
        self.inverse = self.icqft
        self.X = self.CQFT
        return True
Code example #23
def main(argv):
    #if len(argv) < 4:
    #  print """Usage:
    #  """
    #  sys.exit(1)
    #nlayers = int(argv[1])

    # get number of layers
    f = open(FLAGS.dir + "weights/nlayers.txt")
    nlayers = int(f.readlines()[0])
    f.close()
    print(str(nlayers) + " layer(s) detected.")

    pylab.figure(figsize=(32, 24), dpi=320, facecolor='w', edgecolor='k')

    # Make a picture with the filters
    [n_inputs, n_neurons, weights
     ] = visualizing.weights_filename_to_array(FLAGS.dir + "weights/W0.txt")

    image_side_length = int(pylab.sqrt(n_inputs))
    nsubplot = pylab.ceil(pylab.sqrt(n_neurons))
    for i in range(n_neurons):
        filter = pylab.resize(weights[i],
                              [image_side_length, image_side_length])
        pylab.subplot(nsubplot, nsubplot, i + 1)
        pylab.imshow(filter, interpolation='nearest')
        pylab.gray()

    pylab.savefig(FLAGS.dir + "filters" + ".png")
    pylab.clf()

    # Make a picture with the all weights
    nsubplot_vertical = nlayers + 1
    nsubplot_horizontal = 4
    for i in range(nlayers):
        # V
        location = 1 + (nlayers - i) * nsubplot_horizontal + 0
        filename = FLAGS.dir + "weights/V" + str(i) + ".txt"
        visualizing.plot_weight_matrix(nsubplot_vertical, nsubplot_horizontal,
                                       location, filename, True, "V" + str(i))

        # W
        location = 1 + (nlayers - i) * nsubplot_horizontal + 1
        filename = FLAGS.dir + "weights/W" + str(i) + ".txt"
        visualizing.plot_weight_matrix(nsubplot_vertical, nsubplot_horizontal,
                                       location, filename, False, "W" + str(i))

        # F
        location = 1 + (nlayers - i) * nsubplot_horizontal + 2
        filename = FLAGS.dir + "weights/F" + str(i) + ".txt"
        visualizing.plot_weight_matrix(nsubplot_vertical, nsubplot_horizontal,
                                       location, filename, False, "F" + str(i))

        # G
        location = 1 + (nlayers - i) * nsubplot_horizontal + 3
        filename = FLAGS.dir + "weights/G" + str(i) + ".txt"
        visualizing.plot_weight_matrix(nsubplot_vertical, nsubplot_horizontal,
                                       location, filename, True, "G" + str(i))

    # Last layer
    location = 1 + 1
    filename = FLAGS.dir + "weights/W" + str(nlayers) + ".txt"
    visualizing.plot_weight_matrix(nsubplot_vertical, nsubplot_horizontal,
                                   location, filename, False,
                                   "W" + str(nlayers))

    pylab.savefig(FLAGS.dir + "weights" + ".png")

    # Make pictures with examples and representations
    visualizing.visualize_representations(FLAGS.dir, nlayers)
Code example #24
File: rbf.py Project: keithwoj/SCAMR
def dmatrix(d,**centers):
    """
    DM = dmatrix(d,**centers)
       
    Arguments:
    d = data
    **centers may contain centers, c, different from d; otherwise c = d

    Typically d = c but, in general, the data does not have to equal its centers,
    as in the case of the evaluation matrix, where d becomes the
    evaluation points and the centers are the collocation data.

    Output DM:
    Compute the distance matrix with entries being the distances between the
    data and the centers.
    The Euclidean distance matrix, DM, is the m by n matrix with entries
         ||d_0 - c_0|| ||d_0 - c_1|| ... ||d_0 - c_n||
         ||d_1 - c_0|| ||d_1 - c_1|| ... ||d_1 - c_n||
                          ...
         ||d_m - c_0|| ||d_m - c_1|| ... ||d_m - c_n||

    m = # of data points, n = # of centers

    ****** ASSUMPTION: # pts >= dimension of space
    ****** ASSUMPTION: c, d are ROW vectors; otherwise they are converted to row vectors
    
    Remark:
    d and c are called vectors but it might be more appropriate to call
    them matrices (or rank dim(d), rank dim(c) tensors). When called vectors
    it is assumed that each row is a vector in the space implying the number
    of columns is the dimension of the space and the number of rows is the
    number of points
    """
    # Test Input:
    # Are d and c arrays of row vectors?
    # If d and c are column vectors, convert them to row vectors.
    # If d and c are square, i.e. # pts = dimension of space, notify user 
    if d.ndim > 1:    
        if d.shape[1] > d.shape[0]:
            d = d.T
        elif d.shape[1] == d.shape[0]:
            print("Assuming data is in row-vector form.")
    else:   # 1-D data, convert to 2-D data with shape (M,1)
        d = array([d]).T
    
    # Note: a plain centers.get('centers', d) would still return None when the
    # caller passes centers=None explicitly (the key exists), hence this check:
    c = centers.get('centers')
    if c is None:
        c = d

    if c.ndim > 1:
        if c.shape[1] > c.shape[0]:
            c = c.T
        elif c.shape[1] == c.shape[0]:
            print("Assuming centers are in row-vector form.")
    else:   # 1-D data, convert to 2-D data with shape (N,1)
        c = array([c]).T
    # **************************************************************************
    #                               Begin Algorithm
    # **************************************************************************
    # Obtain shape of input:
    M, sd = d.shape    # M = # pts, sd = dim of data space
    N, sc = c.shape    # N = # pts, sc = dim of centers space
    #
    # Raise error if centers and data have different dimension    
    if sd != sc:
        raise NameError('Data and centers must have same dimension')
    # ********** Construct the Distance Matrix DM **********
    # Initialize the distance matrix: (data # of pts) by (centers # of pts)
    # Denote the 
    # d_0 = (d[0,0], d[0,1], ...), d_1 = (d[1,0], d[1,1], ...), etc.
    #
    # The distance matrix is the M by N matrix with entries
    #      ||d_0 - c_0|| ||d_0 - c_1|| ... ||d_0 - c_n||
    #      ||d_1 - c_0|| ||d_1 - c_1|| ... ||d_1 - c_n||
    #                       ...
    #      ||d_m - c_0|| ||d_m - c_1|| ... ||d_m - c_n||
    #
    DM = zeros((M,N))
    # Determine the distance of each point in the data-set from its center
    for i in range(M):
        # Compute the row ||d_i - c_0|| ||d_i - c_1|| ... ||d_i - c_n||
        DM[i,:] = ((d[i]-c)**2).sum(1)
    # Finish distance formula by taking square root of each entry
    return sqrt(DM)
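
For example, three points in the plane (rows are points, per the assumptions above):

from numpy import array
pts = array([[0.0, 0.0],
             [1.0, 0.0],
             [0.0, 1.0]])
print(dmatrix(pts))  # pairwise Euclidean distances; off-diagonals 1, 1 and sqrt(2)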
Code example #25
File: pop.py Project: NeuroArchive/netpyne
    def createCellsDensity (self):
        ''' Create population cells based on density'''
        cells = []
        shape = sim.net.params.shape
        sizeX = sim.net.params.sizeX
        sizeY = sim.net.params.sizeY
        sizeZ = sim.net.params.sizeZ
        
        # calculate volume
        if shape == 'cuboid':
            volume = sizeY/1e3 * sizeX/1e3 * sizeZ/1e3  
        elif shape == 'cylinder':
            volume = sizeY/1e3 * sizeX/1e3/2 * sizeZ/1e3/2 * pi
        elif shape == 'ellipsoid':
            volume = sizeY/1e3/2.0 * sizeX/1e3/2.0 * sizeZ/1e3/2.0 * pi * 4.0 / 3.0

        for coord in ['x', 'y', 'z']:
            if coord+'Range' in self.tags:  # if user provided absolute range, convert to normalized
                self.tags[coord+'normRange'] = [point / sim.net.params['size'+coord.upper()] for point in self.tags[coord+'Range']]
            if coord+'normRange' in self.tags:  # if normalized range, rescale volume
                minv = self.tags[coord+'normRange'][0] 
                maxv = self.tags[coord+'normRange'][1] 
                volume = volume * (maxv-minv)

        funcLocs = None  # start with no locations as a function of density function
        if isinstance(self.tags['density'], str): # check if density is given as a function 
            if shape == 'cuboid':  # only available for cuboids
                strFunc = self.tags['density']  # string containing function
                strVars = [var for var in ['xnorm', 'ynorm', 'znorm'] if var in strFunc]  # get list of variables used 
                if not len(strVars) == 1:
                    print('Error: density function (%s) for population %s does not include "xnorm", "ynorm" or "znorm"' % (strFunc, self.tags['popLabel']))
                    return
                coordFunc = strVars[0] 
                lambdaStr = 'lambda ' + coordFunc +': ' + strFunc # convert to lambda function 
                densityFunc = eval(lambdaStr)
                minRange = self.tags[coordFunc+'Range'][0]
                maxRange = self.tags[coordFunc+'Range'][1]

                interval = 0.001  # interval of location values to evaluate func in order to find the max cell density
                maxDensity = max(map(densityFunc, (arange(minRange, maxRange, interval))))  # max cell density 
                maxCells = volume * maxDensity  # max number of cells based on max value of density func 
                
                seed(sim.id32('%d' % sim.cfg.seeds['loc']+sim.net.lastGid))  # reset random number generator
                locsAll = minRange + ((maxRange-minRange)) * rand(int(maxCells), 1)  # random location values 
                locsProb = array(map(densityFunc, locsAll)) / maxDensity  # calculate normalized density for each location value (used to prune)
                allrands = rand(len(locsProb))  # create an array of random numbers for checking each location pos 
                
                makethiscell = locsProb>allrands  # perform test to see whether or not this cell should be included (pruning based on density func)
                funcLocs = [locsAll[i] for i in range(len(locsAll)) if i in array(makethiscell.nonzero()[0],dtype='int')] # keep only subset of yfuncLocs based on density func
                self.tags['numCells'] = len(funcLocs)  # final number of cells after pruning of location values based on density func
                if sim.cfg.verbose: print('Volume=%.2f, maxDensity=%.2f, maxCells=%.0f, numCells=%.0f' % (volume, maxDensity, maxCells, self.tags['numCells']))
            else:
                print('Error: Density functions are only implemented for cuboid shaped networks')
                exit(0)
        else:  # NO ynorm-dep
            self.tags['numCells'] = int(self.tags['density'] * volume)  # = density (cells/mm^3) * volume (mm^3)

        # calculate locations of cells 
        seed(sim.id32('%d'%(sim.cfg.seeds['loc']+self.tags['numCells']+sim.net.lastGid)))
        randLocs = rand(self.tags['numCells'], 3)  # create random x,y,z locations

        if sim.net.params.shape == 'cylinder':
            # Use the x,z random values
            rho = randLocs[:,0] # use x rand value as the radius rho in the interval [0, 1)
            phi = 2 * pi * randLocs[:,2] # use z rand value as the angle phi in the interval [0, 2*pi) 
            x = (1 + sqrt(rho) * cos(phi))/2.0
            z = (1 + sqrt(rho) * sin(phi))/2.0
            randLocs[:,0] = x
            randLocs[:,2] = z
    
        elif sim.net.params.shape == 'ellipsoid':
            # Use the x,y,z random values
            rho = np.power(randLocs[:,0], 1.0/3.0) # use x rand value as the radius rho in the interval [0, 1); cuberoot
            phi = 2 * pi * randLocs[:,1] # use y rand value as the angle phi in the interval [0, 2*pi) 
            costheta = (2 * randLocs[:,2]) - 1 # use z rand value as cos(theta) in the interval [-1, 1); ensures uniform dist 
            theta = arccos(costheta)  # obtain theta from cos(theta)
            x = (1 + rho * cos(phi) * sin(theta))/2.0
            y = (1 + rho * sin(phi) * sin(theta))/2.0
            z = (1 + rho * cos(theta))/2.0 
            randLocs[:,0] = x
            randLocs[:,1] = y
            randLocs[:,2] = z

        for icoord, coord in enumerate(['x', 'y', 'z']):
            if coord+'normRange' in self.tags:  # if normalized range, rescale random locations
                minv = self.tags[coord+'normRange'][0] 
                maxv = self.tags[coord+'normRange'][1] 
                randLocs[:,icoord] = randLocs[:,icoord] * (maxv-minv) + minv
            if funcLocs and coordFunc == coord+'norm':  # if locations for this coordinate calculated using density function
                randLocs[:,icoord] = funcLocs

        if sim.cfg.verbose and not funcLocs: print('Volume=%.4f, density=%.2f, numCells=%.0f' % (volume, self.tags['density'], self.tags['numCells']))

        for i in self._distributeCells(self.tags['numCells'])[sim.rank]:
            gid = sim.net.lastGid+i
            self.cellGids.append(gid)  # add gid list of cells belonging to this population - not needed?
            cellTags = {k: v for (k, v) in self.tags.items() if k in sim.net.params.popTagsCopiedToCells}  # copy the pop tags listed in popTagsCopiedToCells to the cell tags
            cellTags['popLabel'] = self.tags['popLabel']
            cellTags['xnorm'] = randLocs[i,0]  # calculate x location (um)
            cellTags['ynorm'] = randLocs[i,1]  # calculate y location (um)
            cellTags['znorm'] = randLocs[i,2]  # calculate z location (um)
            cellTags['x'] = sizeX * randLocs[i,0]  # calculate x location (um)
            cellTags['y'] = sizeY * randLocs[i,1]  # calculate y location (um)
            cellTags['z'] = sizeZ * randLocs[i,2]  # calculate z location (um)
            cells.append(self.cellModelClass(gid, cellTags)) # instantiate Cell object
            if sim.cfg.verbose: 
                print('Cell %d/%d (gid=%d) of pop %s, pos=(%2.f, %2.f, %2.f), on node %d, '%(i, self.tags['numCells']-1, gid, self.tags['popLabel'],cellTags['x'], cellTags['y'], cellTags['z'], sim.rank))
        sim.net.lastGid = sim.net.lastGid + self.tags['numCells'] 
        return cells
Code example #26
File: network.py Project: wwlytton/netpyne
    def strToFunc(self, preCellsTags, postCellsTags, connParam):
        # list of params that have a function passed in as a string
        paramsStrFunc = [param for param in ['weight', 'delay', 'probability', 'convergence', 'divergence'] if param in connParam and isinstance(connParam[param], str)]  

        # dict to store correspondence between string and actual variable
        dictVars = {}  
        dictVars['pre_x']       = lambda preTags,postTags: preTags['x'] 
        dictVars['pre_y']       = lambda preTags,postTags: preTags['y'] 
        dictVars['pre_z']       = lambda preTags,postTags: preTags['z'] 
        dictVars['pre_xnorm']   = lambda preTags,postTags: preTags['xnorm'] 
        dictVars['pre_ynorm']   = lambda preTags,postTags: preTags['ynorm'] 
        dictVars['pre_znorm']   = lambda preTags,postTags: preTags['znorm'] 
        dictVars['post_x']      = lambda preTags,postTags: postTags['x'] 
        dictVars['post_y']      = lambda preTags,postTags: postTags['y'] 
        dictVars['post_z']      = lambda preTags,postTags: postTags['z'] 
        dictVars['post_xnorm']  = lambda preTags,postTags: postTags['xnorm'] 
        dictVars['post_ynorm']  = lambda preTags,postTags: postTags['ynorm'] 
        dictVars['post_znorm']  = lambda preTags,postTags: postTags['znorm'] 
        dictVars['dist_x']      = lambda preTags,postTags: abs(preTags['x'] - postTags['x'])
        dictVars['dist_y']      = lambda preTags,postTags: abs(preTags['y'] - postTags['y']) 
        dictVars['dist_z']      = lambda preTags,postTags: abs(preTags['z'] - postTags['z'])
        dictVars['dist_3D']    = lambda preTags,postTags: sqrt((preTags['x'] - postTags['x'])**2 +
                                (preTags['y'] - postTags['y'])**2 + 
                                (preTags['z'] - postTags['z'])**2)
        dictVars['dist_2D']     = lambda preTags,postTags: sqrt((preTags['x'] - postTags['x'])**2 +
                                (preTags['z'] - postTags['z'])**2)
        dictVars['dist_xnorm']  = lambda preTags,postTags: abs(preTags['xnorm'] - postTags['xnorm'])
        dictVars['dist_ynorm']  = lambda preTags,postTags: abs(preTags['ynorm'] - postTags['ynorm']) 
        dictVars['dist_znorm']  = lambda preTags,postTags: abs(preTags['znorm'] - postTags['znorm'])
        dictVars['dist_norm3D'] = lambda preTags,postTags: sqrt((preTags['xnorm'] - postTags['xnorm'])**2 +
                                (preTags['ynorm'] - postTags['ynorm'])**2 + 
                                (preTags['znorm'] - postTags['znorm'])**2)
        dictVars['dist_norm2D'] = lambda preTags,postTags: sqrt((preTags['xnorm'] - postTags['xnorm'])**2 +
                                (preTags['znorm'] - postTags['znorm'])**2)
        # add netParams variables
        for k,v in f.net.params.items():
            if isinstance(v, Number):
                dictVars[k] = v

        # for each parameter containing a function
        for paramStrFunc in paramsStrFunc:
            strFunc = connParam[paramStrFunc]  # string containing function
            strVars = [var for var in dictVars.keys() if var in strFunc and var+'norm' not in strFunc]  # get list of variables used (eg. post_ynorm or dist_xyz)
            lambdaStr = 'lambda ' + ','.join(strVars) +': ' + strFunc # convert to lambda function 
            lambdaFunc = eval(lambdaStr)
       
            # initialize randomizer in case used in function
            seed(f.sim.id32('%d'%(f.cfg['seeds']['conn'])))

            if paramStrFunc in ['probability']:
                # replace function with dict of values derived from function (one per pre+post cell)
                connParam[paramStrFunc+'Func'] = {(preGid,postGid): lambdaFunc(
                    **{strVar: dictVars[strVar] if isinstance(dictVars[strVar], Number) else dictVars[strVar](preCellTags, postCellTags) for strVar in strVars})  
                    for preGid,preCellTags in preCellsTags.items() for postGid,postCellTags in postCellsTags.items()}

            elif paramStrFunc in ['convergence']:
                # replace function with dict of values derived from function (one per post cell)
                connParam[paramStrFunc+'Func'] = {postGid: lambdaFunc(
                    **{strVar: dictVars[strVar] if isinstance(dictVars[strVar], Number) else dictVars[strVar](None, postCellTags) for strVar in strVars}) 
                    for postGid,postCellTags in postCellsTags.items()}

            elif paramStrFunc in ['divergence']:
                # replace function with dict of values derived from function (one per post cell)
                connParam[paramStrFunc+'Func'] = {preGid: lambdaFunc(
                    **{strVar: dictVars[strVar] if isinstance(dictVars[strVar], Number) else dictVars[strVar](preCellTags, None) for strVar in strVars}) 
                    for preGid, preCellTags in preCellsTags.items()}

            else:
                # store lambda function and func vars in connParam (for weight and delay, since only calculated for certain conns)
                connParam[paramStrFunc+'Func'] = lambdaFunc
                connParam[paramStrFunc+'FuncVars'] = {strVar: dictVars[strVar] for strVar in strVars} 
Code example #27
def print_stats(list_1):
    print("\t N\t", len(list_1))
    print("\t mean\t", pylab.mean(list_1))
    print("\t error\t", pylab.std(list_1) / pylab.sqrt(len(list_1)))
Code example #28
drop_prob  = {13}    # Dropout probability for pre-output layer
VAL        = {14}  # Whether to have validation or not
""".format(
    seed, num_filts, K, D, dil_reg, reg_out, batch_size, samps,
    stride, iSNR, desired_sr, mu, C, drop_prob, VAL
    )

print(TXT)
P.seed(seed)

# Getting the data
x1, sr = read("new/ajay1.wav")
y1, sr = read("new/anh_synced_ajay1.wav")
y1 = y1[:len(x1)]
x1 = resample_poly(x1, desired_sr, sr)
vx1 = P.sqrt((x1**2).mean())
x1 = x1 / vx1 * .05
y1 = resample_poly(y1, desired_sr, sr)
vy1 = P.sqrt((y1**2).mean())
y1 = y1 / vy1 * .05
sr = desired_sr

x2, sr = read("new/ajay2.wav")
y2, sr = read("new/anh_synced_ajay2.wav")
y2 = y2[:len(x2)]
x2 = resample_poly(x2, desired_sr, sr)
vx2 = P.sqrt((x2**2).mean())
x2 = x2 / vx2 * .05
y2 = resample_poly(y2, desired_sr, sr)
vy2 = P.sqrt((y2**2).mean())
y2 = y2 / vy2 * .05
Code example #29
File: phot.py Project: indebetouw/pyphot
    def phot(self, im, showmask=True):
        # TODO if we switch to astropy.photometry then we can have that
        # do the work with subpixels properly, but for now they don't
        # do rms of the bg correctly so we can't use their stuff yet.

        mask = self.setmask(im)

        if showmask:
            cmap1 = pl.matplotlib.colors.LinearSegmentedColormap.from_list(
                'my_cmap', ["black", "blue"], 2)
            cmap1._init()
            cmap1._lut[:, -1] = pl.array([0, 0.5, 0, 0, 0])
            pl.imshow(mask > 0,
                      origin="bottom",
                      interpolation="nearest",
                      cmap=cmap1)

        from scipy import ndimage
        from scipy.ndimage import measurements as m
        nin = len(pl.where(mask == 1)[0])
        nout = len(pl.where(mask == 2)[0])

        floor = pl.nanmin(im.data)
        if floor < 0: floor = 0
        raw = m.sum(im.data, mask, 1) - floor * nin

        #bg=m.mean(im.data,mask,2)
        #bgsig=m.standard_deviation(im.data,mask,2)

        #        from astropy.stats import sigma_clip
        #        clipped = sigma_clip(im.data,sig=3,iters=2)
        #        # http://astropy.readthedocs.org/en/latest/api/astropy.stats.sigma_clip.html#astropy.stats.sigma_clip
        #        # TODO what we really want is to sigma-clip only the BG array/mask
        #        # because including the source will probably just be domimated by the
        #        # source...
        #        bg   =m.mean(              clipped,mask,2)-floor
        #        bgsig=m.standard_deviation(clipped,mask,2)

        # sigma_clip doesn't handle nans
        from scipy import stats

        def mymode(x):
            return stats.mode(x, axis=None)[0][0]

#        pdb.set_trace()
#        xx=stats.mode(im.data,axis=None)
#        print xx

        bg = ndimage.labeled_comprehension(im.data, mask, 2, mymode, "float",
                                           0) - floor
        #        bg = ndimage.labeled_comprehension(im.data,mask,2,pl.mean,"float",0)
        bgsig = m.standard_deviation(im.data, mask, 2)

        # assume uncert dominated by BG level.
        # TODO add sqrt(cts in source) Poisson - need gain or explicit err/pix
        uncert = bgsig * nin / pl.sqrt(nout)

        results = raw, bg, raw - bg * nin, uncert

        f = self.photfactor(im)
        if f:
            if self.debug: print("phot factor =", f)
            results = pl.array(results) * f

        if self.debug:
            #            print "max=", m.maximum(im.data,mask,1), m.maximum(im.data,mask,2)
            #            print "nin,nout=",nin,nout
            print "raw, bg, bgsubbed, uncert=", results
            pdb.set_trace()

        return results
Code example #30
 def extract(self):
     Features.extract(self)
     self.X = P.sqrt((P.diff(self.X)**2).sum(0)) / self.X.shape[0]
Code example #31
from __future__ import division, print_function
import matplotlib.pylab as P
import torch as T
from curvfife import CurvFiFE
from tqdm import trange, tqdm
import hickle
from scipy.optimize import fmin_l_bfgs_b as BFGS_min
norm_cdf = lambda x: (1 + T.erf(x / P.sqrt(2))) / 2
norm_ppf = lambda x: P.sqrt(2) * T.erfinv(2 * T.clamp(x, 1e-9, 1 - 1e-9) - 1)

from datetime import datetime
import os
import errno


def to_tens(a, dims=None):
    """Converts to pytorch tensor with `dims` dimensions if not already"""
    if not (type(a) == T.Tensor):
        a = T.tensor(a, dtype=T.float64)
    if dims is None:
        d = T.tensor(0.0, dtype=T.float64)
    else:
        d = T.zeros((1, ) * dims, dtype=T.float64)
    return a.double() + d


def trapz(y, x):
    y_avg = (y[1:, :] + y[:-1, :]) / 2
    dx = x[1:] - x[:-1]
    return dx.matmul(y_avg)
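
A quick check of trapz against a known integral (y is expected with shape (len(x), batch)); integrating y = x over [0, 1] should give 0.5:

x = to_tens([0.0, 0.5, 1.0])
y = x.reshape(-1, 1)     # one batch column with y = x
print(trapz(y, x))       # tensor([0.5000], dtype=torch.float64)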
Code example #32
File: visualize_model.py Project: athuls/gsra
def main(argv):
  #if len(argv) < 4:
  #  print """Usage:
  #  """
  #  sys.exit(1)
  #nlayers = int(argv[1])

  # get number of layers
  f = open(FLAGS.dir+"weights/nlayers.txt")
  nlayers = int(f.readlines()[0])
  f.close()
  print(str(nlayers) + " layer(s) detected.")

  pylab.figure(figsize=(32, 24), dpi=320, facecolor='w', edgecolor='k')

  # Make a picture with the filters
  [n_inputs, n_neurons, weights] = visualizing.weights_filename_to_array(FLAGS.dir+"weights/W0.txt")

  image_side_length = int(pylab.sqrt(n_inputs))
  nsubplot = pylab.ceil(pylab.sqrt(n_neurons))
  for i in range(n_neurons):
    filter = pylab.resize(weights[i], [image_side_length, image_side_length])
    pylab.subplot(nsubplot, nsubplot, i+1)
    pylab.imshow(filter, interpolation='nearest')
    pylab.gray()

  pylab.savefig(FLAGS.dir + "filters" + ".png")
  pylab.clf()

  # Make a picture with the all weights
  nsubplot_vertical = nlayers+1
  nsubplot_horizontal = 4
  for i in range(nlayers):
    # V
    location = 1+(nlayers-i)*nsubplot_horizontal+0
    filename = FLAGS.dir+"weights/V" + str(i) + ".txt"
    visualizing.plot_weight_matrix(nsubplot_vertical, nsubplot_horizontal, location,
                       filename, True, "V"+str(i))

    # W
    location = 1+(nlayers-i)*nsubplot_horizontal+1
    filename = FLAGS.dir+"weights/W" + str(i) + ".txt"
    visualizing.plot_weight_matrix(nsubplot_vertical, nsubplot_horizontal,
                                   location, filename, False, "W"+str(i))

    # F
    location = 1+(nlayers-i)*nsubplot_horizontal+2
    filename = FLAGS.dir+"weights/F" + str(i) + ".txt"
    visualizing.plot_weight_matrix(nsubplot_vertical, nsubplot_horizontal,
                                   location, filename, False, "F"+str(i))

    # G
    location = 1+(nlayers-i)*nsubplot_horizontal+3
    filename = FLAGS.dir+"weights/G" + str(i) + ".txt"
    visualizing.plot_weight_matrix(nsubplot_vertical, nsubplot_horizontal,
                                   location, filename, True, "G"+str(i))

  # Last layer
  location = 1+1
  filename = FLAGS.dir+"weights/W" + str(nlayers) + ".txt"
  visualizing.plot_weight_matrix(nsubplot_vertical, nsubplot_horizontal,
                                 location, filename, False, "W"+str(nlayers))

  pylab.savefig(FLAGS.dir + "weights" + ".png")

  # Make pictures with examples and representations
  visualizing.visualize_representations(FLAGS.dir, nlayers)
Code example #33
import matplotlib.pylab as plt
import pandas
from matplotlib.pylab import sqrt, boxplot
import math
import numpy as np
from processing import *
from datetime import datetime


fig_width_pt = 496.0  # Get this from LaTeX using \showthe\columnwidth
inches_per_pt = 1.0/72.27               # Convert pt to inch
golden_mean = (sqrt(5)-1.0)/2.0         # Aesthetic ratio
fig_width = fig_width_pt*inches_per_pt  # width in inches
fig_height = fig_width*golden_mean      # height in inches
fig_size =  [fig_width,fig_height]
params = {'backend': 'ps',
          'axes.labelsize': 10,
          'font.size': 10,
          'xtick.labelsize': 8,
          'ytick.labelsize': 8,
          'text.usetex': False,
          'figure.figsize': fig_size}
plt.rcParams.update(params)
# Generate data

#minidolar
# dataframe = pandas.read_csv('minidolar/wdo.csv', sep = '|',  engine='python', decimal='.',header=0)
#
# series = pandas.Series(dataframe['fechamento'].values, index=dataframe['ts'])
# y = np.array(dataframe['fechamento'].tolist())
# # Plot data
Code example #34
    M[0, 1] = 0
    M[-1, -2] = 0
    Mi = P.pinv(M)
    smooth_X = (lambda_s / N) * Mi.dot(X.T).T
    return smooth_X


print "Loading Audio..."
# y1, fs = librosa.load("r1.wav")
# y2, fs = librosa.load("r2.wav")
# y1, fs = librosa.load("test_anh.wav")
# y2, fs = librosa.load("test_ajay.wav")
y1, fs = sf.read("anh_2.wav")
y2, fs = sf.read("ajay2.wav")
# Add some simple padding
i1 = P.argmax(y1 > P.sqrt((y1**2).mean()) / 3)
i2 = P.argmax(y2 > P.sqrt((y2**2).mean()) / 3)
I = max(i1, i2) * 2
z1 = y1[i1 // 5:(i1 // 5) * 2]
y1 = P.hstack([z1] * ((I - i1) // len(z1)) + [z1[:((I - i1) % len(z1))]] +
              [y1])
z2 = y2[i2 // 5:(i2 // 5) * 2]
y2 = P.hstack([z2] * ((I - i2) // len(z2)) + [z2[:((I - i2) % len(z2))]] +
              [y2])
# y1 = P.concatenate([P.zeros(I - i1), y1])
# y2 = P.concatenate([P.zeros(I - i2), y2])
print("Setting padding to {0:.2f} s".format(I / fs))
# manually downsample by factor of 2
fs = fs // 2
y1 = decimate(y1, 2, zero_phase=True)
y2 = decimate(y2, 2, zero_phase=True)