Example #1
def emfTitration(ax, massAcid, emf, massSample, concAcid, alk_emf0, alkGuess,
                 rgb, sublabel):
    """EMF change as acid is added throughout a titration."""
    ax.axvline(1e3 * alk_emf0['x'][0] * massSample / concAcid,
               color=_rgb_final,
               linestyle='--',
               zorder=1)
    ax.axvline(1e3 * alkGuess * massSample / concAcid,
               color=_rgb_guess,
               linestyle='--',
               zorder=1)
    ax.scatter(massAcid * 1e3,
               emf,
               c=rgb,
               edgecolors='k',
               clip_on=False,
               zorder=2)
    ax.set_xlim([0, np_max(massAcid) * 1e3])
    yrange = np_max(emf) - np_min(emf)
    ax.set_ylim([np_min(emf) - yrange * 0.05, np_max(emf) + yrange * 0.05])
    ax.set_xlabel('Acid mass / g')
    ax.set_ylabel('EMF / mV')
    ax.set_title(r'{} Final EMF$^\circ$ = {:.2f} mV'.format(
        sublabel, alk_emf0['x'][1]),
                 fontsize=10)
    return ax
Example #2
    def update(self, abs_tol=1e-5, rel_tol=1e-3):
        '''
        During the update step, compute the gradient of Q
        and update the weights (w) and biases (b).
        '''
        # accumulate beta_m^t, in order to bias-correct the running averages
        self.beta_m_ac *= self.beta_m

        #The update should run after
        #FFANN.feedForward() and FFANN.backPropagation().

        #these will be used to determine if the stopping conditions are satisfied
        _w2 = 0
        _check = 0
        self.Q.randomDataPoint()

        for l in range(self.Q.model.total_layers - 1):
            for j in range(self.Q.model.nodes[l + 1]):
                for i in range(self.Q.model.nodes[l]):
                    #get the grad of the loss. The results should be stored in loss.dQdw and loss.dQdb
                    #This way it should be easy to update the weights and biases of FFANN
                    self.Q.grad(l, j, i)

                    self.mw[l][j][i] = self.beta_m * self.mw[l][j][i] + (
                        1 - self.beta_m) * self.Q.dQdw
                    self.vw_max[l][j][i] = np_max([
                        self.beta_v * self.vw_max[l][j][i],
                        np_abs(self.Q.dQdw)
                    ])
                    dw = self.alpha / (self.vw_max[l][j][i] +
                                       self.epsilon) * self.mw[l][j][i] / (
                                           1 - self.beta_m_ac)

                    #update the weight using loss.dQdw
                    self.Q.model.addToWeight(l, j, i, -dw)

                    _w2 = abs_tol + np_abs(
                        self.Q.model.weights[l][j][i]) * rel_tol
                    _check += (dw / _w2) * (dw / _w2)

                #update the bias using loss.dQdb (it is the same for all i, so don't run loss.grad again).
                self.mb[l][j] = self.beta_m * self.mb[l][j] + (
                    1 - self.beta_m) * self.Q.dQdb
                self.vb_max[l][j] = np_max(
                    [self.beta_v * self.vb_max[l][j],
                     np_abs(self.Q.dQdb)])
                dw = self.alpha / (self.vb_max[l][j] + self.epsilon
                                   ) * self.mb[l][j] / (1 - self.beta_m_ac)

                self.Q.model.addToBias(l, j, -dw)

                _w2 = abs_tol + np_abs(self.Q.model.biases[l][j]) * rel_tol
                _check += (dw / _w2) * (dw / _w2)

        _check = np_sqrt(1. / self.Q.N * _check)
        return _check
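The loop applies the same AdaMax/AMSGrad-style rule (a running mean of the gradient plus a decayed running max of its magnitude) to every weight and bias. A per-parameter sketch, with illustrative names that are not from the source:

def adamax_step(w, g, m, v_max, beta_m_ac,
                alpha=1e-3, beta_m=0.9, beta_v=0.999, epsilon=1e-8):
    # running mean of the gradient, and decayed running max of |gradient|
    m = beta_m * m + (1 - beta_m) * g
    v_max = max(beta_v * v_max, abs(g))
    # bias-corrected step, as in the loop above
    dw = alpha / (v_max + epsilon) * m / (1 - beta_m_ac)
    return w - dw, m, v_max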
Example #3
def plot_optimized_network(network, blocking=True, save_plot=True):
    _title = 'Optimized network vs. non-optimized'
    figure(_title)

    graph = network.network_plot
    plot_layout = spring_layout(graph)

    balls = network.get_ball_distribution()

    # debug output: compare the reference and current ball distributions
    print(network.ref_distribution)
    print(balls)

    min_val = 0
    max_val = max([np_max(network.ref_distribution), np_max(balls)])

    cmap = cm.Greys
    color_vals = cm.ScalarMappable(cmap=cmap,
                                   norm=Normalize(vmin=min_val, vmax=max_val))
    # dummy array so colorbar() accepts the ScalarMappable (older-matplotlib workaround)
    color_vals._A = []

    subplot(2, 1, 1)
    title('Initial network')
    colorbar(color_vals)

    draw_networkx_edges(graph, plot_layout, alpha=.3)
    draw_networkx_nodes(graph,
                        plot_layout,
                        node_size=100,
                        edgecolors='k',
                        node_color=network.ref_distribution,
                        cmap=cmap,
                        vmin=min_val,
                        vmax=max_val)
    axis('off')  # Disable axis

    subplot(2, 1, 2)
    title('Optimized network')
    colorbar(color_vals)

    draw_networkx_edges(graph, plot_layout, alpha=.3)
    draw_networkx_nodes(graph,
                        plot_layout,
                        node_size=100,
                        edgecolors='k',
                        node_color=balls,
                        cmap=cmap,
                        vmin=min_val,
                        vmax=max_val)
    axis('off')
    draw()

    if save_plot:
        savefig('../results/optimized_network.png')
    show(block=blocking)  # Open matplotlib window
Example #4
def count_seq(x):
    """Count runs of consecutive 1s in a binary array, keyed by run length."""
    # a run stops where the sequence steps 1 -> 0 and starts where it steps 0 -> 1
    stop = np_nonzero(np_diff(np_hstack((x, 0))) == -1)
    start = np_nonzero(np_diff(np_hstack((0, x))) == 1)

    ld = {}
    if len(start[0]):
        d = stop[0] - start[0] + 1  # run lengths
        for ll in range(np_max((np_min(d), 2)), np_max(d) + 1):
            ld[str(ll)] = np_sum((d == ll).astype(int))

    return ld
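For example, two runs of lengths 2 and 3 (illustrative, assuming the np_* names above alias the corresponding numpy functions):

import numpy as np
count_seq(np.array([1, 1, 0, 1, 1, 1]))  # -> {'2': 1, '3': 1}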
Example #5
def guessGran(massAcid, emf, tempK, massSample, concAcid):
    """Simple Gran plot first guesses for alkalinity and EMF0."""
    f1Guess = f1(massAcid, emf, tempK, massSample)
    LGuess = logical_and(
        f1Guess > 0.1*np_max(f1Guess),
        f1Guess < 0.9*np_max(f1Guess),
    )
    alkGuess = granAlkGuess(massAcid[LGuess], f1Guess[LGuess], massSample,
        concAcid)
    emf0Guess = mean(granEmf0Guess(massAcid[LGuess], emf[LGuess],
        tempK[LGuess], massSample, concAcid, alkGuess))
    hGuess = emf2h(emf, emf0Guess, tempK)
    pHGuess = -log10(hGuess)
    return alkGuess, emf0Guess, hGuess, pHGuess
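For reference, the Gran function used for these first guesses is, in its usual form, F1(m) = (m0 + m) · exp(E·F / (R·T)), with m0 the sample mass, m the acid mass and E the EMF; F1 becomes linear in m beyond the equivalence point, and extrapolating that linear stretch to F1 = 0 yields the equivalence point and hence the first alkalinity estimate.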
Example #6
def quality(data):
    # relies on X, y, recall and logreg_predict from the surrounding script
    _z_ = logreg_predict(data)[0]
    sample = X[y == _z_]
    sample_ = X[y == (1 - _z_)]
    d = np_linalg_norm([data] - sample)
    d_ = np_linalg_norm([data] - sample_)
    r = np_max(d_) / np_max(d)
    # r = np_mean(d) / np_mean(d_)
    # r = np_min(d) / np_min(d_)
    # r = 0.5 * (r + recall)
    if r > 1:
        r = abs(1 - r)
    r = 0.5 * (r + recall)
    return str(r)
Example #7
def get_arrows_plt(mesh_pv, field, meshsol, factor, is_point_arrow, phase=1):
    """Create a pyvista arrow plot

    Parameters
    ----------
    mesh_pv : UnstructuredGrid
        a pyvista mesh object
    field : ndarray
        a vector field to plot as glyph
    meshsol : MeshSolution
        a MeshSolution object
    factor : float
        an amplitude factor for the glyph plot
    is_point_arrow : bool
        True to plot arrows on the nodes
    phase : complex
        a phase shift to apply on the plot

    Returns
    -------
    arrows_plt : PolyData
        a pyvista object to plot glyph
    factor : float
        an amplitude factor for the plot glyph
    """

    vect_field = real(field * phase)

    # Compute a default amplitude factor: arrows scaled to ~20% of the mesh extent
    if factor is None:
        factor = 0.2 * np_max(np_abs(mesh_pv.bounds)) / np_max(np_abs(vect_field))

    # Add third dimension if needed
    solution = meshsol.get_solution()
    if solution.dimension == 2:
        vect_field = hstack((vect_field, zeros((vect_field.shape[0], 1))))

    # Add field to mesh
    if is_point_arrow:
        mesh_pv.vectors = vect_field * factor
        arrows_plt = mesh_pv.arrows
    else:
        mesh_pv["field"] = vect_field
        mesh_cell = mesh_pv.point_data_to_cell_data()
        surf = mesh_cell.extract_geometry()
        centers2 = surf.cell_centers()
        centers2.vectors = surf["field"] * factor
        arrows_plt = centers2.arrows

    return arrows_plt, factor
Example #8
def prep(datFile,
         volSample,
         pSal,
         totalCarbonate,
         totalPhosphate,
         totalSilicate,
         concAcid,
         WhichKs=10,
         WhoseKSO4=1,
         WhoseKF=1,
         WhoseTB=2,
         totalAmmonia=0,
         totalH2Sulfide=0):
    """Preparatory calculations for plotting."""
    massAcid, emf, tempK, massSample, concTotals, eqConstants = \
        datfile.prep(datFile, volSample, pSal, totalCarbonate, totalPhosphate,
        totalSilicate, WhichKs=WhichKs, WhoseKSO4=WhoseKSO4, WhoseKF=WhoseKF,
        WhoseTB=WhoseTB, totalAmmonia=totalAmmonia,
        totalH2Sulfide=totalH2Sulfide)
    f1Guess = solve.f1(massAcid, emf, tempK, massSample)
    LGuess = logical_and(
        f1Guess > 0.1 * np_max(f1Guess),
        f1Guess < 0.9 * np_max(f1Guess),
    )
    alkGuess, emf0Guess, _, pHGuess = solve.guessGran(massAcid, emf, tempK,
                                                      massSample, concAcid)
    granEmf0 = solve.granEmf0Guess(massAcid[LGuess], emf[LGuess],
                                   tempK[LGuess], massSample, concAcid,
                                   alkGuess)
    L = logical_and(pHGuess > 3, pHGuess < 4)
    alk_emf0 = solve.complete(massAcid, emf, tempK, massSample, concAcid,
                              concTotals, eqConstants)
    h = solve.emf2h(emf, alk_emf0['x'][1], tempK)
    mu = solve.mu(massAcid, massSample)
    alkSim = simulate.alk(h, mu, concTotals, eqConstants)
    alk0Sim = (alkSim[0] + massAcid * concAcid / (massAcid + massSample)) / mu
    RMS = sqrt(mean(alk_emf0['fun']**2))
    Npts = len(alk_emf0['fun'])
    rgb = zeros((len(emf), 3))
    for i in range(len(rgb)):
        if LGuess[i] and L[i]:
            rgb[i] = _rgb_both
        elif LGuess[i] and not L[i]:
            rgb[i] = _rgb_guess
        elif L[i] and not LGuess[i]:
            rgb[i] = _rgb_final
        else:
            rgb[i] = array([1, 1, 1])
    return (massAcid, emf, massSample, f1Guess, LGuess, alkGuess, emf0Guess,
            granEmf0, alk_emf0, alkSim, alk0Sim, RMS, Npts, rgb)
Example #9
def mmr_geometricmedian(X):
  """Geometric median of the rows of X, computed by Weiszfeld iteration."""
  (m, n) = X.shape
  u = mean(X, axis=0)  # start from the arithmetic mean
  niter = 1000
  xeps = sqrt(np_sum(u**2)) / 1000
  xerr = 2 * xeps
  for i in range(niter):
    d2u = sqrt(np_sum((X - tile(u, (m, 1)))**2, axis=1))
    inul = where(d2u < xeps)[0]
    d2u[inul] = xeps  # avoid division by zero at (or near) a sample point
    unext = np_sum(X / tile(d2u.reshape((m, 1)), (1, n)), axis=0) / np_sum(ones(m) / d2u)
    if np_max(unext - u) < xerr:
      break
    u = copy(unext)
  return (unext, i, np_max(unext - u))
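As a sanity check (illustrative, assuming the numpy imports used above are in scope): for collinear points the geometric median reduces to the ordinary median.

import numpy as np
u, n_iter, err = mmr_geometricmedian(np.array([[0., 0.], [1., 0.], [10., 0.]]))
# u is approximately [1., 0.]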
Example #10
def comp_point_ref(self, is_set=False):
    """Compute the point ref of the Surface

    Parameters
    ----------
    self : SurfLine
        A SurfLine object
    is_set: bool
        True to update the point_ref property

    Returns
    -------
    point_ref : complex
        the reference point of the surface
    """

    middle_array = array([line.get_middle() for line in self.get_lines()])
    point_ref = sum(middle_array) / middle_array.size

    # Use another method if the point is not inside the surface
    if not self.is_inside(Z=point_ref, if_online=False):
        middle_array_abs = abs(middle_array)
        # Find "min abs" middle
        mid_id = argmin(middle_array_abs)
        Zmid = middle_array[mid_id]
        H = np_max(middle_array_abs) - np_min(middle_array_abs)

        point_ref = (abs(Zmid) + H / 100) * exp(1j * angle(Zmid))

    if is_set:
        self.point_ref = point_ref
    return point_ref
Example #11
    def update(self, abs_tol=1e-5, rel_tol=1e-3):
        '''
        update should return a number that, when it is smaller than 1,
        stops the main loop. Here this number is chosen to be:
        sqrt(1/dim * sum_{i=0}^{dim} (dw / (abs_tol + |w| * rel_tol))_i^2)
        '''

        # accumulate beta_m^t, in order to bias-correct the running averages
        self.beta_m_ac *= self.beta_m

        _w2 = 0
        _check = 0

        self.Q.averageGrad()

        for i in range(self.dim):

            self.mE[i] = self.beta_m * self.mE[i] + (
                1 - self.beta_m) * self.Q.grad[i]
            self.v_max[i] = np_max(
                [self.beta_v * self.v_max[i],
                 np_abs(self.Q.grad[i])])
            dw = self.alpha / (self.v_max[i] + self.epsilon) * self.mE[i] / (
                1 - self.beta_m_ac)

            self.Q.model.w[i] = self.Q.model.w[i] - dw

            _w2 = abs_tol + np_abs(self.Q.model.w[i]) * rel_tol
            _check += (dw / _w2) * (dw / _w2)

            self.Q.grad[i] = 0

        _check = np_sqrt(1. / self.dim * _check)

        return _check
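A standalone restatement of the stopping test (a sketch, assuming plain numpy):

import numpy as np

def within_tolerance(steps, params, abs_tol=1e-5, rel_tol=1e-3):
    # RMS of the steps measured in units of each parameter's tolerance;
    # a value below 1 means the parameters moved less than their
    # tolerances on average, so the main loop can stop.
    scale = abs_tol + np.abs(params) * rel_tol
    return np.sqrt(np.mean((steps / scale) ** 2)) < 1.0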
Example #12
def get_field(self, axes_list):
    """Returns the values of the field (with symmetries and sums).
    Parameters
    ----------
    self: Data
        a Data object
    axes_list: list
        a list of RequestedAxis objects
    Returns
    -------
    values: ndarray
        values of the field
    """

    values = self.values
    for axis_requested in axes_list:
        # Rebuild symmetries when needed
        axis_symmetries = self.axes[axis_requested.index].symmetries
        if (
            axis_requested.transform == "fft" and axis_requested.is_pattern
        ) or (
            axis_requested.extension in ["sum", "rss", "mean", "rms", "integrate"]
            and axis_requested.is_pattern
        ):
            values = take(values, axis_requested.rebuild_indices, axis_requested.index)
        elif axis_requested.transform == "fft" and "antiperiod" in axis_symmetries:
            nper = axis_symmetries["antiperiod"]
            axis_symmetries["antiperiod"] = 2
            values = rebuild_symmetries(values, axis_requested.index, axis_symmetries)
            axis_symmetries["antiperiod"] = nper
        elif axis_requested.indices is not None:
            if (
                axis_requested.extension in ["sum", "rss", "mean", "rms", "integrate"]
                or max(axis_requested.indices) > values.shape[axis_requested.index]
            ):
                values = rebuild_symmetries(
                    values, axis_requested.index, axis_symmetries
                )
                self.axes[axis_requested.index].symmetries = dict()

        # sum over sum axes
        if axis_requested.extension == "sum":
            values = np_sum(values, axis=axis_requested.index, keepdims=True)
        # root sum square over rss axes
        elif axis_requested.extension == "rss":
            values = sqrt(np_sum(values ** 2, axis=axis_requested.index, keepdims=True))
        # mean value over mean axes
        elif axis_requested.extension == "mean":
            values = np_mean(values, axis=axis_requested.index, keepdims=True)
        # RMS over rms axes
        elif axis_requested.extension == "rms":
            values = sqrt(
                np_mean(values ** 2, axis=axis_requested.index, keepdims=True)
            )
        # integration over integration axes
        elif axis_requested.extension == "integrate":
            values = trapz(
                values, x=axis_requested.values, axis=axis_requested.index
            ) / (np_max(axis_requested.values) - np_min(axis_requested.values))
    return values
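The extension keywords map onto simple keepdims reductions; in isolation (illustrative, using plain numpy rather than the np_* aliases):

import numpy as np

values = np.array([[1.0, 2.0], [3.0, 4.0]])
np.sum(values, axis=0, keepdims=True)                 # "sum"
np.sqrt(np.sum(values ** 2, axis=0, keepdims=True))   # "rss"
np.mean(values, axis=0, keepdims=True)                # "mean"
np.sqrt(np.mean(values ** 2, axis=0, keepdims=True))  # "rms"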
Example #13
def mmr_geometricmedian_ker(K):
    m = K.shape[0]
    Ka = mean(K, axis=1)
    niter = 1000
    xeps = sqrt(np_sum(Ka**2)) / 1000
    xerr = 2 * xeps
    Q = linalg.qr(K)[0]
    Ra = dot(Q.T, Ka)
    aKa = dot(Ra, Ra)
    for i in range(niter):
        # note: round-off can make this argument slightly negative
        # (the variant in Example #17 below clips it at zero first)
        d2u = sqrt((zeros(m) + aKa) + diag(K) - 2 * Ka)
        inul = where(d2u < xeps)[0]
        d2u[inul] = xeps
        Kanext=np_sum(K/tile(d2u.reshape((m,1)),(1,m)),axis=0) \
               /np_sum(ones(m)/d2u)

        du = np_sum((K - tile(Ka, (m, 1))).T / tile(d2u, (m, 1)), axis=1)
        print(sqrt(np_sum(du**2)))
        if np_max(Kanext - Ka) < xerr:
            Ka = copy(Kanext)
            Ra = dot(Q.T, Ka)
            aKa = dot(Ra, Ra)
            break
        Ka = copy(Kanext)
        Ra = dot(Q.T, Ka)
        aKa = dot(Ra, Ra)

    return (Ka, aKa)
Example #14
 def get_fit(self,
             fit_type,
             data,
             order,
             smooth,
             degree,
             begin,
             end,
             weight=None):
     z1 = np_zeros(begin)
     if end == 0:
         end = len(data[0])  # 0 means "fit to the end of the data"
     z2 = np_zeros(len(data[0]) - end)  # zero padding after the fitted window
     x = data[0][begin:end]
     y = data[1][begin:end]
     if weight is not None:
         weight = weight[begin:end]
     if fit_type == "spline":
         f = inter.UnivariateSpline(x, y, w=weight, k=order, s=smooth)
     else:
         f = poly.Chebyshev.fit(x, y, degree, w=weight)
     res = y - f(x)
     nfit = f(x) / np_max(f(x))
     corr = concatenate((z1, nfit, z2))
     fitc = [x, f(x), corr, res]
     return fitc
Example #15
 def maximum_spread(self,
                    pareto_front=None,
                    reference_front=None):  ## MS function
     """ It addresses the range of objective function values and takes into account the proximity to the true Pareto front"""
     pareto_front, reference_front = self.get_pareto_front_reference_front(
         pareto_front, reference_front)
     n_objs = reference_front.shape[1]
     pf_max = np_max(pareto_front, axis=0)
     pf_min = np_min(pareto_front, axis=0)
     rf_max = np_max(reference_front, axis=0)
     rf_min = np_min(reference_front, axis=0)
     ms = 0
     for i in range(0, n_objs):
         ms += ((min(pf_max[i], rf_max[i]) - max(pf_min[i], rf_min[i])) /
                (rf_max[i] - rf_min[i]))**2
     return sqrt(ms / n_objs)
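In symbols (restating the loop above), with pf and rf the Pareto and reference fronts:

    MS = sqrt( (1/n_objs) * sum_i [ (min(pf_max_i, rf_max_i) - max(pf_min_i, rf_min_i)) / (rf_max_i - rf_min_i) ]^2 )

so MS approaches 1 when the Pareto front spans the full range of the reference front in every objective.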
Example #16
def plot_animated(self, i):
    """Return the pyvista mesh object.

    Parameters
    ----------
    self : Mode
        a Mode object
    i : int
        index of the mode to plot

    Returns
    -------
    mesh : pyvista.core.pointset.StructuredGrid
        a pyvista StructuredGrid object
    """

    radial_shape = self.get_shape_pol()[:, 0]
    shape_xyz = self.get_shape_xyz()
    clim = [np_min(radial_shape), np_max(radial_shape)]
    self.parent.mesh.plot_deformation_animated(
        shape_xyz,
        radial_shape,
        factor=0.05,
        field_name="Radial displacement",
        clim=clim,
    )
Example #17
def mmr_geometricmedian_ker(K):
  m=K.shape[0]
  Ka=mean(K,axis=1)
  aKa=np_sum(Ka)/m

  niter=1000
  xeps=sqrt(np_sum(Ka**2))/100
  xerr=2*xeps

  e1=ones(m)

  for iiter in range(niter):
    ## d2u=sqrt((zeros(m)+aKa)+diag(K)-2*Ka)
    d2u_2=aKa+diag(K)-2*Ka
    ineg=where(d2u_2<0)[0]
    d2u_2[ineg]=0.0
    d2u=sqrt(d2u_2)

    inul=where(d2u<xeps)[0]
    d2u[inul]=xeps
    xdenom=np_sum(e1/d2u)
    Kanext=np_sum(K/outer(d2u,e1),axis=0)/xdenom 
    aKanext=np_sum(Ka/d2u)/xdenom
    if np_max(Kanext-Ka)<xerr:
      Ka=copy(Kanext)
      aKa=aKanext
      break
    Ka=copy(Kanext)
    aKa=aKanext
    
  return(Ka,aKa)
Example #18
def components(ax, massAcid, alk0Sim, alkSim, sublabel):
    """Every component of alkalinity throughout a titration."""
    ax.plot(massAcid * 1e3,
            -log10(alk0Sim),
            label='Total alk.',
            marker='o',
            markersize=3,
            c='k',
            clip_on=False)
    for component in alkSim[1].keys():
        if component.startswith('-'):  # this is a bit sketchy
            yVar = -alkSim[1][component]
        else:
            yVar = alkSim[1][component]
        if np_any(yVar > 0):
            ax.plot(massAcid * 1e3,
                    -log10(yVar),
                    label=rgbs_names[component][1],
                    marker='x',
                    markersize=3,
                    c=rgbs_names[component][0],
                    clip_on=False)
    ax.set_xlim([0, np_max(massAcid) * 1e3])
    ax.set_ylim(ax.get_ylim()[::-1])
    ax.legend(bbox_to_anchor=(1.05, 1))
    ax.set_xlabel('Acid mass / g')
    ax.set_ylabel(r'$-$log$_{10}$(concentration from pH / mol$\cdot$kg$^{-1}$)')
    ax.set_title(sublabel, fontsize=10)
    return ax
Example #19
def mmr_geometricmedian_ker(K):
  m=K.shape[0]
  Ka=mean(K,axis=1)
  aKa=np_sum(Ka)/m

  niter=1000
  xeps=sqrt(np_sum(Ka**2))/100
  xerr=2*xeps

  e1=ones(m)

  for iiter in range(niter):
    ## d2u=sqrt((zeros(m)+aKa)+diag(K)-2*Ka)
    d2u_2=aKa+diag(K)-2*Ka
    ineg=where(d2u_2<0)[0]
    d2u_2[ineg]=0.0
    d2u=sqrt(d2u_2)

    inul=where(d2u<xeps)[0]
    d2u[inul]=xeps
    xdenom=np_sum(e1/d2u)
    Kanext=np_sum(K/outer(d2u,e1),axis=0)/xdenom 
    aKanext=np_sum(Ka/d2u)/xdenom
    if np_max(Kanext-Ka)<xerr:
      Ka=copy(Kanext)
      aKa=aKanext
      break
    Ka=copy(Kanext)
    aKa=aKanext
    
  return(Ka,aKa)
Example #20
    def train(self):
        pop = [self.create_solution() for _ in range(self.pop_size)]
        v_max = 0.5 * (self.ub - self.lb)
        v_min = zeros(self.problem_size)
        v_list = uniform(v_min, v_max, (self.pop_size, self.problem_size))
        pop_local = deepcopy(pop)
        g_best = self.get_global_best_solution(pop=pop,
                                               id_fit=self.ID_FIT,
                                               id_best=self.ID_MIN_PROB)

        N_CLS = int(self.pop_size / 5)  # Number of chaotic local searches
        for epoch in range(self.epoch):
            r = rand()

            list_fits = [item[self.ID_FIT] for item in pop]
            fit_avg = mean(list_fits)
            fit_min = np_min(list_fits)
            for i in range(self.pop_size):
                w = self.__get_weights__(pop[i][self.ID_FIT], fit_avg, fit_min)
                v_new = w * v_list[i] + self.c1 * rand() * (pop_local[i][self.ID_POS] - pop[i][self.ID_POS]) + \
                        self.c2 * rand() * (g_best[self.ID_POS] - pop[i][self.ID_POS])
                x_new = pop[i][self.ID_POS] + v_new
                x_new = self.amend_position_random_faster(x_new)
                fit_new = self.get_fitness_position(x_new)
                pop[i] = [x_new, fit_new]
                # Update current position, current velocity and compare with past position, past fitness (local best)
                if fit_new < pop_local[i][self.ID_FIT]:
                    pop_local[i] = [x_new, fit_new]

            g_best = self.update_global_best_solution(pop, self.ID_MIN_PROB,
                                                      g_best)

            ## Chaotic local search around the best solution
            cx_best_0 = (g_best[self.ID_POS] - self.lb) / (self.ub - self.lb
                                                           )  # Eq. 7
            cx_best_1 = 4 * cx_best_0 * (1 - cx_best_0)  # Eq. 6
            x_best = self.lb + cx_best_1 * (self.ub - self.lb)  # Eq. 8
            fit_best = self.get_fitness_position(x_best)
            if fit_best < g_best[self.ID_FIT]:
                g_best = [x_best, fit_best]

            bound_min = stack(
                [self.lb, g_best[self.ID_POS] - r * (self.ub - self.lb)])
            self.lb = np_max(bound_min, axis=0)
            bound_max = stack(
                [self.ub, g_best[self.ID_POS] + r * (self.ub - self.lb)])
            self.ub = np_min(bound_max, axis=0)

            pop_new_child = [
                self.create_solution() for _ in range(self.pop_size - N_CLS)
            ]
            pop_new = sorted(pop, key=lambda item: item[self.ID_FIT])
            pop = pop_new[:N_CLS] + pop_new_child

            self.loss_train.append(g_best[self.ID_FIT])
            if self.verbose:
                print(">Epoch: {}, Best fit: {}".format(
                    epoch + 1, g_best[self.ID_FIT]))
        self.solution = g_best
        return g_best[self.ID_POS], g_best[self.ID_FIT], self.loss_train
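The chaotic local search maps the best solution into [0, 1], applies one logistic-map step, and maps back (Eqs. 6-8 above); in isolation (a sketch):

import numpy as np

def chaotic_candidate(x_best, lb, ub):
    cx = (x_best - lb) / (ub - lb)  # normalise into [0, 1]        (Eq. 7)
    cx = 4.0 * cx * (1.0 - cx)      # one logistic-map step, r = 4 (Eq. 6)
    return lb + cx * (ub - lb)      # map back to the search space (Eq. 8)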
Example #21
def alkComponents(titrationPotentiometric, ax=None):
    t = titrationPotentiometric
    assert 'alkSteps' in vars(t), \
        'You must first run `titrationPotentiometric.get_alkSteps()`.'
    # Get the 'used' values
    solver = 'complete'  # only valid option for now
    usedMin = np_min(t.volAcid[t.solvedWith[solver]])
    usedMax = np_max(t.volAcid[t.solvedWith[solver]])
    # Draw the plot
    ax = _checksetax(ax)
    ax.plot(t.volAcid,
            -log10(t.alkSteps),
            label='Total alk.',
            marker='o',
            markersize=_markersize,
            c='k',
            alpha=_alpha)
    for component, conc in t.alkComponents.items():
        if np_any(conc != 0):
            ax.plot(t.volAcid, -log10(np_abs(conc)), **rgbs[component])
    ax.add_patch(
        patches.Rectangle((usedMin, ax.get_ylim()[1]),
                          usedMax - usedMin,
                          ax.get_ylim()[0] - ax.get_ylim()[1],
                          facecolor=0.9 * ones(3)))
    ax.invert_yaxis()
    ax.legend(bbox_to_anchor=(1.05, 1), edgecolor='k')
    ax.set_xlabel('Acid volume / ml')
    ax.set_ylabel(r'$-$log$_{10}$(concentration from pH / mol$\cdot$kg$^{-1}$)')
    return ax
Example #22
    def statistic(float_set):
        """
        This function calculates standard statistical metrics over a list of floats.
        :param float_set: list of float
        :return: Dictionary of float and list of float
                 Statistic of the set of instances {mean:, median:, min:, max:, sample_variance:, samples:}
        """
        float_set_size = len(float_set)

        # Calculate the sample mean
        factor = 1 / float_set_size
        sample_mean = factor * np_sum(float_set)

        # Calculate the sample standard deviation
        # (the sqrt makes this a standard deviation, despite the variable name)
        factor = 1 / (float_set_size - 1)
        sample_variance = sqrt(factor * np_sum((float_set - sample_mean)**2))

        minimum = np_min(float_set)
        maximum = np_max(float_set)

        median_dist = median(float_set)

        return {
            'mean': sample_mean,
            'median': median_dist,
            'min': minimum,
            'max': maximum,
            'sv': sample_variance,
            'samples': float_set
        }
Example #23
def solve_FEMM(self, output, sym, FEMM_dict):

    # Load parameters for readability
    angle = output.mag.angle
    qs = output.simu.machine.stator.winding.qs  # Winding phase number
    Npcpp = output.simu.machine.stator.winding.Npcpp
    L1 = output.simu.machine.stator.comp_length()
    Nt_tot = output.mag.Nt_tot  # Number of time steps
    Na_tot = output.mag.Na_tot  # Number of angular steps

    # Create the mesh
    femm.mi_createmesh()

    # Initialize results matrix
    Br = zeros((Nt_tot, Na_tot))
    Bt = zeros((Nt_tot, Na_tot))
    Tem = zeros((Nt_tot, 1))
    Phi_wind_stator = zeros((Nt_tot, qs))

    # Compute the data for each time step
    for ii in range(Nt_tot):
        # Update rotor position and currents
        update_FEMM_simulation(
            output,
            FEMM_dict["materials"],
            FEMM_dict["circuits"],
            self.is_mmfs,
            self.is_mmfr,
            j_t0=ii,
        )
        # Run the computation
        femm.mi_analyze()
        femm.mi_loadsolution()
        # Get the flux result
        for jj in range(Na_tot):
            Br[ii, jj], Bt[ii, jj] = femm.mo_getgapb("bc_ag2",
                                                     angle[jj] * 180 / pi)
        # Compute the torque
        Tem[ii] = comp_FEMM_torque(FEMM_dict, sym=sym)
        # Phi_wind computation
        Phi_wind_stator[ii, :] = comp_FEMM_Phi_wind(qs,
                                                    Npcpp,
                                                    is_stator=True,
                                                    Lfemm=FEMM_dict["Lfemm"],
                                                    L1=L1,
                                                    sym=sym)

    # Store the results
    output.mag.Br = Br
    output.mag.Bt = Bt
    output.mag.Tem = Tem
    output.mag.Tem_av = mean(Tem)
    if output.mag.Tem_av != 0:
        output.mag.Tem_rip = abs(
            (np_max(Tem) - np_min(Tem)) / output.mag.Tem_av)
    output.mag.Phi_wind_stator = Phi_wind_stator

    # Electromotive forces computation (update output)
    self.comp_emf()
Example #24
def halfGran(massAcid, emf, tempK, massSample, concAcid, concTotals,
        eqConstants, pHRange=[3., 4.], suppressWarnings=False):
    """Solve for alkalinity and EMF0 using the half-Gran method [H15]."""
    xmu = mu(massAcid, massSample)
    granReps = int(20)
    stepAlk = full(granReps, nan)
    stepEmf0 = full(granReps, nan)
    granHSO4 = zeros(size(emf))
    granHF = zeros(size(emf))
    granG = full((granReps, size(emf)), nan)
    granH = full((granReps, size(emf)), nan)
    granEmf0 = full((granReps, size(emf)), nan)
    granPH = full((granReps, size(emf)), nan)
    converged = False
    for i in range(granReps):
        if i == 0:
            granG[i] = f1(massAcid, emf, tempK, massSample)
            LG = granG[i] > 0.1*np_max(granG[i])
        else:
            LG = logical_and(granPH[i-1] > pHRange[0],
                             granPH[i-1] < pHRange[1])
        stepAlk[i] = granAlkGuess(massAcid[LG], granG[i, LG], massSample,
            concAcid)
        PPC = 5e-3 # permitted % change in AT
        if i > 2:
            if np_abs(stepAlk[i] - stepAlk[i-1])/stepAlk[i] < PPC/100:
                converged = True
                break
        granEmf0[i, LG] = granEmf0Guess(massAcid[LG], emf[LG], tempK[LG],
            massSample, concAcid, stepAlk[i], granHSO4[LG], granHF[LG])
        stepEmf0[i] = nanmean(granEmf0[i])
        granH[i] = emf2h(emf, stepEmf0[i], tempK)
        granPH[i] = -log10(granH[i])
        granBicarb = xmu*concTotals['C']/(granH[i]/eqConstants['C1'] + 1)
        granHSO4 = xmu*concTotals['S']/(1 + eqConstants['S']/granH[i])
        granHF = xmu*concTotals['F']/(1 + eqConstants['F']/granH[i])
        granBorate = xmu*concTotals['B']/(1 + granH[i]/eqConstants['B'])
        granOH = eqConstants['w']/granH[i]
        granPP2 = (xmu*concTotals['P']*(1 -
            eqConstants['P1']*eqConstants['P2']/(granH[i]**2))
            / (1 + eqConstants['P1']/granH[i] +
            eqConstants['P2']*eqConstants['P3']/granH[i]**2 +
            eqConstants['P1']*eqConstants['P2']*eqConstants['P3']/granH[i]**3))
        if i < granReps-1:
            granG[i+1] = (granH[i] + granHSO4 + granHF - granBicarb
                - granOH - granBorate + granPP2)*(massSample + massAcid)
    if converged:
        finalAlk = stepAlk[~isnan(stepAlk)][-1]
        finalEmf0 = stepEmf0[~isnan(stepEmf0)][-1]
    else:
        if not suppressWarnings:
            print('Calkulate: half-Gran plot iterations did not converge!')
        finalAlk = nan
        finalEmf0 = nan
    optResult = {
        'x': [finalAlk, finalEmf0],
        'L': LG,
    }
    return optResult
Example #25
def plot_hsm(ax, result, wavelengths):
    """
    Plotter of a HSM result
    ----------------------
    :param ax: ax object to edit
    :param result: result to plot
    :param wavelengths: wavelengths used to get result. Will become x-axis
    :return: None. Edits ax object
    """
    def lorentzian(params, x):
        """
        Lorentzian formula. Taken from SPectrA
        ----------------
        :param params: Parameters of lorentzian. Need to be four.
        :param x: x-axis. Wavelengths
        :return: array of values for current parameters and wavelengths
        """
        return params[0] + params[1] / ((x - params[2])**2 +
                                        (0.5 * params[3])**2)

    # scatter plot of actual results
    ax.scatter(wavelengths, result['intensity'])
    try:
        # try to fit and plot fit over
        wavelengths_ev = 1240 / linspace(
            np_min(wavelengths), np_max(wavelengths), num=50)
        hsm_fit = lorentzian(result['fit_parameters'], wavelengths_ev)
        ax.plot(1240 / wavelengths_ev, hsm_fit, 'r--')
    except Exception:
        pass
    # set axis to same range every time
    ax.set_xlim(np_min(wavelengths) - 30, np_max(wavelengths) + 30)
    try:
        # add fit info if possible
        text = '\n'.join((r'SPR (nm)=%.1f' % (result['result'][0], ),
                          r'linewidth (meV)=%.1f' % (result['result'][1], ),
                          r'r^2=%.1f' % (result['result'][2], )))
        props = dict(boxstyle='round', facecolor='white', alpha=0.5)
        ax.text(0.05,
                0.95,
                text,
                transform=ax.transAxes,
                verticalalignment='top',
                bbox=props)
    except Exception:
        pass
Example #26
def norm(array):
    """Min-max normalise an array to [0, 1], ignoring NaNs."""

    from numpy import nanmin as np_min
    from numpy import nanmax as np_max

    norm_array = (array - np_min(array)) / (np_max(array) - np_min(array))

    return norm_array
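A quick check of the behaviour (illustrative):

import numpy as np
norm(np.array([2.0, 4.0, 6.0]))  # -> array([0. , 0.5, 1. ])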
Example #27
def log_datakeeper_step_result(simulation, datakeeper_list, index, simu_type):
    """Log the content of the datakeeper for the step index (if index=None, use reference)"""
    if simulation.layer == 2:
        msg = "    "
    else:
        msg = ""
    if simu_type is not None:
        msg += simu_type + " "
    if simulation.index is None:
        msg += "Reference "
    msg += "Results: "
    for datakeeper in datakeeper_list:
        if index is None:
            value = datakeeper.result_ref
        else:
            value = datakeeper.result[index]
        # Format log
        if isinstance(value, ndarray):
            msg += (datakeeper.symbol + "=array(min=" +
                    format(np_min(value), ".4g") + ",max=" +
                    format(np_max(value), ".4g") + ")")
        elif isinstance(value, list):
            msg += (datakeeper.symbol + "=list(min=" +
                    format(np_min(value), ".4g") + ",max=" +
                    format(np_max(value), ".4g") + ")")
        elif isinstance(value, Data) or isinstance(value, VectorField):
            msg += datakeeper.symbol + "=" + type(value).__name__
        elif value is None:
            pass
            # msg += datakeeper.symbol + "=None"
        else:
            msg += datakeeper.symbol + "=" + format(value, ".4g")
        if value is not None:
            if datakeeper.unit is not None:
                msg += " [" + datakeeper.unit + "], "
            else:
                msg += ", "
    msg = msg[:-2]

    # Get logger of the main simulation in parallel mode
    if simulation.logger_name[0:8] == "Parallel":
        log = getLogger(simulation.logger_name[9:])
    else:
        log = simulation.get_logger()
    log.info(msg)
Example #28
def deduplicate_and_interpolate(x, y, **kwargs):
    """
    As interp1d has issues with duplicate x values, collapse each duplicate
    to its maximum y so that interpolation works.
    """
    # assumes: from collections import defaultdict
    #          from scipy.interpolate import interp1d
    dedup_dict = defaultdict(list)
    for x_i, y_i in zip(x, y):
        dedup_dict[x_i].append(y_i)
    fixed_x, fixed_y = zip(*[(x_i, np_max(y_i))
                             for x_i, y_i in dedup_dict.items()])
    return interp1d(fixed_x, fixed_y, **kwargs)
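For example (illustrative values), the duplicate x = 1.0 keeps the larger y before interpolating:

f = deduplicate_and_interpolate([0.0, 1.0, 1.0, 2.0], [0.0, 1.0, 2.0, 4.0])
f(1.5)  # -> 3.0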
Example #29
    def add_curve(self, curve):
        """
        Add a curve to the bounding box.

        :param curve: Curve to add to bounding box.
        :type curve: :class:`.BezierCurve` or :class:`.NurbsCurve`
        """
        cp = curve.cp
        # the control-point box bounds the curve (convex-hull property)
        bmin = np_min(cp, axis=0)
        bmax = np_max(cp, axis=0)
        self.set_bounds(bmin, bmax)
Example #30
def show_entropy(xyz, H):

    H_color = (H - np_min(H)) / (np_max(H) - np_min(H))
    fig = figure()
    ax = Axes3D(fig)
    xyz_b = xyz[where(H_color <= 0.25)[0]]
    xyz_r = xyz[where(H_color > 0.75)[0]]
    print(np_min(H), np_max(H), H[1], H_color[1], H_color, xyz_b.shape,
          xyz_r.shape)
    # xyz_g = xyz[where((H_color>0.25) and (H_color<=0.75))[0]]

    # ax.scatter(xyz[:,0],xyz[:,1],xyz[:,2], c=H_color)
    ax.scatter(xyz_b[:, 0], xyz_b[:, 1], xyz_b[:, 2], color='b')
    # ax.scatter(xyz_g[:,0],xyz_g[:,1],xyz_g[:,2], color='g')
    ax.scatter(xyz_r[:, 0], xyz_r[:, 1], xyz_r[:, 2], color='r')
    ax.set_xticks(arange(-1, 1, 0.2))
    ax.set_yticks(arange(-1, 1, 0.2))
    ax.set_zticks(arange(-1, 1, 0.2))
    grid()
    fig.show()
Example #31
def f1Gran(ax, massAcid, massSample, concAcid, f1Guess, alkGuess, rgb,
           sublabel):
    """F1 Gran plot function for the first alkalinity estimate."""
    ax.axvline(1e3 * alkGuess * massSample / concAcid,
               color=_rgb_guess,
               linestyle='--',
               zorder=1)
    ax.scatter(massAcid * 1e3,
               f1Guess * 1e-7,
               c=rgb,
               edgecolors='k',
               clip_on=False,
               zorder=2)
    ax.set_xlim([0, np_max(massAcid) * 1e3])
    ax.set_ylim([0, np_max(f1Guess * 1.05e-7)])
    ax.set_xlabel('Acid mass / g')
    ax.set_ylabel(r'$F_1 \cdot 10^{-7}$')
    ax.set_title('{} First-guess alkalinity = {:.1f} μmol/kg'.format(
        sublabel, alkGuess * 1e6),
                 fontsize=10)
    return ax
Example #32
def draw_concentric_contours(steps,
                             text,
                             textposition,
                             title,
                             filename,
                             plotly_data=None,
                             steps_mode='lines, text, markers',
                             debug_print=False):
    # relies on module-level x0, ks, ecs, qfc, A, b, c, alpha and beta
    ply_data = []
    if plotly_data is not None:
        ply_data.extend(plotly_data)
    ams = []

    if steps is None and debug_print:
        print("No steps")
    elif debug_print:
        print("{}: steepest-descent: start at x0={}".format(title, x0))
        print("number of steps for Steepest Descent={}".format(steps.shape[1]))

    sef = True
    for k, ec in zip(ks, ecs):
        d, am, _ = qfc.level_k_ellipsoid(A,
                                         b,
                                         c,
                                         alpha,
                                         beta,
                                         k=k,
                                         ellipsoid_color=ec,
                                         show_eigenvectors=sef,
                                         debug_print=debug_print)
        sef = False
        ply_data.extend(d)
        ams.append(am)

    if steps is not None:
        steps_ply_data = Scatter_R2(steps[0],
                                    steps[1],
                                    color='rgb(0,0,140)',
                                    width=1.,
                                    mode=steps_mode,
                                    text=text,
                                    textposition=textposition,
                                    textfontsize=18,
                                    hoverinfo='x+y')
        ply_data.append(steps_ply_data)

    axes_max = np_max(np_abs(ams))
    plot_it_R2(ply_data,
               axes_max,
               title=title,
               filename=filename,
               buffer_scale=1.,
               buffer_fixed=.1)
    return
Example #33
def plot_density(data, smooth=False):
    vmin = 0
    vmax = np_max(data)  # renamed from min/max to avoid shadowing built-ins
    data = np.ravel(data)
    count = np.bincount(data, minlength=vmax + 1)
    nums = np.arange(vmin, vmax + 1, 1)
    if smooth:
        nums_new = np.linspace(vmin, vmax, 1000)
        count_new = spline(nums, count, nums_new)

    fig = plt.figure()
    if smooth:
        plt.plot(nums_new, count_new)
    else:
        plt.plot(nums, count)
    plt.show()
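Note that `spline` here refers to the legacy scipy.interpolate.spline helper, which has been removed from recent SciPy releases; an equivalent call with the current API (assuming cubic interpolation was intended):

from scipy.interpolate import make_interp_spline
count_new = make_interp_spline(nums, count, k=3)(nums_new)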
Example #34
 def makeColourProfile(self):
     """Make a colour profile based on ksig information"""
     working_data = np_array(self.kmerSigs, copy=True) 
     Center(working_data,verbose=0)
     p = PCA(working_data)
     components = p.pc()
     
     # now make the colour profile based on PC1
     self.kmerVals = np_array([float(i) for i in components[:,0]])
     
     # normalise to fit between 0 and 1
     self.kmerVals -= np_min(self.kmerVals)
     self.kmerVals /= np_max(self.kmerVals)
     if(False):
         plt.figure(1)
         plt.subplot(111)
         plt.plot(components[:,0], components[:,1], 'r.')
         plt.show()
Example #35
    label = Base[0 : Base.find(".txt")].replace("_", " ")

    n_Components = len(Components)

    Wmin_vector = zeros(n_Components)
    Wmax_vector = zeros(n_Components)
    for j in range(len(Components)):
        Wavelength = pv.get_ColumnData(
            [0],
            Components_Folder + Components[j],
            HeaderSize=0,
            StringIndexes=False,
            comments_icon="#",
            unpack_check=True,
        )
        Wmin_vector[j], Wmax_vector[j] = np_min(Wavelength), np_max(Wavelength)

    Axis3D.bar3d(log10(Age), metallicity, Wmin_vector, ones(len(Age)), ones(len(metallicity)), Wmax_vector)

#     Axis3D.xaxis.set_scale('log')
#     Axis3D.xaxis.set_xlim(100000, 5e10)

plt.show()
# pv.DataPloter_One(Age, metallicity, label, pv.Color_Vector[2][i+2], LineStyle=None)


# Plot_Title      = 'Stellar bases used in SSP synthesis'
# Plot_ylabel     = 'Metallicity'
# Plot_xlabel     = 'Age ' + r'$(yr)$'
#
# pv.Labels_Legends_One(Plot_Title, Plot_xlabel, Plot_ylabel, LegendLocation = 'best')
Example #36
def mmr_normalization(ilocal,iscale,XTrain,XTest,ipar):
## function to normalize the input and the output data
## !!!! the localization happens before normalization if both are given !!!
## input
##      ilocal centralization   
##                  =-1 no localization
##                  =0 mean
##                  =1 median
##                  =2 geometric median
##                  =3 shift by ipar
##                  =4 smallest enclosing ball
##                  =5 row mean row wise 
##      iscale
##                  =-1 no scaling
##                  =0 scale item wise by L2 norm
##                  =1 scale item wise by L1 norm
##                  =2 scale item wise by L_infty norm
##                  =3 scale items by stereographic projection relative to zero
##                  =4 scale variables by STD(standard deviation)
##                  =5 scale variables by MAD(median absolute deviation)
##                  =6 scale variables by absolute deviation
##                  =7 scale all variables by average STD 
##                  =8 scale all variables by maximum STD 
##                  =9 scale all variables by median MAD 
##                  =10 scale item wise by Minkowski norm, power given by ipar
##                  =11 \sum_i||u-x_i||/m where u=0
##                  =12 scale all variables by overall max
##                  =13 Mahalonobis scaling  
##      XTrain       Data matrix to be normalized. It is assumed the
##                   rows are the sample vectors and the columns are variables
##      XTest        Data matrix to be normalized. It is assumed the
##                   rows are the sample vectors and the columns are
##                   variables.
##                   In the variable-wise case it inherits the center and
##                   the scale from XTrain; otherwise it is normalized
##                   independently
##      ipar         additional parameter
##  output
##      XTrain       Data matrix which is the result of the normalization
##                   of input XTrain. It is assumed the rows are the sample
##                   vectors and the columns are variables
##      XTest        Data matrix which is the result of the normalization
##                   of input XTest. It is assumed the rows are the sample
##                   vectors and the columns are variables.
##      opar         the radius in case of ixnorm=2.
##  
  if XTest is None:
    XTest=array([])
    
  opar=0;
  (mtrain,n)=XTrain.shape
  if len(XTest.shape)>=2:
    mtest=XTest.shape[0]
  elif len(XTest.shape)==1:
    mtest=XTest.shape[0]
    XTest=XTest.reshape((mtest,1))
  else:
    mtest=0
    XTest=array([])

  if ilocal==-1:
    pass
  elif ilocal==0:   ##  mean
    xcenter=mean(XTrain,axis=0)
  elif ilocal==1:   ##  median
    xcenter=median(XTrain,axis=0)
  elif ilocal==2:    ##  geometric median
    xcenter=mmr_geometricmedian(XTrain)[0]
  elif ilocal==3:    ##  shift by ipar
    xcenter=ipar
  elif ilocal==4:   ##  smallest comprising ball
    xalpha=mmr_outerball(0,XTrain)
    xcenter=dot(XTrain.T,xalpha)
  elif ilocal==5:   ## row mean row wise
    xcenter=mean(XTrain,axis=1)

  if ilocal in (0,1,2,3,4):
    XTrain=XTrain-tile(xcenter,(mtrain,1))
    if mtest>0:
      XTest=XTest-tile(xcenter,(mtest,1))
  elif ilocal==5:
    XTrain=XTrain-outer(xcenter,ones(n))
    if mtest>0:
      xcenter=mean(XTest,axis=1)
      XTest=XTest-outer(xcenter,ones(n))

## itemwise normalizations
  if iscale==-1:
    pass
  elif iscale==0:     ## scale items by L2 norm
    xscale_tra=sqrt(np_sum(XTrain**2,axis=1))
    if mtest>0:
      xscale_tes=sqrt(np_sum(XTest**2,axis=1))
  elif iscale==1:     ## scale items by L1 norm
    xscale_tra=np_sum(abs(XTrain),axis=1)
    if mtest>0:
      xscale_tes=np_sum(abs(XTest),axis=1)
  elif iscale==2:     ## scale items by L_infty norm
    xscale_tra=np_max(abs(XTrain),axis=1)
    if mtest>0:
      xscale_tes=np_max(abs(XTest),axis=1)
  elif iscale==10:     ## scale items by Minowski with ipar
    xscale_tra=np_sum(abs(XTrain)**ipar,axis=1)**(1/ipar)
    if mtest>0:
      xscale_tes=np_sum(abs(XTest)**ipar,axis=1)**(1/ipar)

  if iscale in (0,1,2,10):    
    xscale_tra=xscale_tra+(xscale_tra==0)
    XTrain=XTrain/tile(xscale_tra.reshape(mtrain,1),(1,n))
    if mtest>0:
      xscale_tes=xscale_tes+(xscale_tes==0)
      XTest=XTest/tile(xscale_tes.reshape(mtest,1),(1,n))
          
  if iscale==3:   ## scale items by stereographic projection relative to zero
    xnorm2=np_sum(XTrain**2,axis=1)
    R=ipar
    xhom=ones(mtrain)/(xnorm2+R**2)
    xhom2=xnorm2-R**2
    XTrain=concatenate((2*R**2*XTrain*outer(xhom,ones(n)),R*xhom2*xhom), \
                       axis=1)
    if mtest>0:
      xnorm2=np_sum(XTest**2,axis=1)
      xhom=ones(mtest)/(xnorm2+R**2)
      xhom2=xnorm2-R**2
      XTest=concatenate((2*R**2*XTest*outer(xhom,ones(n)),R*xhom2*xhom), \
                        axis=1)

## variable wise normalization relative to zero
## test has to use of the training scale 

  if iscale==-1:
    pass
  elif iscale==4:     ## scale vars by std to zeros center
    xscale=std(XTrain,axis=0)
##    xscale=sqrt(mean(XTrain**2,axis=0)) 
  elif iscale==5:     ## scale vars by mad
    xscale=median(abs(XTrain),axis=0)
  elif iscale==6:     ## scale vars by absolut deviation
    xscale=mean(abs(XTrain),axis=0)

  if iscale in (4,5,6):
    xscale=xscale+(xscale==0)
    XTrain=XTrain/tile(xscale,(mtrain,1))
    if mtest>0:
      XTest=XTest/tile(xscale,(mtest,1))

  if iscale==-1:
    pass
  if iscale==7:     ## scale vars by average std to zero center
##    xscale=mean(std(XTrain,axis=0))
    xscale=mean(sqrt(mean(XTrain**2,axis=0)))
  elif iscale==8:     ## scale vars by max std to zero center
##    xscale=np_max(std(XTrain,axis=0))
    xscale=np_max(sqrt(mean(XTrain**2,axis=0)))
  elif iscale==9:     ## scale vars by median mad
    xscale=median(median(abs(XTrain),axis=0))
  elif iscale==11:    ## \sum_i||u-x_i||/m where u=0
    xscale=mean(sqrt(np_sum(XTrain**2,axis=1)))
  elif iscale==12:    ## scale all variables by the overall max
    xscale=XTrain.max()

##  print(xscale)
  if iscale in (7,8,9,11,12):
    xscale=xscale+(xscale==0)
    XTrain=XTrain/xscale
    if mtest>0:
      XTest=XTest/xscale

  if iscale==13:     ## scale by Mahalonobis
    xsigma=dot(XTrain.T,XTrain) ## covariance
    [w,v]=linalg.eigh(xsigma)
    iw=where(w<=10**(-10))[0]
    w[iw]=0.0
    iw=where(w>0.0)[0]
    w_sqinv=zeros(XTrain.shape[1])
    w_sqinv[iw]=1/sqrt(w[iw])
    XTrain=dot(XTrain,v)*outer(ones(mtrain),w_sqinv)
    if mtest>0:
      XTest=dot(XTest,v)*outer(ones(mtest),w_sqinv)
    
  return(XTrain,XTest,opar)
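For instance (illustrative): centre by the mean (ilocal=0) and scale each sample to unit L2 norm (iscale=0):

XTrain_n, XTest_n, _ = mmr_normalization(0, 0, XTrain, XTest, None)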
Example #37
    def findNewClusterCenters(self, ss=0):
        """Find a putative cluster"""

        inRange = lambda x, l, u: x >= l and x < u

        # we work from the top view as this has the base clustering
        max_index = np_argmax(self.blurredMaps[0])
        max_value = self.blurredMaps[0].ravel()[max_index]

        max_x = int(max_index / self.PM.scaleFactor)
        max_y = max_index - self.PM.scaleFactor * max_x
        max_z = -1

        ret_values = [max_value, max_x, max_y]

        start_span = int(1.5 * self.span)
        span_len = 2 * start_span + 1

        if self.debugPlots:
            self.plotRegion(max_x, max_y, max_z, fileName="Image_" + str(self.imageCounter), tag="column", column=True)
            self.imageCounter += 1

        # make a 3d grid to hold the values
        working_block = np_zeros((span_len, span_len, self.PM.scaleFactor))

        # go through the entire column
        (x_lower, x_upper) = self.makeCoordRanges(max_x, start_span)
        (y_lower, y_upper) = self.makeCoordRanges(max_y, start_span)
        super_putative_row_indices = []
        for p in self.im2RowIndicies:
            if inRange(p[0], x_lower, x_upper) and inRange(p[1], y_lower, y_upper):
                for row_index in self.im2RowIndicies[p]:
                    # check that the point is real and that it has not yet been binned
                    if row_index not in self.PM.binnedRowIndicies and row_index not in self.PM.restrictedRowIndicies:
                        # this is an unassigned point.
                        multiplier = np_log10(self.PM.contigLengths[row_index])
                        self.incrementAboutPoint3D(
                            working_block, p[0] - x_lower, p[1] - y_lower, p[2], multiplier=multiplier
                        )
                        super_putative_row_indices.append(row_index)

        # blur and find the highest value
        bwb = ndi.gaussian_filter(working_block, 8)  # self.blurRadius)
        densest_index = np_unravel_index(np_argmax(bwb), (np_shape(bwb)))
        max_x = densest_index[0] + x_lower
        max_y = densest_index[1] + y_lower
        max_z = densest_index[2]

        # now get the basic color of this dense point
        putative_center_row_indices = []

        (x_lower, x_upper) = self.makeCoordRanges(max_x, self.span)
        (y_lower, y_upper) = self.makeCoordRanges(max_y, self.span)
        (z_lower, z_upper) = self.makeCoordRanges(max_z, 2 * self.span)

        for row_index in super_putative_row_indices:
            p = np_around(self.PM.transformedCP[row_index])
            if inRange(p[0], x_lower, x_upper) and inRange(p[1], y_lower, y_upper) and inRange(p[2], z_lower, z_upper):
                # we are within the range!
                putative_center_row_indices.append(row_index)

        # make sure we have something to go on here
        if np_size(putative_center_row_indices) == 0:
            # it's all over!
            return None

        if np_size(putative_center_row_indices) == 1:
            # get out of here but keep trying
            # the calling function may restrict these indices
            return [[np_array(putative_center_row_indices)], ret_values]
        else:
            total_BP = sum([self.PM.contigLengths[i] for i in putative_center_row_indices])
            if not self.isGoodBin(total_BP, len(putative_center_row_indices), ms=5):  # Can we trust very small bins?
                # get out of here but keep trying
                # the calling function should restrict these indices
                return [[np_array(putative_center_row_indices)], ret_values]
            else:
                # we've got a few good guys here, partition them up!
                # shift these guys around a bit
                center_k_vals = np_array([self.PM.kmerVals[i] for i in putative_center_row_indices])
                k_partitions = self.partitionVals(center_k_vals)

                if len(k_partitions) == 0:
                    return None
                else:
                    center_c_vals = np_array([self.PM.transformedCP[i][-1] for i in putative_center_row_indices])
                    # center_c_vals = np_array([self.PM.averageCoverages[i] for i in putative_center_row_indices])
                    center_c_vals -= np_min(center_c_vals)
                    c_max = np_max(center_c_vals)
                    if c_max != 0:
                        center_c_vals /= c_max
                    c_partitions = self.partitionVals(center_c_vals)

                    # take the intersection of the two partitions
                    tmp_partition_hash_1 = {}
                    id = 1
                    for p in k_partitions:
                        for i in p:
                            tmp_partition_hash_1[i] = id
                        id += 1

                    tmp_partition_hash_2 = {}
                    id = 1
                    for p in c_partitions:
                        for i in p:
                            try:
                                tmp_partition_hash_2[(tmp_partition_hash_1[i], id)].append(i)
                            except KeyError:
                                tmp_partition_hash_2[(tmp_partition_hash_1[i], id)] = [i]
                        id += 1

                    partitions = [
                        np_array([putative_center_row_indices[i] for i in tmp_partition_hash_2[key]])
                        for key in tmp_partition_hash_2.keys()
                    ]

                    # pcs = [[self.PM.averageCoverages[i] for i in p] for p in partitions]
                    # print pcs
                    return [partitions, ret_values]
Example #38
    def _calculate_stats(self, xs, ys):
        ag = self.analysis_group
        ag.attribute = self.options.index_attr
        ag.weighted_age_error_kind = self.options.error_calc_method
        ag.include_j_error_in_mean = self.options.include_j_error_in_mean
        ag.include_j_error_in_individual_analyses = self.options.include_j_error

        mswd, valid_mswd, n = self.analysis_group.get_mswd_tuple()

        if self.options.mean_calculation_kind == 'kernel':
            wm, we = 0, 0
            delta = 1
            maxs, _mins = find_peaks(ys, xs, delta=delta, lookahead=1)
            wm = np_max(maxs, axis=1)[0]
        else:
            wage = self.analysis_group.weighted_age
            wm, we = wage.nominal_value, wage.std_dev

        return wm, we, mswd, valid_mswd

        # def _calc_error(self, we, mswd):
        # ec = self.options.error_calc_method
        # n = self.options.nsigma
        # if ec == 'SEM':
        # a = 1
        # elif ec == 'SEM, but if MSWD>1 use SEM * sqrt(MSWD)':
        # a = 1
        # if mswd > 1:
        # a = mswd ** 0.5
        # return we * a * n

        # ============= EOF =============================================
        # def _add_mean_indicator2(self, g, scatter, bins, probs, pid):
        # offset = 0
        # percentH = 1 - 0.954  # 2sigma
        #
        # maxp = max(probs)
        # wm, we, mswd, valid_mswd = self._calculate_stats(self.xs, self.xes,
        #                                                         bins, probs)
        #        #ym = maxp * percentH + offset
        #        #set ym in screen space
        #        #convert to data space
        #        ogid = self.group_id
        #        gid = ogid + 1
        #        sgid = ogid * 3
        #
        #        ym = maxp * 0.1 * gid
        #
        #        s, p = g.new_series(
        #            [wm], [ym],
        #            type='scatter',
        #                            marker='circle',
        #                            #selection_marker_size=3,
        #                            marker_size=3,
        #                            #selection_marker='circle',
        #                            #selection_color=scatter.color,
        #                            #selection_outline_color=scatter.color,
        #                            color=scatter.color,
        #                            plotid=0
        #        )
        #
        #        g.set_series_label('Mean-{}'.format(gid), series=sgid + 2, plotid=pid)
        #
        #        self._add_error_bars(s, [we], 'x', self.options.nsigma)
        # #         display_mean_indicator = self._get_plot_option(self.options,
        # 'display_mean_indicator', default=True)
        #        if not self.options.display_mean_indicator:
        #            s.visible = False
        #
        #        label = None
        #        #         display_mean = self._get_plot_option(self.options, 'display_mean_text', default=True)
        #        if self.options.display_mean:
        #            text = self._build_label_text(wm, we, mswd, valid_mswd, len(self.xs))
        #            #             font = self._get_plot_option(self.options, 'data_label_font', default='modern 12')
        #            self._add_data_label(s, text, (wm, ym),
        #                                 #                                 font=font
        #            )
        #            # add a tool to move the mean age point
        #        s.tools.append(PointMoveTool(component=s,
        #                                     label=label,
        #                                     constrain='y'))
Example No. 39
0
    def loadData(self,
                 timer,
                 condition,                 # condition as set by another function
                 bids=[],                   # if this is set then only load those contigs with these bin ids
                 verbose=True,              # print detailed progress messages
                 silent=False,              # suppress all output except errors
                 loadCovProfiles=True,
                 loadKmerPCs=True,
                 loadKmerVarPC=True,
                 loadRawKmers=False,
                 makeColors=True,
                 loadContigNames=True,
                 loadContigLengths=True,
                 loadContigGCs=True,
                 loadBins=False,
                 loadLinks=False):
        """Load pre-parsed data"""

        timer.getTimeStamp()
        if(silent):
            verbose=False
        if verbose:
            print("Loading data from:", self.dbFileName)

        try:
            self.numStoits = self.getNumStoits()
            self.condition = condition
            self.indices = self.dataManager.getConditionalIndices(self.dbFileName,
                                                                  condition=condition,
                                                                  silent=silent)
            if(verbose):
                print("    Loaded indices with condition:", condition)
            self.numContigs = len(self.indices)

            if self.numContigs == 0:
                print("    ERROR: No contigs loaded using condition:", condition)
                return

            if(not silent):
                print("    Working with: %d contigs" % self.numContigs)

            if(loadCovProfiles):
                if(verbose):
                    print("    Loading coverage profiles")
                self.covProfiles = self.dataManager.getCoverageProfiles(self.dbFileName, indices=self.indices)
                self.normCoverages = self.dataManager.getNormalisedCoverageProfiles(self.dbFileName, indices=self.indices)

                # work out average coverages
                self.averageCoverages = np_array([sum(i)/self.numStoits for i in self.covProfiles])

            if loadRawKmers:
                if(verbose):
                    print("    Loading RAW kmer sigs")
                self.kmerSigs = self.dataManager.getKmerSigs(self.dbFileName, indices=self.indices)

            if(loadKmerPCs):
                self.kmerPCs = self.dataManager.getKmerPCAs(self.dbFileName, indices=self.indices)

                if(verbose):
                    print("    Loading PCA kmer sigs (%d dimensional space)" % len(self.kmerPCs[0]))

                self.kmerNormPC1 = np_copy(self.kmerPCs[:,0])
                self.kmerNormPC1 -= np_min(self.kmerNormPC1)
                self.kmerNormPC1 /= np_max(self.kmerNormPC1)

            if(loadKmerVarPC):
                self.kmerVarPC = self.dataManager.getKmerVarPC(self.dbFileName, indices=self.indices)

                if(verbose):
                    print("    Loading PCA kmer variance (total variance: %.2f)" % np_sum(self.kmerVarPC))

            if(loadContigNames):
                if(verbose):
                    print("    Loading contig names")
                self.contigNames = self.dataManager.getContigNames(self.dbFileName, indices=self.indices)

            if(loadContigLengths):
                self.contigLengths = self.dataManager.getContigLengths(self.dbFileName, indices=self.indices)
                if(verbose):
                    print("    Loading contig lengths (Total: %d BP)" % sum(self.contigLengths))

            if(loadContigGCs):
                self.contigGCs = self.dataManager.getContigGCs(self.dbFileName, indices=self.indices)
                if(verbose):
                    print("    Loading contig GC ratios (Average GC: %0.3f)" % np_mean(self.contigGCs))

            if(makeColors):
                if(verbose):
                    print("    Creating color map")

                # use HSV to RGB to generate colors
                S = 1       # SAT and VAL remain fixed at 1. Reduce to make
                V = 1       # pastels if that's your preference...
                self.colorMapGC = self.createColorMapHSV()

            if(loadBins):
                if(verbose):
                    print("    Loading bin assignments")

                self.binIds = self.dataManager.getBins(self.dbFileName, indices=self.indices)

                if len(bids) != 0: # need to make sure we're not restricted in terms of bins
                    bin_stats = self.getBinStats()
                    for bid in bids:
                        try:
                            self.validBinIds[bid] = bin_stats[bid][0]
                            self.isLikelyChimeric[bid] = bin_stats[bid][1]
                        except KeyError:
                            self.validBinIds[bid] = 0
                            self.isLikelyChimeric[bid] = False

                else:
                    bin_stats = self.getBinStats()
                    for bid in bin_stats:
                        self.validBinIds[bid] = bin_stats[bid][0]
                        self.isLikelyChimeric[bid] = bin_stats[bid][1]

                # fix the binned indices
                self.binnedRowIndices = {}
                for i in range(len(self.indices)):
                    if(self.binIds[i] != 0):
                        self.binnedRowIndices[i] = True
            else:
                # we need zeros as bin indices then...
                self.binIds = np_zeros(len(self.indices))

            if(loadLinks):
                self.loadLinks()

            self.stoitColNames = self.getStoitColNames()

        except:
            print("Error loading DB:", self.dbFileName, exc_info()[0])
            raise
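The kmerNormPC1 block above is a plain min-max rescaling to [0, 1] so the first principal component can index a colour map. A self-contained sketch of the same operation (the data values are illustrative, not from GroopM):

from numpy import array
from numpy import min as np_min, max as np_max

pc1 = array([-2.0, 0.5, 3.0])   # first PCA component of the kmer signatures
norm = pc1 - np_min(pc1)        # shift so the minimum becomes 0
norm /= np_max(norm)            # scale so the maximum becomes 1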
Example No. 40
0
                color = Grid_Values['zGas'].index(parameter_divider)
                label = r'$Z = {logage}$'.format(logage = round(float(parameter_divider) * 0.02, 3))
                
                x_values, y_values      = Ar_S_model(Line_dict, threshold = 4)
                
                if (x_values is not None) and (y_values is not None):
                
                    dz.data_plot(x_values, y_values, color=dz.ColorVector[2][color], label=label, markerstyle='o')
    
                    x_linealFitting = hstack([x_linealFitting, x_values])
                    y_linealFitting = hstack([y_linealFitting, y_values])

# Linear model
lineal_mod          = LinearModel(prefix='lineal_')
Lineal_parameters   = lineal_mod.guess(y_linealFitting, x=x_linealFitting)
x_lineal            = linspace(np_min(x_linealFitting), np_max(x_linealFitting), 100)
y_lineal            = Lineal_parameters['lineal_slope'].value * x_lineal + Lineal_parameters['lineal_intercept'].value
dz.data_plot(x_lineal, y_lineal, label='Linear fitting', color='black', linestyle='-')

# Plot fitting formula
formula = r"$log\left(Ar^{{+2}}/Ar^{{+3}}\right) = {m} \cdot log\left(S^{{+2}}/S^{{+3}}\right) + {n}$".format(m=round(Lineal_parameters['lineal_slope'].value,3), n=round(Lineal_parameters['lineal_intercept'].value, 3))
# formula2 = r"$m = {m} \pm {merror}; n = {n} \pm {nerror}$".format(m=round(m[0],3), merror=round(m_err[0],3), n=round(n[0],3), nerror=round(n_err[0],3))
dz.Axis.text(0.50, 0.15, formula, transform=dz.Axis.transAxes, fontsize=20) 
# dz.Axis.text(0.50, 0.08, formula2, transform=dz.Axis.transAxes, fontsize=20) 

#Plot wording
xtitle  = r'$log([SIII]/[SIV])$'
ytitle  =  r'$log([ArIII]/[ArIV])$'
title   = 'Argon - Sulfur ionic relation in Cloudy photoionization models'
dz.FigWording(xtitle, ytitle, title, axis_Size = 20.0, title_Size = 20.0, legend_size=20.0, legend_loc='upper left')
# dz.Axis.set_xlim(0,6)
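Note that the example above only calls guess(), which estimates slope and intercept from the data without running an optimisation. A hedged sketch of the fuller lmfit workflow, assuming the same x_linealFitting/y_linealFitting arrays as above:

from numpy import linspace, min as np_min, max as np_max
from lmfit.models import LinearModel

model = LinearModel(prefix='lineal_')
params = model.guess(y_linealFitting, x=x_linealFitting)
result = model.fit(y_linealFitting, params, x=x_linealFitting)  # least-squares fit
x_fit = linspace(np_min(x_linealFitting), np_max(x_linealFitting), 100)
y_fit = result.eval(x=x_fit)    # evaluate the fitted line on a dense grid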
Example No. 41
0
                  
                #Calculate the grid point abundances
                #x_values, y_values         = Ar_S_model(Line_dict, threshold = 4, z = float(Model_dict['zGas']))
                x_values, y_values          = Ar_S_abundances_model(Line_dict, diags, Ar3, Ar4, S3, S4, 3)
                  
                if (x_values is not None) and (y_values is not None):
                  
                    dz.data_plot(x_values, y_values, color=dz.ColorVector[2][color], label=label, markerstyle='o')
     
                    x_linealFitting = hstack([x_linealFitting, x_values])
                    y_linealFitting = hstack([y_linealFitting, y_values])
  
# Linear model
lineal_mod          = LinearModel(prefix='lineal_')
Lineal_parameters   = lineal_mod.guess(y_linealFitting, x=x_linealFitting)
x_lineal            = linspace(0, np_max(x_linealFitting), 100)
y_lineal            = Lineal_parameters['lineal_slope'].value * x_lineal + Lineal_parameters['lineal_intercept'].value
dz.data_plot(x_lineal, y_lineal, label='Linear fitting', color='black', linestyle='-')
  
# Plot fitting formula
formula = r"$log\left(Ar^{{+2}}/Ar^{{+3}}\right) = {m} \cdot log\left(S^{{+2}}/S^{{+3}}\right) + {n}$".format(m=round(Lineal_parameters['lineal_slope'].value,3),
                                                                                                            n=round(Lineal_parameters['lineal_intercept'].value, 3))
dz.Axis.text(0.50, 0.15, formula, transform=dz.Axis.transAxes, fontsize=20) 
  
#Plot wording
xtitle  =   r'$log\left(S^{{+2}}/S^{{+3}}\right)$'
ytitle  =   r'$log\left(Ar^{{+2}}/Ar^{{+3}}\right)$'
title   =   'Argon - Sulfur ionic abundances for several cluster ages and masses'
dz.FigWording(xtitle, ytitle, title, axis_Size = 20.0, title_Size = 20.0, legend_size=20.0, legend_loc='upper left')
  
#Display figure
Example No. 42
0
    def get_value_for_data_only(self, values):
        """
        Return the maximum value
        """
        return np_max(values)
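One caveat worth noting for this pattern: np_max is numpy's max under an alias, and it propagates NaNs, so numpy.nanmax may be the safer choice when the values can contain missing data. A tiny illustration:

from numpy import max as np_max, nanmax as np_nanmax, nan

values = [1.0, 5.0, nan]
np_max(values)     # -> nan (NaN propagates through max)
np_nanmax(values)  # -> 5.0 (NaN is ignored)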