Example #1
    def train(self):
        pop = [self.create_solution() for _ in range(self.pop_size)]
        v_max = 0.5 * (self.ub - self.lb)
        v_min = zeros(self.problem_size)
        v_list = uniform(v_min, v_max, (self.pop_size, self.problem_size))
        pop_local = deepcopy(pop)
        g_best = self.get_global_best_solution(pop=pop,
                                               id_fit=self.ID_FIT,
                                               id_best=self.ID_MIN_PROB)

        N_CLS = int(self.pop_size / 5)  # Number of chaotic local searches
        for epoch in range(self.epoch):
            r = rand()

            list_fits = [item[self.ID_FIT] for item in pop]
            fit_avg = mean(list_fits)
            fit_min = np_min(list_fits)
            for i in range(self.pop_size):
                w = self.__get_weights__(pop[i][self.ID_FIT], fit_avg, fit_min)
                v_new = w * v_list[i] + self.c1 * rand() * (pop_local[i][self.ID_POS] - pop[i][self.ID_POS]) + \
                        self.c2 * rand() * (g_best[self.ID_POS] - pop[i][self.ID_POS])
                x_new = pop[i][self.ID_POS] + v_new
                x_new = self.amend_position_random_faster(x_new)
                fit_new = self.get_fitness_position(x_new)
                pop[i] = [x_new, fit_new]
                # Update current position, current velocity and compare with past position, past fitness (local best)
                if fit_new < pop_local[i][self.ID_FIT]:
                    pop_local[i] = [x_new, fit_new]

            g_best = self.update_global_best_solution(pop, self.ID_MIN_PROB,
                                                      g_best)

            ## Implement chaotic local search for the best solution
            cx_best_0 = (g_best[self.ID_POS] - self.lb) / (self.ub - self.lb)  # Eq. 7
            cx_best_1 = 4 * cx_best_0 * (1 - cx_best_0)  # Eq. 6
            x_best = self.lb + cx_best_1 * (self.ub - self.lb)  # Eq. 8
            fit_best = self.get_fitness_position(x_best)
            if fit_best < g_best[self.ID_FIT]:
                g_best = [x_best, fit_best]

            bound_min = stack(
                [self.lb, g_best[self.ID_POS] - r * (self.ub - self.lb)])
            self.lb = np_max(bound_min, axis=0)
            bound_max = stack(
                [self.ub, g_best[self.ID_POS] + r * (self.ub - self.lb)])
            self.ub = np_min(bound_max, axis=0)

            pop_new_child = [
                self.create_solution() for _ in range(self.pop_size - N_CLS)
            ]
            pop_new = sorted(pop, key=lambda item: item[self.ID_FIT])
            pop = pop_new[:N_CLS] + pop_new_child

            self.loss_train.append(g_best[self.ID_FIT])
            if self.verbose:
                print(">Epoch: {}, Best fit: {}".format(
                    epoch + 1, g_best[self.ID_FIT]))
        self.solution = g_best
        return g_best[self.ID_POS], g_best[self.ID_FIT], self.loss_train
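For reference, the chaotic local search above is a single iteration of the logistic map applied to the best position normalised into [0, 1] (Eqs. 6-8 in the comments). A minimal standalone sketch of those three steps, assuming lb, ub and x_best are NumPy arrays (chaotic_candidate is a hypothetical name, not part of the original class):

import numpy as np

def chaotic_candidate(x_best, lb, ub):
    cx0 = (x_best - lb) / (ub - lb)  # Eq. 7: normalise into [0, 1]
    cx1 = 4 * cx0 * (1 - cx0)        # Eq. 6: logistic map with mu = 4
    return lb + cx1 * (ub - lb)      # Eq. 8: map back into the search space

# the candidate replaces g_best only if its fitness is better
print(chaotic_candidate(np.array([0.5, -0.2]), np.ones(2) * -1, np.ones(2)))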
Example #2
def emfTitration(ax, massAcid, emf, massSample, concAcid, alk_emf0, alkGuess,
                 rgb, sublabel):
    """EMF change as acid is added throughout a titration."""
    ax.axvline(1e3 * alk_emf0['x'][0] * massSample / concAcid,
               color=_rgb_final,
               linestyle='--',
               zorder=1)
    ax.axvline(1e3 * alkGuess * massSample / concAcid,
               color=_rgb_guess,
               linestyle='--',
               zorder=1)
    ax.scatter(massAcid * 1e3,
               emf,
               c=rgb,
               edgecolors='k',
               clip_on=False,
               zorder=2)
    ax.set_xlim([0, np_max(massAcid) * 1e3])
    yrange = np_max(emf) - np_min(emf)
    ax.set_ylim([np_min(emf) - yrange * 0.05, np_max(emf) + yrange * 0.05])
    ax.set_xlabel('Acid mass / g')
    ax.set_ylabel('EMF / mV')
    ax.set_title(r'{} Final EMF$^\circ$ = {:.2f} mV'.format(
        sublabel, alk_emf0['x'][1]),
                 fontsize=10)
    return ax
Example #3
def norm(array):

    from numpy import nanmin as np_min
    from numpy import nanmax as np_max

    norm_array = (array - np_min(array)) / (np_max(array) - np_min(array))

    return norm_array
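Because this version aliases numpy's nanmin/nanmax, NaNs are ignored when computing the range (and stay NaN in the output). A quick usage sketch with the function above in scope:

import numpy as np

a = np.array([1.0, 3.0, np.nan, 5.0])
print(norm(a))  # [0.  0.5 nan 1. ]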
Example #4
def alkComponents(titrationPotentiometric, ax=None):
    t = titrationPotentiometric
    assert 'alkSteps' in vars(t), \
        'You must first run `titrationPotentiometric.get_alkSteps().`'
    # Get the 'used' values
    solver = 'complete'  # only valid option for now
    usedMin = np_min(t.volAcid[t.solvedWith[solver]])
    usedMax = np_max(t.volAcid[t.solvedWith[solver]])
    # Draw the plot
    ax = _checksetax(ax)
    ax.plot(t.volAcid,
            -log10(t.alkSteps),
            label='Total alk.',
            marker='o',
            markersize=_markersize,
            c='k',
            alpha=_alpha)
    for component, conc in t.alkComponents.items():
        if np_any(conc != 0):
            ax.plot(t.volAcid, -log10(np_abs(conc)), **rgbs[component])
    ax.add_patch(
        patches.Rectangle((usedMin, ax.get_ylim()[1]),
                          usedMax - usedMin,
                          ax.get_ylim()[0] - ax.get_ylim()[1],
                          facecolor=0.9 * ones(3)))
    ax.invert_yaxis()
    ax.legend(bbox_to_anchor=(1.05, 1), edgecolor='k')
    ax.set_xlabel('Acid volume / ml')
    ax.set_ylabel(r'$-$log$_{10}$(concentration from pH / mol$\cdot$kg$^{-1}$)')
    return ax
Example #5
    def baseline_dwt(cls, coeff, w):
        """Return the estimation of the data baseline based on DWT

        coeff: DWT coefficients
        w: pywt wavelet object
        """
        return np_min(cls.waverec(coeff, w, len(coeff) // 2))
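cls.waverec here is the class's own reconstruction helper (shown in Example #28 below): it keeps only the coarsest half of the DWT levels before reconstructing, so the baseline estimate is the minimum of a smoothed copy of the signal. A rough standalone equivalent using pywt directly, under that assumption:

import numpy as np
import pywt

def baseline_dwt_sketch(data, wavelet='db4'):
    coeff = pywt.wavedec(data, wavelet)
    # zero out the finer detail levels, keep the coarse approximation
    for ll in range(len(coeff) // 2 + 1, len(coeff)):
        coeff[ll] = np.zeros_like(coeff[ll])
    return np.min(pywt.waverec(coeff, wavelet))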
Example #6
def plot_animated(self, i):
    """Plot the animated deformation of the mode on the parent mesh.

    Parameters
    ----------
    self : Mode
        a Mode object
    i : int
        index of the mode to plot
    """

    radial_shape = self.get_shape_pol()[:, 0]
    shape_xyz = self.get_shape_xyz()
    clim = [np_min(radial_shape), np_max(radial_shape)]
    self.parent.mesh.plot_deformation_animated(
        shape_xyz,
        radial_shape,
        factor=0.05,
        field_name="Radial displacement",
        clim=clim,
    )
Example #7
 def maximum_spread(self,
                    pareto_front=None,
                    reference_front=None):  ## MS function
     """ It addresses the range of objective function values and takes into account the proximity to the true Pareto front"""
     pareto_front, reference_front = self.get_pareto_front_reference_front(
         pareto_front, reference_front)
     n_objs = reference_front.shape[1]
     pf_max = np_max(pareto_front, axis=0)
     pf_min = np_min(pareto_front, axis=0)
     rf_max = np_max(reference_front, axis=0)
     rf_min = np_min(reference_front, axis=0)
     ms = 0
     for i in range(0, n_objs):
         ms += ((min(pf_max[i], rf_max[i]) - max(pf_min[i], rf_min[i])) /
                (rf_max[i] - rf_min[i]))**2
     return sqrt(ms / n_objs)
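Pulled out of the class for clarity: per objective, the metric takes the overlap between the Pareto-front range and the reference-front range, normalised by the reference range. A hypothetical standalone version of the same computation:

import numpy as np

def maximum_spread(pareto_front, reference_front):
    pf, rf = np.asarray(pareto_front), np.asarray(reference_front)
    pf_max, pf_min = pf.max(axis=0), pf.min(axis=0)
    rf_max, rf_min = rf.max(axis=0), rf.min(axis=0)
    ms = np.sum(((np.minimum(pf_max, rf_max) - np.maximum(pf_min, rf_min))
                 / (rf_max - rf_min)) ** 2)
    return np.sqrt(ms / rf.shape[1])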
Example #8
def comp_point_ref(self, is_set=False):
    """Compute the point ref of the Surface

    Parameters
    ----------
    self : SurfLine
        A SurfLine object
    is_set: bool
        True to update the point_ref property

    Returns
    -------
    point_ref : complex
        the reference point of the surface
    """

    middle_array = array([line.get_middle() for line in self.get_lines()])
    point_ref = sum(middle_array) / middle_array.size

    # Use another method if the point is not inside the surface
    if not self.is_inside(Z=point_ref, if_online=False):
        middle_array_abs = abs(middle_array)
        # Find "min abs" middle
        mid_id = argmin(middle_array_abs)
        Zmid = middle_array[mid_id]
        H = np_max(middle_array_abs) - np_min(middle_array_abs)

        point_ref = (abs(Zmid) + H / 100) * exp(1j * angle(Zmid))

    if is_set:
        self.point_ref = point_ref
    return point_ref
Example #9
def get_field(self, axes_list):
    """Returns the values of the field (with symmetries and sums).
    Parameters
    ----------
    self: Data
        a Data object
    axes_list: list
        a list of RequestedAxis objects
    Returns
    -------
    values: ndarray
        values of the field
    """

    values = self.values
    for axis_requested in axes_list:
        # Rebuild symmetries when needed
        axis_symmetries = self.axes[axis_requested.index].symmetries
        if (
            axis_requested.transform == "fft"
            and axis_requested.is_pattern
            or axis_requested.extension in ["sum", "rss", "mean", "rms", "integrate"]
            and axis_requested.is_pattern
        ):
            values = take(values, axis_requested.rebuild_indices, axis_requested.index)
        elif axis_requested.transform == "fft" and "antiperiod" in axis_symmetries:
            nper = axis_symmetries["antiperiod"]
            axis_symmetries["antiperiod"] = 2
            values = rebuild_symmetries(values, axis_requested.index, axis_symmetries)
            axis_symmetries["antiperiod"] = nper
        elif axis_requested.indices is not None:
            if (
                axis_requested.extension in ["sum", "rss", "mean", "rms", "integrate"]
                or max(axis_requested.indices) > values.shape[axis_requested.index]
            ):
                values = rebuild_symmetries(
                    values, axis_requested.index, axis_symmetries
                )
                self.axes[axis_requested.index].symmetries = dict()

        # sum over sum axes
        if axis_requested.extension == "sum":
            values = np_sum(values, axis=axis_requested.index, keepdims=True)
        # root sum square over rss axes
        elif axis_requested.extension == "rss":
            values = sqrt(np_sum(values ** 2, axis=axis_requested.index, keepdims=True))
        # mean value over mean axes
        elif axis_requested.extension == "mean":
            values = np_mean(values, axis=axis_requested.index, keepdims=True)
        # RMS over rms axes
        elif axis_requested.extension == "rms":
            values = sqrt(
                np_mean(values ** 2, axis=axis_requested.index, keepdims=True)
            )
        # integration over integration axes
        elif axis_requested.extension == "integrate":
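            # trapezoidal integral normalised by the axis span, i.e. the
            # average value of the field along that axis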
            values = trapz(
                values, x=axis_requested.values, axis=axis_requested.index
            ) / (np_max(axis_requested.values) - np_min(axis_requested.values))
    return values
Example #10
    def statistic(float_set):
        """
        This function calculates standard statistical metrics over a list of floats.
        :param float_set: list of float
        :return: Dictionary of float and list of float
                 Statistic of the set of instances {mean:, median:, min:, max:, sample_variance:, samples:}
        """
        float_set_size = len(float_set)

        # Calculate the sample mean
        factor = 1 / float_set_size
        sample_mean = factor * np_sum(float_set)

        # Calculate the standard deviation
        factor = 1 / (float_set_size - 1)
        sample_variance = sqrt(factor * np_sum((float_set - sample_mean)**2))

        minimum = np_min(float_set)
        maximum = np_max(float_set)

        median_dist = median(float_set)

        return {
            'mean': sample_mean,
            'median': median_dist,
            'min': minimum,
            'max': maximum,
            'sv': sample_variance,
            'samples': float_set
        }
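A quick call, assuming the np_* names alias the corresponding numpy functions in the same module and float_set is passed as a NumPy array (so the vectorised (float_set - sample_mean)**2 works):

import numpy as np
from numpy import sum as np_sum, min as np_min, max as np_max, median
from math import sqrt

stats = statistic(np.array([1.0, 2.0, 3.0, 4.0]))
print(stats['mean'])  # 2.5
print(stats['sv'])    # ~1.29; despite the key name, this is the sample std dev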
Example #11
def solve_FEMM(self, output, sym, FEMM_dict):

    # Loading parameters for readability
    angle = output.mag.angle
    qs = output.simu.machine.stator.winding.qs  # Winding phase number
    Npcpp = output.simu.machine.stator.winding.Npcpp
    L1 = output.simu.machine.stator.comp_length()
    Nt_tot = output.mag.Nt_tot  # Number of time step
    Na_tot = output.mag.Na_tot  # Number of angular step

    # Create the mesh
    femm.mi_createmesh()

    # Initialize results matrix
    Br = zeros((Nt_tot, Na_tot))
    Bt = zeros((Nt_tot, Na_tot))
    Tem = zeros((Nt_tot, 1))
    Phi_wind_stator = zeros((Nt_tot, qs))

    # Compute the data for each time step
    for ii in range(Nt_tot):
        # Update rotor position and currents
        update_FEMM_simulation(
            output,
            FEMM_dict["materials"],
            FEMM_dict["circuits"],
            self.is_mmfs,
            self.is_mmfr,
            j_t0=ii,
        )
        # Run the computation
        femm.mi_analyze()
        femm.mi_loadsolution()
        # Get the flux result
        for jj in range(Na_tot):
            Br[ii, jj], Bt[ii, jj] = femm.mo_getgapb("bc_ag2",
                                                     angle[jj] * 180 / pi)
        # Compute the torque
        Tem[ii] = comp_FEMM_torque(FEMM_dict, sym=sym)
        # Phi_wind computation
        Phi_wind_stator[ii, :] = comp_FEMM_Phi_wind(qs,
                                                    Npcpp,
                                                    is_stator=True,
                                                    Lfemm=FEMM_dict["Lfemm"],
                                                    L1=L1,
                                                    sym=sym)

    # Store the results
    output.mag.Br = Br
    output.mag.Bt = Bt
    output.mag.Tem = Tem
    output.mag.Tem_av = mean(Tem)
    if output.mag.Tem_av != 0:
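        # relative torque ripple: peak-to-peak torque over the average torque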
        output.mag.Tem_rip = abs(
            (np_max(Tem) - np_min(Tem)) / output.mag.Tem_av)
    output.mag.Phi_wind_stator = Phi_wind_stator

    # Electromotive forces computation (update output)
    self.comp_emf()
Example #12
def plot_hsm(ax, result, wavelengths):
    """
    Plotter of a HSM result
    ----------------------
    :param ax: ax object to edit
    :param result: result to plot
    :param wavelengths: wavelengths used to get result. Will become x-axis
    :return: None. Edits ax object
    """
    def lorentzian(params, x):
        """
        Lorentzian formula. Taken from SPectrA
        ----------------
        :param params: Parameters of lorentzian. Need to be four.
        :param x: x-axis. Wavelengths
        :return: array of values for current parameters and wavelengths
        """
        return params[0] + params[1] / ((x - params[2])**2 +
                                        (0.5 * params[3])**2)

    # scatter plot of actual results
    ax.scatter(wavelengths, result['intensity'])
    try:
        # try to fit and plot fit over
        wavelengths_ev = 1240 / linspace(
            np_min(wavelengths), np_max(wavelengths), num=50)
        hsm_fit = lorentzian(result['fit_parameters'], wavelengths_ev)
        ax.plot(1240 / wavelengths_ev, hsm_fit, 'r--')
    except Exception:
        pass
    # set axis to same range every time
    ax.set_xlim(np_min(wavelengths) - 30, np_max(wavelengths) + 30)
    try:
        # add fit info if possible
        text = '\n'.join((r'SPR (nm)=%.1f' % (result['result'][0], ),
                          r'linewidth (meV)=%.1f' % (result['result'][1], ),
                          r'r^2=%.1f' % (result['result'][2], )))
        props = dict(boxstyle='round', facecolor='white', alpha=0.5)
        ax.text(0.05,
                0.95,
                text,
                transform=ax.transAxes,
                verticalalignment='top',
                bbox=props)
    except Exception:
        pass
Example #13
 def find_niche_reference_point(self, num_mem, rps_pos):
     # find the minimal cluster size
     cluster_members = array(num_mem)[:len(rps_pos)]
     min_size = np_min(cluster_members)
     min_rps = where(cluster_members == min_size)[0]
     if len(min_rps) == 0:
         return 0
     return min_rps[randint(0, len(min_rps) - 1)]
Example #14
def log_datakeeper_step_result(simulation, datakeeper_list, index, simu_type):
    """Log the content of the datakeeper for the step index (if index=None, use reference)"""
    if simulation.layer == 2:
        msg = "    "
    else:
        msg = ""
    if simu_type is not None:
        msg += simu_type + " "
    if simulation.index is None:
        msg += "Reference "
    msg += "Results: "
    for datakeeper in datakeeper_list:
        if index is None:
            value = datakeeper.result_ref
        else:
            value = datakeeper.result[index]
        # Format log
        if isinstance(value, ndarray):
            msg += (datakeeper.symbol + "=array(min=" +
                    format(np_min(value), ".4g") + ",max=" +
                    format(np_max(value), ".4g") + ")")
        elif isinstance(value, list):
            msg += (datakeeper.symbol + "=list(min=" +
                    format(np_min(value), ".4g") + ",max=" +
                    format(np_max(value), ".4g") + ")")
        elif isinstance(value, Data) or isinstance(value, VectorField):
            msg += datakeeper.symbol + "=" + type(value).__name__
        elif value is None:
            pass
            # msg += datakeeper.symbol + "=None"
        else:
            msg += datakeeper.symbol + "=" + format(value, ".4g")
        if value is not None:
            if datakeeper.unit is not None:
                msg += " [" + datakeeper.unit + "], "
            else:
                msg += ", "
    msg = msg[:-2]

    # Get logger of the main simulation in parallel mode
    if simulation.logger_name[0:8] == "Parallel":
        log = getLogger(simulation.logger_name[9:])
    else:
        log = simulation.get_logger()
    log.info(msg)
Example #15
 def select_cluster_member(self, reference_points: list, n_mems: int,
                           rank: list):
     chosen = -1
     if len(reference_points) > 0:
         min_rank = np_min([rank[r[0]] for r in reference_points])
         for rp in reference_points:
             if rank[rp[0]] == min_rank:
                 chosen = rp[0]
     return chosen
Example #16
    def initLastAction(self):
        # ignore if manually set
        if self.app.getLastAction() is not None:
            self.last_action = self.app.getLastAction()
            return

        # if not live
        if not self.app.isLive():
            self.last_action = 'SELL'
            return

        orders = self.account.getOrders(self.app.getMarket(), '', 'done')
        if len(orders) > 0:
            last_order = orders[-1:]

            # if orders exist and last order is a buy
            if str(last_order.action.values[0]) == 'buy':
                self.last_buy_size = float(last_order[last_order.action == 'buy']['size'])
                self.last_buy_filled = float(last_order[last_order.action == 'buy']['filled'])
                self.last_buy_price = float(last_order[last_order.action == 'buy']['price'])

                # binance orders do not show fees
                if self.app.getExchange() == 'coinbasepro':
                    self.last_buy_fee = float(last_order[last_order.action == 'buy']['fees'])

                self.last_action = 'BUY'
                return
            else:
                self.minimumOrderQuote()
                self.last_action = 'SELL'
                self.last_buy_price = 0.0
                return
        else:
            base = float(self.account.getBalance(self.app.getBaseCurrency()))
            quote = float(self.account.getBalance(self.app.getQuoteCurrency()))

            # nil base or quote funds
            if base == 0.0 and quote == 0.0:
                sys.tracebacklimit = 0
                raise Exception(f'Insufficient Funds! ({self.app.getBaseCurrency()}={str(base)}, {self.app.getQuoteCurrency()}={str(quote)})')

            # determine last action by comparing normalised [0,1] base and quote balances 
            order_pairs = np_array([ base, quote ])
            order_pairs_normalised = (order_pairs - np_min(order_pairs)) / np_ptp(order_pairs)

            if order_pairs_normalised[0] < order_pairs_normalised[1]:
                self.minimumOrderQuote()
                self.last_action = 'SELL'
            elif order_pairs_normalised[0] > order_pairs_normalised[1]:
                self.minimumOrderBase()
                self.last_action = 'BUY'

            else:
                self.last_action = 'WAIT'

            return
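The last branch decides the action by min-max normalising the two balances; np_ptp is numpy's peak-to-peak (max minus min), so the larger balance maps to 1.0 and the smaller to 0.0. The trick in isolation:

import numpy as np

pairs = np.array([25.0, 100.0])
print((pairs - np.min(pairs)) / np.ptp(pairs))  # [0. 1.]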
Example #17
def show_entropy(xyz, H):

    H_color = (H - np_min(H)) / (np_max(H) - np_min(H))
    fig = figure()
    ax = Axes3D(fig)
    xyz_b = xyz[where(H_color <= 0.25)[0]]
    xyz_r = xyz[where(H_color > 0.75)[0]]
    print(np_min(H), np_max(H), H[1], H_color[1], H_color, xyz_b.shape,
          xyz_r.shape)
    # xyz_g = xyz[where((H_color>0.25) and (H_color<=0.75))[0]]

    # ax.scatter(xyz[:,0],xyz[:,1],xyz[:,2], c=H_color)
    ax.scatter(xyz_b[:, 0], xyz_b[:, 1], xyz_b[:, 2], color='b')
    # ax.scatter(xyz_g[:,0],xyz_g[:,1],xyz_g[:,2], color='g')
    ax.scatter(xyz_r[:, 0], xyz_r[:, 1], xyz_r[:, 2], color='r')
    ax.set_xticks(arange(-1, 1, 0.2))
    ax.set_yticks(arange(-1, 1, 0.2))
    ax.set_zticks(arange(-1, 1, 0.2))
    grid()
    fig.show()
Example #18
    def add_curve(self, curve):
        """
        Add a curve to the bounding box.

        :param curve: Curve to add to bounding box.
        :type curve: :class:`.BezierCurve` or :class:`.NurbsCurve`
        """
        cp = curve.cp
        bmin = np_min(cp, axis=0)
        bmax = np_max(cp, axis=0)
        self.set_bounds(bmin, bmax)
Example #19
def count_seq(x):

    stop = np_nonzero(np_diff(np_hstack((x, 0))) == -1)
    start = np_nonzero(np_diff(np_hstack((0, x))) == 1)

    ld = {}
    if len(start[0]):
        d = stop[0] - start[0] + 1
        for ll in range(np_max((np_min(d), 2)), np_max(d) + 1):
            ld[str(ll)] = np_sum((d == ll).astype(int))

    return ld
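A concrete trace, assuming the np_* names alias the corresponding numpy functions in the same module:

from numpy import nonzero as np_nonzero, diff as np_diff, hstack as np_hstack
from numpy import max as np_max, min as np_min, sum as np_sum

# x = [0,1,1,0,1,1,1,0] holds one run of 1s of length 2 and one of length 3,
# so the run-length histogram is {'2': 1, '3': 1}
print(count_seq([0, 1, 1, 0, 1, 1, 1, 0]))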
Example #20
def alkEstimates(ax, massAcid, alk0Sim, rgb, alk_emf0, RMS, Npts, sublabel):
    """Original sample alkalinity estimated from each titration point."""
    ax.axhline(alk_emf0['x'][0] * 1e6, color=_rgb_final, zorder=1)
    ax.scatter(massAcid * 1e3,
               alk0Sim * 1e6,
               c=rgb,
               edgecolors='k',
               clip_on=False,
               zorder=2)
    ax.set_xlim([0, np_max(massAcid) * 1e3])
    yrange = (np_max(alk0Sim) - np_min(alk0Sim)) * 1e6
    ax.set_ylim([
        np_min(alk0Sim * 1e6) - yrange * 0.05,
        np_max(alk0Sim * 1e6) + yrange * 0.05
    ])
    ax.set_xlabel('Acid mass / g')
    ax.set_ylabel(r'$A_\mathrm{T}$ from pH / μmol$\cdot$kg$^{-1}$')
    ax.set_title((r'{} Final alkalinity = ({:.1f} $\pm$ {:.1f}) μmol/kg' +
                  ' ($n$ = {})').format(sublabel, alk_emf0['x'][0] * 1e6,
                                        RMS * 1e6, Npts),
                 fontsize=10)
    return ax
Example #21
    def add_surface(self, surface):
        """
        Add a surface to the bounding box.

        :param surface: Surface to add to bounding box.
    :type surface: :class:`.BezierSurface` or :class:`.NurbsSurface`
        """
        d1, d2 = surface.n + 1, surface.m + 1
        cp = surface.cp
        cp = cp.reshape(d1 * d2, -1)
        bmin = np_min(cp, axis=0)
        bmax = np_max(cp, axis=0)
        self.set_bounds(bmin, bmax)
Example #22
    def compute_ideal_points(self, pop, fronts):
        ideal_point = [0] * self.n_objs
        conv_pop = deepcopy(pop)
        for i in range(self.n_objs):
            fit_list = [
                conv_pop[list(conv_pop.keys())[stt]][self.ID_FIT][i]
                for stt in fronts[0]
            ]
            ideal_point[i] = np_min(fit_list)

            for front in fronts:
                for stt in front:
                    conv_pop[list(conv_pop.keys())[stt]][
                        self.ID_FIT][i] -= ideal_point[i]
        return ideal_point, conv_pop
Example #23
def min_max(A):
    """
    Standardise data by scaling data points by the sample minimum and maximum
    such that all data points lie in the range 0 to 1, equivalent to sklearn
    MinMaxScaler.
    """
    assert A.ndim > 1

    n = A.shape[1]
    res = empty_like(A, dtype=np_float64)

    for i in range(n):
        data_i = A[:, i]
        data_min = np_min(data_i)
        res[:, i] = (data_i - data_min) / (np_max(data_i) - data_min)

    return res
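Assuming the numpy aliases used above (np_min, np_max, empty_like, np_float64) are imported in the same module, each column is scaled independently onto [0, 1]:

import numpy as np
from numpy import min as np_min, max as np_max, empty_like, float64 as np_float64

A = np.array([[1.0, 10.0], [2.0, 20.0], [4.0, 40.0]])
print(min_max(A))  # each column spans [0, 1]: [[0, 0], [1/3, 1/3], [1, 1]]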
Example #24
def get_centered_box(frame, size=1):
    """size can be:
    1: small
    2: medium
    3: large"""

    shape = frame.shape
    width = shape[1]
    length = shape[0]
    smallest_dim = np_min([width, length])
    offset = int(((size + 1) / 4.0) * (smallest_dim / 2.0))
    x = (width / 2) - offset
    y = (length / 2) - offset
    w = offset * 2
    h = offset * 2

    return (x, y, w, h)
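For example, a 480x640 frame with size=1 gives smallest_dim=480 and offset=int(0.5 * 240)=120, i.e. a 240x240 box; note that under Python 3 the x and y offsets come out as floats:

import numpy as np
from numpy import min as np_min  # alias assumed by the function above

frame = np.zeros((480, 640, 3))
print(get_centered_box(frame, size=1))  # (200.0, 120.0, 240, 240)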
Example #26
 def makeColourProfile(self):
     """Make a colour profile based on ksig information"""
     working_data = np_array(self.kmerSigs, copy=True) 
     Center(working_data,verbose=0)
     p = PCA(working_data)
     components = p.pc()
     
     # now make the colour profile based on PC1
     self.kmerVals = np_array([float(i) for i in components[:,0]])
     
     # normalise to fit between 0 and 1
     self.kmerVals -= np_min(self.kmerVals)
     self.kmerVals /= np_max(self.kmerVals)
     if(False):
         plt.figure(1)
         plt.subplot(111)
         plt.plot(components[:,0], components[:,1], 'r.')
         plt.show()
Example #27
def bent_up_head(right_eye, left_eye, nose, head_box):
    """Compute the distance between the nose and the eye line."""

    global DOWN_COUNTER
    global UP_COUNTER

    d_eyes = dist.euclidean(right_eye, left_eye)
    d1 = dist.euclidean(right_eye, nose)
    d2 = dist.euclidean(left_eye, nose)

    coeff = d1 + d2

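    # Law of cosines: cosine of the angle at the left eye, between the
    # eye-to-eye line and the left-eye-to-nose segment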
    cosb = np_min(
        (pow(d2, 2) - pow(d1, 2) + pow(d_eyes, 2)) / (2 * d2 * d_eyes))
    bent_up = int(250 * (d2 * sin(acos(cosb)) - coeff / 3.5) / coeff)

    out = analyse_position(bent_up, head_box)

    return out
Example #28
    def waverec(cls, coeff, w, level, offset=False):
        """Return 1D DWT reconstructed data using details up to levels

        coeff: DWT coefficients
        w: pywt wavelet object
        level: DWT details component
        offset: if TRUE, subtract offset from data
        """

        if offset:
            offset = np_min(cls.waverec(coeff, w, len(coeff) // 2))
        else:
            offset = 0

        loc_coeff = coeff[:]
        for ll in range(level + 1, len(coeff)):
            loc_coeff[ll] = np_zeros(coeff[ll].shape)

        return pywt.waverec(loc_coeff, w) - offset
Example #29
def min_max_parallel(A):
    """
    Standardise data by scaling data points by the sample minimum and maximum
    such that all data points lie in the range 0 to 1, equivalent to sklearn
    MinMaxScaler.

    Uses explicit parallel loop; may offer improved performance in some
    cases.
    """
    assert A.ndim > 1

    n = A.shape[1]
    res = empty_like(A, dtype=np_float64)

    for i in prange(n):
        data_i = A[:, i]
        data_min = np_min(data_i)
        res[:, i] = (data_i - data_min) / (np_max(data_i) - data_min)

    return res
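prange is presumably numba's parallel range, which only parallelises inside a function compiled with @njit(parallel=True) (the decorator is not shown in the snippet). A self-contained sketch under that assumption:

import numpy as np
from numba import njit, prange

@njit(parallel=True)
def min_max_numba(A):
    res = np.empty_like(A, dtype=np.float64)
    for i in prange(A.shape[1]):
        col = A[:, i]
        cmin = col.min()
        res[:, i] = (col - cmin) / (col.max() - cmin)
    return res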
Example #30
    def spacing_to_extend(self,
                          pareto_front=None,
                          reference_front=None):  ## STE function
        pareto_front, reference_front = self.get_pareto_front_reference_front(
            pareto_front, reference_front)
        pf_size = len(pareto_front)
        dist_min_list = zeros(pf_size)
        for i in range(pf_size):
            dist_min = min([
                norm(pareto_front[i] - reference_front[j])
                for j in range(pf_size) if i != j
            ])
            dist_min_list[i] = dist_min
        dist_mean = mean(dist_min_list)
        spacing = sum((dist_min_list - dist_mean)**2) / (pf_size - 1)

        f_max = np_max(pareto_front, axis=0)
        f_min = np_min(pareto_front, axis=0)
        extent = sum(abs(f_max - f_min))
        ste = spacing / extent
        return ste
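In the code's notation: spacing is the variance of nearest-neighbour distances, SP = sum_i (d_i - d_mean)^2 / (n - 1); extent is the summed per-objective range of the Pareto front, EX = sum_k |f_max_k - f_min_k|; and the returned value is STE = SP / EX (norm is presumably numpy.linalg.norm).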
Example #31
def plot(self, i):
    """Plot the contour of the mode's radial displacement on the parent mesh.

    Parameters
    ----------
    self : Mode
        a Mode object
    i : int
        index of the mode to plot
    """

    radial_shape = self.get_shape_pol()[:, 0]
    clim = [np_min(radial_shape), np_max(radial_shape)]
    self.parent.mesh.plot_contour(
        radial_shape, field_name="Radial displacement", clim=clim
    )
Example #32
def determine_linear_continua(x, y, indices_list):

    lineal_mod = LinearModel(prefix='lineal_')

    #     x_combine = x[indices_list[0]]
    #     y_combine = y[indices_list[0]]
    x_combine = array([])
    y_combine = array([])

    for i in range(len(indices_list)):

        x_combine = hstack([x_combine, x[indices_list[i]]])
        y_combine = hstack([y_combine, y[indices_list[i]]])

    Lineal_parameters = lineal_mod.guess(y_combine, x=x_combine)

    x_lineal = linspace(np_min(x_combine), np_max(x_combine), 100)
    y_lineal = Lineal_parameters[
        'lineal_slope'].value * x_lineal + Lineal_parameters[
            'lineal_intercept'].value

    return x_lineal, y_lineal, Lineal_parameters
Example #33
    def loadData(self,
                 timer,
                 condition,                 # condition as set by another function
                 bids=[],                   # if this is set then only load those contigs with these bin ids
                 verbose=True,              # many to some output messages
                 silent=False,              # some to no output messages
                 loadCovProfiles=True,
                 loadKmerPCs=True,
                 loadKmerVarPC=True,
                 loadRawKmers=False,
                 makeColors=True,
                 loadContigNames=True,
                 loadContigLengths=True,
                 loadContigGCs=True,
                 loadBins=False,
                 loadLinks=False):
        """Load pre-parsed data"""

        timer.getTimeStamp()
        if(silent):
            verbose=False
        if verbose:
            print "Loading data from:", self.dbFileName

        try:
            self.numStoits = self.getNumStoits()
            self.condition = condition
            self.indices = self.dataManager.getConditionalIndices(self.dbFileName,
                                                                  condition=condition,
                                                                  silent=silent)
            if(verbose):
                print "    Loaded indices with condition:", condition
            self.numContigs = len(self.indices)

            if self.numContigs == 0:
                print "    ERROR: No contigs loaded using condition:", condition
                return

            if(not silent):
                print "    Working with: %d contigs" % self.numContigs

            if(loadCovProfiles):
                if(verbose):
                    print "    Loading coverage profiles"
                self.covProfiles = self.dataManager.getCoverageProfiles(self.dbFileName, indices=self.indices)
                self.normCoverages = self.dataManager.getNormalisedCoverageProfiles(self.dbFileName, indices=self.indices)

                # work out average coverages
                self.averageCoverages = np_array([sum(i)/self.numStoits for i in self.covProfiles])

            if loadRawKmers:
                if(verbose):
                    print "    Loading RAW kmer sigs"
                self.kmerSigs = self.dataManager.getKmerSigs(self.dbFileName, indices=self.indices)

            if(loadKmerPCs):
                self.kmerPCs = self.dataManager.getKmerPCAs(self.dbFileName, indices=self.indices)

                if(verbose):
                    print "    Loading PCA kmer sigs (" + str(len(self.kmerPCs[0])) + " dimensional space)"

                self.kmerNormPC1 = np_copy(self.kmerPCs[:,0])
                self.kmerNormPC1 -= np_min(self.kmerNormPC1)
                self.kmerNormPC1 /= np_max(self.kmerNormPC1)

            if(loadKmerVarPC):
                self.kmerVarPC = self.dataManager.getKmerVarPC(self.dbFileName, indices=self.indices)

                if(verbose):
                    print "    Loading PCA kmer variance (total variance: %.2f" % np_sum(self.kmerVarPC) + ")"

            if(loadContigNames):
                if(verbose):
                    print "    Loading contig names"
                self.contigNames = self.dataManager.getContigNames(self.dbFileName, indices=self.indices)

            if(loadContigLengths):
                self.contigLengths = self.dataManager.getContigLengths(self.dbFileName, indices=self.indices)
                if(verbose):
                    print "    Loading contig lengths (Total: %d BP)" % ( sum(self.contigLengths) )

            if(loadContigGCs):
                self.contigGCs = self.dataManager.getContigGCs(self.dbFileName, indices=self.indices)
                if(verbose):
                    print "    Loading contig GC ratios (Average GC: %0.3f)" % ( np_mean(self.contigGCs) )

            if(makeColors):
                if(verbose):
                    print "    Creating color map"

                # use HSV to RGB to generate colors
                S = 1       # SAT and VAL remain fixed at 1. Reduce to make
                V = 1       # Pastels if that's your preference...
                self.colorMapGC = self.createColorMapHSV()

            if(loadBins):
                if(verbose):
                    print "    Loading bin assignments"

                self.binIds = self.dataManager.getBins(self.dbFileName, indices=self.indices)

                if len(bids) != 0: # need to make sure we're not restricted in terms of bins
                    bin_stats = self.getBinStats()
                    for bid in bids:
                        try:
                            self.validBinIds[bid] = bin_stats[bid][0]
                            self.isLikelyChimeric[bid]= bin_stats[bid][1]
                        except KeyError:
                            self.validBinIds[bid] = 0
                            self.isLikelyChimeric[bid]= False

                else:
                    bin_stats = self.getBinStats()
                    for bid in bin_stats:
                        self.validBinIds[bid] = bin_stats[bid][0]
                        self.isLikelyChimeric[bid] = bin_stats[bid][1]

                # fix the binned indices
                self.binnedRowIndices = {}
                for i in range(len(self.indices)):
                    if(self.binIds[i] != 0):
                        self.binnedRowIndices[i] = True
            else:
                # we need zeros as bin indicies then...
                self.binIds = np_zeros(len(self.indices))

            if(loadLinks):
                self.loadLinks()

            self.stoitColNames = self.getStoitColNames()

        except:
            print "Error loading DB:", self.dbFileName, exc_info()[0]
            raise
Example #34
 def get_value_for_data_only(self, values):
     """
     Return the minimum value
     """
     return np_min(values)
Example #35
    label = Base[0 : Base.find(".txt")].replace("_", " ")

    n_Components = len(Components)

    Wmin_vector = zeros(n_Components)
    Wmax_vector = zeros(n_Components)
    for j in range(len(Components)):
        Wavelength = pv.get_ColumnData(
            [0],
            Components_Folder + Components[j],
            HeaderSize=0,
            StringIndexes=False,
            comments_icon="#",
            unpack_check=True,
        )
        Wmin_vector[j], Wmax_vector[j] = np_min(Wavelength), np_max(Wavelength)

    Axis3D.bar3d(log10(Age), metallicity, Wmin_vector, ones(len(Age)), ones(len(metallicity)), Wmax_vector)

#     Axis3D.xaxis.set_scale('log')
#     Axis3D.xaxis.set_xlim(100000, 5e10)

plt.show()
# pv.DataPloter_One(Age, metallicity, label, pv.Color_Vector[2][i+2], LineStyle=None)


# Plot_Title      = 'Stellar bases used in SSP synthesis'
# Plot_ylabel     = 'Metallicity'
# Plot_xlabel     = 'Age ' + r'$(yr)$'
#
# pv.Labels_Legends_One(Plot_Title, Plot_xlabel, Plot_ylabel, LegendLocation = 'best')
Example #36
def compare_alpha_diversities(rarefaction_lines, mapping_lines, category,
                              depth=None, test_type='nonparametric', num_permutations=999):
    """Compares alpha diversity values for differences per category treatment.

    Notes:
     Returns a dict whose keys are the pairs of treatments being compared
     and whose values are (tval, pval) tuples for each comparison at a
     given iteration.
    Inputs:
     rarefaction_lines - list of lines, result of multiple rarefactions.
     mapping_lines - list of lines, mapping file lines.
     category - str, the category to be compared, eg 'Treatment' or 'Age'.
     depth - int, depth of the rarefaction file to use. if None, then will use
     the deepest available in the file.
     test_type - str, the type of t-test to perform. Must be either
     'parametric' or 'nonparametric'.
     num_permutations - int, the number of Monte Carlo permutations to use if
     test_type is 'nonparametric'.
    """
    if test_type == 'nonparametric' and num_permutations < 1:
        raise ValueError("Invalid number of permutations: %d. Must be greater "
                         "than zero." % num_permutations)

    rarefaction_data = parse_rarefaction(rarefaction_lines)
    mapping_data = parse_mapping_file_to_dict(mapping_lines)[0]
    # samid_pairs, treatment_pairs are in the same order
    samid_pairs, treatment_pairs = sampleId_pairs(mapping_data,
                                                  rarefaction_data, category)

    ps_avg_div = get_per_sample_average_diversities(rarefaction_data, depth)

    ttest_results, ad_avgs = {}, {}
    for sid_pair, treatment_pair in zip(samid_pairs, treatment_pairs):
        # if there is only 1 sample for each treatment in a comparison, the
        # mc method will error (e.g. mc_t_two_sample([1],[1])).
        if len(sid_pair[0]) == 1 and len(sid_pair[1]) == 1:
            ttest_results[treatment_pair] = (None, None)
            # add alpha diversity averages and standard deviations. since there
            # is only a single sample in this part of the loop, we can just
            # record the sample value as the avg and 0 as the std.
            ad_avgs[treatment_pair[0]] = (sid_pair[0][0], 0.)
            ad_avgs[treatment_pair[1]] = (sid_pair[1][0], 0.)
        else:
            i = array([ps_avg_div[x] for x in sid_pair[0]])
            j = array([ps_avg_div[x] for x in sid_pair[1]])
            # add alpha diversity averages and standard deviations.
            ad_avgs[treatment_pair[0]] = (i.mean(), i.std())
            ad_avgs[treatment_pair[1]] = (j.mean(), j.std())
            # conduct tests
            if isnan(np_min(i)) or isnan(np_min(j)):
                ttest_results[treatment_pair] = (None, None)
                continue
            if test_type == 'parametric':
                obs_t, p_val = t_two_sample(i, j)
            elif test_type == 'nonparametric':
                obs_t, _, _, p_val = mc_t_two_sample(i, j,
                                                     permutations=num_permutations)
                if p_val is not None:
                    p_val = float(format_p_value_for_num_iters(p_val,
                                                               num_iters=num_permutations))
                elif p_val is None:  # None will error in format_p_val
                    obs_t, p_val = None, None
            else:
                raise ValueError("Invalid test type '%s'." % test_type)
            ttest_results[treatment_pair] = (obs_t, p_val)

    return ttest_results, ad_avgs
Example #37
    def findNewClusterCenters(self, ss=0):
        """Find a putative cluster"""

        inRange = lambda x, l, u: x >= l and x < u

        # we work from the top view as this has the base clustering
        max_index = np_argmax(self.blurredMaps[0])
        max_value = self.blurredMaps[0].ravel()[max_index]

        max_x = int(max_index / self.PM.scaleFactor)
        max_y = max_index - self.PM.scaleFactor * max_x
        max_z = -1

        ret_values = [max_value, max_x, max_y]

        start_span = int(1.5 * self.span)
        span_len = 2 * start_span + 1

        if self.debugPlots:
            self.plotRegion(max_x, max_y, max_z, fileName="Image_" + str(self.imageCounter), tag="column", column=True)
            self.imageCounter += 1

        # make a 3d grid to hold the values
        working_block = np_zeros((span_len, span_len, self.PM.scaleFactor))

        # go through the entire column
        (x_lower, x_upper) = self.makeCoordRanges(max_x, start_span)
        (y_lower, y_upper) = self.makeCoordRanges(max_y, start_span)
        super_putative_row_indices = []
        for p in self.im2RowIndicies:
            if inRange(p[0], x_lower, x_upper) and inRange(p[1], y_lower, y_upper):
                for row_index in self.im2RowIndicies[p]:
                    # check that the point is real and that it has not yet been binned
                    if row_index not in self.PM.binnedRowIndicies and row_index not in self.PM.restrictedRowIndicies:
                        # this is an unassigned point.
                        multiplier = np_log10(self.PM.contigLengths[row_index])
                        self.incrementAboutPoint3D(
                            working_block, p[0] - x_lower, p[1] - y_lower, p[2], multiplier=multiplier
                        )
                        super_putative_row_indices.append(row_index)

        # blur and find the highest value
        bwb = ndi.gaussian_filter(working_block, 8)  # self.blurRadius)
        densest_index = np_unravel_index(np_argmax(bwb), (np_shape(bwb)))
        max_x = densest_index[0] + x_lower
        max_y = densest_index[1] + y_lower
        max_z = densest_index[2]

        # now get the basic color of this dense point
        putative_center_row_indices = []

        (x_lower, x_upper) = self.makeCoordRanges(max_x, self.span)
        (y_lower, y_upper) = self.makeCoordRanges(max_y, self.span)
        (z_lower, z_upper) = self.makeCoordRanges(max_z, 2 * self.span)

        for row_index in super_putative_row_indices:
            p = np_around(self.PM.transformedCP[row_index])
            if inRange(p[0], x_lower, x_upper) and inRange(p[1], y_lower, y_upper) and inRange(p[2], z_lower, z_upper):
                # we are within the range!
                putative_center_row_indices.append(row_index)

        # make sure we have something to go on here
        if np_size(putative_center_row_indices) == 0:
            # it's all over!
            return None

        if np_size(putative_center_row_indices) == 1:
            # get out of here but keep trying
            # the calling function may restrict these indices
            return [[np_array(putative_center_row_indices)], ret_values]
        else:
            total_BP = sum([self.PM.contigLengths[i] for i in putative_center_row_indices])
            if not self.isGoodBin(total_BP, len(putative_center_row_indices), ms=5):  # Can we trust very small bins?.
                # get out of here but keep trying
                # the calling function should restrict these indices
                return [[np_array(putative_center_row_indices)], ret_values]
            else:
                # we've got a few good guys here, partition them up!
                # shift these guys around a bit
                center_k_vals = np_array([self.PM.kmerVals[i] for i in putative_center_row_indices])
                k_partitions = self.partitionVals(center_k_vals)

                if len(k_partitions) == 0:
                    return None
                else:
                    center_c_vals = np_array([self.PM.transformedCP[i][-1] for i in putative_center_row_indices])
                    # center_c_vals = np_array([self.PM.averageCoverages[i] for i in putative_center_row_indices])
                    center_c_vals -= np_min(center_c_vals)
                    c_max = np_max(center_c_vals)
                    if c_max != 0:
                        center_c_vals /= c_max
                    c_partitions = self.partitionVals(center_c_vals)

                    # take the intersection of the two partitions
                    tmp_partition_hash_1 = {}
                    id = 1
                    for p in k_partitions:
                        for i in p:
                            tmp_partition_hash_1[i] = id
                        id += 1

                    tmp_partition_hash_2 = {}
                    id = 1
                    for p in c_partitions:
                        for i in p:
                            try:
                                tmp_partition_hash_2[(tmp_partition_hash_1[i], id)].append(i)
                            except KeyError:
                                tmp_partition_hash_2[(tmp_partition_hash_1[i], id)] = [i]
                        id += 1

                    partitions = [
                        np_array([putative_center_row_indices[i] for i in tmp_partition_hash_2[key]])
                        for key in tmp_partition_hash_2.keys()
                    ]

                    # pcs = [[self.PM.averageCoverages[i] for i in p] for p in partitions]
                    # print pcs
                    return [partitions, ret_values]
Example #38
def compare_alpha_diversities(rarefaction_lines, mapping_lines, category,
    depth=None, test_type='nonparametric', num_permutations=999):
    """Compares alpha diversity values for differences per category treatment.
    Notes:
     Returns a dict whose keys are the pairs of treatments being compared
     and whose values are (tval, pval) tuples for each comparison at a
     given iteration.
    Inputs:
     rarefaction_lines - list of lines, result of multiple rarefactions.
     mapping_lines - list of lines, mapping file lines. 
     category - str, the category to be compared, eg 'Treatment' or 'Age'.
     depth - int, depth of the rarefaction file to use. if None, then will use 
     the deepest available in the file. 
     test_type - str, the type of t-test to perform. Must be either
     'parametric' or 'nonparametric'.
     num_permutations - int, the number of Monte Carlo permutations to use if
     test_type is 'nonparametric'.    
    """
    if test_type == 'nonparametric' and num_permutations < 1:
        raise ValueError("Invalid number of permutations: %d. Must be greater "
                         "than zero." % num_permutations)
    
    rarefaction_data = parse_rarefaction(rarefaction_lines)
    mapping_data = parse_mapping_file_to_dict(mapping_lines)[0]
    # samid_pairs, treatment_pairs are in the same order
    samid_pairs, treatment_pairs = sampleId_pairs(mapping_data, 
        rarefaction_data, category)
    
    # extract only rows of the rarefaction data that are at the given depth
    # if depth is not given default to the deepest rarefaction available
    # rarefaction file is not guaranteed to be in order of rarefaction depth
    if depth is None:
        depth = array(rarefaction_data[3])[:,0].max()

    rare_mat = array([row for row in rarefaction_data[3] if row[0]==depth])
    
    # Average each col of the rarefaction mtx. Computing the t test on averages
    # over all iterations avoids extra comparisons, which would kill significance.
    rare_mat = (rare_mat.sum(0)/rare_mat.shape[0])[2:] #remove depth,iter cols
    sids = rarefaction_data[0][3:] # 0-2 are header strings
    
    ttest_results = {}
    for sid_pair, treatment_pair in zip(samid_pairs, treatment_pairs):
        # if there is only 1 sample for each treatment in a comparison, the
        # mc method will error (e.g. mc_t_two_sample([1],[1])).
        if len(sid_pair[0])==1 and len(sid_pair[1])==1:
            ttest_results[treatment_pair]= (None,None)
        else:
            pair0_indices = [sids.index(i) for i in sid_pair[0]]
            pair1_indices = [sids.index(i) for i in sid_pair[1]]
            i = rare_mat.take(pair0_indices)
            j = rare_mat.take(pair1_indices)
            # found discussion of how to quickly check an array for nan here:
            # http://stackoverflow.com/questions/6736590/fast-check-for-nan-in-numpy
            if isnan(np_min(i)) or isnan(np_min(j)):
                ttest_results[treatment_pair]= (None,None)
                continue
            if test_type == 'parametric':
                obs_t, p_val = t_two_sample(i,j)
            elif test_type == 'nonparametric':
                obs_t, _, _, p_val = mc_t_two_sample(i,j, 
                    permutations=num_permutations)
                if p_val is not None:
                    p_val = float(format_p_value_for_num_iters(p_val,
                        num_iters=num_permutations))
                else:  # None would error in format_p_val
                    obs_t, p_val = None, None
            else:
                raise ValueError("Invalid test type '%s'." % test_type)
            ttest_results[treatment_pair]= (obs_t,p_val)
    # create dict of average alpha diversity values
    alphadiv_avgs = {}
    for sid_pair, treatment_pair in zip(samid_pairs, treatment_pairs):
        # calculate the alpha diversity average and std vals. choosing only the
        # first treatment pair doesn't guarantee full coverage, so look at both
        for sid_list, treatment_str in zip(sid_pair, treatment_pair):
            # check if already computed and added
            if not treatment_str in alphadiv_avgs.keys():
                alphadiv_vals = \
                    rare_mat.take([sids.index(i) for i in sid_list])
                ad_mean = alphadiv_vals.mean()
                ad_std = alphadiv_vals.std()
                alphadiv_avgs[treatment_str] = (ad_mean, ad_std) 
    return ttest_results, alphadiv_avgs
Example #39
                color = Grid_Values['zGas'].index(parameter_divider)
                label = r'$Z = {logage}$'.format(logage = round(float(parameter_divider) * 0.02, 3))
                
                x_values, y_values      = Ar_S_model(Line_dict, threshold = 4)
                
                if (x_values is not None) and (y_values is not None):
                
                    dz.data_plot(x_values, y_values, color=dz.ColorVector[2][color], label=label, markerstyle='o')
    
                    x_linealFitting = hstack([x_linealFitting, x_values])
                    y_linealFitting = hstack([y_linealFitting, y_values])

#Lineal model
lineal_mod          = LinearModel(prefix='lineal_')
Lineal_parameters   = lineal_mod.guess(y_linealFitting, x=x_linealFitting)
x_lineal            = linspace(np_min(x_linealFitting), np_max(x_linealFitting), 100)
y_lineal            = Lineal_parameters['lineal_slope'].value * x_lineal + Lineal_parameters['lineal_intercept'].value
dz.data_plot(x_lineal, y_lineal, label='Lineal fitting', color = 'black', linestyle='-')

# #Plot fitting formula
formula = r"$log\left(Ar^{{+2}}/Ar^{{+3}}\right) = {m} \cdot log\left(S^{{+2}}/S^{{+3}}\right) + {n}$".format(m=round(Lineal_parameters['lineal_slope'].value,3), n=round(Lineal_parameters['lineal_intercept'].value, 3))
# formula2 = r"$m = {m} \pm {merror}; n = {n} \pm {nerror}$".format(m=round(m[0],3), merror=round(m_err[0],3), n=round(n[0],3), nerror=round(n_err[0],3))
dz.Axis.text(0.50, 0.15, formula, transform=dz.Axis.transAxes, fontsize=20) 
# dz.Axis.text(0.50, 0.08, formula2, transform=dz.Axis.transAxes, fontsize=20) 

#Plot wording
xtitle  = r'$log([SIII]/[SIV])$'
ytitle  =  r'$log([ArIII]/[ArIV])$'
title   = 'Argon - Sulfur ionic relation in Cloudy photoionization models'
dz.FigWording(xtitle, ytitle, title, axis_Size = 20.0, title_Size = 20.0, legend_size=20.0, legend_loc='upper left')
# dz.Axis.set_xlim(0,6)
Example #40
def compare_alpha_diversities(rarefaction_lines, mapping_lines, category, depth,
    test_type='nonparametric', num_permutations=999):
    """Compares alpha diversity values for differences per category treatment.
    Notes:
     Returns a dict whose keys are the pairs of treatments being compared
     and whose values are (tval, pval) tuples for each comparison at a
     given iteration.
    Inputs:
     rarefaction_lines - list of lines, result of multiple rarefactions.
     mapping_lines - list of lines, mapping file lines. 
     category - str, the category to be compared, eg 'Treatment' or 'Age'.
     depth - int, depth of the rarefaction file to use.
     test_type - str, the type of t-test to perform. Must be either
     'parametric' or 'nonparametric'.
     num_permutations - int, the number of Monte Carlo permutations to use if
     test_type is 'nonparametric'.    
    """
    if test_type == 'nonparametric' and num_permutations < 1:
        raise ValueError("Invalid number of permutations: %d. Must be greater "
                         "than zero." % num_permutations)
     
    rarefaction_data = parse_rarefaction(rarefaction_lines)
    mapping_data = parse_mapping_file_to_dict(mapping_lines)[0]
    # samid_pairs, treatment_pairs are in the same order
    samid_pairs, treatment_pairs = sampleId_pairs(mapping_data, 
        rarefaction_data, category)
    
    # extract only rows of the rarefaction data that are at the given depth
    rare_mat = array([row for row in rarefaction_data[3] if row[0]==depth])
    
    # Average each col of the rarefaction mtx. Computing the t test on averages
    # over all iterations avoids extra comparisons, which would kill significance.
    rare_mat = (rare_mat.sum(0)/rare_mat.shape[0])[2:] #remove depth,iter cols
    sids = rarefaction_data[0][3:] # 0-2 are header strings
    results = {}
    for sid_pair, treatment_pair in zip(samid_pairs, treatment_pairs):
        # if there is only 1 sample for each treatment in a comparison, the
        # mc method will error (e.g. mc_t_two_sample([1],[1])).
        if len(sid_pair[0])==1 and len(sid_pair[1])==1:
            t_key = '%s,%s' % (treatment_pair[0], treatment_pair[1])
            results[t_key]= (None,None)
        else:
            pair0_indices = [sids.index(i) for i in sid_pair[0]]
            pair1_indices = [sids.index(i) for i in sid_pair[1]]
            t_key = '%s,%s' % (treatment_pair[0], treatment_pair[1])
            i = rare_mat.take(pair0_indices)
            j = rare_mat.take(pair1_indices)
            # found discussion of how to quickly check an array for nan here:
            # http://stackoverflow.com/questions/6736590/fast-check-for-nan-in-numpy
            if isnan(np_min(i)) or isnan(np_min(j)):
                results[t_key]= (None,None)
                continue
            if test_type == 'parametric':
                obs_t, p_val = t_two_sample(i,j)
            elif test_type == 'nonparametric':
                obs_t, _, _, p_val = mc_t_two_sample(i,j, 
                    permutations=num_permutations)
                if p_val is not None:
                    p_val = float(format_p_value_for_num_iters(p_val,
                        num_iters=num_permutations))
                else:  # None would error in format_p_val
                    obs_t, p_val = None, None
            else:
                raise ValueError("Invalid test type '%s'." % test_type)
            results[t_key]= (obs_t,p_val)
    return results