Example #1
def check_prov_sizes(province_map, province_output):
    print("Checking for provinces that are suspiciously small or large ...")

    unique_cols_arr = numpy.unique(province_map.reshape(-1, province_map.shape[2]), axis = 0)

    undetermined_mask = logical_and.reduce(province_map == undetermined_col, axis = -1)
    global undetermined_origins
    undetermined_province_masks, undetermined_origins, useless_mask = get_provinces(undetermined_mask, 0, 2)
    
    for u in range(len(unique_cols_arr)):
        unique_col = unique_cols_arr[u]
        #Ignore black and white.
        if (unique_col == (0, 0, 0)).all() or (unique_col == (255, 255, 255)).all():
            continue

        unique_col_coords = numpy.where(logical_and.reduce(province_map == unique_col, axis = -1))
        unique_col_origin = unique_col_coords[0][0], unique_col_coords[1][0]
        
        unique_col_mask = logical_and.reduce(province_map == unique_col, axis = -1)

        if numpy.count_nonzero(unique_col_mask) <= small_province_pixel_count:
            small_provinces.append(unique_col_origin)

        x_min, y_min, x_max, y_max = find_bounds(unique_col_mask)
        
        if x_max - x_min > large_province_bounds or y_max - y_min > large_province_bounds:
            fragments_masks, fragment_origins, undetermined_mask = get_provinces(unique_col_mask, 0, 2)

            if len(fragment_origins) > 1:
                spread_out_provinces[tuple(unique_col)] = [len(fragment_origins), x_max - x_min, y_max - y_min, unique_col_origin]
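As a standalone illustration of the colour-enumeration step above, the sketch below builds the same kind of per-colour mask on a tiny hand-made RGB array; the image and names are made up, not taken from the project.

import numpy
from numpy import logical_and

# A 2x3 RGB image holding two "province" colours plus black and white.
img = numpy.array([[[255, 0, 0], [255, 0, 0], [0, 0, 255]],
                   [[0, 0, 0], [255, 255, 255], [0, 0, 255]]], dtype=numpy.uint8)

# One row per distinct colour found in the image.
unique_cols_arr = numpy.unique(img.reshape(-1, img.shape[2]), axis=0)

for unique_col in unique_cols_arr:
    # Ignore black and white, as the function above does.
    if (unique_col == (0, 0, 0)).all() or (unique_col == (255, 255, 255)).all():
        continue
    # True wherever all three channels match this colour.
    unique_col_mask = logical_and.reduce(img == unique_col, axis=-1)
    print(unique_col, numpy.count_nonzero(unique_col_mask))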
Example #2
def display_triangle(im, tr, col):
    # Draw the three edges of triangle tr (three (row, col) vertices) onto image im
    # with value col, clipping each edge to the image bounds.
    rows, cols = im.shape
    rr, cc = line(*map(int, tr[0]), *map(int, tr[1]))
    ind = logical_and.reduce((rr >= 0, rr < rows, cc >= 0, cc < cols))
    rr, cc = rr[ind], cc[ind]
    im[rr, cc] = col
    rr, cc = line(*map(int, tr[1]), *map(int, tr[2]))
    ind = logical_and.reduce((rr >= 0, rr < rows, cc >= 0, cc < cols))
    rr, cc = rr[ind], cc[ind]
    im[rr, cc] = col
    rr, cc = line(*map(int, tr[2]), *map(int, tr[0]))
    ind = logical_and.reduce((rr >= 0, rr < rows, cc >= 0, cc < cols))
    rr, cc = rr[ind], cc[ind]
    im[rr, cc] = col
Example #3
def get_state_mask(state_guide, state_color):
    # Get a mask of all pixels of the border key color.
    # Running state_guide == state_color compares each channel separately, but we want to know where all three channels of a pixel match.
    # logical_and.reduce along the last axis combines the per-channel comparisons into a single boolean per pixel.
    border_mask = logical_and.reduce(state_guide == state_color, axis=-1)

    x_min, y_min, x_max, y_max = find_bounds(border_mask)
    guide_view = state_guide[y_min:y_max + 1, x_min:x_max + 1]

    # Crop the border_mask too.
    border_mask = border_mask[y_min:y_max + 1, x_min:x_max + 1]

    # Create a copy of the border mask with a new layer of false pixels all around the edge.
    state_mask = border_mask.copy()
    state_mask = numpy.insert(state_mask, 0, False, axis=0)
    state_mask = numpy.insert(state_mask, 0, False, axis=1)
    state_mask = numpy.insert(state_mask, state_mask.shape[0], False, axis=0)
    state_mask = numpy.insert(state_mask, state_mask.shape[1], False, axis=1)

    # Get a mask of all pixels outside the state's borders, by performing a flood from the top-left pixel on the expanded mask we've made.
    # Then invert the mask. All true values on the mask signify a point on or within the state's borders.
    state_mask = ~flood(state_mask, (0, 0), connectivity=1)

    # Crop the state_mask back to the bounds of the relevant pixels.
    state_mask = numpy.delete(state_mask, state_mask.shape[0] - 1, 0)
    state_mask = numpy.delete(state_mask, state_mask.shape[1] - 1, 1)
    state_mask = numpy.delete(state_mask, 0, 0)
    state_mask = numpy.delete(state_mask, 0, 1)

    state_mask = state_mask & logical_or.reduce(guide_view != ignore_col,
                                                axis=-1)

    return state_mask, border_mask, x_min, y_min, x_max, y_max
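The pad-then-flood trick used above can be demonstrated on a toy mask. This sketch assumes flood is skimage.segmentation.flood (as the function appears to use), and swaps the numpy.insert/delete padding for numpy.pad and slicing, which behave the same way here.

import numpy
from skimage.segmentation import flood

# A closed square outline of border pixels.
border_mask = numpy.zeros((5, 5), dtype=bool)
border_mask[1, 1:4] = border_mask[3, 1:4] = True
border_mask[1:4, 1] = border_mask[1:4, 3] = True

# Surround the mask with a ring of False so the outside is one connected region.
padded = numpy.pad(border_mask, 1, constant_values=False)

# Flooding from the corner marks everything outside the outline;
# inverting leaves the outline plus its interior.
state_mask = ~flood(padded, (0, 0), connectivity=1)

# Crop the padding back off.
state_mask = state_mask[1:-1, 1:-1]
print(state_mask.astype(int))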
Example #4
 def __call__(self, x):
     # TODO: check performance of the logical_and.reduce implementation (with list materialization)
     if not self.props:
         return ones(
             len(x), dtype='bool'
         )  # TODO: check if this is correct handling for scalar x
     return logical_and.reduce([p(x) for p in self.props])
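On the TODO above: one alternative that avoids materialising the list is to fold the predicate results pairwise. A minimal sketch (whether it is actually faster would need measuring), with the scalar-x question left open just as in the original.

from functools import reduce
from numpy import logical_and, ones, array

def all_props(props, x):
    # AND together the boolean predicate results without building a list first.
    if not props:
        return ones(len(x), dtype='bool')  # scalar x still unhandled, as in the TODO
    return reduce(logical_and, (p(x) for p in props))

vals = array([1, 5, 12, 7])
print(all_props([lambda v: v > 2, lambda v: v < 10], vals))  # [False  True False  True]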
Example #5
def concentrations(Sal, WhichKs, WhoseTB):
    """Estimate total concentrations of borate, fluoride and sulfate from
    salinity.

    Inputs must first be conditioned with inputs().

    Based on a subset of Constants, version 04.01, 10-13-97, by Ernie Lewis.
    """
    # Generate empty vectors for holding results
    TB = full_like(Sal, nan)
    TF = full_like(Sal, nan)
    TS = full_like(Sal, nan)
    # Calculate total borate
    F = WhichKs==8
    if any(F): # Pure water
        TB[F] = 0.0
    F = logical_or(WhichKs==6, WhichKs==7)
    if any(F):
        TB[F] = conc.borate_C65(Sal[F])
    F = logical_and.reduce((WhichKs!=6, WhichKs!=7, WhichKs!=8))
    if any(F): # All other cases
        FF = logical_and(F, WhoseTB==1)
        if any(FF): # If user opted for Uppstrom's values
            TB[FF] = conc.borate_U74(Sal[FF])
        FF = logical_and(F, WhoseTB==2)
        if any(FF): # If user opted for the new Lee values
            TB[FF] = conc.borate_LKB10(Sal[FF])
    # Calculate total fluoride and sulfate
    TF = conc.fluoride_R65(Sal)
    TS = conc.sulfate_MR66(Sal)
    return TB, TF, TS
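The boolean-mask dispatch used here (and again in equilibria() below) is easy to see in isolation; a toy sketch with made-up option codes and placeholder formulas:

from numpy import array, full_like, nan, logical_and, logical_or

WhichKs = array([6, 7, 8, 10, 1])
Sal = array([35.0, 34.0, 0.0, 36.0, 33.0])
TB = full_like(Sal, nan)

TB[WhichKs == 8] = 0.0                              # pure water case
TB[logical_or(WhichKs == 6, WhichKs == 7)] = 1.0    # placeholder value
F = logical_and.reduce((WhichKs != 6, WhichKs != 7, WhichKs != 8))
TB[F] = 2.0 * Sal[F]                                # placeholder formula for "all other cases"
print(TB)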
Example #6
def _hinv(A00, A01, A11):
    from numpy_sugar import is_all_finite

    rcond = 1e-15
    b = atleast_1d(A01)
    d = atleast_1d(A11)
    a = full_like(d, A00)
    m = maximum(maximum(npy_abs(b), npy_abs(d)), abs(a))

    a /= m
    b = b / m
    c = b
    d = d / m

    bc = b * c
    ad = a * d
    with errstate(invalid="ignore", divide="ignore"):
        ai = a / (a * a - nan_to_num((bc * a) / d))
        bi = b / (b * b - nan_to_num(ad))
        di = d / (d * d - nan_to_num((bc * d) / a))

    ai /= m
    bi /= m
    di /= m

    ok = is_all_finite(ai) and is_all_finite(bi) and is_all_finite(di)
    if not ok:
        ok = logical_and.reduce([isfinite(ai), isfinite(bi), isfinite(di)])
        nok = logical_not(ok)
        U, S, VT = hsvd(a[nok], b[nok], d[nok])

        maxi = maximum(npy_abs(S[0]), npy_abs(S[1]))
        cutoff = rcond * maxi

        large = S[0] > cutoff
        S[0] = divide(1, S[0], where=large, out=S[0])
        S[0][~large] = 0

        large = S[1] > cutoff
        S[1] = divide(1, S[1], where=large, out=S[1])
        S[1][~large] = 0

        SiVT = [[VT[0][0] * S[0], VT[0][1] * S[0]],
                [VT[1][0] * S[1], VT[1][1] * S[1]]]
        Ai = [
            [
                U[0][0] * SiVT[0][0] + U[0][1] * SiVT[1][0],
                U[0][0] * SiVT[0][1] + U[0][1] * SiVT[1][1],
            ],
            [
                U[1][0] * SiVT[0][0] + U[1][1] * SiVT[1][0],
                U[1][0] * SiVT[0][1] + U[1][1] * SiVT[1][1],
            ],
        ]
        ai[nok] = Ai[0][0] / m
        bi[nok] = Ai[0][1] / m
        di[nok] = Ai[1][1] / m

    return ai, bi, di
Example #7
    def test_logical_ops(self):
        from numpy import logical_and, logical_or, logical_xor, logical_not

        assert (logical_and([True, False, True, True], [1, 1, 3, 0]) == [True, False, True, False]).all()
        assert (logical_or([True, False, True, False], [1, 2, 0, 0]) == [True, True, True, False]).all()
        assert (logical_xor([True, False, True, False], [1, 2, 0, 0]) == [False, True, True, False]).all()
        assert (logical_not([True, False]) == [False, True]).all()
        assert logical_and.reduce([1.0, 1.0]) == True
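A related property that the test above does not cover: reducing an empty sequence yields the identity element of the operation, so logical_and.reduce gives True and logical_or.reduce gives False. A small sketch:

from numpy import logical_and, logical_or

assert logical_and.reduce([]) == True    # identity of AND
assert logical_or.reduce([]) == False    # identity of OR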
Example #8
def triangle_hough(edges, sizes):
    nb_lignes, nb_colonnes = edges.shape
    accumulator = zeros((nb_lignes, nb_colonnes, len(sizes) - 1))
    lx, ly = nonzero(edges)
    for i in range(nb_lignes):
        for j in range(nb_colonnes):
            x1, y1 = ly - j, lx - i  # edge pixel coordinates relative to the candidate point (i, j)
            x2, y2 = -x1 / 2 - y1 * sqrt(3) / 2, x1 * sqrt(3) / 2 - y1 / 2    # same points rotated by +120 degrees
            x3, y3 = -x1 / 2 + y1 * sqrt(3) / 2, -x1 * sqrt(3) / 2 - y1 / 2   # same points rotated by -120 degrees
            for s in range(len(sizes) - 1):
                a1 = logical_and(sizes[s] <= y1, y1 < sizes[s + 1])
                a2 = logical_and(sizes[s] <= y2, y2 < sizes[s + 1])
                a3 = logical_and(sizes[s] <= y3, y3 < sizes[s + 1])
                b1 = y1 < max(sizes[s], sizes[s + 1])
                b2 = y2 < max(sizes[s], sizes[s + 1])
                b3 = y3 < max(sizes[s], sizes[s + 1])
                accumulator[i, j, s] += sum(logical_and.reduce((a1, b2, b3)))
                accumulator[i, j, s] += sum(logical_and.reduce((a2, b3, b1)))
                accumulator[i, j, s] += sum(logical_and.reduce((a3, b1, b2)))
    return accumulator
Example #9
def triangle_hough_bis(edges, sizes):
    nb_lignes, nb_colonnes = edges.shape
    accumulator = zeros((nb_lignes, nb_colonnes, len(sizes)))
    for i in range(nb_lignes):
        print(str(i + 1) + " / " + str(nb_lignes))
        for j in range(nb_colonnes):
            for s in range(len(sizes)):
                tr = triangle_sommets(i, j, sizes[s])
                rr, cc = line(*map(int, tr[0]), *map(int, tr[1]))
                ind = logical_and.reduce((rr >= 0, rr < nb_lignes, cc >= 0, cc < nb_colonnes))
                rr, cc = rr[ind], cc[ind]
                accumulator[i, j, s] += sum(edges[rr, cc])
                rr, cc = line(*map(int, tr[1]), *map(int, tr[2]))
                ind = logical_and.reduce((rr >= 0, rr < nb_lignes, cc >= 0, cc < nb_colonnes))
                rr, cc = rr[ind], cc[ind]
                accumulator[i, j, s] += sum(edges[rr, cc])
                rr, cc = line(*map(int, tr[2]), *map(int, tr[0]))
                ind = logical_and.reduce((rr >= 0, rr < nb_lignes, cc >= 0, cc < nb_colonnes))
                rr, cc = rr[ind], cc[ind]
                accumulator[i, j, s] += sum(edges[rr, cc])
    return accumulator
Example #10
def test_reading_select_region_metadata_not_spatial_only(filename):
    """
    The same as test_reading_select_region_metadata but for spatial_only=False.
    """

    full_data = load(filename)

    # Mask off the centre of the volume.
    mask_region = mask(filename, spatial_only=False)

    restrict = array(
        [full_data.metadata.boxsize * 0.26,
         full_data.metadata.boxsize * 0.74]).T

    mask_region.constrain_spatial(restrict=restrict)

    selected_data = load(filename, mask=mask_region)

    selected_coordinates = selected_data.gas.coordinates

    # Now need to repeat the selection by hand:
    subset_mask = logical_and.reduce([
        logical_and(x > y_lower, x < y_upper)
        for x, (y_lower, y_upper) in zip(full_data.gas.coordinates.T, restrict)
    ])

    # We also need to repeat for the thing we just selected; the cells only give
    # us an _approximate_ selection!
    selected_subset_mask = logical_and.reduce([
        logical_and(x > y_lower, x < y_upper)
        for x, (y_lower,
                y_upper) in zip(selected_data.gas.coordinates.T, restrict)
    ])

    hand_selected_coordinates = full_data.gas.coordinates[subset_mask]

    assert (hand_selected_coordinates ==
            selected_coordinates[selected_subset_mask]).all()

    return
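The hand-made selection above is just one interval test per axis, AND-ed together. The sketch below repeats it on random points, independent of the simulation data loader used in the test; the names here are illustrative.

from numpy import array, logical_and
from numpy.random import uniform

coordinates = uniform(0.0, 100.0, size=(1000, 3))
restrict = array([[26.0, 74.0]] * 3)   # (lower, upper) per axis

subset_mask = logical_and.reduce([
    logical_and(x > y_lower, x < y_upper)
    for x, (y_lower, y_upper) in zip(coordinates.T, restrict)
])
print(subset_mask.sum(), "of", len(coordinates), "points lie inside the box")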
Example #11
 def transform(self, X, y=None, *args, **kwargs):
     from pandas import DataFrame
     from numpy import logical_and
     from numexpr import evaluate
     self.X = X  #keep the original input data for later
     X = X.iloc[:, self.columns].values if isinstance(
         X, DataFrame) else X[:, self.columns]
     LB, UB = self.LB, self.UB
     #MASK = (X <= LB)|(X >= UB); MASK = np.logical_or(X<=LB, X.__ge__(UB))  #same
     MASK = evaluate(
         "(X<=LB)|(X>=UB)"
     )  # X = subset     MASK = 2D mask-array denoting outliers
     self.mask = logical_and.reduce(~MASK, axis=1)  # non-outliers
     self.X = self.X.iloc[self.mask, :] if isinstance(
         self.X, DataFrame) else self.X[self.mask, :]
     self.number_of_outliers_removed = X.shape[0] - self.X.shape[0]
     self.outliers_index = (~self.mask).nonzero()[0]
     del X, self.mask
     return self.X
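The row-wise reduction at the core of transform() can be shown without pandas or numexpr; a minimal sketch with simple per-column bounds (illustrative values only):

from numpy import array, logical_and

X = array([[1.0, 5.0],
           [9.0, 5.0],    # first column out of bounds
           [2.0, 6.0]])
LB, UB = array([0.0, 4.0]), array([8.0, 7.0])

MASK = (X <= LB) | (X >= UB)                 # per-cell outlier flags
keep = logical_and.reduce(~MASK, axis=1)     # a row survives only if every cell is in range
print(X[keep])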
Example #12
 def _crosslink_atoms(self):
     pb = self.pbond
     e = self.ensemble_model
     atoms = []
     for a in pb.atoms:
         if a.structure is not e:
             # Find matching atom in e.
             ea = e.atoms
             from numpy import logical_and
             mask = logical_and.reduce(
                 ((ea.names == a.name),
                  (ea.chain_ids == a.residue.chain_id),
                  (ea.residues.numbers == a.residue.number)))
             matom = ea.filter(mask)
             if len(matom) != 1:
                 from chimerax.core.errors import UserError
                 raise UserError(
                     'Require one atom in ensemble %s matching pseudobond atom /%s:%d@%s, got %d'
                     % (e.name, a.residue.chain_id, a.residue.number,
                        a.name, len(matom)))
             a = matom[0]
         atoms.append(a)
     return atoms
Example #13
def fill_state(province_guide, province_output, state_color):
    try:
        state_mask, border_mask, x_min, y_min, x_max, y_max = get_state_mask(
            province_guide, state_color)

        # Define the area that we're operating on by cropping the entire image to the bounds of where the defining state key can be found, for optimisation.
        state_view = province_output[y_min:y_max + 1, x_min:x_max + 1]

        province_masks, province_origins, undetermined_mask = get_provinces(
            numpy.logical_and(state_mask, ~border_mask), min_province_pixels)

        undetermined_province_masks = None
        if undetermined_mask is not None:
            undetermined_province_masks, undetermined_origins, undetermined_second_mask = get_provinces(
                undetermined_mask, 0, 2)

        palette_color = None
        if random_state_palette_colors:
            palette_color = get_random_color()
        else:
            palette_color = state_color

        for p in province_masks:
            # Fill each province with a random color.
            new_prov_col = get_random_color(palette_color)

            if new_prov_col == ignore_col:
                raise Exception(
                    "Error: A province was almost filled with the ignore color {}! This shouldn't be possible, but I saw it happen once so I added this safeguard. Please report it to the tool author. Aborting the operation."
                    .format(ignore_col))

            state_view[numpy.where(p)] = new_prov_col
            used_cols.add(tuple(new_prov_col))

            # Register an animation frame after each painted province.
            register_anim_frame(province_output)

        if undetermined_province_masks is not None:
            for u in undetermined_province_masks:
                # For now, treat the stray province pieces as regular provinces (this allows us to fill in the borders nicely), but they may be filled with the undetermined col later
                # depending on the user settings.
                new_prov_col = get_random_color(palette_color)
                state_view[numpy.where(u)] = new_prov_col
                used_cols.add(tuple(new_prov_col))

        stray_border_origins = clean_up_borders(state_view, state_mask,
                                                border_mask, state_color)
        if stray_border_origins is not None:
            for s in stray_border_origins:
                s[0] = s[0] + y_min
                s[1] = s[1] + x_min

            global stray_border_fragments

            stray_border_fragments = numpy.concatenate(
                (stray_border_fragments, stray_border_origins))

        # Decide what to do with the province fragments which were so small they were probably meant to be part of a bigger province.
        if undetermined_province_masks is not None:
            for u in undetermined_origins:
                mode_col = None

                post_border_cleanup_fragment = logical_and.reduce(
                    state_view == state_view[u[0], u[1]], axis=-1)

                state_view[post_border_cleanup_fragment] = (255, 0, 255)

                if undetermined_pixel_handling != 0:
                    mode_col = get_mode_neighbors_of_area(
                        post_border_cleanup_fragment, state_view, state_mask,
                        undetermined_pixel_handling == 1)

                if mode_col is not None:

                    state_view[numpy.where(
                        post_border_cleanup_fragment)] = mode_col
                else:
                    state_view[numpy.where(
                        post_border_cleanup_fragment)] = undetermined_col

                    # Coordinates need to be in global array space.
                    # Don't forget, axes are [0] = y, [1] = x in numpy ...
                    u[0] = u[0] + y_min
                    u[1] = u[1] + x_min
                    global undetermined_fragments
                    undetermined_fragments = numpy.concatenate(
                        (undetermined_fragments, [u]), axis=0)

        # Register the final animation frame.
        register_anim_frame(province_output)

    except Exception as exc:
        print(
            "Error: Failure while attempting to fill the state of color '{}':".
            format(state_color) + str(exc))
        traceback.print_exc()
        # Notify the caller that the operation was not a success
        return False

    return True
Example #14
    def as_subindex(self, index):
        index = ndindex(index).reduce().broadcast_arrays()

        self = self.broadcast_arrays()

        if ... in self.args:
            raise NotImplementedError("Tuple.as_subindex() is not yet implemented for tuples with ellipses")

        if isinstance(index, (Integer, ArrayIndex, Slice)):
            index = Tuple(index)
        if isinstance(index, Tuple):
            new_args = []
            boolean_arrays = []
            integer_arrays = []
            if any(isinstance(i, Slice) and i.step < 0 for i in index.args):
                raise NotImplementedError("Tuple.as_subindex() is only implemented on slices with positive steps")
            if ... in index.args:
                raise NotImplementedError("Tuple.as_subindex() is not yet implemented for tuples with ellipses")
            for self_arg, index_arg in zip(self.args, index.args):
                if (isinstance(self_arg, IntegerArray) and
                    isinstance(index_arg, Slice)):
                    if (self_arg.array < 0).any():
                        raise NotImplementedError("IntegerArray.as_subindex() is only implemented for arrays with all nonnegative entries. Try calling reduce() with a shape first.")
                    if index_arg.step < 0:
                        raise NotImplementedError("IntegerArray.as_subindex(Slice) is only implemented for slices with positive steps")

                    # After reducing, start is not None when step > 0
                    if index_arg.stop is None or index_arg.start < 0 or index_arg.stop < 0:
                        raise NotImplementedError("IntegerArray.as_subindex(Slice) is only implemented for slices with nonnegative start and stop. Try calling reduce() with a shape first.")

                    s = self_arg.array
                    start, stop, step = subindex_slice(
                        s, s+1, 1, index_arg.start, index_arg.stop, index_arg.step)
                    if (stop <= 0).all():
                        raise ValueError("Indices do not intersect")
                    if start.shape == ():
                        if start >= stop:
                            raise ValueError("Indices do not intersect")

                    integer_arrays.append((start, stop))
                    # Placeholder. We need to mask out the stops below.
                    new_args.append(IntegerArray(start))
                else:
                    subindex = self_arg.as_subindex(index_arg)
                    if isinstance(subindex, Tuple):
                        assert subindex == ()
                        subindex # Workaround https://github.com/nedbat/coveragepy/issues/1029
                        continue
                    if isinstance(subindex, BooleanArray):
                        boolean_arrays.append(subindex)
                    new_args.append(subindex)
            args_remainder = self.args[min(len(self.args), len(index.args)):]
            index_remainder = index.args[min(len(self.args), len(index.args)):]
            if any(isinstance(i, ArrayIndex) and i.isempty() for i in
                   index_remainder):
                raise ValueError("Indices do not intersect")
            for arg in args_remainder:
                if isinstance(arg, BooleanArray):
                    boolean_arrays.append(arg)
                if isinstance(arg, IntegerArray):
                    integer_arrays.append((arg.array, arg.array+1))
                new_args.append(arg)
            # Replace all boolean arrays with the logical AND of them.
            if any(i.isempty() for i in boolean_arrays):
                raise ValueError("Indices do not intersect")
            if boolean_arrays:
                if len(boolean_arrays) > 1:
                    new_array = BooleanArray(logical_and.reduce([i.array for i in boolean_arrays]))
                else:
                    new_array = boolean_arrays[0]
                new_args2 = []
                first = True
                for arg in new_args:
                    if arg in boolean_arrays:
                        if first:
                            new_args2.append(new_array)
                            first = False
                    else:
                        new_args2.append(arg)
                new_args = new_args2

            # Mask out integer arrays to only where the start is less than the
            # stop for all arrays.
            if integer_arrays:
                starts, stops = zip(*integer_arrays)
                starts = array(broadcast_arrays(*starts))
                stops = array(broadcast_arrays(*stops))
                mask = logical_and.reduce(starts < stops, axis=0)
                new_args2 = []
                i = 0
                for arg in new_args:
                    if isinstance(arg, IntegerArray):
                        if mask.ndim == 0:
                            # Integer arrays always result in a 1 dimensional
                            # result, except when we have a scalar, we want to
                            # have a 0 dimensional result to match Integer().
                            new_args2.append(IntegerArray(starts[i]))
                        elif mask.all():
                            new_args2.append(IntegerArray(starts[i]))
                        else:
                            new_args2.append(IntegerArray(starts[i, mask]))
                        if new_args2[-1].isempty():
                            raise ValueError("Indices do not intersect")
                        i += 1
                    else:
                        new_args2.append(arg)
                new_args = new_args2
            return Tuple(*new_args)
        raise NotImplementedError(f"Tuple.as_subindex() is not implemented for type '{type(index).__name__}'")
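Both reductions used in as_subindex() above are ordinary NumPy calls once the ndindex wrappers are stripped away; a sketch with made-up arrays:

from numpy import array, broadcast_arrays, logical_and

# AND together several broadcast-compatible boolean index arrays.
boolean_arrays = [array([True, True, False]), array([True, False, True])]
combined = logical_and.reduce(boolean_arrays)          # [ True False False]

# Keep only positions where start < stop holds for every integer-array pair.
starts = array(broadcast_arrays(array([0, 2, 5]), array([1, 4, 5])))
stops = array(broadcast_arrays(array([3, 3, 6]), array([2, 4, 7])))
mask = logical_and.reduce(starts < stops, axis=0)      # [ True False  True]
print(combined, mask)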
Example #15
def equilibria(TempC, Pdbar, pHScale, WhichKs, WhoseKSO4, WhoseKF, TP, TSi, Sal,
        TF, TS):
    """Evaluate all stoichiometric equilibrium constants, converted to the
    chosen pH scale, and corrected for pressure.

    Inputs must first be conditioned with inputs().

    This finds the Constants of the CO2 system in seawater or freshwater,
    corrects them for pressure, and reports them on the chosen pH scale.
    The process is as follows: the Constants (except KS, KF which stay on the
    free scale - these are only corrected for pressure) are:
          1) evaluated as they are given in the literature,
          2) converted to the SWS scale in mol/kg-SW or to the NBS scale,
          3) corrected for pressure,
          4) converted to the SWS pH scale in mol/kg-SW,
          5) converted to the chosen pH scale.

    Based on a subset of Constants, version 04.01, 10-13-97, by Ernie Lewis.
    """
    # PROGRAMMER'S NOTE: all logs are log base e
    # PROGRAMMER'S NOTE: all Constants are converted to the pH scale
    #     pHScale# (the chosen one) in units of mol/kg-SW
    #     except KS and KF are on the free scale
    #     and KW is in units of (mol/kg-SW)^2

    TempK, Pbar, RT = units(TempC, Pdbar)

    # Calculate K0 (Henry's constant for CO2)
    K0 = eq.kCO2_W74(TempK, Sal)

    # Calculate KS (bisulfate ion dissociation constant)
    KS = full_like(TempK, nan)
    F = WhoseKSO4==1
    if any(F):
        KS[F] = eq.kHSO4_FREE_D90a(TempK[F], Sal[F])
    F = WhoseKSO4==2
    if any(F):
        KS[F] = eq.kHSO4_FREE_KRCB77(TempK[F], Sal[F])

    # Calculate KF (hydrogen fluoride dissociation constant)
    KF = full_like(TempC, nan)
    F = WhoseKF==1
    if any(F):
        KF[F] = eq.kHF_FREE_DR79(TempK[F], Sal[F])
    F = WhoseKF==2
    if any(F):
        KF[F] = eq.kHF_FREE_PF87(TempK[F], Sal[F])

    # Calculate pH scale conversion factors - these are NOT pressure-corrected
    SWStoTOT = convert.sws2tot(TS, KS, TF, KF)
    # Calculate fH
    fH = full_like(TempC, nan)
    # Use GEOSECS's value for cases 1-6 to convert pH scales
    F = WhichKs==8
    if any(F):
        fH[F] = 1.0 # this shouldn't occur in the program for this case
    F = WhichKs==7
    if any(F):
        fH[F] = convert.fH_PTBO87(TempK[F], Sal[F])
    F = logical_and(WhichKs!=7, WhichKs!=8)
    if any(F):
        fH[F] = convert.fH_TWB82(TempK[F], Sal[F])

    # Calculate boric acid dissociation constant (KB)
    KB = full_like(TempC, nan)
    F = WhichKs==8 # Pure water case
    if any(F):
        KB[F] = 0.0
    F = logical_or(WhichKs==6, WhichKs==7)
    if any(F):
        KB[F] = eq.kBOH3_NBS_LTB69(TempK[F], Sal[F])
        KB[F] /= fH[F] # Convert NBS to SWS
    F = logical_and.reduce((WhichKs!=6, WhichKs!=7, WhichKs!=8))
    if any(F):
        KB[F] = eq.kBOH3_TOT_D90b(TempK[F], Sal[F])
        KB[F] /= SWStoTOT[F] # Convert TOT to SWS

    # Calculate water dissociation constant (KW)
    KW = full_like(TempC, nan)
    F = WhichKs==7
    if any(F):
        KW[F] = eq.kH2O_SWS_M79(TempK[F], Sal[F])
    F = WhichKs==8
    if any(F):
        KW[F] = eq.kH2O_SWS_HO58_M79(TempK[F], Sal[F])
    F = logical_and.reduce((WhichKs!=6, WhichKs!=7, WhichKs!=8))
    if any(F):
        KW[F] = eq.kH2O_SWS_M95(TempK[F], Sal[F])
    # KW is on the SWS pH scale in (mol/kg-SW)**2
    F = WhichKs==6
    if any(F):
        KW[F] = 0 # GEOSECS doesn't include OH effects

    # Calculate phosphate and silicate dissociation constants
    KP1 = full_like(TempC, nan)
    KP2 = full_like(TempC, nan)
    KP3 = full_like(TempC, nan)
    KSi = full_like(TempC, nan)
    F = WhichKs==7
    if any(F):
        KP1[F], KP2[F], KP3[F] = eq.kH3PO4_NBS_KP67(TempK[F], Sal[F])
        # KP1 is already on SWS!
        KP2[F] /= fH[F] # Convert NBS to SWS
        KP3[F] /= fH[F] # Convert NBS to SWS
        KSi[F] = eq.kSi_NBS_SMB64(TempK[F], Sal[F])
        KSi[F] /= fH[F] # Convert NBS to SWS
    F = logical_or(WhichKs==6, WhichKs==8)
    if any(F):
        # Neither the GEOSECS choice nor the freshwater choice
        # include contributions from phosphate or silicate.
        KP1[F] = 0.0
        KP2[F] = 0.0
        KP3[F] = 0.0
        KSi[F] = 0.0
    F = logical_and.reduce((WhichKs!=6, WhichKs!=7, WhichKs!=8))
    if any(F):
        KP1[F], KP2[F], KP3[F] = eq.kH3PO4_SWS_YM95(TempK[F], Sal[F])
        KSi[F] = eq.kSi_SWS_YM95(TempK[F], Sal[F])

    # Calculate carbonic acid dissociation constants (K1 and K2)
    K1 = full_like(TempC, nan)
    K2 = full_like(TempC, nan)
    F = WhichKs==1
    if any(F):
        K1[F], K2[F] = eq.kH2CO3_TOT_RRV93(TempK[F], Sal[F])
        K1[F] /= SWStoTOT[F] # Convert TOT to SWS
        K2[F] /= SWStoTOT[F] # Convert TOT to SWS
    F = WhichKs==2
    if any(F):
        K1[F], K2[F] = eq.kH2CO3_SWS_GP89(TempK[F], Sal[F])
    F = WhichKs==3
    if any(F):
        K1[F], K2[F] = eq.kH2CO3_SWS_H73_DM87(TempK[F], Sal[F])
    F = WhichKs==4
    if any(F):
        K1[F], K2[F] = eq.kH2CO3_SWS_MCHP73_DM87(TempK[F], Sal[F])
    F = WhichKs==5
    if any(F):
        K1[F], K2[F] = eq.kH2CO3_SWS_HM_DM87(TempK[F], Sal[F])
    F = logical_or(WhichKs==6, WhichKs==7)
    if any(F):
        K1[F], K2[F] = eq.kH2CO3_NBS_MCHP73(TempK[F], Sal[F])
        K1[F] /= fH[F] # Convert NBS to SWS
        K2[F] /= fH[F] # Convert NBS to SWS
    F = WhichKs==8
    if any(F):
        K1[F], K2[F] = eq.kH2CO3_SWS_M79(TempK[F], Sal[F])
    F = WhichKs==9
    if any(F):
        K1[F], K2[F] = eq.kH2CO3_NBS_CW98(TempK[F], Sal[F])
        K1[F] /= fH[F] # Convert NBS to SWS
        K2[F] /= fH[F] # Convert NBS to SWS
    F = WhichKs==10
    if any(F):
        K1[F], K2[F] = eq.kH2CO3_TOT_LDK00(TempK[F], Sal[F])
        K1[F] /= SWStoTOT[F] # Convert TOT to SWS
        K2[F] /= SWStoTOT[F] # Convert TOT to SWS
    F = WhichKs==11
    if any(F):
        K1[F], K2[F] = eq.kH2CO3_SWS_MM02(TempK[F], Sal[F])
    F = WhichKs==12
    if any(F):
        K1[F], K2[F] = eq.kH2CO3_SWS_MPL02(TempK[F], Sal[F])
    F = WhichKs==13
    if any(F):
        K1[F], K2[F] = eq.kH2CO3_SWS_MGH06(TempK[F], Sal[F])
    F = WhichKs==14
    if any(F):
        K1[F], K2[F] = eq.kH2CO3_SWS_M10(TempK[F], Sal[F])
    F = WhichKs==15
    if any(F):
        K1[F], K2[F] = eq.kH2CO3_SWS_WMW14(TempK[F], Sal[F])

    # From CO2SYS_v1_21.m: calculate KH2S and KNH3
    KH2S = full_like(TempC, nan)
    KNH3 = full_like(TempC, nan)
    F = logical_or.reduce((WhichKs==6, WhichKs==7, WhichKs==8))
    # Contributions from NH3 and H2S not included for these options.
    if any(F):
        KH2S[F] = 0.0
        KNH3[F] = 0.0
    F = logical_and.reduce((WhichKs!=6, WhichKs!=7, WhichKs!=8))
    if any(F):
        KH2S[F] = eq.kH2S_TOT_YM95(TempK[F], Sal[F])
        KNH3[F] = eq.kNH3_TOT_CW95(TempK[F], Sal[F])
        KH2S[F] /= SWStoTOT[F] # Convert TOT to SWS
        KNH3[F] /= SWStoTOT[F] # Convert TOT to SWS

#****************************************************************************
# Correct dissociation constants for pressure
# Currently: For WhichKs# = 1 to 7, all Ks (except KF and KS, which are on
#       the free scale) are on the SWS scale.
#       For WhichKs# = 6, KW set to 0, KP1, KP2, KP3, KSi don't matter.
#       For WhichKs# = 8, K1, K2, and KW are on the "pH" pH scale
#       (the pH scales are the same in this case); the other Ks don't
#       matter.
#
# No salinity dependence is given for the pressure coefficients here.
# It is assumed that the salinity is at or very near Sali = 35.
# These are valid for the SWS pH scale, but the difference between this and
# the total only yields a difference of .004 pH units at 1000 bars, much
# less than the uncertainties in the values.
#****************************************************************************
# The sources used are:
# Millero, 1995:
#       Millero, F. J., Thermodynamics of the carbon dioxide system in the
#       oceans, Geochimica et Cosmochimica Acta 59:661-677, 1995.
#       See table 9 and eqs. 90-92, p. 675.
#       TYPO: a factor of 10^3 was left out of the definition of Kappa
#       TYPO: the value of R given is incorrect with the wrong units
#       TYPO: the values of the a's for H2S and H2O are from the 1983
#                values for fresh water
#       TYPO: the value of a1 for B(OH)3 should be +.1622
#        Table 9 on p. 675 has no values for Si.
#       There are a variety of other typos in Table 9 on p. 675.
#       There are other typos in the paper, and most of the check values
#       given don't check.
# Millero, 1992:
#       Millero, Frank J., and Sohn, Mary L., Chemical Oceanography,
#       CRC Press, 1992. See chapter 6.
#       TYPO: this chapter has numerous typos (eqs. 36, 52, 56, 65, 72,
#               79, and 96 have typos).
# Millero, 1983:
#       Millero, Frank J., Influence of pressure on chemical processes in
#       the sea. Chapter 43 in Chemical Oceanography, eds. Riley, J. P. and
#       Chester, R., Academic Press, 1983.
#       TYPO: p. 51, eq. 94: the value -26.69 should be -25.59
#       TYPO: p. 51, eq. 95: the term .1700t should be .0800t
#       these two are necessary to match the values given in Table 43.24
# Millero, 1979:
#       Millero, F. J., The thermodynamics of the carbon dioxide system
#       in seawater, Geochimica et Cosmochimica Acta 43:1651-1661, 1979.
#       See table 5 and eqs. 7, 7a, 7b on pp. 1656-1657.
# Takahashi et al, in GEOSECS Pacific Expedition, v. 3, 1982.
#       TYPO: the pressure dependence of K2 should have a 16.4, not 26.4
#       This matches the GEOSECS results and is in Edmond and Gieskes.
# Culberson, C. H. and Pytkowicz, R. M., Effect of pressure on carbonic acid,
#       boric acid, and the pH of seawater, Limnology and Oceanography
#       13:403-417, 1968.
# Edmond, John M. and Gieskes, J. M. T. M., The calculation of the degree of
#       saturation of seawater with respect to calcium carbonate under in situ conditions,
#       Geochimica et Cosmochimica Acta, 34:1261-1291, 1970.
#****************************************************************************
# These references often disagree and give different fits for the same thing.
# They are not always just an update either; that is, Millero, 1995 may agree
#       with Millero, 1979, but differ from Millero, 1983.
# For WhichKs# = 7 (Peng choice) I used the same factors for KW, KP1, KP2,
#       KP3, and KSi as for the other cases. Peng et al didn't consider the
#       case of P different from 0. GEOSECS did consider pressure, but didn't
#       include Phos, Si, or OH, so including the factors here won't matter.
# For WhichKs# = 8 (freshwater) the values are from Millero, 1983 (for K1, K2,
#       and KW). The others aren't used (TB = TS = TF = TP = TSi = 0.), so
#       including the factors won't matter.
#****************************************************************************
#       deltaVs are in cm3/mole
#       Kappas are in cm3/mole/bar
#****************************************************************************

    # Correct K1, K2 and KB for pressure:
    deltaV = full_like(TempC, nan)
    Kappa = full_like(TempC, nan)
    lnK1fac = full_like(TempC, nan)
    lnK2fac = full_like(TempC, nan)
    lnKBfac = full_like(TempC, nan)
    F = WhichKs==8
    if any(F):
        # Pressure effects on K1 in freshwater: this is from Millero, 1983.
        deltaV[F]  = -30.54 + 0.1849 *TempC[F] - 0.0023366*TempC[F]**2
        Kappa[F]   = (-6.22 + 0.1368 *TempC[F] - 0.001233 *TempC[F]**2)/1000
        lnK1fac[F] = (-deltaV[F] + 0.5*Kappa[F]*Pbar[F])*Pbar[F]/RT[F]
        # Pressure effects on K2 in freshwater: this is from Millero, 1983.
        deltaV[F]  = -29.81 + 0.115*TempC[F] - 0.001816*TempC[F]**2
        Kappa[F]   = (-5.74 + 0.093*TempC[F] - 0.001896*TempC[F]**2)/1000
        lnK2fac[F] = (-deltaV[F] + 0.5*Kappa[F]*Pbar[F])*Pbar[F]/RT[F]
        lnKBfac[F] = 0 #; this doesn't matter since TB = 0 for this case
    F = logical_or(WhichKs==6, WhichKs==7)
    if any(F):
        # GEOSECS Pressure Effects On K1, K2, KB (on the NBS scale)
        # Takahashi et al, GEOSECS Pacific Expedition v. 3, 1982 quotes
        # Culberson and Pytkowicz, L and O 13:403-417, 1968:
        # but the fits are the same as those in
        # Edmond and Gieskes, GCA, 34:1261-1291, 1970
        # who in turn quote Li, personal communication
        lnK1fac[F] = (24.2 - 0.085*TempC[F])*Pbar[F]/RT[F]
        lnK2fac[F] = (16.4 - 0.04 *TempC[F])*Pbar[F]/RT[F]
        #               Takahashi et al had 26.4, but 16.4 is from Edmond and Gieskes
        #               and matches the GEOSECS results
        lnKBfac[F] = (27.5 - 0.095*TempC[F])*Pbar[F]/RT[F]
    F=logical_and.reduce((WhichKs!=6, WhichKs!=7, WhichKs!=8))
    if any(F):
        #***PressureEffectsOnK1:
        #               These are from Millero, 1995.
        #               They are the same as Millero, 1979 and Millero, 1992.
        #               They are from data of Culberson and Pytkowicz, 1968.
        deltaV[F]  = -25.5 + 0.1271*TempC[F]
        #                 'deltaV = deltaV - .151*(Sali - 34.8); # Millero, 1979
        Kappa[F]   = (-3.08 + 0.0877*TempC[F])/1000
        #                 'Kappa = Kappa  - .578*(Sali - 34.8)/1000.; # Millero, 1979
        lnK1fac[F] = (-deltaV[F] + 0.5*Kappa[F]*Pbar[F])*Pbar[F]/RT[F]
        #               The fits given in Millero, 1983 are somewhat different.

        #***PressureEffectsOnK2:
        #               These are from Millero, 1995.
        #               They are the same as Millero, 1979 and Millero, 1992.
        #               They are from data of Culberson and Pytkowicz, 1968.
        deltaV[F]  = -15.82 - 0.0219*TempC[F]
        #                  'deltaV = deltaV + .321*(Sali - 34.8); # Millero, 1979
        Kappa[F]   = (1.13 - 0.1475*TempC[F])/1000
        #                 'Kappa = Kappa - .314*(Sali - 34.8)/1000: # Millero, 1979
        lnK2fac[F] = (-deltaV[F] + 0.5*Kappa[F]*Pbar[F])*Pbar[F]/RT[F]
        #               The fit given in Millero, 1983 is different.
        #               Not by a lot for deltaV, but by much for Kappa. #

        #***PressureEffectsOnKB:
        #               This is from Millero, 1979.
        #               It is from data of Culberson and Pytkowicz, 1968.
        deltaV[F]  = -29.48 + 0.1622*TempC[F] - 0.002608*TempC[F]**2
        #               Millero, 1983 has:
        #                 'deltaV = -28.56 + .1211*TempCi - .000321*TempCi*TempCi
        #               Millero, 1992 has:
        #                 'deltaV = -29.48 + .1622*TempCi + .295*(Sali - 34.8)
        #               Millero, 1995 has:
        #                 'deltaV = -29.48 - .1622*TempCi - .002608*TempCi*TempCi
        #                 'deltaV = deltaV + .295*(Sali - 34.8); # Millero, 1979
        Kappa[F]   = -2.84/1000 # Millero, 1979
        #               Millero, 1992 and Millero, 1995 also have this.
        #                 'Kappa = Kappa + .354*(Sali - 34.8)/1000: # Millero,1979
        #               Millero, 1983 has:
        #                 'Kappa = (-3 + .0427*TempCi)/1000
        lnKBfac[F] = (-deltaV[F] + 0.5*Kappa[F]*Pbar[F])*Pbar[F]/RT[F]

    # CorrectKWForPressure:
    lnKWfac = full_like(TempC, nan)
    F=(WhichKs==8)
    if any(F):
        # PressureEffectsOnKWinFreshWater:
        #               This is from Millero, 1983.
        deltaV[F]  =  -25.6 + 0.2324*TempC[F] - 0.0036246*TempC[F]**2
        Kappa[F]   = (-7.33 + 0.1368*TempC[F] - 0.001233 *TempC[F]**2)/1000
        lnKWfac[F] = (-deltaV[F] + 0.5*Kappa[F]*Pbar[F])*Pbar[F]/RT[F]

        #               NOTE the temperature dependence of KappaK1 and KappaKW
        #               for fresh water in Millero, 1983 are the same.
    F=(WhichKs!=8)
    if any(F):
        # GEOSECS doesn't include OH term, so this won't matter.
        # Peng et al didn't include pressure, but here I assume that the KW correction
        #       is the same as for the other seawater cases.
        # PressureEffectsOnKW:
        #               This is from Millero, 1983 and his programs CO2ROY(T).BAS.
        deltaV[F]  = -20.02 + 0.1119*TempC[F] - 0.001409*TempC[F]**2
        #               Millero, 1992 and Millero, 1995 have:
        Kappa[F]   = (-5.13 + 0.0794*TempC[F])/1000 # Millero, 1983
        #               Millero, 1995 has this too, but Millero, 1992 is different.
        lnKWfac[F] = (-deltaV[F] + 0.5*Kappa[F]*Pbar[F])*Pbar[F]/RT[F]
        #               Millero, 1979 does not list values for these.

    # PressureEffectsOnKF:
    #       This is from Millero, 1995, which is the same as Millero, 1983.
    #       It is assumed that KF is on the free pH scale.
    deltaV = -9.78 - 0.009*TempC - 0.000942*TempC**2
    Kappa = (-3.91 + 0.054*TempC)/1000
    lnKFfac = (-deltaV + 0.5*Kappa*Pbar)*Pbar/RT
    # PressureEffectsOnKS:
    #       This is from Millero, 1995, which is the same as Millero, 1983.
    #       It is assumed that KS is on the free pH scale.
    deltaV = -18.03 + 0.0466*TempC + 0.000316*TempC**2
    Kappa = (-4.53 + 0.09*TempC)/1000
    lnKSfac = (-deltaV + 0.5*Kappa*Pbar)*Pbar/RT

    # CorrectKP1KP2KP3KSiForPressure:
    # These corrections don't matter for the GEOSECS choice (WhichKs# = 6) and
    #       the freshwater choice (WhichKs# = 8). For the Peng choice I assume
    #       that they are the same as for the other choices (WhichKs# = 1 to 5).
    # The corrections for KP1, KP2, and KP3 are from Millero, 1995, which are the
    #       same as Millero, 1983.
    # PressureEffectsOnKP1:
    deltaV = -14.51 + 0.1211*TempC - 0.000321*TempC**2
    Kappa  = (-2.67 + 0.0427*TempC)/1000
    lnKP1fac = (-deltaV + 0.5*Kappa*Pbar)*Pbar/RT
    # PressureEffectsOnKP2:
    deltaV = -23.12 + 0.1758*TempC - 0.002647*TempC**2
    Kappa  = (-5.15 + 0.09  *TempC)/1000
    lnKP2fac = (-deltaV + 0.5*Kappa*Pbar)*Pbar/RT
    # PressureEffectsOnKP3:
    deltaV = -26.57 + 0.202 *TempC - 0.003042*TempC**2
    Kappa  = (-4.08 + 0.0714*TempC)/1000
    lnKP3fac = (-deltaV + 0.5*Kappa*Pbar)*Pbar/RT
    # PressureEffectsOnKSi:
    #  The only mention of this is Millero, 1995 where it is stated that the
    #    values have been estimated from the values of boric acid. HOWEVER,
    #    there is no listing of the values in the table.
    #    I used the values for boric acid from above.
    deltaV = -29.48 + 0.1622*TempC - 0.002608*TempC**2
    Kappa  = -2.84/1000
    lnKSifac = (-deltaV + 0.5*Kappa*Pbar)*Pbar/RT

    # CorrectKNH3KH2SForPressure:
    # The corrections are from Millero, 1995, which are the
    #       same as Millero, 1983.
    # PressureEffectsOnKNH3:
    deltaV = -26.43 + 0.0889*TempC - 0.000905*TempC**2
    Kappa = (-5.03 + 0.0814*TempC)/1000
    lnKNH3fac = (-deltaV + 0.5*Kappa*Pbar)*Pbar/RT
    # PressureEffectsOnKH2S:
    # Millero 1995 gives values for deltaV in fresh water instead of SW.
    # Millero 1995 gives -b0 as -2.89 instead of 2.89
    # Millero 1983 is correct for both
    deltaV = -11.07 - 0.009*TempC - 0.000942*TempC**2
    Kappa = (-2.89 + 0.054*TempC)/1000
    lnKH2Sfac = (-deltaV + 0.5*Kappa*Pbar)*Pbar/RT

    # CorrectKsForPressureHere:
    K1 *= exp(lnK1fac)
    K2 *= exp(lnK2fac)
    KW *= exp(lnKWfac)
    KB *= exp(lnKBfac)
    KF *= exp(lnKFfac)
    KS *= exp(lnKSfac)
    KP1 *= exp(lnKP1fac)
    KP2 *= exp(lnKP2fac)
    KP3 *= exp(lnKP3fac)
    KSi *= exp(lnKSifac)
    KNH3 *= exp(lnKNH3fac)
    KH2S *= exp(lnKH2Sfac)

    # CorrectpHScaleConversionsForPressure:
    # fH has been assumed to be independent of pressure.
    SWStoTOT = convert.sws2tot(TS, KS, TF, KF)
    FREEtoTOT = convert.free2tot(TS, KS)

    #  The values KS and KF are already pressure-corrected, so the pH scale
    #  conversions are now valid at pressure.

    # Find pH scale conversion factor: this is the scale they will be put on
    pHfactor = full_like(TempC, nan)
    F = pHScale==1 # Total
    pHfactor[F] = SWStoTOT[F]
    F = pHScale==2 # SWS, they are all on this now
    pHfactor[F] = 1.0
    F = pHScale==3 # pHfree
    pHfactor[F] = SWStoTOT[F]/FREEtoTOT[F]
    F = pHScale==4 # pHNBS
    pHfactor[F] = fH[F]

    # Convert from SWS pH scale to chosen scale
    K1 *= pHfactor
    K2 *= pHfactor
    KW *= pHfactor
    KB *= pHfactor
    KP1 *= pHfactor
    KP2 *= pHfactor
    KP3 *= pHfactor
    KSi *= pHfactor
    KNH3 *= pHfactor
    KH2S *= pHfactor

    return K0, K1, K2, KW, KB, KF, KS, KP1, KP2, KP3, KSi, KNH3, KH2S, fH
Example #16
def intersect(a, b, axis=2):
    from numpy import logical_and, logical_or
    return unique(a[logical_or.reduce(logical_and.reduce(a == b[:,None], axis=axis))])
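A quick illustration of what intersect() above computes: the rows of a that also occur in b are selected, and the sorted unique values in those rows are returned (illustrative data only).

from numpy import array, unique, logical_and, logical_or

a = array([[1, 2], [3, 4], [5, 6]])
b = array([[3, 4], [9, 9]])

row_hit = logical_or.reduce(logical_and.reduce(a == b[:, None], axis=2))
print(a[row_hit])           # [[3 4]]  -> rows of a that also appear in b
print(unique(a[row_hit]))   # [3 4]    -> what intersect(a, b) returns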
Example #17
def CFC_rew_pun_permutation(subj,block,phase_elec,amp_elec,surrogate_analysis):
    import scipy.io as sio
    from numpy import arange,array,append,zeros,pi,angle,logical_and,mean,roll,save,where,empty,delete,in1d,random,rint #for efficiency
    from eegfilt import eegfilt
    from scipy.signal import hilbert
    from math import log
    import pickle
    import os.path
    from random import randint

    #data_path = '/home/jcase/data/' + subj + block + '/' + subj + '_' + block + '_data.mat'
    #subglo_path = '/home/jcase/data/subj_globals.mat'
    #MI_output_path = '/home/jcase/data/' + subj + block + '/MI/e' + str(phase_elec) + '_e' + str(amp_elec)

    data_path = '/Users/johncase/Documents/UCSF Data/' + subj + '/' + subj + block + '/data/' + subj + '_' + block + '_data.mat'
    subglo_path = '/Users/johncase/Documents/UCSF Data/subj_globals.mat'
    MI_output_path = '/Users/johncase/Documents/UCSF Data/' + subj + '/' + subj + block + '/analysis/MI/rew_pun/e' + str(phase_elec) + '_e' + str(amp_elec)

    #Load ECOG Data
    ecog_data = sio.loadmat(data_path)['ecogData']
    amp_raw_data = ecog_data[amp_elec-1,:]
    pha_raw_data = ecog_data[phase_elec-1,:]
    del ecog_data

    #Load subject globals
    all_subj_data = sio.loadmat(subglo_path,struct_as_record=False, squeeze_me=True)['subj_globals']
    subj_data = getattr(getattr(all_subj_data,subj),block)
    srate = subj_data.srate
    per_chan_bad_epochs = subj_data.per_chan_bad_epochs
    allstimtimes = subj_data.allstimtimes
    stimID = subj_data.stimID

    #Trial windows
    bl = 0
    ps = 1 #in seconds

    #Surrogate Runs
    kruns = 200

    #Phase-providing frequency
    #fp = arange(1,15.1,0.1)
    #fp_bandwidth = arange(0.5,5.1,0.1)

    #fp = arange(1,21,1)
    #fp_bandwidth = arange(1,11,1)

    fp = arange(2,3)
    fp_bandwidth = arange(2,3)

    #Amplitude-providing frequency
    fa = array([70,150])

    #Define phase bins
    n_bins = 20
    bin_size = 2*pi/n_bins
    bins = arange(-pi,pi-bin_size,bin_size)

    #Define time_window (roughly entire block, exclude artifacts samples later)
    #t_0 = int(round(allstimtimes[0,0]*srate))
    #t_end = int(round((allstimtimes[-1,1]+3) *srate))
    #t_win = arange(t_0,t_end)

    MI_stim = empty((len(fp),len(fp_bandwidth),3))
    MI_diff = empty((len(fp),len(fp_bandwidth)))
    if surrogate_analysis == 1:
        MI_diff_surrogate = empty((len(fp),len(fp_bandwidth),kruns))



    #Determine samples with artifacts
    bad_samp = array([])
    if per_chan_bad_epochs[phase_elec-1].size == 2:
        bad_samp = append(bad_samp,arange(srate*per_chan_bad_epochs[phase_elec-1][0],srate*per_chan_bad_epochs[phase_elec-1][1]))
    else:
        for epoch in per_chan_bad_epochs[phase_elec-1]:
            bad_samp = append(bad_samp,arange(srate*epoch[0],srate*epoch[1]))

    if not phase_elec == amp_elec:
        if per_chan_bad_epochs[amp_elec-1].size == 2:
            bad_samp = append(bad_samp,arange(srate*per_chan_bad_epochs[amp_elec-1][0],srate*per_chan_bad_epochs[amp_elec-1][1]))
        else:
            for epoch in per_chan_bad_epochs[amp_elec-1]:
                bad_samp = append(bad_samp,arange(srate*epoch[0],srate*epoch[1]))



    #Do high-gamma filtering
    pow,filtwt = eegfilt(amp_raw_data,srate,fa[0],[])
    pow,filtwt = eegfilt(pow[0],srate,[],fa[1])
    pow = abs(hilbert(pow[0][0:len(pow[0])-1]))
    # pow = pow[good_samps]  # exclude bad samples

    #Calculate MI for each phase-providing central-frequencies / bandwidths

    for iFreq,freq in enumerate(fp):
        for iBand,band in enumerate(fp_bandwidth):

            if freq-(band/2) < 0.5:
                MI_diff[iFreq,iBand] = 0
                continue

            print('freq = ' + str(freq) + ', bw = ' + str(band))

            #Do phase-providing phase filtering

            pha = zeros([1,len(amp_raw_data)])
            pha = eegfilt(pha_raw_data,srate,[],freq+(band/2))[0][0]
            pha = eegfilt(pha,srate,freq-(band/2),[])[0][0]
            pha = angle(hilbert(pha[0:len(pha)-1]))



            for iStim,iStimID in enumerate(range(1,4)):

                trl_onsets = rint(allstimtimes[where(stimID==iStimID)[0],0]*srate).astype(int)

                trl_samps = array([]).astype(int)
                for trl in trl_onsets:
                    trl_samps = append(trl_samps,arange(int(trl+bl*srate),int(trl+ps*srate)))

                #exclude bad samples
                trl_samps = trl_samps[~in1d(trl_samps,bad_samp)]

                #keep phase/pow info only within iStim trl windows
                pha_istim = pha[trl_samps]
                pow_istim = pow[trl_samps]

                #Calculate mean amplitude within each phase bin to yield a
                #distribution of amplitude(phase)
                bin_dist = zeros([len(bins)])
                for iBin in range(len(bins)):
                    ind = logical_and(pha_istim>=bins[iBin],pha_istim<bins[iBin]+bin_size)
                    bin_dist[iBin] = mean(pow_istim[ind])

                #Normalize distribution to yield pseudo "probability density function" (PDF)
                bin_dist = bin_dist / sum(bin_dist)

                #Calculate Shannon entropy of PDF
                h_p = 0
                for iBin,mybin in enumerate(bin_dist):
                    h_p = h_p - mybin * log(mybin)

                #MI = (Kullback-Leibler distance between h_p and uniform
                #distribution) / (Entropy of uniform distribution) (see
                #http://jn.physiology.org/content/104/2/1195)
                MI_stim[iFreq,iBand,iStim] = (log(len(bins)) - h_p) / log(len(bins))

            # difference statistic = abs(A-B) + abs(A-C) + abs(B-C)
            MI_diff[iFreq,iBand] = abs(MI_stim[iFreq,iBand,0]-MI_stim[iFreq,iBand,1]) + abs(MI_stim[iFreq,iBand,0]-MI_stim[iFreq,iBand,2]) + abs(MI_stim[iFreq,iBand,1]-MI_stim[iFreq,iBand,2])

            if surrogate_analysis == 1:

                stim_n = zeros(3)
                stim_n[0] = len(where(stimID==1)[0])
                stim_n[1] = len(where(stimID==2)[0])
                stim_n[2] = len(where(stimID==3)[0])
                shuffle_ind = arange(sum(stim_n)).astype(int)


                for iRun in range(kruns):

                    if iRun%10 == 0:
                        print('{}\r'.format('Run ' + str(iRun+10)), end='')

                    MI_stim_surrogate = zeros(3)

                    random.shuffle(shuffle_ind)

                    cnt = 0
                    for iStim,iStimID in enumerate(range(1,4)):

                        phase_trl_onsets = rint(allstimtimes[where(stimID==iStimID)[0],0]*srate).astype(int)
                        amp_trl_onsets =  rint(allstimtimes[shuffle_ind[cnt:cnt+len(phase_trl_onsets)],0]*srate).astype(int)
                        cnt = cnt + len(phase_trl_onsets)


                        bin_dist = zeros([len(bins)])
                        for iBin in range(len(bins)):

                            bin_power_list = array([])

                            for iTrial,trl in enumerate(phase_trl_onsets):

                                #find sample of onset to 1s + onset
                                phase_trl = arange(trl,trl+ps*srate).astype(int)

                                #find samples within phase bin (indices relative to stimulus onset (i.e. 0-400))
                                ind = where(logical_and.reduce((pha[phase_trl]>=bins[iBin],pha[phase_trl]<bins[iBin]+bin_size,~in1d(phase_trl,bad_samp))))[0]

                                #find amp samples with the same post-stimulus latency as "inds"
                                amp_samples = amp_trl_onsets[iTrial]+ind
                                amp_samples = amp_samples[~in1d(amp_trl_onsets[iTrial]+ind,bad_samp)]

                                #grow list of power during phase bin (power from random trial but with same post-stim indices)
                                bin_power_list = append(bin_power_list,pow[amp_samples])

                            bin_dist[iBin] = mean(bin_power_list)

                        #Normalize distribution to yield pseudo "probability density function" (PDF)
                        bin_dist = bin_dist / sum(bin_dist)

                        #Calculate Shannon entropy of PDF
                        h_p = 0
                        for iBin,mybin in enumerate(bin_dist):
                            h_p = h_p - mybin * log(mybin)

                        MI_stim_surrogate[iStim] = (log(len(bins)) - h_p) / log(len(bins))
                    MI_diff_surrogate[iFreq,iBand,iRun] = abs(MI_stim_surrogate[0]-MI_stim_surrogate[1]) + abs(MI_stim_surrogate[0]-MI_stim_surrogate[2]) + abs(MI_stim_surrogate[1]-MI_stim_surrogate[2])



    save(open(MI_output_path+'_diff','wb'),MI_diff)
    save(open(MI_output_path+'_stim','wb'),MI_stim)
    if surrogate_analysis == 1:
        save(open(MI_output_path+'_diff_surrogate','wb'),MI_diff_surrogate)
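The modulation-index step buried in the loops above (Shannon entropy of the binned amplitude distribution, normalised against a uniform distribution) can be pulled out into a small helper; a sketch with made-up bin counts, not part of the original script.

from numpy import asarray, log, ones

def modulation_index(bin_dist):
    # MI = (log(N) - H(P)) / log(N), with P the normalised amplitude-per-phase-bin distribution.
    p = asarray(bin_dist, dtype=float)
    p = p / p.sum()
    h_p = -(p * log(p)).sum()           # Shannon entropy, natural log as in the code above
    return (log(len(p)) - h_p) / log(len(p))

print(modulation_index(ones(20)))             # uniform distribution -> 0.0
print(modulation_index([1e-9] * 19 + [1.0]))  # strongly peaked -> close to 1.0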