Example #1
    def membership(self):
        # Fuzzy c-means membership update:
        #   u[k, j] = 1 / sum_i (d(x_k, c_j) / d(x_k, c_i))**(2 / (m - 1))
        x = self.__training_set
        c = self.__centers
        M, _ = x.shape
        C, _ = c.shape
        r = zeros((M, C))
        m1 = 1. / (self.fuzzyness_coefficient - 1.)
        for k in range(M):
            # squared distances from training point k to every center
            den = numpy_sum((x[k] - c)**2., axis=1)
            frac = outer(den, 1. / den)**m1
            r[k, :] = 1. / numpy_sum(frac, axis=1)
        self.__membership_degrees = r
        return self.__membership_degrees
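Note on naming: every snippet in this listing calls NumPy through module-level aliases (`numpy_sum`, `numpy_where`, and so on) rather than through the usual `np.` prefix. The exact import lines vary by project and were not captured with the snippets; a hypothetical preamble that makes the aliases below resolve is:

# Hypothetical import preamble; each original project imports its own subset.
from numpy import sum as numpy_sum
from numpy import where as numpy_where
from numpy import ndim as numpy_ndim
from numpy import array as numpy_array
from numpy import empty as numpy_empty
from numpy import asanyarray as numpy_asanyarray
from numpy import (array, asanyarray, divide, dot, log, matrix, mean, ones,
                   outer, pi, shape, sqrt, subtract, vstack, zeros)

Non-NumPy names used further down, such as factorial, combinations_with_replacement, defaultdict, procrustes, matrix_IQR, idealfourths, _flip_vectors and check_input, come from the standard library (math, itertools, collections), SciPy, or each project's own modules.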
Example #2
    def compute_mass_grid(
        valuefield,
        areafield,
        dofrac=False,
        fracfield=None,
        uninitval=422397696.0,
    ):
        """Compute the mass of a data field.

        :Parameters:


            valuefield: ESMF.Field
                This contains data values of a field built on the cells of
                a grid.

            areafield: ESMF.Field
                This contains the areas associated with the grid cells.

            fracfield: ESMF.Field
                This contains the fractions of each cell which contributed
                to a regridding operation involving 'valuefield.

            dofrac: `bool`
                This gives the option to not use the 'fracfield'.

            uninitval: `float`
                The value uninitialised cells take.

        :Returns:

            `float`
                The mass of the data field is computed.

        """
        mass = 0.0
        areafield.get_area()

        ind = numpy_where(valuefield.data != uninitval)

        if dofrac:
            mass = numpy_sum(
                areafield.data[ind]
                * valuefield.data[ind]
                * fracfield.data[ind]
            )
        else:
            mass = numpy_sum(areafield.data[ind] * valuefield.data[ind])

        return mass
Example #3
def sample_size_f(a, axis=None, masked=False):
    """Return the sample size.

    :Parameters:

        axis: `int`, optional
            Axis along which to operate. By default, flattened input
            is used.

    :Returns:

        `numpy.ndarray`

    """
    if masked:
        N = numpy_sum(~a.mask, axis=axis, dtype=float)
        if not numpy_ndim(N):
            N = numpy_asanyarray(N)
    else:
        if axis is None:
            N = numpy_array(a.size, dtype=float)
        else:
            shape = a.shape
            N = numpy_empty(shape[:axis] + shape[axis + 1 :], dtype=float)
            N[...] = shape[axis]
    # --- End: if

    return asanyarray(N)
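A rough usage sketch (assuming the aliases above): counting the unmasked values along an axis of a masked array.

import numpy as np

a = np.ma.masked_invalid([[1.0, np.nan], [3.0, 4.0]])
print(sample_size_f(a, axis=0, masked=True))  # -> [2. 1.]
print(sample_size_f(np.ones((2, 3))))         # -> 6.0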
Example #4
def summarize_pcoas(master_pcoa, support_pcoas, method='IQR', apply_procrustes=True):
    """returns the average PCoA vector values for the support pcoas

    Also returns the ranges as calculated with the specified method.
    The choices are:
        IQR: the Interquartile Range
        ideal fourths: Ideal fourths method as implemented in scipy
    """
    if apply_procrustes:
        # perform procrustes before averaging
        support_pcoas = [list(sp) for sp in support_pcoas]
        master_pcoa = list(master_pcoa)
        for i, pcoa in enumerate(support_pcoas):
            master_std, pcoa_std, m_squared = procrustes(master_pcoa[1], pcoa[1])
            support_pcoas[i][1] = pcoa_std
        # procrustes standardises its first argument the same way on every
        # call, so master_std from the last iteration is representative
        master_pcoa[1] = master_std

    m_matrix = master_pcoa[1]
    m_eigvals = master_pcoa[2]
    m_names = master_pcoa[0]
    jn_flipped_matrices = []
    all_eigvals = []
    for rep in support_pcoas:
        matrix = rep[1]
        eigvals = rep[2]
        all_eigvals.append(eigvals)
        jn_flipped_matrices.append(_flip_vectors(matrix, m_matrix))
    matrix_average, matrix_low, matrix_high = _compute_jn_pcoa_avg_ranges(
        jn_flipped_matrices, method)
    # compute average eigvals
    all_eigvals_stack = vstack(all_eigvals)
    eigval_sum = numpy_sum(all_eigvals_stack, axis=0)
    eigval_average = eigval_sum / float(len(all_eigvals))
    return matrix_average, matrix_low, matrix_high, eigval_average, m_names
Example #5
def downsample_presence_array(presence_array, resolution):
    """
    Downsamples a 1D presence array given the specified resolution.
    Sums the presence across bins given the resolution size, so
    [0, 1, 1, 1, 0, 0] for resolution == 3 becomes
    [2, 1]
    """
    return [numpy_sum(presence_array[index:index+resolution])
            for index in range(0, len(presence_array), resolution)]
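For instance, the docstring's own case (a sketch assuming `numpy_sum` is `numpy.sum`):

print(downsample_presence_array([0, 1, 1, 1, 0, 0], resolution=3))  # bins of 3 -> [2, 1]
print(downsample_presence_array([1, 1, 1, 1, 1], resolution=2))     # final partial bin -> [2, 2, 1]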
Example #6
def fill_virtual_evidence_cells(prior_input_array, num_labels):
    """
    For genomic positions which have at least one, but not all priors specified,
    this function will apply a uniform prior to all remaining labels.
    Indexes where no prior was ever specified will remain zero for downsampling

    Example:
    INPUT:
    [[0.5, 0.2, None, None, None],
     [None, 0.4, None, None, None],
     [None, None, None, None, None]]

    OUTPUT:
    [[0.5, 0.2, 0.10, 0.10, 0.10],
     [0.15, 0.4, 0.15, 0.15, 0.15],
     [0.0 ... 0.0]]
    """

    prior_array = zeros((len(prior_input_array), num_labels))
    for index, prior_input in enumerate(prior_input_array):
        # Only priors which were specified in the input file will be set;
        # unset labels will still be None. The comparison must be an
        # elementwise "!= None", not "is not None"
        num_prior_labels = numpy_sum(prior_input != None)
        if num_prior_labels:
            prior_list_values = list(filter(None, prior_input))

            # Check if priors should be treated as ratios or percentages
            if numpy_sum(prior_list_values) < 1:
                remaining_probability = 1 - numpy_sum(prior_list_values)
            else:
                remaining_probability = 0

            # divide remaining probability uniformly amongst the remaining labels
            prior_input[prior_input == None] = (
                remaining_probability / (num_labels - num_prior_labels))

            prior_array[index] = prior_input

    return prior_array
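A sketch of the docstring example; the rows must be object-dtype arrays so that the elementwise `!= None` comparison and the fancy assignment both work:

import numpy as np

prior_input_array = [
    np.array([0.5, 0.2, None, None, None], dtype=object),
    np.array([None, 0.4, None, None, None], dtype=object),
    np.array([None, None, None, None, None], dtype=object),
]
print(fill_virtual_evidence_cells(prior_input_array, num_labels=5))
# row 0 -> [0.5, 0.2, 0.1, 0.1, 0.1]
# row 1 -> [0.15, 0.4, 0.15, 0.15, 0.15]
# row 2 -> all zeros (no prior was ever specified)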
Example #7
def normalise(raw_counts):
    """Normalise raw counts into relative abundance.

    Args:
        raw_counts (class `numpy.ndarray`): Array of raw counts.

    Returns:
        class `numpy.ndarray`: Normalised data.

    """
    sum_values = numpy_sum(raw_counts)

    if not sum_values:
        raise RuntimeWarning('All values in input are 0.')
    return raw_counts / sum_values
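Usage sketch:

import numpy as np

print(normalise(np.array([2, 3, 5])))  # -> [0.2 0.3 0.5]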
Example #8
def aggregate_level(results, position):
    """Aggregate abundance of metagenomes by taxonomic level.

    Args:
        results (dict): Mapping of tab-separated taxonomy strings to abundances.
        position (int): Position of level in the results.

    Returns:
        dict: Aggregated result for targeted taxonomy level.

    """
    level_results = defaultdict(list)

    for all_taxa in results:
        taxa = all_taxa.split("\t")[position]
        abundance = results[all_taxa]
        level_results[taxa].append(abundance)

    return {temp_taxa: numpy_sum(level_results[temp_taxa], axis=0) for temp_taxa in level_results}
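A sketch with hypothetical inputs: `results` maps tab-separated lineage strings to per-sample abundance lists, and `position` selects the taxonomic level to aggregate on.

results = {
    "Bacteria\tFirmicutes": [1, 2],
    "Bacteria\tProteobacteria": [3, 4],
    "Archaea\tEuryarchaeota": [5, 6],
}
print(aggregate_level(results, position=0))
# -> {'Bacteria': array([4, 6]), 'Archaea': array([5, 6])}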
Example #9
def _compute_jn_pcoa_avg_ranges(jn_flipped_matrices, method):
    """Computes PCoA average and ranges for jackknife plotting

    returns 1) an array of jn_averages
             2) an array of upper values of the ranges
            3) an array of lower values for the ranges

    method: the method by which to calculate the range
        IQR: Interquartile Range
        ideal fourths: Ideal fourths method as implemented in scipy
    """
    x, y = shape(jn_flipped_matrices[0])
    all_flat_matrices = [matrix.ravel() for matrix in jn_flipped_matrices]
    summary_matrix = vstack(all_flat_matrices)
    matrix_sum = numpy_sum(summary_matrix, axis=0)
    matrix_average = matrix_sum / float(len(jn_flipped_matrices))
    matrix_average = matrix_average.reshape(x, y)
    if method == 'IQR':
        result = matrix_IQR(summary_matrix)
        matrix_low = result[0].reshape(x, y)
        matrix_high = result[1].reshape(x, y)
    elif method == 'ideal_fourths':
        result = idealfourths(summary_matrix, axis=0)
        matrix_low = result[0].reshape(x, y)
        matrix_high = result[1].reshape(x, y)
    elif method == "sdev":
        # calculate std error for each sample in each dimension
        sdevs = zeros(shape=[x, y])
        for j in xrange(y):
            for i in xrange(x):
                vals = array([pcoa[i][j] for pcoa in jn_flipped_matrices])
                sdevs[i, j] = vals.std(ddof=1)
        matrix_low = -sdevs / 2
        matrix_high = sdevs / 2

    return matrix_average, matrix_low, matrix_high
Example #10
def calc_stats(des_colors,curr_colors,curr_entered,req,X,n_to_try):

    # Create modified desired/current lists, so they are in the format [1,1,2,2,3,3]
    desired = []
    for i in range(0,3):
        for j in range(0,des_colors[i]):
            desired.append(i+1)

    current = []
    for i in range(0,3):
        for j in range(0,curr_colors[i]):
            current.append(i+1)        

    # Color weights
    p = [r+X for r in req]
    sump = float(numpy_sum(p))
    Pr = p[0] / sump
    Pg = p[1] / sump
    Pb = p[2] / sump

    N = sum(des_colors)
    Nfact = factorial(N)

    combs_iter = combinations_with_replacement([r+1 for r in range(3)],N)

    # Bring this into a usable form
    combs = []
    for i in combs_iter:
        combs.append(i)

    Ncombs = len(combs)
    Pcomb = []
    Pperm = []

    # Calculate probability of each combination
    for i in combs:
        Nr = i.count(1)
        Ng = i.count(2)
        Nb = i.count(3)
        Pperm.append(Pr**Nr * Pg**Ng * Pb**Nb)
        Pcomb.append(Nfact/(factorial(Nr)*factorial(Ng)*factorial(Nb)) * Pr**Nr * Pg**Ng * Pb**Nb)
    
    # Create Transition Matrix
    T = zeros((Ncombs,Ncombs))
    count = 0
    ind = 100 # Set high so that if we find current first, it'll be lower than ind
    for i in combs:
        T[count,:] = Pcomb[:]
        # Self transition prob is lowered, can't return same permutation
        T[count,count] = (Pcomb[count]-Pperm[count])/(1-Pperm[count])
        total = numpy_sum(T[count,:])
        T[count,:] = divide(T[count,:],total)

        # If we reach the desired state we're done
        if list(i) == desired:
            T[count,:] = 0
            T[count,count] = 1
            ind = count
        if list(i) == current:
            curr_ind = count
            if curr_ind > ind:
                curr_ind-=1 # This adjusts the index to fit smaller matrix t
        
        count += 1

    # Create Fundamental Matrix & Identity Matrix
    # Notation from  http://en.wikipedia.org/wiki/Absorbing_Markov_chain
    Q = zeros((Ncombs-1,Ncombs-1))
    I = zeros((Ncombs-1,Ncombs-1))

    for i in range(len(T)):
        if i < ind:
            xind = i
        elif i > ind:
            xind = i-1
        else:
            xind = -1
        
        for j in range(len(T)):
            if j < ind:
                yind = j
            elif j > ind:
                yind = j-1
            else:
                yind = -1
            if xind > -1 and yind > -1:
                Q[xind,yind] = T[i,j]
                if xind == yind:
                    I[xind,yind] = 1
                          
    Nmat = matrix(I-Q)
    Nmat = Nmat.I
    t = Nmat * ones((len(Nmat),1))
    if curr_entered == 0:
        mean_chromes = float(mean(t))
    else:
        mean_chromes = float(t[curr_ind])
    

    # Modify T so that T[:,ind] represents the matrix R, probability of going from any transient state to the absorbing state, multiply by the probability of being in any transient state
    T[ind,ind] = 0
    R = zeros((Ncombs-1,1))
    count = 0
    for val in T[:,ind]:
        if val != 0:
            R[count,0] = val
            count += 1

    prob_trans = zeros((1,Ncombs-1))
    count=0
    for i in range(len(Pcomb)):
        if i != ind:
            prob_trans[0,count] = Pcomb[i]
            count += 1
    prob_trans = divide(prob_trans,numpy_sum(prob_trans))

    prob_per_chrome = float(dot(prob_trans,R))

    # Calculate Median & cdf after n_to_try chromes
    stopping_point = 5000 # This is where we stop calculating exactly and start approximating
    cum_prob_failure = 1
    found_median = 0

    # Calculate probability after some number of chromes (inexact)
    curr_state = zeros((1,Ncombs-1))
    if curr_entered == 0:
        count = 0
        for i in range(len(Pcomb)):
            if i != ind:
                curr_state[0,count] = Pcomb[i]
                count += 1
        
    else:
        # We know where we're starting, so let's calculate more exactly
        curr_state[0,curr_ind] = 1 # We know 100% what state we start in
        
    # Force sum(curr_state) = 1
    curr_state = divide(curr_state,numpy_sum(curr_state))

    for i in range(stopping_point):
        prob_failure = 1 - float(dot(curr_state,R))
        cum_prob_failure = cum_prob_failure * prob_failure
            
        curr_state = dot(curr_state,Q) # Update current state
        curr_state = divide(curr_state,numpy_sum(curr_state)) # Normalize to create pdf

        if i+1 == n_to_try:
            prob_so_far = 1 - cum_prob_failure

        prob_success = 1 - cum_prob_failure
        if prob_success >= .5 and found_median==0:
            median_chromes = i+1
            found_median = 1

    prob_per_chrome = float(dot(curr_state,R))

    if n_to_try > stopping_point: # Approximate the remaining chromes using prob_per_chrome
        n_leftover = n_to_try - stopping_point
        cum_prob_failure = cum_prob_failure * (1-prob_per_chrome)**n_leftover
        prob_so_far = 1 - cum_prob_failure
    # We may need to still calculate the median
    if found_median == 0:
        # Calculate the median
        # The solution for n_left in .5 = 1 - cum_prob_failure*(1-p)**n_left is
        # (log(1/(2*cum_prob_failure)) + 2i * pi * n_inf)/log(1-p), where log is the natural log
        # Taking the real portion of this complex number gives the median
        n_inf = 1 # This can be any real integer
        complex_num = (log(1/(2*(1-prob_so_far))) + 2j*pi*n_inf)/log(1-prob_per_chrome)
        chromes_left = float(complex_num.real)
        median_chromes = n_to_try + chromes_left

    return median_chromes, mean_chromes, n_to_try, prob_so_far, prob_per_chrome
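A hypothetical call, with argument values chosen purely for illustration (desired socket colours, current socket colours, whether the current item was entered, the item's attribute requirements, the weighting constant X, and the number of chromatics to simulate):

median_chromes, mean_chromes, n_to_try, prob_so_far, prob_per_chrome = calc_stats(
    [2, 1, 0],   # desired: 2 red, 1 green, 0 blue
    [0, 0, 0],   # current colours unknown
    0,           # curr_entered
    [50, 0, 0],  # str/dex/int requirements
    32,          # X
    1000)        # n_to_try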
Example #11
def process_data(request):
    # Let's process all the input
    error_list = []
    [des_r,des_g,des_b], error_list = check_input(['des_r','des_g','des_b'],1,6,request,error_list)
    [curr_r,curr_g,curr_b], error_list = check_input(['curr_r','curr_g','curr_b'],0,6,request,error_list)

    [STR], error_list = check_input(['str'],0,'inf',request,error_list)
    [DEX], error_list = check_input(['dex'],0,'inf',request,error_list)
    [INT], error_list = check_input(['int'],0,'inf',request,error_list)
    [X], error_list = check_input(['X'],1,'inf',request,error_list)
    [n_to_try], error_list = check_input(['n_to_try'],1,'inf',request,error_list)

    # Formatting
    des_colors = [des_r,des_g,des_b]
    curr_colors = [curr_r,curr_g,curr_b]

    try:
        if numpy_sum(curr_colors) > 0: # Did they enter the current colors?
            curr_entered = 1
        else:
            curr_entered = 0
        type_error = 0
    except TypeError:
        curr_entered = 0
        type_error = 1
    try:
        # evaluated only to trigger a TypeError on invalid input
        numpy_sum(des_colors) > 0
    except TypeError:
        type_error = 1
        

    # Default to this for error messages
    c = { 'des_r': des_r,
        'des_g': des_g,
        'des_b': des_b,
        'curr_r': curr_r,
        'curr_g': curr_g,
        'curr_b': curr_b,
        'str': STR,
        'dex': DEX,
        'int': INT,
        'X': X,
        'median_chromes': 0,
        'mean_chromes': 0,
        'n_to_try': n_to_try,
        'prob_so_far': str(1.0)}


    # Check for Errors
    if type_error == 0:
        if numpy_sum(des_colors) == 0: # No desired colors entered
            error_list.append("Please enter a valid desired item configuration")
        elif des_colors == curr_colors: # Equivalent items entered
            error_list.append("You apparently already have the item you want")
        elif numpy_sum(des_colors) != numpy_sum(curr_colors) and curr_entered==1: # Diff num sockets
            error_list.append("Your current item has a different number of sockets than your desired item")

    # Quit on error
    if len(error_list) > 0:
        c['error_message'] = error_list
        return c

    req = [STR,DEX,INT]

    # Perform statistics calculations
    median_chromes, mean_chromes, n_to_try, prob_so_far, prob_per_chrome = calc_stats(des_colors,curr_colors,curr_entered,req,X,n_to_try)

    # Calculate Vorici Results
    vorici_1 = 4
    vorici_2_same = 25
    vorici_3_same = 285
    vorici_2_diff = 15
    vorici_3_diff = 100

    # Determine possibly relevant Vorici mods
    poss_vorici = []
    if des_r >= 1:
        poss_vorici.append([1,0,0])
        if des_r >= 2:
            poss_vorici.append([2,0,0])
        if des_g >= 2:
            poss_vorici.append([1,2,0])
        if des_b >= 2:
            poss_vorici.append([1,0,2])

    if des_g >= 1:
        poss_vorici.append([0,1,0])
        if des_r >= 2:
            poss_vorici.append([2,1,0])
        if des_g >= 2:
            poss_vorici.append([0,2,0])
        if des_b >= 2:
            poss_vorici.append([0,1,2])

    if des_b >= 1:
        poss_vorici.append([0,0,1])
        if des_r >= 2:
            poss_vorici.append([2,0,1])
        if des_g >= 2:
            poss_vorici.append([0,2,1])
        if des_b >= 2:
            poss_vorici.append([0,0,2])
        
    # Calculate chances for each Vorici mod
    # NOTE: Ignores curr_colors, also only capturing mean values
    vorici_means = []
    vorici_dict = {}
    best_vorici = -1
    best_vorici_mean = -1
    for vorici_colors in poss_vorici:
        altered_colors = subtract(des_colors,vorici_colors)
        vor_median_chromes, vor_mean_chromes, vor_n_to_try, vor_prob_so_far, vor_prob_per_chrome = calc_stats(altered_colors,[0,0,0],0,req,X,n_to_try)

        vorici_means.append(vor_mean_chromes)

        if vor_mean_chromes < best_vorici_mean or best_vorici_mean == -1:
            best_vorici_mean = vor_mean_chromes
            best_vorici = vorici_colors

    # Determine proper multiplier
    if numpy_sum(best_vorici) == 1:
        vorici_mult = vorici_1
    elif numpy_sum(best_vorici) == 2:
        if 1 in best_vorici:
            vorici_mult = vorici_2_diff
        else:
            vorici_mult = vorici_2_same
    elif numpy_sum(best_vorici) == 3:
        if 3 in best_vorici:
            vorici_mult = vorici_3_same
        else:
            vorici_mult = vorici_3_diff

    best_vorici_cost = best_vorici_mean * vorici_mult

    

    # Return Information
    c = { 'des_r': des_r,
        'des_g': des_g,
        'des_b': des_b,
        'curr_r': curr_r,
        'curr_g': curr_g,
        'curr_b': curr_b,
        'str': STR,
        'dex': DEX,
        'int': INT,
        'X': X,
        'median_chromes': round(median_chromes,1),
        'mean_chromes': round(mean_chromes,1),
        'n_to_try': n_to_try,
        'prob_so_far': str(round(prob_so_far*100,1)) + '%',
        'n_prob': str(n_to_try) + '_' + str(prob_per_chrome),
        'graph_url': 'graphs/' + str(n_to_try) + '_' + str(prob_per_chrome),
        'intro_message': 0,
        'vorici_colors': best_vorici,
        'vorici_mean': best_vorici_mean,
        'vorici_cost': best_vorici_cost}
    return c
Example #12
File: loss.py Project: afcarl/nerv
def cross_entropy(predictions, output_data):
    # assumes predictions > 0 wherever output_data is nonzero, so log is finite
    return -numpy_sum(output_data * log(predictions))
Example #13
    def compute_centers(self):
        # Fuzzy c-means center update: each center is the membership-weighted
        # mean of the training points,
        #   c_j = sum_k u[k, j]**m * x_k / sum_k u[k, j]**m
        mm = self.__membership_degrees**self.fuzzyness_coefficient
        c = dot(self.__training_set.transpose(), mm) / numpy_sum(mm, axis=0)
        self.__centers = c.transpose()
        return self.__centers
Example #14
File: loss.py Project: afcarl/nerv
def sum_squared(predictions, output_data):
    # 0.5 rather than 1 / 2, which evaluates to 0 under Python 2
    return 0.5 * numpy_sum((output_data - predictions)**2)
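A quick sanity check of the two loss functions above (a sketch; `log` is assumed to be `numpy.log`):

import numpy as np

predictions = np.array([0.7, 0.2, 0.1])
targets = np.array([1.0, 0.0, 0.0])
print(sum_squared(predictions, targets))    # 0.5 * (0.09 + 0.04 + 0.01) = 0.07
print(cross_entropy(predictions, targets))  # -log(0.7), about 0.357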