def row_adjust_priority(mat, row, p, cluster_nodes=None, p0mode=None,
                        limit_matrix_calc=calculus, normalize_to_orig=True):
    '''
    Adjusts a row of the matrix and recalculates the priorities of all the nodes.

    :param mat: The scaled supermatrix to perform the calculation on
    :param row: The row to use for ANP row sensitivity
    :param p: The p-value to use for the row adjustment
    :param cluster_nodes: If you wish to normalize by cluster, this should be the
        indices of the nodes that are in row's cluster (including row itself).
    :param p0mode: This controls the calculation and has 3 cases:
        Case 1: if it is a float, you are directly setting the p0 value to
            whatever p0mode is.
        Case 2: if it is an integer, this is the smart p0 mode, and it treats
            p0mode as the index of the alternative/node to make continuous.
        Case 3: otherwise we assume you want the original weights to be the
            p0 value.
    :param limit_matrix_calc: A function which takes a single input, the matrix
        to take the limit of.
    :param normalize_to_orig: If True we normalize the returned priority vector
        so that its [row] entry has the same value as the original and the other
        values are rescaled. Otherwise we simply normalize the priority vector
        directly.
    :return: The recalculated priority vector after the row adjustment.
    '''
    if normalize_to_orig:
        old_lmt = limit_matrix_calc(mat)
        old_pri = priority_from_limit(old_lmt)
        old_val = old_pri[row]
        old_sum = sum(old_pri) - old_val
    else:
        old_sum = 1
    new_mat = row_adjust(mat, row, p, cluster_nodes=cluster_nodes, p0mode=p0mode)
    new_lmt = limit_matrix_calc(new_mat)
    new_pri = priority_from_limit(new_lmt)
    # Rescale the non-row entries so they sum to old_sum, keeping row's new value
    row_pri = new_pri[row]
    new_pri[row] = 0
    new_pri *= old_sum / sum(new_pri)
    new_pri[row] = row_pri
    return new_pri
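# Example (hypothetical usage sketch): the 3x3 scaled supermatrix below is made up
# purely for illustration. We raise row 0's p-value to 0.8 with a direct p0 of 0.5
# and look at the adjusted priorities.
#
#   import numpy as np
#   mat = np.array([[0.30, 0.25, 0.40],
#                   [0.30, 0.50, 0.35],
#                   [0.40, 0.25, 0.25]])
#   adjusted = row_adjust_priority(mat, row=0, p=0.8, p0mode=0.5)
#   # With normalize_to_orig=True (the default), the non-row entries of
#   # `adjusted` sum to the same total as in the original priority vector.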
def lmsynth(scaled_mat, alts):
    '''
    Calculates the limit matrix and extracts the priorities for the listed
    alternative indices.
    '''
    limit_mat = lm.calculus(scaled_mat)
    lp = lm.priority_from_limit(limit_mat)[alts]
    return lp / lp.sum()
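# Example (hypothetical usage sketch): assuming `lm` is the limit-matrix module
# imported elsewhere in this file, lmsynth pulls the priorities of the given
# alternative indices out of the limit matrix and renormalizes them to sum to 1.
# The matrix and alternative indices below are made up for illustration.
#
#   import numpy as np
#   scaled = np.array([[0.30, 0.25, 0.40],
#                      [0.30, 0.50, 0.35],
#                      [0.40, 0.25, 0.25]])
#   alt_scores = lmsynth(scaled, alts=[1, 2])  # priorities of nodes 1 and 2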
def influence_limit(mat, row, cluster_nodes=None, influence_nodes=None, delta=1e-6,
                    p0mode=0.5, limit_matrix_calc=calculus):
    '''
    Calculates the limit influence score of the influence_nodes with respect to row.

    :param mat: The scaled supermatrix to perform the calculation on
    :param row: The row to use for ANP row sensitivity
    :param cluster_nodes: If you wish to normalize by cluster, this should be the
        indices of the nodes that are in row's cluster (including row itself).
    :param influence_nodes: The indices of the nodes to calculate the influence of,
        with respect to row. If None it calculates the influence of all nodes
        other than row.
    :param delta: We use 1-delta as the p-value to plug in, to approximate the
        limit as p -> 1.
    :param p0mode: This controls the calculation and has 3 cases:
        Case 1: if it is a float, you are directly setting the p0 value to
            whatever p0mode is.
        Case 2: if it is an integer, this is the smart p0 mode, and it treats
            p0mode as the index of the alternative/node to make continuous.
        Case 3: otherwise we assume you want the original weights to be the
            p0 value.
    :param limit_matrix_calc: A function which takes a single input, the matrix
        to take the limit of.
    :return: A tuple of 2 items. The first is a pandas.Series whose indices are
        'Node 1', 'Node 2', ... (the numbers after "Node " are the influence_node
        indices) and whose values are the limit influence values. The second is a
        pandas.Series with the same indices whose values are the p0 values used
        for each alternative.
    '''
    if not p0mode_is_direct(p0mode):
        raise ValueError("p0mode must be a direct p0 value for limit influence")
    n = len(mat)
    if influence_nodes is None:
        influence_nodes = [i for i in range(n) if i != row]
    limits = pd.Series(dtype=float)
    p0s = pd.Series(dtype=float)
    # Approximate the limit as p -> 1 by plugging in p = 1 - delta
    p_limit = 1 - delta
    for alt in influence_nodes:
        if isinstance(p0mode, int):
            # This means p0mode is smart, and we should do it smart wrt the alt
            p0mode = alt
        new_mat = row_adjust(mat, row, p_limit, cluster_nodes=cluster_nodes,
                             p0mode=p0mode)
        new_lmt = limit_matrix_calc(new_mat)
        new_pri = priority_from_limit(new_lmt)
        # Renormalize the non-row entries, keeping row's value fixed
        row_pri = new_pri[row]
        new_pri[row] = 0
        new_pri /= sum(new_pri)
        new_pri[row] = row_pri
        y = new_pri[alt]
        label = "Node " + str(alt)
        limits[label] = y
        p0 = calcp0(mat, row, cluster_nodes, mat[row, alt], p0mode)
        p0s[label] = p0
    return limits, p0s
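# Example (hypothetical usage sketch): limit influence of the other nodes with
# respect to row 0, using a direct p0 of 0.5. The matrix is made up for
# illustration; `limits` is indexed by 'Node 1', 'Node 2' and `p0s` holds the
# p0 value used for each of them.
#
#   import numpy as np
#   mat = np.array([[0.30, 0.25, 0.40],
#                   [0.30, 0.50, 0.35],
#                   [0.40, 0.25, 0.25]])
#   limits, p0s = influence_limit(mat, row=0, p0mode=0.5)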
def influence_table(mat, row, pvals=None, cluster_nodes=None, influence_nodes=None,
                    p0mode=None, limit_matrix_calc=calculus, graph=True,
                    return_p0vals=False, node_names=None):
    '''
    Calculates the direct influence scores, i.e. it performs ANP row sensitivity
    for each of the pvals values and stores the new scores of the influence_nodes.

    :param mat: The scaled supermatrix to perform the calculation on
    :param row: The row to use for ANP row sensitivity
    :param pvals: The values to set p to; this should be a list (or list-like)
        object of values between 0 and 1. If None, 1/50, 2/50, ..., 49/50 are used.
    :param cluster_nodes: If you wish to normalize by cluster, this should be the
        indices of the nodes that are in row's cluster (including row itself).
    :param influence_nodes: The indices of the nodes to calculate the influence of,
        with respect to row. If None it calculates the influence of all nodes
        other than row.
    :param p0mode: This controls the calculation and has 3 cases:
        Case 1: if it is a float, you are directly setting the p0 value to
            whatever p0mode is.
        Case 2: if it is an integer, this is the smart p0 mode, and it treats
            p0mode as the index of the alternative/node to make continuous.
        Case 3: otherwise we assume you want the original weights to be the
            p0 value.
    :param limit_matrix_calc: A function which takes a single input, the matrix
        to take the limit of.
    :param graph: If True, we draw a matplotlib graph; otherwise we return the
        pandas.DataFrame of results (and optionally the p0 values).
    :param return_p0vals: If True and we are not graphing, we return a tuple of
        the dataframe of results and a Series whose index is the node names and
        whose values are the (x, y) positions of the resting p0 values.
    :param node_names: If None, we use "Node 0", "Node 1", ... to label nodes;
        otherwise we use this.
    :return: If graph=True, we return nothing, but create a matplotlib figure and
        show it. Otherwise, if return_p0vals is True, we return a pair of items.
        The first is the dataframe of results, which has an 'x' column containing
        the pvals and one column per influence node (labeled with the node names)
        containing the resulting influence scores (i.e. changed priorities). The
        second is a pd.Series indexed by the node names whose values are pairs
        (x, y), where x is the p0 value for the given alternative and y is the
        influence score of that alternative at that p-value. If return_p0vals is
        False we return only the dataframe.
    '''
    if pvals is None:
        xs = [i / 50 for i in range(1, 50)]
    else:
        xs = pvals
    n = len(mat)
    if influence_nodes is None:
        influence_nodes = [i for i in range(n) if i != row]
    df = pd.DataFrame()
    p0vals = pd.Series(dtype=object)
    df['x'] = xs
    if node_names is None:
        node_names = ["Node " + str(i) for i in range(n)]
    for alt in influence_nodes:
        ys = []
        if isinstance(p0mode, int):
            # This means p0mode is smart, and we should do it smart wrt the alt
            p0mode = alt
        for x in xs:
            new_mat = row_adjust(mat, row, x, cluster_nodes=cluster_nodes,
                                 p0mode=p0mode)
            new_lmt = limit_matrix_calc(new_mat)
            new_pri = priority_from_limit(new_lmt)
            new_pri[row] = 0
            new_pri /= sum(new_pri)
            ys.append(new_pri[alt])
        label = node_names[alt]
        p0 = calcp0(mat, row, cluster_nodes, mat[row, alt], p0mode)
        y0 = linear_interpolate(xs, ys, p0)
        df[label] = ys
        p0vals[label] = (p0, y0)
    # Normalize each row of node scores so they sum to 1
    for i in range(len(df.index)):
        s = sum(df.iloc[i, 1:])
        if s != 0:
            df.iloc[i, 1:] /= s
    # Recompute the interpolated p0 y-values on the normalized scores
    for label, pval in p0vals.items():
        p0 = pval[0]
        y0 = linear_interpolate(xs, df.loc[:, label], p0)
        p0vals[label] = (p0, y0)
    if graph:
        influence_table_plot(df, p0vals)
    else:
        if return_p0vals:
            return df, p0vals
        else:
            return df
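# Example (hypothetical usage sketch): tabulate (rather than plot) the direct
# influence of row 0 on the other nodes over the default p grid. The matrix and
# node names are made up for illustration.
#
#   import numpy as np
#   mat = np.array([[0.30, 0.25, 0.40],
#                   [0.30, 0.50, 0.35],
#                   [0.40, 0.25, 0.25]])
#   df, p0vals = influence_table(mat, row=0, p0mode=0.5, graph=False,
#                                return_p0vals=True,
#                                node_names=["Goal", "Crit A", "Crit B"])
#   # df has an 'x' column of p-values and one column per influence node;
#   # p0vals maps each node name to its resting (p0, score) pair.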
def influence_marginal(mat, row, influence_nodes=None, cluster_nodes=None,
                       left_or_right=None, delta=1e-6, p0mode=0.5,
                       limit_matrix_calc=calculus):
    '''
    Calculates the marginal influence.

    :param mat: The scaled supermatrix to calculate the marginal influence on
    :param row: The index of the row to perform the marginal influence on
    :param influence_nodes: The nodes to calculate the marginal influence of the
        row upon; if None, all nodes are used.
    :param cluster_nodes: The other nodes in the parameter row's cluster
        (including row itself), so we can scale by cluster. If None we do not
        scale by cluster.
    :param left_or_right: An integer telling whether we should do the left-hand
        side derivative, the right-hand side derivative, or average the two.
        If left_or_right < 0, we do the LHS derivative. If left_or_right > 0,
        we do the RHS derivative. If left_or_right is 0 (or None), we average
        the LHS and RHS derivatives.
    :param delta: The delta_x to use for the derivative calculation.
    :param p0mode: This controls the calculation and has 3 cases:
        Case 1: if it is a float, you are directly setting the p0 value to
            whatever p0mode is.
        Case 2: if it is an integer, this is the smart p0 mode, and it treats
            p0mode as the index of the alternative/node to make continuous.
        Case 3: otherwise we assume you want the original weights to be the
            p0 value.
    :param limit_matrix_calc: A function which takes a single input, the matrix
        to take the limit of.
    :return: A pandas.Series whose indices are the influence_nodes and whose
        values are the marginal influence scores of those nodes with respect to
        the given row.
    '''
    n = len(mat)
    if influence_nodes is None:
        influence_nodes = list(range(n))
    if left_or_right is None:
        # Default to averaging the left- and right-hand derivatives
        left_or_right = 0
    orig_lim = limit_matrix_calc(mat)
    orig_pri = priority_from_limit(orig_lim)[influence_nodes]
    orig_pri /= sum(abs(orig_pri))
    if not p0mode_is_direct(p0mode):
        raise ValueError("p0mode must be a direct p0 value for marginal influence")
    p0 = p0mode
    if left_or_right <= 0:
        # Calculate the left-hand derivative
        new_mat = row_adjust(mat, row, p0 - delta, cluster_nodes, p0mode=p0mode)
        lim = limit_matrix_calc(new_mat)
        pri = priority_from_limit(lim)[influence_nodes]
        pri /= sum(abs(pri))
        left_deriv = (pri - orig_pri) / -delta
        if left_or_right < 0:
            # Only want the left-hand derivative
            return pd.Series(data=left_deriv, index=influence_nodes)
    if left_or_right >= 0:
        # Calculate the right-hand derivative
        new_mat = row_adjust(mat, row, p0 + delta, cluster_nodes, p0mode=p0mode)
        lim = limit_matrix_calc(new_mat)
        pri = priority_from_limit(lim)[influence_nodes]
        pri /= sum(abs(pri))
        right_deriv = (pri - orig_pri) / delta
        if left_or_right > 0:
            # Only want the right-hand derivative
            return pd.Series(data=right_deriv, index=influence_nodes)
    # If we make it here, average the left- and right-hand derivatives
    return pd.Series(data=(left_deriv + right_deriv) / 2, index=influence_nodes)
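# Example (hypothetical usage sketch): marginal influence of row 0 on all nodes,
# averaging the left- and right-hand derivatives (left_or_right=0) around a
# direct p0 of 0.5. The matrix is made up for illustration.
#
#   import numpy as np
#   mat = np.array([[0.30, 0.25, 0.40],
#                   [0.30, 0.50, 0.35],
#                   [0.40, 0.25, 0.25]])
#   mi = influence_marginal(mat, row=0, left_or_right=0, p0mode=0.5)
#   # mi is a pd.Series indexed by node index; positive values mean the node's
#   # priority rises as row 0's weight increases.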