def get_selection_S(F):
    """
    Compute the matrix of pairwise selection differences.

    The F and S notation is from Yang and Nielsen 2008.
    @param F: a selection value for each codon, up to an additive constant
    @return: selection differences F_j - F_i, also known as S_ij
    """
    ones = algopy.ones_like(F)
    # Entry (i, j) of row_part is F[j]; entry (i, j) of col_part is F[i].
    row_part = algopy.outer(ones, F)
    col_part = algopy.outer(F, ones)
    return row_part - col_part
def get_selection_S(F):
    """
    Compute the matrix of pairwise selection differences.

    The F and S notation is from Yang and Nielsen 2008.
    Speed matters.
    @param F: a selection value for each codon, up to an additive constant
    @return: selection differences F_j - F_i, also known as S_ij
    """
    # S_ij = F_j - F_i: outer(e, F) puts F_j in every row while
    # outer(F, e) puts F_i in every column.
    # (The old FIXME and hand-rolled ones-vector loop were removed:
    # algopy.ones_like is available and already in use here.)
    e = algopy.ones_like(F)
    return algopy.outer(e, F) - algopy.outer(F, e)
def test_outer(self):
    """trace(outer(u, v)) and dot(u, v) must have identical Jacobians."""
    data = numpy.arange(4)

    # Trace y = trace(outer(u, v)) on the first graph.
    graph_outer = algopy.CGraph()
    fx = algopy.Function(data)
    u = fx[:fx.size // 2]
    v = fx[fx.size // 2:]
    y = algopy.trace(algopy.outer(u, v))
    graph_outer.trace_off()
    graph_outer.independentFunctionList = [fx]
    graph_outer.dependentFunctionList = [y]

    # Trace the equivalent z = dot(u, v) on a second graph,
    # re-wrapping the Function from the first trace as before.
    graph_dot = algopy.CGraph()
    gx = algopy.Function(fx)
    u2 = gx[:gx.size // 2]
    v2 = gx[gx.size // 2:]
    z = algopy.dot(u2, v2)
    graph_dot.trace_off()
    graph_dot.independentFunctionList = [gx]
    graph_dot.dependentFunctionList = [z]

    assert_array_almost_equal(
        graph_outer.jacobian(numpy.arange(4)),
        graph_dot.jacobian(numpy.arange(4)))