def scales_and_derivatives(self, phi):
    """Calculate the overall scale factor at each position in 'phi' and the
    derivatives of that scale factor wrt all parameters of the model.

    Returns a tuple (overall_scale, grad), where overall_scale is the
    element-wise product of the scales from each component and grad is a
    sparse matrix of derivatives with one column per model parameter,
    component blocks laid side by side in component order.
    """

    # obtain data from all scale factor components
    data = [f.get_factors_and_derivatives(phi) for f in self._factors]
    # FIXME only using phi at the moment. In future will need other information
    # such as s1 directions or central impacts, and will have to pass the right
    # bits of information to the right ScaleFactor components contained here
    scale_components, grad_components = zip(*data)

    # the overall scale is the product of the separate scale components
    overall_scale = reduce(lambda fac1, fac2: fac1 * fac2, scale_components)

    # to convert derivatives of each scale component to derivatives of the
    # overall scale we multiply by the product of the other scale components,
    # omitting the factor that has been differentiated
    if len(scale_components) > 1:
        omit_one_prods = products_omitting_one_item(scale_components)
        grad_components = [row_multiply(g, coeff) for g, coeff in
                           zip(grad_components, omit_one_prods)]

    # Now combine the gradient components by columns to produce a single
    # gradient matrix for the overall scale
    each_ncol = [g.n_cols for g in grad_components]
    tot_ncol = sum(each_ncol)
    grad = sparse.matrix(len(overall_scale), tot_ncol)

    # BUGFIX: the start column of each component's block is the cumulative
    # sum of the widths of all preceding blocks. The previous expression,
    # [0] + each_ncol[:-1], only shifted the list by one element, which
    # placed blocks at the wrong columns whenever there were more than two
    # scale components (e.g. widths [2, 3, 4] gave [0, 2, 3] not [0, 2, 5]).
    col_start = []
    running_total = 0
    for ncol in each_ncol:
        col_start.append(running_total)
        running_total += ncol

    for icol, g in zip(col_start, grad_components):
        grad.assign_block(g, 0, icol)

    return overall_scale, grad
def scales_and_derivatives(self, phi):
    """Calculate the overall scale factor at each position in 'phi' and the
    derivatives of that scale factor wrt all parameters of the model.

    Returns a tuple (overall_scale, grad), where overall_scale is the
    element-wise product of the scales from each component and grad is a
    sparse matrix of derivatives with one column per model parameter,
    component blocks laid side by side in component order.
    """

    # obtain data from all scale factor components
    data = [f.get_factors_and_derivatives(phi) for f in self._factors]
    # FIXME only using phi at the moment. In future will need other information
    # such as s1 directions or central impacts, and will have to pass the right
    # bits of information to the right ScaleFactor components contained here
    scale_components, grad_components = zip(*data)

    # the overall scale is the product of the separate scale components
    overall_scale = reduce(lambda fac1, fac2: fac1 * fac2, scale_components)

    # to convert derivatives of each scale component to derivatives of the
    # overall scale we multiply by the product of the other scale components,
    # omitting the factor that has been differentiated
    if len(scale_components) > 1:
        omit_one_prods = products_omitting_one_item(scale_components)
        grad_components = [row_multiply(g, coeff) for g, coeff in
                           zip(grad_components, omit_one_prods)]

    # Now combine the gradient components by columns to produce a single
    # gradient matrix for the overall scale
    each_ncol = [g.n_cols for g in grad_components]
    tot_ncol = sum(each_ncol)
    grad = sparse.matrix(len(overall_scale), tot_ncol)

    # BUGFIX: the start column of each component's block is the cumulative
    # sum of the widths of all preceding blocks. The previous expression,
    # [0] + each_ncol[:-1], only shifted the list by one element, which
    # placed blocks at the wrong columns whenever there were more than two
    # scale components (e.g. widths [2, 3, 4] gave [0, 2, 3] not [0, 2, 5]).
    col_start = []
    running_total = 0
    for ncol in each_ncol:
        col_start.append(running_total)
        running_total += ncol

    for icol, g in zip(col_start, grad_components):
        grad.assign_block(g, 0, icol)

    return overall_scale, grad
def test_products_omitting_one_item():
    """Compare products_omitting_one_item against a brute-force calculation
    over random integer sequences of various lengths."""

    def brute_force(items, omit_idx=0):
        """Explicit (slow) reference: multiply all items except the one
        at omit_idx."""
        remaining = list(items)
        del remaining[omit_idx]
        return reduce(lambda x, y: x * y, remaining)

    # try random sequences with lengths from 2 to 9 inclusive
    for length in range(2, 10):
        vals = [random.randrange(100) for _ in range(length)]
        fast = products_omitting_one_item(vals)
        slow = [brute_force(vals, i) for i in range(len(vals))]
        for got, expected in zip(fast, slow):
            assert got == expected
def test_products_omitting_one_item():
    """Compare products_omitting_one_item against a brute-force calculation
    over random integer sequences of various lengths."""

    def explicit_method(items, omit_idx=0):
        """Do the explicit (slow) calculation for comparison with the
        products_omitting_one_item function"""
        items = list(items)
        del items[omit_idx]
        return reduce(lambda x, y: x * y, items)

    # test various random sequences of lengths from 2 to 9 inclusive
    for l in range(2, 10):
        vals = [random.randrange(100) for i in range(l)]
        prods = products_omitting_one_item(vals)
        tst = [explicit_method(vals, i) for i in range(len(vals))]
        for a, b in zip(prods, tst):
            assert a == b

    # BUGFIX: use the print function form, which also works as a statement
    # in Python 2 (a single parenthesized expression prints identically),
    # instead of the Python 2-only 'print "OK"' statement that is a
    # SyntaxError under Python 3.
    print("OK")