import numpy as np

import utils  # local helper module providing force_array, dot_expand, diag_dot


def activity(x, encoders, alpha, bias):
    '''Implements J = alpha * x.dot(encoders) + bias, expanding dimensions
    if all arguments are 1-D. The result has shape (x.shape[0], encoders.shape[0]).

    * x: the range of values over which to evaluate the curves
    * encoders: vectors dotted with x
    * alpha: 1-D gain coefficients
    * bias: 1-D additive bias terms
    '''
    (x, encoders, alpha, bias) = utils.force_array((x, encoders, alpha, bias))
    return utils.dot_expand(x, encoders) * alpha + bias
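
# A minimal usage sketch for activity() with made-up values. It assumes
# utils.force_array coerces its inputs to NumPy arrays and utils.dot_expand
# performs an outer-style dot product of the sample points with the encoders,
# so the plain-NumPy line below mirrors what activity() computes.
def _activity_example():
    x = np.linspace(-1.0, 1.0, 50)[:, None]      # 50 sample points, shape (50, 1)
    encoders = np.array([[1.0], [-1.0], [1.0]])  # one row per neuron, shape (3, 1)
    alpha = np.array([10.0, 15.0, 5.0])          # per-neuron gains
    bias = np.array([-2.0, 1.0, 0.5])            # per-neuron biases
    # Equivalent of utils.dot_expand(x, encoders) * alpha + bias:
    J = x.dot(encoders.T) * alpha + bias         # input currents, shape (50, 3)
    return J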

def lif_fit(x_max, x_intercepts, y_targets, encoders, t_ref=0.002, t_rc=0.02):
    '''Returns stacked rows [alpha, J_bias] for the leaky integrate-and-fire
    (LIF) tuning model.
    '''
    (x_max, x_intercepts, y_targets, encoders) = utils.force_array(
        (x_max, x_intercepts, y_targets, encoders))
    # Invert the LIF rate equation so the rate hits y_targets at x_max,
    # with the current at the intercepts equal to the threshold of 1.
    B = np.exp((1. / t_rc) * (t_ref - 1. / y_targets))
    alpha = (1. / (1. - B) - 1.) / utils.diag_dot(x_max - x_intercepts, encoders)
    bias = 1. - alpha * utils.diag_dot(x_intercepts, encoders)
    return np.vstack((alpha, bias))
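
# A hedged single-neuron check of lif_fit() with made-up targets: solving the
# standard LIF rate equation a(J) = 1 / (t_ref - t_rc * ln(1 - 1/J)) for the
# current that yields y_target at x_max, with J = 1 at the intercept, gives
# exactly the alpha/bias expressions used above.
def _lif_fit_example(t_ref=0.002, t_rc=0.02):
    x_max, x_int, y_target, e = 1.0, -0.5, 40.0, 1.0   # illustrative values
    # Scalar form of the algebra in lif_fit:
    B = np.exp((1.0 / t_rc) * (t_ref - 1.0 / y_target))
    alpha = (1.0 / (1.0 - B) - 1.0) / ((x_max - x_int) * e)
    bias = 1.0 - alpha * (x_int * e)
    # The LIF rate at x_max recovers y_target (~40 Hz), and the current at
    # the intercept is the threshold value of 1.
    J_max = alpha * x_max * e + bias
    rate_at_max = 1.0 / (t_ref - t_rc * np.log(1.0 - 1.0 / J_max))
    return rate_at_max, alpha * x_int * e + bias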

def linear_tuning_fit(x_targets, x_intercepts, y_targets, encoders):
    '''Returns stacked rows [alpha, J_bias] for a rectified-linear tuning curve.'''
    (x_targets, x_intercepts, y_targets, encoders) = utils.force_array(
        (x_targets, x_intercepts, y_targets, encoders))
    alphas = y_targets / utils.diag_dot(x_targets - x_intercepts, encoders)
    J_biases = -utils.diag_dot(x_intercepts, encoders) * alphas
    return np.vstack((alphas, J_biases))
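
# A hedged single-neuron check of linear_tuning_fit() with made-up targets:
# the fitted rectified-linear curve max(0, alpha * x * e + J_bias) reaches
# y_target at x_target and crosses zero at the intercept.
def _linear_fit_example():
    x_target, x_int, y_target, e = 1.0, 0.25, 100.0, 1.0   # illustrative values
    # Scalar form of the algebra in linear_tuning_fit:
    alpha = y_target / ((x_target - x_int) * e)
    J_bias = -(x_int * e) * alpha

    def rate(x):
        return max(0.0, alpha * x * e + J_bias)

    return rate(x_target), rate(x_int)   # ~100.0 and 0.0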