def test_grad_and_aux():
    A = npr.randn(5, 4)
    x = npr.randn(4)
    f = lambda x: (np.sum(np.dot(A, x)), x**2)
    g = lambda x: np.sum(np.dot(A, x))
    assert len(grad_and_aux(f)(x)) == 2
    check_equivalent(grad_and_aux(f)(x)[0], grad(g)(x))
    check_equivalent(grad_and_aux(f)(x)[1], x**2)
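For reference, grad_and_aux wraps a function that returns a pair (value, aux): only the first element is differentiated, and the auxiliary data from the same forward pass is returned alongside the gradient. A minimal sketch of that contract (the function and variable names here are illustrative, not from the original code):

import autograd.numpy as np
from autograd import grad_and_aux

def loss_with_aux(w):
    residuals = w - 1.0
    loss = np.sum(residuals ** 2)   # only this scalar is differentiated
    return loss, residuals          # auxiliary output, passed through unchanged

g, res = grad_and_aux(loss_with_aux)(np.array([1.0, 2.0, 3.0]))
# g == 2 * (w - 1) == [0., 2., 4.]; res comes from the same forward pass
# (note: it may still be wrapped in autograd's tracing box)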
def __init__(self, initial_predators=50, initial_prey=100, duration=30., delta_t=0.2,
             use_summary_statistics=True, normalize_summary_statistics=True,
             use_full_time_series=False, smear_summary_statistics=False, zoom_in=True):
    super().__init__()

    # Save parameters
    self.initial_predators = initial_predators
    self.initial_prey = initial_prey
    self.duration = duration
    self.delta_t = delta_t
    self.n_time_series = int(self.duration / self.delta_t) + 1
    self.use_summary_statistics = use_summary_statistics
    self.normalize_summary_statistics = normalize_summary_statistics
    self.use_full_time_series = use_full_time_series
    self.smear_summary_statistics = smear_summary_statistics
    self.zoom_in = zoom_in

    # Parameters
    self.n_parameters = 4

    # Autograd
    self._d_simulate_step = ag.grad_and_aux(self._simulate_step)
def __init__(self, n_rows=20, n_nails=31):
    super().__init__()
    self.n_rows = n_rows
    self.n_nails = n_nails

    # For mining: calculate the gradient log_p_xz (the joint score)
    self.d_trace = ag.grad_and_aux(self.trace)
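The simulator constructors in this collection all follow the same pattern: a method such as trace presumably returns a pair (log p(x, z | theta), simulated data), so wrapping it with ag.grad_and_aux yields the joint score, i.e. the gradient of the joint log-likelihood with respect to the parameters, together with the data from the very same run. A toy sketch of that pattern; the class, the nail-bias model, and all names below are invented for illustration:

import autograd as ag
import autograd.numpy as np

class ToyGaltonBoard:
    def __init__(self, n_rows=20):
        self.n_rows = n_rows
        self.d_trace = ag.grad_and_aux(self.trace)   # joint score plus data

    def trace(self, theta, u):
        # u holds pre-drawn uniform noise, so the run is deterministic in theta
        log_p_xz = 0.0
        position = 0
        for i in range(self.n_rows):
            p_right = 0.5 + 0.1 * np.tanh(theta)     # toy nail-bias model
            go_right = p_right > u[i]                 # latent branch z_i
            log_p_xz = log_p_xz + np.log(p_right if go_right else 1.0 - p_right)
            position = position + (1 if go_right else -1)
        return log_p_xz, position                     # (differentiated, aux data x)

sim = ToyGaltonBoard()
u = np.random.rand(sim.n_rows)
joint_score, x = sim.d_trace(0.3, u)   # d log p(x, z)/d theta, and x itself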
def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)

    # Parameters
    self.n_parameters = 2

    # Autograd
    self._d_simulate_transmission = ag.grad_and_aux(self._simulate_transmission)

    # Two of the original four parameters are fixed
    self.fixed_lambda = 0.593
    self.fixed_gamma = 1.
def __init__(self, initial_state=0, duration=1., delta_t=0.1):
    super().__init__()

    # Save parameters
    self.initial_state = initial_state
    self.duration = duration
    self.delta_t = delta_t
    self.n_time_series = int(self.duration / self.delta_t) + 1

    # Parameters
    self.n_parameters = 1

    # Autograd
    self._d_simulate_step = ag.grad_and_aux(self._simulate_step)
def trainNN(epsilon, momentum, train_x, train_y, train_y_integers, weights, unflatten, smooth_grad):
    # Batch-compute the gradients (partial derivatives of the loss function w.r.t. all NN parameters)
    grad_fun = autograd.grad_and_aux(logistic_loss_batch)

    # Compute gradients (and auxiliary loss/error values) using autograd
    weight_gradients, returned_values = grad_fun(weights, train_x, train_y, unflatten)
    #print('logistic loss: ', returned_values[0], 'Train error =', returned_values[1])

    # Update the weight vector with a momentum-smoothed gradient
    smooth_grad = (1 - momentum) * smooth_grad + momentum * weight_gradients
    weights = weights - epsilon * smooth_grad
    #print('Train accuracy =', 1 - mean_zero_one_loss(weights, train_x, train_y_integers, unflatten))

    meanZeroOneLoss = mean_zero_one_loss(weights, train_x, train_y_integers, unflatten)
    loss_and_aux = logistic_loss_batch(weights, train_x, train_y, unflatten)
    meanLogisticLoss = loss_and_aux[0] / train_x.shape[0]

    return smooth_grad, weights, meanLogisticLoss, meanZeroOneLoss
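For grad_and_aux to apply, logistic_loss_batch must return a pair (loss, aux); the commented-out print above suggests aux carries the loss and the training error. A hypothetical stand-in with that contract, using autograd.misc.flatten to build the unflatten helper (toy_logistic_loss_batch and all shapes are illustrative, not the original implementation):

import autograd
import autograd.numpy as np
from autograd.misc import flatten

def toy_logistic_loss_batch(weights, train_x, train_y, unflatten):
    W, b = unflatten(weights)                    # recover parameter arrays
    logits = np.dot(train_x, W) + b
    # Numerically stable log-softmax
    shifted = logits - np.max(logits, axis=1, keepdims=True)
    log_probs = shifted - np.log(np.sum(np.exp(shifted), axis=1, keepdims=True))
    loss = -np.sum(train_y * log_probs)          # differentiated output
    preds = np.argmax(logits, axis=1)
    train_error = np.mean(preds != np.argmax(train_y, axis=1))
    return loss, (loss, train_error)             # auxiliary output

params = (np.random.randn(4, 3), np.random.randn(3))   # (W, b)
weights, unflatten = flatten(params)
train_x = np.random.randn(8, 4)
train_y = np.zeros((8, 3))
train_y[np.arange(8), np.random.randint(0, 3, 8)] = 1
grads, (loss, err) = autograd.grad_and_aux(toy_logistic_loss_batch)(
    weights, train_x, train_y, unflatten)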
def __init__(self, n_individuals=53, n_strains=33, overall_prevalence=None, end_time=10,
             delta_t=0.1, initial_infection=False, use_original_summary_statistics=True,
             use_prevalence_covariance=False):
    super().__init__()

    # Save parameters
    self.n_individuals = n_individuals
    self.n_strains = n_strains
    self.overall_prevalence = overall_prevalence
    self.end_time = end_time
    self.delta_t = delta_t
    self.initial_infection = initial_infection
    self.use_original_summary_statistics = use_original_summary_statistics
    self.use_prevalence_covariance = use_prevalence_covariance

    # Input
    if self.overall_prevalence is None:
        self.overall_prevalence = np.array([0.12, 0.11, 0.10, 0.07, 0.06]
                                           + [0.05] * 3 + [0.04] * 2 + [0.03] * 3
                                           + [0.02] * 3 + [0.015] * 7 + [0.010] * 5
                                           + [0.005] * 5)
    assert self.overall_prevalence.shape[0] == self.n_strains, 'Wrong number of strains in prevalence'

    # Parameters
    self.n_parameters = 4

    # Autograd
    self._d_simulate_transmission = ag.grad_and_aux(self._simulate_transmission)
nEpochs = 1000

# Number of training examples
nTrainSamples = X_train.shape[0]

# Number of input dimensions
dims_in = X_train.shape[1]

# Convert integer labels to one-hot vectors, e.g. label 2 becomes [0, 0, 1, 0]
train_y = np.zeros((nTrainSamples, dims_out))
train_y[np.arange(nTrainSamples), Y_train] = 1

assert momentum <= 1
assert epsilon <= 1

# Batch-compute the gradients (partial derivatives of the loss function w.r.t. all NN parameters)
grad_fun = autograd.grad_and_aux(logistic_loss_batch)

plotsum = []  # [[5-yvalue], [40-yvalue], [70-yvalue]]
times = []
sample_errors = []

for dims_hid in dims_hids:
    print("unit: ", dims_hid)
    start = time.time()
    mean_loss = []

    # Initialize weights
    W = np.random.randn(dims_in, dims_hid)
    b = np.random.randn(dims_hid)
def __init__(self):
    self.d_simulate = ag.grad_and_aux(self.grad_simulate)