def prepare_operations(self):
    '''Prepares computational graph for needed operations.'''

    self.alphas_placeholder = tf.compat.v1.placeholder(
        dtype=tf.float64, shape=self.alphas.shape)
    self.means_placeholder = tf.compat.v1.placeholder(
        dtype=tf.float64, shape=self.means.shape)
    self.icf_placeholder = tf.compat.v1.placeholder(
        dtype=tf.float64, shape=self.icf.shape)

    with tf.GradientTape(persistent=True) as grad_tape:
        grad_tape.watch(self.alphas_placeholder)
        grad_tape.watch(self.means_placeholder)
        grad_tape.watch(self.icf_placeholder)

        self.objective_operation = gmm_objective(
            self.alphas_placeholder,
            self.means_placeholder,
            self.icf_placeholder,
            self.x,
            self.wishart_gamma,
            self.wishart_m
        )

    # Gradient w.r.t. all three parameter tensors, concatenated into one
    # flat vector.
    J = grad_tape.gradient(
        self.objective_operation,
        (self.alphas_placeholder, self.means_placeholder, self.icf_placeholder)
    )
    self.gradient_operation = tf.concat([flatten(d) for d in J], 0)

    self.feed_dict = {
        self.alphas_placeholder: self.alphas,
        self.means_placeholder: self.means,
        self.icf_placeholder: self.icf
    }
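# Usage sketch: the same pattern in miniature, with a toy objective standing
# in for gmm_objective. Running the prepared operations requires eager
# execution to be disabled and a TF1-style session; that surrounding setup is
# an assumption here, not shown in the method above.
import numpy as np
import tensorflow as tf

tf.compat.v1.disable_eager_execution()

x = np.array([1.0, 2.0, 3.0])
x_placeholder = tf.compat.v1.placeholder(dtype=tf.float64, shape=x.shape)

with tf.GradientTape(persistent=True) as grad_tape:
    grad_tape.watch(x_placeholder)
    objective_op = tf.reduce_sum(x_placeholder ** 2)  # toy objective

gradient_op = grad_tape.gradient(objective_op, x_placeholder)

with tf.compat.v1.Session() as session:
    obj, grad = session.run([objective_op, gradient_op],
                            feed_dict={x_placeholder: x})
    # obj == 14.0, grad == [2.0, 4.0, 6.0]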
def create_operations(self):
    '''Creates operations for calculating one part of the objective and
    its derivative.'''

    with tf.GradientTape(persistent=True) as grad_tape:
        grad_tape.watch(self.cam_holder)
        grad_tape.watch(self.x_holder)
        grad_tape.watch(self.w_holder)

        self.w_err_operation = compute_w_err(self.w_holder)
        self.r_err_operation = compute_reproj_err(
            self.cam_holder,
            self.x_holder,
            self.w_holder,
            self.feat_holder
        )

    # Jacobian of the 2-vector reprojection error w.r.t. camera, point,
    # and weight, assembled into a single dense block.
    dc, dx, dw = grad_tape.jacobian(
        self.r_err_operation,
        (self.cam_holder, self.x_holder, self.w_holder),
        experimental_use_pfor=False
    )
    self.r_err_grad_operation = flatten(
        tf.concat((dc, dx, tf.reshape(dw, [2, 1])), axis=1),
        column_major=True
    )

    self.w_err_grad_operation = grad_tape.gradient(
        self.w_err_operation, self.w_holder)
def prepare_operations(self):
    '''Prepares computational graph for needed operations.'''

    self.main_params_placeholder = tf.compat.v1.placeholder(
        dtype=tf.float64, shape=self.main_params.shape)
    self.extra_params_placeholder = tf.compat.v1.placeholder(
        dtype=tf.float64, shape=self.extra_params.shape)

    with tf.GradientTape(persistent=True) as grad_tape:
        grad_tape.watch(self.main_params_placeholder)
        grad_tape.watch(self.extra_params_placeholder)

        self.objective_operation = lstm_objective(
            self.main_params_placeholder,
            self.extra_params_placeholder,
            self.state,
            self.sequence
        )

    J = grad_tape.gradient(
        self.objective_operation,
        (self.main_params_placeholder, self.extra_params_placeholder)
    )
    self.gradient_operation = tf.concat([flatten(d) for d in J], 0)

    self.feed_dict = {
        self.main_params_placeholder: self.main_params,
        self.extra_params_placeholder: self.extra_params
    }
def calculate_objective(self, times):
    '''Calculates the objective function the given number of times.'''

    for _ in range(times):
        self.objective = self.objective_function(self.variables, *self.params)
        self.objective = flatten(self.objective)
def calculate_jacobian(self, times):
    '''Calculates the objective function Jacobian the given number of
    times.'''

    for _ in range(times):
        # Reprojection error processing: each observation contributes a
        # 2-vector error that depends on one camera, one point, and one
        # weight, so its Jacobian block is computed independently.
        reproj_err = []
        for j in range(self.p):
            camIdx = self.obs[j, 0]
            ptIdx = self.obs[j, 1]

            with tf.GradientTape(persistent=True) as t:
                t.watch(self.cams[camIdx])
                t.watch(self.x[ptIdx])
                t.watch(self.w[j])

                rej = compute_reproj_err(
                    self.cams[camIdx],
                    self.x[ptIdx],
                    self.w[j],
                    self.feats[j]
                )
                reproj_err.append(rej)

            dc, dx, dw = t.jacobian(
                rej,
                (self.cams[camIdx], self.x[ptIdx], self.w[j]),
                experimental_use_pfor=False
            )
            J = tf.concat((dc, dx, tf.reshape(dw, [2, 1])), axis=1)
            J = flatten(J, column_major=True).numpy()
            self.jacobian.insert_reproj_err_block(j, camIdx, ptIdx, J)

        self.reproj_error = tf.concat(reproj_err, 0)

        # Weight error processing: each weight error is scalar, so a
        # gradient suffices for its Jacobian entry.
        w_err = []
        for j in range(self.p):
            with tf.GradientTape(persistent=True) as t:
                t.watch(self.w[j])
                wj = compute_w_err(self.w[j])
                w_err.append(wj)

            dwj = t.gradient(wj, self.w[j])
            self.jacobian.insert_w_err_block(j, dwj.numpy())

        self.w_err = tf.stack(w_err, 0)
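# Design note: the per-observation loop above reflects the block-sparse
# structure of the bundle-adjustment Jacobian, so dense blocks are computed
# one at a time and handed to a sparse container. Below is a minimal,
# self-contained sketch of the same tape.jacobian block-assembly pattern on
# toy tensors; the shapes and the toy error function are assumptions for
# illustration only.
import tensorflow as tf

cam = tf.constant([0.1, 0.2, 0.3], dtype=tf.float64)  # stand-in camera block
pt = tf.constant([1.0, 2.0], dtype=tf.float64)        # stand-in point block

with tf.GradientTape(persistent=True) as t:
    t.watch(cam)
    t.watch(pt)
    err = tf.stack([tf.reduce_sum(cam) * pt[0], pt[1] ** 2])  # 2-vector error

d_cam, d_pt = t.jacobian(err, (cam, pt), experimental_use_pfor=False)
block = tf.concat((d_cam, d_pt), axis=1)  # dense 2 x 5 Jacobian block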
def calculate_jacobian(self, times):
    '''Calculates the objective function Jacobian the given number of
    times.'''

    for _ in range(times):
        with tf.GradientTape(persistent=True) as t:
            t.watch(self.variables)
            self.objective = self.objective_function(
                self.variables, *self.params)
            self.objective = flatten(self.objective)

        self.jacobian = t.jacobian(self.objective, self.variables,
                                   experimental_use_pfor=False)
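# Usage sketch for the eager path above, with a toy vector objective standing
# in for self.objective_function; the names and values are illustrative
# assumptions.
import tensorflow as tf

variables = tf.constant([1.0, 2.0], dtype=tf.float64)

with tf.GradientTape(persistent=True) as t:
    t.watch(variables)
    objective = tf.stack([variables[0] * variables[1], variables[0] ** 2])

jacobian = t.jacobian(objective, variables, experimental_use_pfor=False)
# jacobian == [[2.0, 1.0], [2.0, 0.0]] at variables == (1.0, 2.0)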
def calculate_jacobian(self, times):
    '''Calculates the objective function Jacobian (its gradient, since the
    objective is scalar) the given number of times.'''

    for _ in range(times):
        with tf.GradientTape(persistent=True) as t:
            t.watch(self.main_params)
            t.watch(self.extra_params)

            self.objective = lstm_objective(
                self.main_params,
                self.extra_params,
                self.state,
                self.sequence
            )

        J = t.gradient(self.objective, (self.main_params, self.extra_params))
        self.gradient = tf.concat([flatten(d) for d in J], 0)
def prepare_operations(self):
    '''Prepares computational graph for needed operations.'''

    self.variables_holder = tf.compat.v1.placeholder(
        dtype=tf.float64, shape=self.variables.shape)

    with tf.GradientTape(persistent=True) as grad_tape:
        grad_tape.watch(self.variables_holder)

        self.objective_operation = self.objective_function(
            self.variables_holder, *self.params)
        self.objective_operation = flatten(self.objective_operation)

    self.jacobian_operation = grad_tape.jacobian(
        self.objective_operation,
        self.variables_holder,
        experimental_use_pfor=False
    )

    self.feed_dict = {self.variables_holder: self.variables}
def calculate_jacobian(self, times):
    '''Calculates the objective function Jacobian (its gradient, since the
    objective is scalar) the given number of times.'''

    for _ in range(times):
        with tf.GradientTape(persistent=True) as t:
            t.watch(self.alphas)
            t.watch(self.means)
            t.watch(self.icf)

            self.objective = gmm_objective(
                self.alphas,
                self.means,
                self.icf,
                self.x,
                self.wishart_gamma,
                self.wishart_m
            )

        J = t.gradient(self.objective, (self.alphas, self.means, self.icf))
        self.gradient = tf.concat([flatten(d) for d in J], 0)
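# Usage sketch of the multi-tensor gradient pattern above: tape.gradient over
# a tuple of watched tensors returns one gradient per tensor, which are then
# flattened and concatenated into a single vector (tf.reshape stands in for
# the repo's flatten helper here). The toy tensors and objective are
# illustrative assumptions.
import tensorflow as tf

alphas = tf.constant([0.5, 0.5], dtype=tf.float64)
means = tf.constant([[1.0, 2.0]], dtype=tf.float64)

with tf.GradientTape(persistent=True) as t:
    t.watch(alphas)
    t.watch(means)
    objective = tf.reduce_sum(alphas) * tf.reduce_sum(means ** 2)

grads = t.gradient(objective, (alphas, means))
gradient = tf.concat([tf.reshape(g, [-1]) for g in grads], 0)
# gradient == [5.0, 5.0, 2.0, 4.0]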