def current_time_step(self):
    """Returns the current `ts.TimeStep`.

    Returns:
      A `TimeStep` tuple of:
        step_type: A scalar int32 tensor representing the `StepType` value.
        reward: A scalar float32 tensor representing the reward at this
          timestep.
        discount: A scalar float32 tensor representing the discount [0, 1].
        observation: A Tensor, or a nested dict, list or tuple of Tensors
          corresponding to `observation_spec()`.
    """
    def _current_time_step():
        with _check_not_called_concurrently(self._lock):
            if self._time_step is None:
                self._time_step = self._env.reset()
            return nest.flatten(self._time_step)

    with tf.name_scope('current_time_step'):
        outputs = tfe.py_func(
            _current_time_step,
            [],  # No inputs.
            self._time_step_dtypes,
            name='current_time_step_py_func')
        step_type, reward, discount = outputs[0:3]
        flat_observations = outputs[3:]
        return self._set_names_and_shapes(step_type, reward, discount,
                                          *flat_observations)
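# Hypothetical usage sketch (not from the snippet above): because
# current_time_step() is backed by tfe.py_func, the wrapped Python
# environment can be polled from an ordinary TF 1.x session. `env_wrapper`
# is assumed to be an instance of the class defining current_time_step(),
# and the return value is assumed to be a TimeStep namedtuple.
import tensorflow as tf

time_step = env_wrapper.current_time_step()
with tf.Session() as sess:
    step_type, reward, discount = sess.run(
        [time_step.step_type, time_step.reward, time_step.discount])
    print(step_type, reward, discount)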
def create_dataset(folder, img_size):
    """Creates a tf.data.Dataset from all of the images in <folder>.

    The images on disk are assumed to be large (typically 1000 x 1000) TIFF
    files. A smaller image is cropped from a randomly chosen location, so
    each pass over the dataset produces a patch taken from a different part
    of the original file. The label is also an image, the so-called target;
    the same location is cropped from the target to keep image and target in
    sync. Target images are assumed to have the same file name as the
    original image and to be located in folder/target/samefilename.tif.

    Parameters
    ----------
    folder : str
        The name of the folder to use as the source for all of the image
        files.
    img_size : tuple
        The desired size (height, width) to crop the images. The images on
        disk are large; this function crops them down to <img_size>.

    Returns
    -------
    dataset : tf.data.Dataset
    """
    dataset = tf.data.Dataset.list_files(folder + '/*.tif')
    dataset = dataset.map(lambda f: tuple(
        tfe.py_func(func=load_image,
                    inp=[f, img_size, True],
                    Tout=(tf.float32, tf.float32))))
    return dataset
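# Hypothetical usage sketch (TF 1.x graph mode): load_image is assumed to
# return a (patch, target) pair of float32 arrays cropped to img_size. The
# folder name and batch size below are illustrative.
import tensorflow as tf

dataset = create_dataset('data/train', img_size=(256, 256))
dataset = dataset.shuffle(buffer_size=16).batch(4)
image_batch, target_batch = dataset.make_one_shot_iterator().get_next()

with tf.Session() as sess:
    images, targets = sess.run([image_batch, target_batch])
    print(images.shape, targets.shape)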
def generate(self, x, fn_logits, y=None):
    self.fn_logits = fn_logits

    # Wrap the attack function in a py_func so it runs eagerly in the graph.
    def cw_wrap(x_val):
        return self.attack(x_val)

    adv = tfe.py_func(cw_wrap, [x], tf_dtype)
    # py_func loses static shape information; restore it from the input.
    adv.set_shape(x.get_shape())
    return adv
def step(self, actions):
    """Returns a TensorFlow op to step the environment.

    Args:
      actions: A Tensor, or a nested dict, list or tuple of Tensors
        corresponding to `action_spec()`.

    Returns:
      A `TimeStep` tuple of:
        step_type: A scalar int32 tensor representing the `StepType` value.
        reward: A scalar float32 tensor representing the reward at this
          timestep.
        discount: A scalar float32 tensor representing the discount [0, 1].
        observation: A Tensor, or a nested dict, list or tuple of Tensors
          corresponding to `observation_spec()`.

    Raises:
      ValueError: If any of the actions are scalars or their major axis is
        known and is not equal to `self.batch_size`.
    """
    def _step(*flattened_actions):
        with _check_not_called_concurrently(self._lock):
            flattened_actions = [x.numpy() for x in flattened_actions]
            packed = nest.pack_sequence_as(structure=self.action_spec(),
                                           flat_sequence=flattened_actions)
            self._time_step = self._env.step(packed)
            return nest.flatten(self._time_step)

    with tf.name_scope('step', values=[actions]):
        flat_actions = [tf.identity(x) for x in nest.flatten(actions)]
        for action in flat_actions:
            dim_value = tensor_shape.dimension_value(action.shape[0])
            if (action.shape.ndims == 0 or
                    (dim_value is not None and dim_value != self.batch_size)):
                raise ValueError(
                    'Expected actions whose major dimension is batch_size '
                    '(%d), but saw action with shape %s:\n %s' %
                    (self.batch_size, action.shape, action))
        outputs = tfe.py_func(_step,
                              flat_actions,
                              self._time_step_dtypes,
                              name='step_py_func')
        step_type, reward, discount = outputs[0:3]
        flat_observations = outputs[3:]
        return self._set_names_and_shapes(step_type, reward, discount,
                                          *flat_observations)
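# Hypothetical usage sketch: driving one environment step from graph mode.
# `env_wrapper`, the int32 action dtype, and batch_size == 1 are assumptions.
import numpy as np
import tensorflow as tf

actions = tf.placeholder(tf.int32, shape=(1,))  # major axis == batch_size
time_step = env_wrapper.step(actions)

with tf.Session() as sess:
    ts = sess.run(time_step, feed_dict={actions: np.array([0], np.int32)})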
def reset(self):
    """Returns the current `TimeStep` after resetting the environment.

    Returns:
      A `TimeStep` tuple of:
        step_type: A scalar int32 tensor representing the `StepType` value.
        reward: A scalar float32 tensor representing the reward at this
          timestep.
        discount: A scalar float32 tensor representing the discount [0, 1].
        observation: A Tensor, or a nested dict, list or tuple of Tensors
          corresponding to `observation_spec()`.
    """
    def _reset():
        with _check_not_called_concurrently(self._lock):
            self._time_step = self._env.reset()

    with tf.name_scope('reset'):
        reset_op = tfe.py_func(
            _reset,
            [],  # No inputs.
            [],
            name='reset_py_func')
        with tf.control_dependencies([reset_op]):
            return self.current_time_step()
def get_counterfactuals(self, factuals: pd.DataFrame) -> pd.DataFrame:
    best_perturb = np.array([])

    def f(best_perturb):
        # Doesn't work with categorical features, so they aren't used.
        original_input = self.model.get_ordered_features(factuals)
        original_input = original_input.to_numpy()
        ground_truth = self.model.predict(original_input)

        # These will be the perturbed features, i.e. the counterfactuals.
        perturbed = tf.Variable(initial_value=original_input,
                                name="perturbed_features",
                                trainable=True)
        to_optimize = [perturbed]

        class_index = np.zeros(len(original_input), dtype=np.int64)
        for i, class_name in enumerate(self.model.raw_model.classes_):
            mask = np.equal(ground_truth, class_name)
            class_index[mask] = i
        class_index = tf.constant(class_index, dtype=tf.int64)
        example_range = tf.constant(
            np.arange(len(original_input), dtype=np.int64))
        example_class_index = tf.stack((example_range, class_index), axis=1)

        # Booleans to indicate if the label has flipped.
        indicator = np.ones(len(factuals))

        # Hyperparameters.
        sigma = np.full(len(factuals), self.sigma_val)
        temperature = np.full(len(factuals), self.temp_val)
        distance_weight = np.full(len(factuals), self.distance_weight_val)
        best_distance = np.full(len(factuals), 1000.0)
        best_perturb = np.zeros(perturbed.shape)

        for i in range(self.n_iter):
            with tf.GradientTape(persistent=True) as t:
                p_model = _filter_hinge_loss(
                    self.n_class,
                    indicator,
                    perturbed,
                    sigma,
                    temperature,
                    self._prob_from_input,
                )
                approx_prob = tf.gather_nd(p_model, example_class_index)
                eps = 10.0 ** -10
                distance = distance_func(self.distance_function, perturbed,
                                         original_input, eps)

                # The losses.
                prediction_loss = indicator * approx_prob
                distance_loss = distance_weight * distance
                total_loss = tf.reduce_mean(prediction_loss + distance_loss)

            # Optimize the losses.
            grad = t.gradient(total_loss, to_optimize)
            self.optimizer.apply_gradients(
                zip(grad, to_optimize),
                global_step=tf.compat.v1.train.get_or_create_global_step(),
            )
            # Clip perturbed values between 0 and 1 (inclusive).
            tf.compat.v1.assign(
                perturbed,
                tf.math.minimum(1, tf.math.maximum(0, perturbed)))

            true_distance = distance_func(self.distance_function, perturbed,
                                          original_input, 0).numpy()

            # Get the class predictions for the perturbed features.
            current_predict = self.model.predict(perturbed.numpy())
            indicator = np.equal(ground_truth,
                                 current_predict).astype(np.float64)

            # Get the best perturbation so far: did the prediction flip?
            mask_flipped = np.not_equal(ground_truth, current_predict)
            # Is the distance lower than the previous best distance?
            mask_smaller_dist = np.less(true_distance, best_distance)

            # Update the best distances.
            temp_dist = best_distance.copy()
            temp_dist[mask_flipped] = true_distance[mask_flipped]
            best_distance[mask_smaller_dist] = temp_dist[mask_smaller_dist]

            # Update the best perturbations.
            temp_perturb = best_perturb.copy()
            temp_perturb[mask_flipped] = perturbed[mask_flipped]
            best_perturb[mask_smaller_dist] = temp_perturb[mask_smaller_dist]

        return best_perturb

    # A little hacky, but needed because the other tf code is graph based,
    # and graph-based tf and eager execution don't work together nicely.
    with tf.compat.v1.Session() as sess:
        pf = tfe.py_func(f, [best_perturb], tf.float32)
        best_perturb = sess.run(pf)

    df_cfs = pd.DataFrame(best_perturb, columns=self.model.data.continuous)
    df_cfs = check_counterfactuals(self._mlmodel, df_cfs, factuals.index)
    df_cfs = self._mlmodel.get_ordered_features(df_cfs)
    return df_cfs
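# The best-so-far bookkeeping above is easy to misread, so here is a minimal
# NumPy sketch of the same update rule with illustrative toy values: only
# examples whose prediction flipped become candidates, and among candidates
# only strictly smaller distances replace the stored best.
import numpy as np

best_distance = np.array([1000.0, 0.5, 0.8])
true_distance = np.array([0.3, 0.7, 0.2])
mask_flipped = np.array([True, False, True])  # prediction changed
mask_smaller_dist = np.less(true_distance, best_distance)

temp_dist = best_distance.copy()
temp_dist[mask_flipped] = true_distance[mask_flipped]
best_distance[mask_smaller_dist] = temp_dist[mask_smaller_dist]
print(best_distance)  # [0.3 0.5 0.2]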
def eager_fun(*args, **kwargs):
    with tf.Session() as sess:
        sess.run(tfe.py_func(func, inp=list(kwargs.values()), Tout=[]))
def decorator():
    with tf.Session() as sess:
        sess.run(tfe.py_func(func, inp=[], Tout=[]))
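# Both wrappers above close over a `func` from an enclosing decorator
# factory. A self-contained sketch of how such a factory might look
# (`run_in_session` and `log_once` are illustrative names, not from the
# snippets above):
import tensorflow as tf
tfe = tf.contrib.eager

def run_in_session(func):
    def decorator():
        with tf.Session() as sess:
            # Tout=[] because func returns nothing; py_func just runs it.
            sess.run(tfe.py_func(func, inp=[], Tout=[]))
    return decorator

@run_in_session
def log_once():
    print('running eagerly inside sess.run')

log_once()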
""" pattern2 for (i, (x, y)) in enumerate(dataset_train): # minimize() is equivalent to the grad() and apply_gradients() calls. optimizer.minimize(lambda: loss(model, x, y), global_step=tf.train.get_or_create_global_step()) """ # specify run on gpu # with tf.device("/gpu:0"): ... # eager execution: better debugging and interactive performance # graph execution: better distribution-training and deploy performance # also, we can save checkpoints in eager execution and use it again in graph execution # tfe.py_func: eager execution in graph execution environment def my_py_func(x): x = tf.matmul(x, x) # You can use tf ops print(x) # but it's eager! return x with tf.Session() as sess: x = tf.placeholder(dtype=tf.float32) # Call eager function in graph! pf = tfe.py_func(my_py_func, [x], tf.float32) sess.run(pf, feed_dict={x: [[2.0]]}) # [[4.0]]
def iteration(t, pt, var, maxi_sol, best_val, best_sol):
    def step_safeopt(t, pt, cost, maxi_val, maxi_sol, best_val, best_sol):
        # Convert to np.
        t_np = np.array(t)
        pt_np = np.array(pt)
        cost_np = np.array(cost)
        best_val = np.array(best_val)
        best_sol = np.array(best_sol)
        maxi_val_np = np.array(maxi_val)
        maxi_sol_np = np.array(maxi_sol)

        if maxi_val_np < best_val:
            best_val = maxi_val_np
            best_sol = maxi_sol_np

        self.opt.add_new_data_point(pt_np, -cost_np)
        # Returns the point most likely to expand the safe set.
        new_pt, stddev = self.opt.get_new_query_point("expanders")
        # new_pt = self.opt.optimize()
        # Returns the best parameters from the currently known points.
        maxi_sol2, stddev2 = self.opt.get_new_query_point("maximizers")
        best_sol = np.squeeze(best_sol)
        maxi_sol2 = np.squeeze(maxi_sol2)

        if t_np == self.max_iters:
            del self.opt
            del self.gpmodel
            gc.collect()
            for name in dir():
                if not name.startswith('_'):
                    del globals()[name]
            for name in dir():
                if not name.startswith('_'):
                    del locals()[name]

        return new_pt, maxi_sol2, best_sol, best_val

    pt = tf.expand_dims(pt, axis=0)
    maxi_sol = tf.expand_dims(maxi_sol, axis=0)
    cost = cost_function(pt)
    cost_maxi = cost_function(maxi_sol)

    # ---- A step of SafeOpt.
    new_pt, maxi_sol2, best_sol2, best_cost = tfe.py_func(
        func=step_safeopt,
        inp=[t, pt, cost, cost_maxi, maxi_sol, best_val, best_sol],
        Tout=[pt.dtype, maxi_sol.dtype, best_sol.dtype, best_val.dtype])
    # ----

    # Maintain dimensionality: py_func drops static shape information.
    maxi_sol2 = tf.convert_to_tensor(maxi_sol2)
    maxi_sol2.set_shape(maxi_sol.get_shape())
    best_sol2 = tf.convert_to_tensor(best_sol2)
    best_sol2.set_shape(best_sol.get_shape())
    new_pt = tf.convert_to_tensor(new_pt)
    new_pt.set_shape(pt.get_shape())
    new_best_cost = tf.convert_to_tensor(best_cost)
    new_best_cost.set_shape(best_val.get_shape())

    new_pt = tf.squeeze(new_pt)  # retain shape of pt
    new_best_cost = tf.squeeze(new_best_cost)
    maxi_sol2 = tf.squeeze(maxi_sol2)

    return t + 1, new_pt, var, maxi_sol2, new_best_cost, best_sol2  # var has no significance
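# Hypothetical wiring sketch: the signature of iteration() (returning t + 1
# plus the threaded-through state) suggests it serves as the body of a
# tf.while_loop. All initial values, shapes, and max_iters below are
# illustrative assumptions, and `iteration` is assumed to be in scope.
import tensorflow as tf

max_iters = 10
loop_vars = [
    tf.constant(0),                 # t
    tf.zeros([2], tf.float64),      # pt: current parameter point
    tf.constant(0.0, tf.float64),   # var: carried through but unused
    tf.zeros([2], tf.float64),      # maxi_sol
    tf.constant(1e6, tf.float64),   # best_val
    tf.zeros([2], tf.float64),      # best_sol
]

final_state = tf.while_loop(
    cond=lambda t, *rest: t < max_iters,
    body=iteration,
    loop_vars=loop_vars)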