def testOneKeypointsExpectsNone(self):
  """Pass a tensor with one keypoint and check None regularizer."""
  output_keypoints_tensor = array_ops.placeholder(
      dtype=dtypes.float32, shape=[1])
  self.assertEqual(
      regularizers.calibrator_regularization(output_keypoints_tensor), None)
def testNoRegularizerExpectsNone(self):
  """Set no l1_reg and l2_reg and check None regularizer."""
  output_keypoints_tensor = array_ops.placeholder(
      dtype=dtypes.float32, shape=[2])
  self.assertEqual(
      regularizers.calibrator_regularization(output_keypoints_tensor), None)
def _runAndCheckValues(self,
                       output_keypoints,
                       expected_value,
                       l1_reg=None,
                       l2_reg=None):
  output_keypoints_tensor = constant_op.constant(
      output_keypoints, dtype=dtypes.float32)
  reg = regularizers.calibrator_regularization(
      output_keypoints_tensor, l1_wrinkle_reg=l1_reg, l2_wrinkle_reg=l2_reg)
  with self.test_session() as sess:
    reg_value = sess.run(reg)
  self.assertAlmostEqual(reg_value, expected_value, delta=1e-1)
def _runAndCheckValues(self,
                       output_keypoints,
                       expected_value,
                       l1_reg=None,
                       l2_reg=None):
  output_keypoints_tensor = constant_op.constant(
      output_keypoints, dtype=dtypes.float32)
  reg = regularizers.calibrator_regularization(
      output_keypoints_tensor,
      l1_laplacian_reg=l1_reg,
      l2_laplacian_reg=l2_reg)
  with self.test_session() as sess:
    reg_value = sess.run(reg)
  self.assertAlmostEqual(reg_value, expected_value, delta=1e-1)
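# Example usage of the helper above (a minimal sketch, not from the original
# test file). It assumes the Laplacian regularizer penalizes first-order
# differences of the output keypoints, i.e.
# l1_laplacian_reg * sum(|y_{k+1} - y_k|) plus
# l2_laplacian_reg * sum((y_{k+1} - y_k)^2); the exact formula is defined by
# regularizers.calibrator_regularization.
def testLaplacianRegularizerSketch(self):
  """Sketch: check hand-computed Laplacian values (assumed formula)."""
  # For keypoints [0.0, 1.0, 3.0], diffs are [1.0, 2.0]. Under the assumed
  # formula: l1 term = 0.1 * (1 + 2) = 0.3, l2 term = 0.1 * (1 + 4) = 0.5,
  # so the total expected regularization is 0.8.
  self._runAndCheckValues([0.0, 1.0, 3.0],
                          expected_value=0.8,
                          l1_reg=0.1,
                          l2_reg=0.1)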
def one_dimensional_calibration_layer(uncalibrated_tensor,
                                      num_keypoints,
                                      signal_name,
                                      keypoints_initializers=None,
                                      keypoints_initializer_fns=None,
                                      bound=False,
                                      monotonic=None,
                                      missing_input_value=None,
                                      missing_output_value=None,
                                      **regularizer_amounts):
  """Creates a calibration layer for a single continuous signal.

  Returns a calibrated tensor of the uncalibrated continuous signal and a
  list of projection ops.

  Args:
    uncalibrated_tensor: Tensor of shape [batch_size] of a single signal.
    num_keypoints: Number of keypoints to use.
    signal_name: (Required) Used as a suffix to the variable names.
    keypoints_initializers: Two tensors of shape [num_keypoints]. For
      evaluation or inference (or when resuming training from a checkpoint)
      the values will be loaded from disk, so they don't need to be given --
      but in that case num_keypoints needs to be accurate. See
      load_keypoints_from_quantiles or uniform_keypoints_for_signal (in the
      keypoints_initialization module) on how to generate these.
    keypoints_initializer_fns: Like keypoints_initializers but using lambda
      initializers. They should be compatible with tf.get_variable. If this
      is set, then keypoints_initializers must be None.
    bound: boolean whether the output of calibration must be bound.
      Alternatively a dict mapping feature name to boundness.
    monotonic: whether calibration must be kept monotonic: None or 0 means
      no monotonicity. Positive or negative values mean increasing or
      decreasing monotonicity respectively. Alternatively a dict mapping
      feature name to monotonicity.
    missing_input_value: If set, and the input has this value, it is assumed
      to be missing and the output will either be calibrated to some value
      between `[calibration_output_min, calibration_output_max]` or set to a
      fixed value given by missing_output_value. Limitation: it only works
      for scalars.
    missing_output_value: Requires missing_input_value to also be set. If
      set, it will map missing inputs to this value.
    **regularizer_amounts: Keyword args of regularization amounts passed to
      regularizers.calibrator_regularization(). Keyword names should be
      among supported regularizers.CALIBRATOR_REGULARIZERS and values should
      be float.

  Returns:
    A tuple of:
    * calibrated tensor of shape [batch_size].
    * None or projection ops, that must be applied at each step (or every so
      many steps) to project the model to a feasible space: used for bounding
      the outputs or for imposing monotonicity.
    * None or a regularization loss, if regularization is configured.

  Raises:
    ValueError: if dtypes are incompatible.
    ValueError: if keypoints_initializers and keypoints_initializer_fns are
      both set.
  """
  if (keypoints_initializers is not None and
      keypoints_initializer_fns is not None):
    raise ValueError('keypoints_initializers and keypoints_initializer_fns '
                     'cannot both be set.')
  with variable_scope.variable_scope('pwl_calibration'):
    # Sanity checks.
    if uncalibrated_tensor.get_shape().ndims != 1:
      raise ValueError(
          'one_dimensional_calibration_layer can only be used for a single '
          'signal, so uncalibrated shape must be of form (batch_size), got %s'
          % uncalibrated_tensor.get_shape())
    if missing_output_value is not None and missing_input_value is None:
      raise ValueError(
          'missing_output_value can only be set if a missing_input_value is '
          'also set, missing_input_value=None, missing_output_value=%s' %
          missing_output_value)

    # Create variables: only uses initializers if they are given.
    kp_in_name = signal_name + '_keypoints_inputs'
    kp_out_name = signal_name + '_keypoints_outputs'
    missing_out_calibrated_name = signal_name + '_calibrated_missing_output'
    if keypoints_initializers is not None:
      kp_in, kp_out = keypoints_initializers[0], keypoints_initializers[1]
      if (uncalibrated_tensor.dtype != kp_in.dtype or
          uncalibrated_tensor.dtype != kp_out.dtype):
        raise ValueError(
            'incompatible types for signal \'%s\': uncalibrated=%s, '
            'keypoints_initializers[input=%s, output=%s]' %
            (signal_name, uncalibrated_tensor.dtype, kp_in.dtype,
             kp_out.dtype))
      tools.assert_shape(kp_in, [num_keypoints],
                         'keypoints_initializers[input]')
      tools.assert_shape(kp_out, [num_keypoints],
                         'keypoints_initializers[output]')
      keypoints_inputs = variable_scope.get_variable(
          kp_in_name, initializer=kp_in)
      keypoints_outputs = variable_scope.get_variable(
          kp_out_name, initializer=kp_out)

      if missing_input_value is not None:
        # Value to be taken by missing features.
        if missing_output_value is not None:
          missing_out_calibrated = constant_op.constant(
              missing_output_value, dtype=uncalibrated_tensor.dtype)
        else:
          # Learned missing value, initialized by the first value of kp_out.
          missing_out_calibrated = variable_scope.get_variable(
              missing_out_calibrated_name, initializer=kp_out[0])

    elif keypoints_initializer_fns is not None:
      kp_in, kp_out = keypoints_initializer_fns[0], keypoints_initializer_fns[1]
      keypoints_inputs = variable_scope.get_variable(
          kp_in_name, shape=[num_keypoints], initializer=kp_in)
      keypoints_outputs = variable_scope.get_variable(
          kp_out_name, shape=[num_keypoints], initializer=kp_out)

      if missing_input_value is not None:
        # Value to be taken by missing features.
        if missing_output_value is not None:
          missing_out_calibrated = constant_op.constant(
              missing_output_value, dtype=uncalibrated_tensor.dtype)
        else:
          # Learned missing value, initialized by the first value of kp_out.
          def first_kp_out(*args, **kwargs):
            return kp_out(*args, **kwargs)[0]

          missing_out_calibrated = variable_scope.get_variable(
              missing_out_calibrated_name, shape=[], initializer=first_kp_out)

    else:
      # When loading a model, no initializer is needed.
      keypoints_inputs = variable_scope.get_variable(
          kp_in_name, shape=[num_keypoints], dtype=uncalibrated_tensor.dtype)
      keypoints_outputs = variable_scope.get_variable(
          kp_out_name, shape=[num_keypoints], dtype=uncalibrated_tensor.dtype)

      if missing_input_value is not None:
        if missing_output_value is not None:
          missing_out_calibrated = constant_op.constant(
              missing_output_value, dtype=uncalibrated_tensor.dtype)
        else:
          missing_out_calibrated = variable_scope.get_variable(
              missing_out_calibrated_name,
              shape=[],
              dtype=uncalibrated_tensor.dtype)

    # Split missing values from normal values.
    # FutureWork: move handling of missing values to C++ land.
    if missing_input_value is not None:
      missing_mask = math_ops.equal(
          uncalibrated_tensor, constant_op.constant(missing_input_value))
      mask_indices = math_ops.range(array_ops.shape(uncalibrated_tensor)[0])
      mask_indices = data_flow_ops.dynamic_partition(
          mask_indices, math_ops.cast(missing_mask, dtypes.int32), 2)
      (uncalibrated_tensor,
       missing_values) = data_flow_ops.dynamic_partition(
           uncalibrated_tensor, math_ops.cast(missing_mask, dtypes.int32), 2)

      # Assign value to missing_values.
      missing_values = array_ops.ones_like(missing_values)
      missing_values *= missing_out_calibrated

    # Dense implementation.
    interpolation = pwl_calibration_ops.pwl_indexing_calibrator(
        uncalibrated_tensor, keypoints_inputs)
    calibrated = math_ops.reduce_sum(interpolation * keypoints_outputs, 1)
    projection_ops = None

    # Re-join missing values.
    if missing_input_value is not None:
      calibrated = data_flow_ops.dynamic_stitch(mask_indices,
                                                [calibrated, missing_values])

    # Boundness.
    projected_keypoints_outputs = None
    if bound:
      bound_min_name = signal_name + '_bound_min'
      bound_max_name = signal_name + '_bound_max'
      # Set bound_min/max from min/max values initialized.
      if keypoints_initializers is not None:
        # Store bound_min and bound_max in variables because their values
        # (from kp_out) are only available during training (when
        # keypoints_initializers is available). During inference the value is
        # not available. Storing them in variables makes them available
        # during inference.
        bound_min = variable_scope.get_variable(
            bound_min_name,
            dtype=uncalibrated_tensor.dtype,
            initializer=math_ops.reduce_min(kp_out))
        bound_max = variable_scope.get_variable(
            bound_max_name,
            dtype=uncalibrated_tensor.dtype,
            initializer=math_ops.reduce_max(kp_out))
      elif keypoints_initializer_fns is not None:
        # Store bound_min and bound_max in variables for the same reason as
        # above, wrapping the initializer functions.
        def min_kp_out(*args, **kwargs):
          return math_ops.reduce_min(kp_out(*args, **kwargs))

        def max_kp_out(*args, **kwargs):
          return math_ops.reduce_max(kp_out(*args, **kwargs))

        bound_min = variable_scope.get_variable(
            bound_min_name,
            dtype=uncalibrated_tensor.dtype,
            shape=[],
            initializer=min_kp_out)
        bound_max = variable_scope.get_variable(
            bound_max_name,
            dtype=uncalibrated_tensor.dtype,
            shape=[],
            initializer=max_kp_out)
      else:
        # No need to initialize, since presumably their values will be read
        # from some checkpoint.
        bound_min = variable_scope.get_variable(
            bound_min_name, dtype=uncalibrated_tensor.dtype, shape=[])
        bound_max = variable_scope.get_variable(
            bound_max_name, dtype=uncalibrated_tensor.dtype, shape=[])
      projected_keypoints_outputs = math_ops.minimum(
          math_ops.maximum(keypoints_outputs, bound_min), bound_max)

    # Monotonicity.
    if monotonic:
      # First a soft-enforcement: might not break indirect constraints.
      if projected_keypoints_outputs is None:
        projected_keypoints_outputs = keypoints_outputs
      projected_keypoints_outputs = pwl_calibration_ops.monotonic_projection(
          increasing=bool(monotonic > 0),
          values=projected_keypoints_outputs,
          name='project_calibration_to_monotonic')

    # Make assign_add op to project the output.
    if projected_keypoints_outputs is not None:
      constrained_diff = projected_keypoints_outputs - keypoints_outputs
      projection_ops = state_ops.assign_add(
          keypoints_outputs,
          constrained_diff,
          use_locking=None,
          name='project_feasible')

      if (bound and missing_input_value is not None and
          missing_output_value is None):
        # Include op bounding calibrated missing value.
        projected_missing_out_calibrated = math_ops.minimum(
            math_ops.maximum(missing_out_calibrated, bound_min), bound_max)
        projected_missing_out_calibrated_diff = (
            projected_missing_out_calibrated - missing_out_calibrated)
        projected_missing_out_calibrated_op = state_ops.assign_add(
            missing_out_calibrated,
            projected_missing_out_calibrated_diff,
            use_locking=None,
            name='project_missing_calibration_to_bounds')
        projection_ops = control_flow_ops.group(
            projection_ops, projected_missing_out_calibrated_op)

    # Regularization.
    regularization = regularizers.calibrator_regularization(
        keypoints_outputs,
        name=signal_name + '_calibrator_regularization',
        **regularizer_amounts)
  return calibrated, projection_ops, regularization
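# Example usage of one_dimensional_calibration_layer (a minimal sketch, not
# part of the library). It assumes a graph-mode TF1 environment and that
# keypoints_initialization.uniform_keypoints_for_signal returns the pair of
# keypoint initializer tensors expected by `keypoints_initializers`; check
# the keypoints_initialization module for the exact signature.
#
#   uncalibrated = array_ops.placeholder(dtype=dtypes.float32, shape=[None])
#   kp_init = keypoints_initialization.uniform_keypoints_for_signal(
#       num_keypoints=10, input_min=0.0, input_max=1.0,
#       output_min=0.0, output_max=1.0)
#   calibrated, projection_ops, regularization = (
#       one_dimensional_calibration_layer(
#           uncalibrated,
#           num_keypoints=10,
#           signal_name='my_signal',
#           keypoints_initializers=kp_init,
#           bound=True,
#           monotonic=1,
#           l1_laplacian_reg=0.01))
#
# After each training step, `projection_ops` would be run to keep the
# keypoint outputs bounded and monotonic, and `regularization` (if not None)
# would be added to the training loss.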
def testUnknownShapeTensorExpectsError(self):
  """Pass rank-1 tensor with unknown shape and check the error."""
  output_keypoints_tensor = array_ops.placeholder(
      dtype=dtypes.float32, shape=[None])
  with self.assertRaises(ValueError):
    regularizers.calibrator_regularization(output_keypoints_tensor)
def testRank2TensorExpectsError(self):
  """Pass rank-2 tensor output keypoints and check the error."""
  output_keypoints_tensor = array_ops.placeholder(
      dtype=dtypes.float32, shape=[10, 10])
  with self.assertRaises(ValueError):
    regularizers.calibrator_regularization(output_keypoints_tensor)
def testTwoKeypointsExpectsNone(self):
  """Pass a tensor with two keypoints and check None regularizer."""
  output_keypoints_tensor = array_ops.placeholder(
      dtype=dtypes.float32, shape=[2])
  self.assertEqual(
      regularizers.calibrator_regularization(output_keypoints_tensor), None)