def _get_base_op_handler_dicts():
  """Returns the base op_handler_dict for all regularizers."""
  base_dict = collections.defaultdict(
      grouping_op_handler.GroupingOpHandler, {
          'ConcatV2':
              concat_op_handler.ConcatOpHandler(),
          'DepthToSpace':
              output_non_passthrough_op_handler.OutputNonPassthroughOpHandler(),
          'DepthwiseConv2dNative':
              depthwise_convolution_op_handler.DepthwiseConvolutionOpHandler(),
          'ExpandDims':
              output_non_passthrough_op_handler.OutputNonPassthroughOpHandler(),
          'RandomUniform':
              leaf_op_handler.LeafOpHandler(),
          'Reshape':
              leaf_op_handler.LeafOpHandler(),
          'Shape':
              leaf_op_handler.LeafOpHandler(),
          'SpaceToDepth':
              leaf_op_handler.LeafOpHandler(),
          'StridedSlice':
              leaf_op_handler.LeafOpHandler(),
          'TensorArrayGatherV3':
              leaf_op_handler.LeafOpHandler(),
          'Transpose':
              output_non_passthrough_op_handler.OutputNonPassthroughOpHandler(),
      })
  for resize_method in RESIZE_OP_NAMES:
    # For Resize* ops, the second input might be a tensor, which would result
    # in an error if grouped, so only group along input 0.
    base_dict[resize_method] = grouping_op_handler.GroupingOpHandler([0])
  return base_dict
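# Illustrative sketch (not part of the library): the dict returned above is a
# collections.defaultdict, so op types without an explicit entry fall back to
# a fresh GroupingOpHandler created by the default factory, while listed op
# types get their specialized handler. The op names below are examples only.
#
#   handlers = _get_base_op_handler_dicts()
#   handlers['ConcatV2']  # ConcatOpHandler (explicit entry)
#   handlers['Relu']      # GroupingOpHandler (default factory)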
def get_gamma_op_handler_dict():
  """Returns the base op_handler_dict for gamma-based regularizers."""
  op_handler_dict = _get_base_op_handler_dicts()
  op_handler_dict.update({
      'Conv2D':
          output_non_passthrough_op_handler.OutputNonPassthroughOpHandler(),
      'MatMul':
          output_non_passthrough_op_handler.OutputNonPassthroughOpHandler(),
      'Conv2DBackpropInput':
          output_non_passthrough_op_handler.OutputNonPassthroughOpHandler(),
  })
  return op_handler_dict
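# Illustrative sketch (not part of the library): a gamma-based regularizer
# typically takes this dict and registers its source op handler for the batch
# norm ops; the threshold value below is an example only.
#
#   op_handler_dict = get_gamma_op_handler_dict()
#   op_handler_dict.update({
#       'FusedBatchNorm':
#           batch_norm_source_op_handler.BatchNormSourceOpHandler(0.1),
#   })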
def testImageIsNotZerothOutputOfOp(self):
  # Throughout the framework, we assume that the 0th output of each op is the
  # only one of interest. One exception that often happens is when the input
  # image comes from a queue or from a staging op. Then the image is one of
  # the outputs of the dequeue (or staging) op, not necessarily the 0th one.
  # Here we test that the OpRegularizerManager and the cost calculation deal
  # correctly with this case.

  # Create an input op where the image is output number 1, not 0.
  # TODO(g1): Move this mechanism to add_concat_model_stub, possibly using
  # tf.split to produce an op where the image is not the 0th output
  # (instead of FIFOQueue).
  image = add_concat_model_stub.image_stub()
  non_image_tensor = tf.zeros(shape=(41,))
  queue = tf.FIFOQueue(
      capacity=1,
      dtypes=(tf.float32,) * 2,
      shapes=(non_image_tensor.shape, image.shape))

  # Pass the image (output[1]) to the network.
  with arg_scope(self._batch_norm_scope()):
    output_op = add_concat_model_stub.build_model(queue.dequeue()[1])

  # Create the OpHandler dict for the test.
  op_handler_dict = collections.defaultdict(
      grouping_op_handler.GroupingOpHandler)
  op_handler_dict.update({
      'FusedBatchNorm':
          batch_norm_source_op_handler.BatchNormSourceOpHandler(0.1),
      'Conv2D':
          output_non_passthrough_op_handler.OutputNonPassthroughOpHandler(),
      'ConcatV2':
          concat_op_handler.ConcatOpHandler(),
  })

  # Create the OpRegularizerManager and CostCalculator for the test.
  manager = orm.OpRegularizerManager([output_op], op_handler_dict)
  calculator = cost_calculator.CostCalculator(
      manager, resource_function.flop_function)

  # Calculate the expected FLOP cost.
  expected_alive_conv1 = sum(add_concat_model_stub.expected_alive()['conv1'])
  conv1_op = tf.get_default_graph().get_operation_by_name('conv1/Conv2D')
  conv1_coeff = resource_function.flop_coeff(conv1_op)
  num_channels = 3
  expected_cost = conv1_coeff * num_channels * expected_alive_conv1

  with self.session():
    tf.global_variables_initializer().run()
    # Set gamma values to replicate aliveness in add_concat_model_stub.
    name_to_var = {v.op.name: v for v in tf.global_variables()}
    gamma1 = name_to_var['conv1/BatchNorm/gamma']
    gamma1.assign([0, 1, 1, 0, 1, 0, 1]).eval()
    gamma4 = name_to_var['conv4/BatchNorm/gamma']
    gamma4.assign([0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0]).eval()

    # Enqueue the image and verify the expected cost.
    queue.enqueue((non_image_tensor, image)).run()
    self.assertEqual(expected_cost, calculator.get_cost([conv1_op]).eval())
def is_passthrough(op):
  # Conv ops report the handler's own is_passthrough property; only the
  # batch norm op is treated as passthrough here.
  if op in [self.conv1_op, self.conv2_op]:
    h = output_non_passthrough_op_handler.OutputNonPassthroughOpHandler()
    return h.is_passthrough
  if op == self.batch_norm_op:
    return True
  else:
    return False
def testAssignGrouping_GroupWithOutputOnly(self):
  # Map ops to slices.
  self.op_slice_dict = {
      self.conv1_op: [self.conv1_op_slice],
      self.relu1_op: [self.relu1_op_slice],
      self.conv2_op: [self.conv2_op_slice],
      self.relu2_op: [self.relu2_op_slice],
      self.batch_norm_op: [self.batch_norm_op_slice],
  }

  # Map each slice to a group.  Corresponding op slices have the same group.
  self.op_group_dict = {
      self.batch_norm_op_slice: self.batch_norm_op_group,
  }

  # Call handler to assign grouping.
  handler = output_non_passthrough_op_handler.OutputNonPassthroughOpHandler()
  handler.assign_grouping(self.conv2_op, self.mock_op_reg_manager)

  # Verify that the manager looks up OpSlices for the ops of interest.
  self.mock_op_reg_manager.get_op_slices.assert_has_calls(
      [
          # Checking for ops to process.
          mock.call(self.relu1_op),
          mock.call(self.batch_norm_op),
          # Align.
          mock.call(self.conv2_op),
          mock.call(self.batch_norm_op),
          # Slice.
          mock.call(self.conv2_op),
          mock.call(self.batch_norm_op),
          # Update after align.
          mock.call(self.batch_norm_op),
          # Grouping.
          mock.call(self.conv2_op)
      ])

  # Verify that the manager does not slice any ops.
  self.mock_op_reg_manager.slice_op.assert_not_called()

  # Verify that the manager adds inputs to the processing queue.
  self.mock_op_reg_manager.process_ops.assert_called_once_with(
      [self.relu1_op])

  # Verify that the manager groups conv2 with batch norm.
  self.mock_op_reg_manager.group_op_slices.assert_called_once_with(
      [self.conv2_op_slice, self.batch_norm_op_slice])
def __init__(self,
             ops,
             gamma_threshold,
             regularizer_decorator: Type[
                 generic_regularizers.OpRegularizer] = None,
             decorator_parameters=None,
             force_group=None,
             regularizer_blacklist=None):
  """Creates a GammaFlopsRegularizer object.

  Args:
    ops: A list of tf.Operation. An OpRegularizer will be created for all
      the ops in `ops`, and recursively for all ops they depend on via data
      dependency. Typically `ops` would contain a single tf.Operation, which
      is the output of the network.
    gamma_threshold: A float scalar, used as a 'gamma_threshold' for all
      instances of GammaL1Regularizer created by this class.
    regularizer_decorator: A class of OpRegularizer decorator to use.
    decorator_parameters: A dictionary of parameters to pass to the decorator
      factory. To be used only with decorators that require parameters;
      otherwise use None.
    force_group: List of regexes for ops that should be force-grouped. Each
      regex corresponds to a separate group. Use the '|' operator to specify
      multiple patterns in a single regex. See op_regularizer_manager for
      more detail.
    regularizer_blacklist: List of regexes for ops that should not be
      regularized. See op_regularizer_manager for more detail.
  """
  source_op_handler = batch_norm_source_op_handler.BatchNormSourceOpHandler(
      gamma_threshold)
  if regularizer_decorator:
    source_op_handler = op_handler_decorator.OpHandlerDecorator(
        source_op_handler, regularizer_decorator, decorator_parameters)
  op_handler_dict = collections.defaultdict(
      grouping_op_handler.GroupingOpHandler)
  op_handler_dict.update({
      'FusedBatchNorm':
          source_op_handler,
      'FusedBatchNormV2':
          source_op_handler,
      'Conv2D':
          output_non_passthrough_op_handler.OutputNonPassthroughOpHandler(),
      'ConcatV2':
          concat_op_handler.ConcatOpHandler(),
      'DepthToSpace':
          output_non_passthrough_op_handler.OutputNonPassthroughOpHandler(),
      'DepthwiseConv2dNative':
          depthwise_convolution_op_handler.DepthwiseConvolutionOpHandler(),
      'MatMul':
          output_non_passthrough_op_handler.OutputNonPassthroughOpHandler(),
      'TensorArrayGatherV3':
          leaf_op_handler.LeafOpHandler(),
      'RandomUniform':
          leaf_op_handler.LeafOpHandler(),
      'Reshape':
          leaf_op_handler.LeafOpHandler(),
      'Transpose':
          output_non_passthrough_op_handler.OutputNonPassthroughOpHandler(),
      'ExpandDims':
          output_non_passthrough_op_handler.OutputNonPassthroughOpHandler(),
  })

  self._manager = orm.OpRegularizerManager(
      ops,
      op_handler_dict,
      force_group=force_group,
      regularizer_blacklist=regularizer_blacklist)
  self._calculator = cost_calculator.CostCalculator(
      self._manager, resource_function.flop_function)
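# Illustrative usage sketch (not part of the library); the model-building
# function and the hyperparameter values below are made-up examples. Assumes
# a TF1-style graph whose layers use FusedBatchNorm, so that gamma vectors
# exist for the batch norm source op handler.
#
#   logits = build_model(images)  # hypothetical model-building function
#   regularizer = GammaFlopsRegularizer([logits.op], gamma_threshold=1e-3)
#   total_loss = task_loss + 1e-9 * regularizer.get_regularization_term()
#   flop_cost = regularizer.get_cost()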
def __init__(self,
             ops,
             threshold,
             l1_fraction=0,
             regularizer_decorator: Type[
                 generic_regularizers.OpRegularizer] = None,
             decorator_parameters=None,
             force_group=None,
             regularizer_blacklist=None,
             convert_to_variable=True):
  """Creates a GroupLassoFlopsRegularizer object.

  Args:
    ops: A list of tf.Operation. An OpRegularizer will be created for all
      the ops in `ops`, and recursively for all ops they depend on via data
      dependency. Typically `ops` would contain a single tf.Operation, which
      is the output of the network.
    threshold: A float scalar, used as a 'threshold' for all regularizer
      instances created by this class.
    l1_fraction: Relative weight of L1 in the L1 + L2 regularization.
    regularizer_decorator: A class of OpRegularizer decorator to use.
    decorator_parameters: A dictionary of parameters to pass to the decorator
      factory. To be used only with decorators that require parameters;
      otherwise use None.
    force_group: List of regexes for ops that should be force-grouped. Each
      regex corresponds to a separate group. Use the '|' operator to specify
      multiple patterns in a single regex. See op_regularizer_manager for
      more detail.
    regularizer_blacklist: List of regexes for ops that should not be
      regularized. See op_regularizer_manager for more detail.
    convert_to_variable: If `True`, convert to a variable in the
      `GroupLassoBaseOpHandler`. If your graph creates variables outside of
      `tf.get_variable`, set this to `False`.
  """
  conv2d_handler = conv2d_source_op_handler.Conv2DSourceOpHandler(
      threshold, l1_fraction, convert_to_variable)
  conv2d_transpose_handler = (
      conv2d_transpose_source_op_handler.Conv2DTransposeSourceOpHandler(
          threshold, l1_fraction, convert_to_variable))
  matmul_handler = matmul_source_op_handler.MatMulSourceOpHandler(
      threshold, l1_fraction, convert_to_variable)
  if regularizer_decorator:
    conv2d_handler = op_handler_decorator.OpHandlerDecorator(
        conv2d_handler, regularizer_decorator, decorator_parameters)
    conv2d_transpose_handler = op_handler_decorator.OpHandlerDecorator(
        conv2d_transpose_handler, regularizer_decorator, decorator_parameters)
    matmul_handler = op_handler_decorator.OpHandlerDecorator(
        matmul_handler, regularizer_decorator, decorator_parameters)

  op_handler_dict = collections.defaultdict(
      grouping_op_handler.GroupingOpHandler)
  op_handler_dict.update({
      'Conv2D':
          conv2d_handler,
      'Conv2DBackpropInput':
          conv2d_transpose_handler,
      'ConcatV2':
          concat_op_handler.ConcatOpHandler(),
      'DepthToSpace':
          output_non_passthrough_op_handler.OutputNonPassthroughOpHandler(),
      'DepthwiseConv2dNative':
          depthwise_convolution_op_handler.DepthwiseConvolutionOpHandler(),
      'MatMul':
          matmul_handler,
      'RandomUniform':
          leaf_op_handler.LeafOpHandler(),
      'Reshape':
          leaf_op_handler.LeafOpHandler(),
      'Shape':
          leaf_op_handler.LeafOpHandler(),
      'TensorArrayGatherV3':
          leaf_op_handler.LeafOpHandler(),
      'Transpose':
          output_non_passthrough_op_handler.OutputNonPassthroughOpHandler(),
      'StridedSlice':
          leaf_op_handler.LeafOpHandler(),
  })

  self._manager = orm.OpRegularizerManager(
      ops,
      op_handler_dict,
      force_group=force_group,
      regularizer_blacklist=regularizer_blacklist)
  self._calculator = cost_calculator.CostCalculator(
      self._manager, resource_function.flop_function)
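# Illustrative usage sketch (not part of the library); the model-building
# function and hyperparameter values are made-up examples. Group Lasso acts
# directly on the Conv2D/MatMul weights, so this regularizer is typically
# chosen when the network has no batch norm gammas to regularize.
#
#   logits = build_model(images)  # hypothetical model-building function
#   regularizer = GroupLassoFlopsRegularizer(
#       [logits.op], threshold=1e-3, l1_fraction=0.5)
#   total_loss = task_loss + 1e-9 * regularizer.get_regularization_term()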
def __init__(self,
             ops,
             gamma_threshold,
             hardware,
             batch_size=1,
             regularizer_decorator: Type[
                 generic_regularizers.OpRegularizer] = None,
             decorator_parameters=None,
             force_group=None,
             regularizer_blacklist=None) -> None:
  """Creates a GammaLatencyRegularizer object.

  Latency cost and regularization loss are calculated for a specified
  hardware platform.

  Args:
    ops: A list of tf.Operation. An OpRegularizer will be created for all
      the ops in `ops`, and recursively for all ops they depend on via data
      dependency. Typically `ops` would contain a single tf.Operation, which
      is the output of the network.
    gamma_threshold: A float scalar, used as a 'gamma_threshold' for all
      instances of GammaL1Regularizer created by this class.
    hardware: String name of the hardware platform to target. Must be a key
      from resource_function.PEAK_COMPUTE.
    batch_size: Integer batch size to calculate cost/loss for.
    regularizer_decorator: A class of OpRegularizer decorator to use.
    decorator_parameters: A dictionary of parameters to pass to the decorator
      factory. To be used only with decorators that require parameters;
      otherwise use None.
    force_group: List of regexes for ops that should be force-grouped. Each
      regex corresponds to a separate group. Use the '|' operator to specify
      multiple patterns in a single regex. See op_regularizer_manager for
      more detail.
    regularizer_blacklist: List of regexes for ops that should not be
      regularized. See op_regularizer_manager for more detail.
  """
  source_op_handler = batch_norm_source_op_handler.BatchNormSourceOpHandler(
      gamma_threshold)
  if regularizer_decorator:
    source_op_handler = op_handler_decorator.OpHandlerDecorator(
        source_op_handler, regularizer_decorator, decorator_parameters)
  op_handler_dict = collections.defaultdict(
      grouping_op_handler.GroupingOpHandler)
  op_handler_dict.update({
      'FusedBatchNorm':
          source_op_handler,
      'FusedBatchNormV2':
          source_op_handler,
      'Conv2D':
          output_non_passthrough_op_handler.OutputNonPassthroughOpHandler(),
      'ConcatV2':
          concat_op_handler.ConcatOpHandler(),
      'DepthToSpace':
          output_non_passthrough_op_handler.OutputNonPassthroughOpHandler(),
      'DepthwiseConv2dNative':
          depthwise_convolution_op_handler.DepthwiseConvolutionOpHandler(),
      'MatMul':
          output_non_passthrough_op_handler.OutputNonPassthroughOpHandler(),
      'TensorArrayGatherV3':
          leaf_op_handler.LeafOpHandler(),
      'Transpose':
          output_non_passthrough_op_handler.OutputNonPassthroughOpHandler(),
  })

  self._manager = orm.OpRegularizerManager(
      ops,
      op_handler_dict,
      force_group=force_group,
      regularizer_blacklist=regularizer_blacklist)
  self._calculator = cost_calculator.CostCalculator(
      self._manager,
      resource_function.latency_function_factory(hardware, batch_size))
  self._hardware = hardware
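# Illustrative usage sketch (not part of the library); the model-building
# function and the hardware name below are examples only. Valid hardware
# strings are the keys of resource_function.PEAK_COMPUTE.
#
#   logits = build_model(images)  # hypothetical model-building function
#   regularizer = GammaLatencyRegularizer(
#       [logits.op], gamma_threshold=1e-3, hardware='V100')
#   total_loss = task_loss + 1e-3 * regularizer.get_regularization_term()
#   latency_cost = regularizer.get_cost()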