def compress(cls, old_layer: Conv2D, acceptable_error: float) -> Optional[FactorizedConv2DTucker]:
    """Compress ``old_layer``'s 4D kernel tensor via Tucker decomposition.

    A rank search (`TuckerParamSearcher`) picks factors whose reconstruction
    stays within ``acceptable_error``. If the factorization cannot reduce the
    number of parameters at all, ``None`` is returned.

    :param old_layer: the ``Conv2D`` layer whose kernel is factorized.
    :param acceptable_error: error budget for the factorization search
        (exact error metric is defined by ``TuckerParamSearcher`` —
        NOTE(review): confirm whether this is relative or absolute error).
    :return: a new factorized layer with weights set from the decomposition,
        or ``None`` when compression failed.
    """
    W = K.get_value(old_layer.kernel)
    searcher = TuckerParamSearcher(W)
    C, U_in, U_out = searcher.factorize_in_acceptable_error(acceptable_error)
    kernel = C
    if U_in is None and U_out is None:
        # Neither mode could be factorized profitably: compression failed.
        return None
    if U_in is None:
        input_components = None
        pre_kernel = None
    else:
        input_components = U_in.shape[1]
        # Lift the 2D input factor to a 1x1 conv kernel: (1, 1, in, components).
        pre_kernel = U_in[np.newaxis, np.newaxis, :, :]
    if U_out is None:
        output_components = None
        post_kernel = None
    else:
        output_components = U_out.shape[1]
        # Transposed so the 1x1 conv maps components -> out: (1, 1, components, out).
        post_kernel = U_out.T[np.newaxis, np.newaxis, :, :]
    base_config = old_layer.get_config()
    new_config = convert_config(
        base_config,
        ignore_args=[
            'kernel_constraint',
        ],
        converts={
            # The single Conv2D kernel option fans out to the three factor kernels.
            'kernel_regularizer': [
                'pre_kernel_regularizer',
                'kernel_regularizer',
                'post_kernel_regularizer',
            ],
            'kernel_initializer': [
                'pre_kernel_initializer',
                'kernel_initializer',
                'post_kernel_initializer',
            ],
        },
        new_kwargs={
            'input_components': input_components,
            'output_components': output_components,
        })
    # Use ``cls`` (not a hard-coded class name) so subclasses of
    # FactorizedConv2DTucker inherit a working ``compress`` constructor.
    new_layer = cls(**new_config)
    new_layer.build(
        old_layer.get_input_shape_at(0))  # to initialize weight variables
    K.set_value(new_layer.kernel, kernel)
    if pre_kernel is not None:
        K.set_value(new_layer.pre_kernel, pre_kernel)
    if post_kernel is not None:
        K.set_value(new_layer.post_kernel, post_kernel)
    return new_layer
def get_config(self):
    """Return the serializable layer config.

    Combines the DropConnect config with the Conv2D config; when both
    parents emit the same key, the Conv2D value takes precedence
    (it is merged last).
    """
    merged = dict(DropConnect.get_config(self))
    merged.update(Conv2D.get_config(self))
    return merged