def __init__(self, kwargs):
    """
    Build a convolution-style layer configuration from a kwargs dict.

    All hyper-parameters are pulled out of *kwargs* via ``ModelAssign``
    with project defaults. The hyper-network path is not implemented for
    this layer type and raises ``NotImplementedError``.

    :param kwargs: configuration dict consumed by ``ModelAssign``.
    :raises NotImplementedError: if ``hyper`` is truthy in *kwargs*.
    """
    Layer.__init__(self, kwargs)
    # Identity and graph wiring.
    self.name = ModelAssign(kwargs, 'name', None)
    self.input = ModelAssign(kwargs, 'input', None)
    # Convolution hyper-parameters.
    self.filters = ModelAssign(kwargs, 'filters', 32)
    self.kernel_size = ModelAssign(kwargs, 'kernel_size', 3)
    self.strides = ModelAssign(kwargs, 'strides', 1)
    self.padding = ModelAssign(kwargs, 'padding', 'SAME')
    self.hyper = ModelAssign(kwargs, 'hyper', False)
    self.batchnorm = ModelAssign(kwargs, 'batchnorm', False)
    self.activation = ModelAssign(kwargs, 'activation', 'relu')
    self.use_bias = ModelAssign(kwargs, 'use_bias', True)
    self.dropout = ModelAssign(kwargs, 'dropout', 0.0)
    # Cost counters — presumably filled in later by the graph builder.
    self.num_trainable_parameters = 0
    self.num_non_trainable_parameters = 0
    self.shared_trainable_parameters = 0
    self.MACs = 0
    self.peak_activation_mem = 0
    # Wiring placeholders resolved at build time.
    self.skip_from_names = None
    self.dropout_tensor = None
    # HyperNetwork parameters: only the non-hyper path is supported here.
    if self.hyper:
        raise NotImplementedError
    self.zdims = None
    self.layer_info = None
    self.basic_block_size = None
    self.hidden = None
    self.input_shape = None
    self.output_shape = None
    self.output_tensor = None
def __init__(self, kwargs):
    """
    Accuracy metric configuration.

    :param kwargs: configuration dict providing 'name', 'logits' and
        'labels' (all optional, default ``None``).
    """
    Layer.__init__(self, kwargs)
    # Pull the three wiring attributes out of kwargs, all defaulting to None.
    for key in ('name', 'logits', 'labels'):
        setattr(self, key, ModelAssign(kwargs, key, None))
    # Built lazily when the metric op is constructed.
    self.output_tensor = None
def __init__(self, kwargs):
    """
    Bookkeeping-only layer configuration: a name plus zeroed cost counters
    and unresolved shape/tensor placeholders.

    :param kwargs: configuration dict consumed by ``ModelAssign``.
    """
    Layer.__init__(self, kwargs)
    self.name = ModelAssign(kwargs, 'name', None)
    # Shape/tensor wiring, resolved when the graph is built.
    for key in ('input_shape', 'output_shape', 'output_tensor'):
        setattr(self, key, None)
    # Cost counters — presumably updated by the builder.
    for key in ('num_trainable_parameters', 'num_non_trainable_parameters',
                'shared_trainable_parameters', 'MACs'):
        setattr(self, key, 0)
def __init__(self, kwargs):
    """
    Top-k accuracy for large-scale image classification (e.g. ImageNet).

    :param kwargs: configuration dict providing 'name', 'logits',
        'labels' and 'k' (k defaults to 5, i.e. top-5 accuracy).
    """
    Layer.__init__(self, kwargs)
    # Wiring attributes, all defaulting to None.
    for key in ('name', 'logits', 'labels'):
        setattr(self, key, ModelAssign(kwargs, key, None))
    self.k = ModelAssign(kwargs, 'k', 5)
    # Built lazily when the metric op is constructed.
    self.output_tensor = None
def __init__(self, kwargs):
    """
    Input-node configuration.

    :param kwargs: configuration dict; 'input_shape' is parsed from its
        raw form via ``Str2List``, 'dtype' defaults to 'float32', and
        'mean'/'std' are optional normalization statistics.
    """
    Layer.__init__(self, kwargs)
    self.name = ModelAssign(kwargs, 'name', None)
    # 'input_shape' is parsed into a list via Str2List (format assumed
    # to be the project's string-encoded shape — confirm against callers).
    self.input_shape = Str2List(ModelAssign(kwargs, 'input_shape', None))
    self.dtype = ModelAssign(kwargs, 'dtype', 'float32')
    # Optional normalization statistics.
    self.mean = ModelAssign(kwargs, 'mean', None)
    self.std = ModelAssign(kwargs, 'std', None)
    # Resolved at build time.
    self.output_shape = None
    self.output_tensor = None
    # Cost counters (an input node contributes nothing by itself).
    self.num_trainable_parameters = 0
    self.num_non_trainable_parameters = 0
    self.shared_trainable_parameters = 0
    self.MACs = 0
def __init__(self, kwargs):
    """
    Axis-parameterized layer configuration (operates along 'axis',
    default -1, with an optional activation).

    :param kwargs: configuration dict consumed by ``ModelAssign``.
    """
    Layer.__init__(self, kwargs)
    self.name = ModelAssign(kwargs, 'name', None)
    self.axis = ModelAssign(kwargs, 'axis', -1)
    self.activation = ModelAssign(kwargs, 'activation', None)
    # Shape/tensor wiring, resolved when the graph is built.
    for key in ('input_shape', 'output_shape', 'output_tensor'):
        setattr(self, key, None)
    # Cost counters — presumably updated by the builder.
    for key in ('num_trainable_parameters', 'num_non_trainable_parameters',
                'shared_trainable_parameters', 'MACs', 'peak_activation_mem'):
        setattr(self, key, 0)
def __init__(self, kwargs):
    """
    Concatenation layer: joins two tensors when their shapes allow it.

    :param kwargs: configuration dict providing 'name' and an optional
        'activation' applied after the concatenation.
    """
    Layer.__init__(self, kwargs)
    self.name = ModelAssign(kwargs, 'name', None)
    self.activation = ModelAssign(kwargs, 'activation', None)
    # Shape/tensor wiring, resolved when the graph is built.
    for key in ('input_shape', 'output_shape', 'output_tensor'):
        setattr(self, key, None)
    # Cost counters — presumably updated by the builder.
    for key in ('num_trainable_parameters', 'num_non_trainable_parameters',
                'shared_trainable_parameters', 'mem_cost', 'MACs',
                'peak_activation_mem'):
        setattr(self, key, 0)
def __init__(self, kwargs):
    """
    Depthwise-convolution-style layer configuration.

    Supports an optional hyper-network mode: when ``hyper`` is set, a
    TF1 placeholder for the per-layer embedding is created along with
    the hyper-network sizing parameters.

    :param kwargs: configuration dict consumed by ``ModelAssign``.
    """
    Layer.__init__(self, kwargs)
    # Identity and graph wiring.
    self.name = ModelAssign(kwargs, 'name', None)
    self.input = ModelAssign(kwargs, 'input', None)
    # Convolution hyper-parameters.
    self.kernel_size = ModelAssign(kwargs, 'kernel_size', 3)
    self.strides = ModelAssign(kwargs, 'strides', 1)
    self.padding = ModelAssign(kwargs, 'padding', 'SAME')
    self.hyper = ModelAssign(kwargs, 'hyper', False)
    self.batchnorm = ModelAssign(kwargs, 'batchnorm', False)
    self.activation = ModelAssign(kwargs, 'activation', 'relu')
    self.use_bias = ModelAssign(kwargs, 'use_bias', True)
    self.depthwise_multiplier = ModelAssign(kwargs, 'depthwise_multiplier', 1)
    # Cost counters — presumably filled in later by the graph builder.
    self.num_trainable_parameters = 0
    self.num_non_trainable_parameters = 0
    self.shared_trainable_parameters = 0
    self.mem_cost = 0
    self.MACs = 0
    self.peak_activation_mem = 0
    self.skip_from_names = None
    # HyperNetwork parameters.
    if self.hyper:
        self.zdims = ModelAssign(kwargs, 'hyper_zdims', 4)
        # NOTE(review): the placeholder name concatenates without a
        # separator (e.g. "conv1layer_info") — confirm this is intended.
        self.layer_info = tf.placeholder(
            dtype=tf.float32,
            shape=[1, self.zdims],
            name=self.name + 'layer_info')
        self.basic_block_size = Str2List(
            ModelAssign(kwargs, 'hyper_basic_block_size', None))
        self.hidden = ModelAssign(kwargs, 'hyper_hidden', 16)
    else:
        self.zdims = None
        self.layer_info = None
        self.basic_block_size = None
        self.hidden = None
    # Resolved at build time.
    self.input_shape = None
    self.output_shape = None
    self.output_tensor = None
def __init__(self, kwargs):
    """
    Fully-connected (dense) layer configuration.

    :param kwargs: configuration dict; 'units' defaults to 10,
        'activation' to 'relu', 'dropout' to 0.0.
    """
    Layer.__init__(self, kwargs)
    # Identity and graph wiring.
    self.name = ModelAssign(kwargs, 'name', None)
    self.input = ModelAssign(kwargs, 'input', None)
    # Dense hyper-parameters.
    self.units = ModelAssign(kwargs, 'units', 10)
    self.use_bias = ModelAssign(kwargs, 'use_bias', True)
    self.batchnorm = ModelAssign(kwargs, 'batchnorm', False)
    self.trainable = ModelAssign(kwargs, 'trainable', True)
    self.activation = ModelAssign(kwargs, 'activation', 'relu')
    self.dropout = ModelAssign(kwargs, 'dropout', 0.0)
    # Wiring placeholders resolved at build time.
    for key in ('skip_from_names', 'input_shape', 'output_shape',
                'output_tensor', 'dropout_tensor'):
        setattr(self, key, None)
    # Cost counters — presumably updated by the builder.
    for key in ('num_trainable_parameters', 'num_non_trainable_parameters',
                'shared_trainable_parameters', 'MACs', 'mem_cost',
                'peak_activation_mem'):
        setattr(self, key, 0)
def __init__(self, kwargs):
    """
    Minimal named layer: only a name and an output tensor slot.

    :param kwargs: configuration dict consumed by ``ModelAssign``.
    """
    Layer.__init__(self, kwargs)
    # Resolved lazily when the graph is built.
    self.output_tensor = None
    self.name = ModelAssign(kwargs, 'name', None)
def __init__(self, kwargs):
    """
    Pass-through constructor: delegates all configuration handling to
    the ``Layer`` base class and adds no state of its own.

    :param kwargs: configuration dict forwarded to ``Layer.__init__``.
    """
    Layer.__init__(self, kwargs)