def build(self, _input, net, store_output_op=False):
    assert isinstance(net, BasicModel)
    output = _input
    if not self.ready:
        return output
    with tf.variable_scope(self._id):
        self._scope = tf.get_variable_scope().name
        param_initializer = self.param_initializer
        if self.pre_activation:
            # batch normalization
            if self.use_bn:
                output = BasicModel.batch_norm(
                    output, net.is_training, net.net_config.bn_epsilon,
                    net.net_config.bn_decay, param_initializer=param_initializer)
            # activation
            output = BasicModel.activation(output, self.activation)
            # pooling
            if self._type == 'avg':
                output = BasicModel.avg_pool(output, k=self.kernel_size, s=self.strides)
            elif self._type == 'max':
                output = BasicModel.max_pool(output, k=self.kernel_size, s=self.strides)
            else:
                raise ValueError('Unsupported pooling type: %s' % self._type)
        else:
            # pooling
            if self._type == 'avg':
                output = BasicModel.avg_pool(output, k=self.kernel_size, s=self.strides)
            elif self._type == 'max':
                output = BasicModel.max_pool(output, k=self.kernel_size, s=self.strides)
            else:
                raise ValueError('Unsupported pooling type: %s' % self._type)
            # batch normalization
            if self.use_bn:
                output = BasicModel.batch_norm(
                    output, net.is_training, net.net_config.bn_epsilon,
                    net.net_config.bn_decay, param_initializer=param_initializer)
            # activation
            output = BasicModel.activation(output, self.activation)
        # dropout
        output = BasicModel.dropout(output, self.keep_prob, net.is_training)
    if store_output_op:
        self.output_op = output
    return output
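# A minimal sketch of what the BasicModel.avg_pool / BasicModel.max_pool
# helpers used above presumably wrap (an assumption -- their implementation
# is not shown in this file): plain tf.nn pooling with a square k x k
# window, stride s, and SAME padding.
import tensorflow as tf

def avg_pool_sketch(x, k, s):
    # hypothetical stand-in for BasicModel.avg_pool(output, k=..., s=...)
    return tf.nn.avg_pool(x, ksize=[1, k, k, 1], strides=[1, s, s, 1], padding='SAME')

def max_pool_sketch(x, k, s):
    # hypothetical stand-in for BasicModel.max_pool(output, k=..., s=...)
    return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, s, s, 1], padding='SAME')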
def build(self, _input, net, store_output_op=False):
    assert isinstance(net, BasicModel)
    output = _input
    if not self.ready:
        return output
    with tf.variable_scope(self._id):
        self._scope = tf.get_variable_scope().name
        param_initializer = self.param_initializer
        # flatten the input if it is not already rank-2
        output = BasicModel.flatten(output)
        if self.pre_activation:
            # batch normalization
            if self.use_bn:
                output = BasicModel.batch_norm(
                    output, net.is_training, net.net_config.bn_epsilon,
                    net.net_config.bn_decay, param_initializer=param_initializer)
            # activation
            output = BasicModel.activation(output, self.activation)
            # FC
            output = BasicModel.fc_layer(
                output, self.units, self.use_bias, param_initializer=param_initializer)
        else:
            # FC
            output = BasicModel.fc_layer(
                output, self.units, self.use_bias, param_initializer=param_initializer)
            # batch normalization
            if self.use_bn:
                output = BasicModel.batch_norm(
                    output, net.is_training, net.net_config.bn_epsilon,
                    net.net_config.bn_decay, param_initializer=param_initializer)
            # activation
            output = BasicModel.activation(output, self.activation)
        # dropout
        output = BasicModel.dropout(output, self.keep_prob, net.is_training)
    if store_output_op:
        self.output_op = output
    return output
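# A minimal sketch of the flatten step above (an assumption --
# BasicModel.flatten is not shown in this file): a fully connected layer
# needs a rank-2 input, so all non-batch dimensions are collapsed into one.
# Assumes the non-batch dimensions are statically known.
import numpy as np
import tensorflow as tf

def flatten_sketch(x):
    # hypothetical stand-in for BasicModel.flatten(output)
    dim = int(np.prod(x.get_shape().as_list()[1:]))  # product of non-batch dims
    return tf.reshape(x, [-1, dim])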
def build(self, _input, net, store_output_op=False):
    assert isinstance(net, BasicModel)
    output = _input
    if not self.ready:
        return output
    with tf.variable_scope(self._id):
        self._scope = tf.get_variable_scope().name
        param_initializer = self.param_initializer
        if self.pre_activation:
            # batch normalization
            if self.use_bn:
                output = BasicModel.batch_norm(
                    output, net.is_training, net.net_config.bn_epsilon,
                    net.net_config.bn_decay, param_initializer=param_initializer)
            # activation
            output = BasicModel.activation(output, self.activation)
            # convolutional
            output = BasicModel.conv2d(output, self.filter_num, self.kernel_size,
                                       self.strides, param_initializer=param_initializer)
        else:
            # convolutional
            output = BasicModel.conv2d(output, self.filter_num, self.kernel_size,
                                       self.strides, param_initializer=param_initializer)
            # batch normalization
            if self.use_bn:
                output = BasicModel.batch_norm(
                    output, net.is_training, net.net_config.bn_epsilon,
                    net.net_config.bn_decay, param_initializer=param_initializer)
            # activation
            output = BasicModel.activation(output, self.activation)
        # dropout
        output = BasicModel.dropout(output, self.keep_prob, net.is_training)
    if store_output_op:
        self.output_op = output
    return output
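# A self-contained sketch of the two operation orderings shared by all three
# build() methods above. tf.layers.conv2d and tf.layers.batch_normalization
# are stand-ins for the BasicModel helpers, whose internals are not shown here.
import tensorflow as tf

def conv_block_sketch(x, filters, kernel_size, strides, is_training,
                      pre_activation=True):
    if pre_activation:
        # pre-activation order: BN -> activation -> weight op
        x = tf.layers.batch_normalization(x, training=is_training)
        x = tf.nn.relu(x)
        x = tf.layers.conv2d(x, filters, kernel_size, strides, padding='same')
    else:
        # classic order: weight op -> BN -> activation
        x = tf.layers.conv2d(x, filters, kernel_size, strides, padding='same')
        x = tf.layers.batch_normalization(x, training=is_training)
        x = tf.nn.relu(x)
    return x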