def __init__(self, inputs=None, depth=None, on_value=None, off_value=None, axis=None, dtype=None, name='input'):
    """One-hot-encode `inputs` and expose the result as a TL input layer.

    Parameters
    ----------
    inputs : tf.Tensor / placeholder
        Tensor of integer class indices to encode.
    depth : int
        Size of the one-hot dimension (number of classes). Required.
    on_value, off_value, axis, dtype
        Forwarded verbatim to ``tf.one_hot``.
    name : str
        Layer name.

    Raises
    ------
    RuntimeError
        If `depth` is None — previously this was only logged and
        ``tf.one_hot`` then failed later with an opaque TypeError.
    """
    super(OneHotInputLayer, self).__init__(prev_layer=inputs, name=name)
    logging.info("OneHotInputLayer %s: %s" % (self.name, inputs.get_shape()))

    if depth is None:
        # Fail fast with a clear message instead of letting tf.one_hot crash.
        _err = " [*] depth == None the number of output units is undefined"
        logging.error(_err)
        raise RuntimeError(_err)

    self.outputs = tf.one_hot(inputs, depth, on_value=on_value, off_value=off_value, axis=axis, dtype=dtype)
    self._add_layers(self.outputs)
def _argument_dict_checkup(self, args): if not isinstance(args, dict) and args is not None: err_msg = "One of the argument given to %s should be formatted as a dictionnary" % self.__class__.__name__ logging.error(err_msg) raise AssertionError(err_msg) return args if args is not None else {}
def _PS(self, X, r, n_out_channels):
    """Sub-pixel shuffle: move channel data into spatial blocks via ``tf.depth_to_space``.

    The input's channel count must equal ``(r**2) * n_out_channels``;
    otherwise an Exception is raised. When `n_out_channels` is not a
    positive number, the mismatch is logged and `X` is returned untouched.
    """
    _err_log = "SubpixelConv2d: The number of input channels == (scale x scale) x The number of output channels"

    # Guard clause: an invalid output-channel count is logged, not raised.
    if n_out_channels < 1:
        logging.error(_err_log)
        return X

    in_channels = int(X.get_shape()[-1])
    if in_channels != (r**2) * n_out_channels:
        raise Exception(_err_log)

    X = tf.depth_to_space(X, r)
    return X
def __init__(
        self,
        prev_layer,
        slim_layer,
        slim_args=None,
        name='tfslim_layer',
):
    """Wrap a tf-slim network-building function as a TensorLayer layer.

    Parameters
    ----------
    prev_layer : Layer
        Previous layer providing ``self.inputs``.
    slim_layer : callable
        A slim network function of the form ``net, end_points = slim_layer(inputs, **slim_args)``.
    slim_args : dict or None
        Keyword arguments forwarded to `slim_layer` (normalized to ``{}`` by the base class).
    name : str
        Variable scope name; must match the prefix of the slim checkpoint
        for variable restoration to find anything.

    Raises
    ------
    ValueError
        If `slim_layer` is None.
    """
    if slim_layer is None:
        raise ValueError("slim layer is None")

    super(SlimNetsLayer, self).__init__(prev_layer=prev_layer, slim_args=slim_args, name=name)
    logging.info("SlimNetsLayer %s: %s" % (self.name, slim_layer.__name__))

    with tf.variable_scope(name):
        self.outputs, end_points = slim_layer(self.inputs, **self.slim_args)
        slim_variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=self.name)

    if not slim_variables:
        # Fixed typo ("begining" -> "beginning") in the diagnostic message.
        logging.error(
            "No variables found under %s : the name of SlimNetsLayer should be matched with the beginning of the ckpt file, see tutorial_inceptionV3_tfslim.py for more details"
            % self.name)

    # Every end-point tensor is tracked as a layer output.
    self._add_layers(list(end_points.values()))
    self._add_params(slim_variables)
def __init__(self, prev_layer, act=None, name=None, *args, **kwargs):
    """Base-class constructor: chain state from `prev_layer` into this layer.

    Dispatches on the type of `prev_layer`:
    a single ``Layer``, a list of layers (multi-input layers such as
    ConcatLayer), a raw ``tf.Tensor``/``tf.Variable`` (input layers only),
    or any other non-None object exposing ``all_layers``/``all_params``
    (e.g. tl.models).

    Raises ValueError when `name` is None and RuntimeError when a raw
    tensor is fed to a non-input layer class.
    """
    # Fresh bookkeeping containers for this layer instance.
    self.inputs = None
    self.outputs = None
    self.all_layers = list()
    self.all_params = list()
    self.all_drop = dict()

    if name is None:
        raise ValueError('Layer must have a name.')

    # Every extra keyword is normalized (dict or None -> dict) and stored
    # as an attribute, e.g. slim_args in SlimNetsLayer.
    for key in kwargs.keys():
        setattr(self, key, self._argument_dict_checkup(kwargs[key]))

    # tf.identity is treated the same as "no activation".
    self.act = act if act not in [None, tf.identity] else None

    # Prefix the layer name with the enclosing variable scope, if any.
    scope_name = tf.get_variable_scope().name
    self.name = scope_name + '/' + name if scope_name else name

    if isinstance(prev_layer, Layer):
        # 1. for normal layer have only 1 input i.e. DenseLayer
        # Hint : list(), dict() is pass by value (shallow), without them,
        # it is pass by reference.
        self.inputs = prev_layer.outputs

        self._add_layers(prev_layer.all_layers)
        self._add_params(prev_layer.all_params)
        self._add_dropout_layers(prev_layer.all_drop)

    elif isinstance(prev_layer, list):
        # 2. for layer have multiply inputs i.e. ConcatLayer
        self.inputs = [layer.outputs for layer in prev_layer]

        # Flatten the per-layer bookkeeping lists across all inputs.
        self._add_layers(sum([l.all_layers for l in prev_layer], []))
        self._add_params(sum([l.all_params for l in prev_layer], []))
        self._add_dropout_layers(sum([list(l.all_drop.items()) for l in prev_layer], []))

    elif isinstance(prev_layer, tf.Tensor) or isinstance(prev_layer, tf.Variable):  # placeholders
        # 3. raw tensors are only accepted by the dedicated input-layer classes.
        if self.__class__.__name__ not in [
                'InputLayer', 'OneHotInputLayer', 'Word2vecEmbeddingInputlayer', 'EmbeddingInputlayer',
                'AverageEmbeddingInputlayer'
        ]:
            _err = "Please use `tl.layers.InputLayer` to convert Tensor/Placeholder to a TL layer"
            logging.error(_err)
            raise RuntimeError(_err)

        self.inputs = prev_layer

    elif prev_layer is not None:
        # 4. tl.models
        self._add_layers(prev_layer.all_layers)
        self._add_params(prev_layer.all_params)
        self._add_dropout_layers(prev_layer.all_drop)

        if hasattr(prev_layer, "outputs"):
            self.inputs = prev_layer.outputs
#! /usr/bin/python # -*- coding: utf-8 -*- from tensorlayer.layers.core import Layer from tensorlayer import tl_logging as logging from tensorlayer.decorators import deprecated_alias from tensorlayer.lazy_imports import LazyImport try: roi_pooling = LazyImport( "tensorlayer.third_party.roi_pooling.roi_pooling.roi_pooling_ops") except Exception as e: logging.error(e) logging.error( "HINT: 1. https://github.com/deepsense-ai/roi-pooling 2. tensorlayer/third_party/roi_pooling" ) __all__ = [ 'ROIPoolingLayer', ] class ROIPoolingLayer(Layer): """ The region of interest pooling layer. Parameters -----------