Example #1
    def __init__(self,
                 lid,
                 par,
                 batch_size,
                 num_lstm,
                 dim_frame,
                 l2_decay,
                 use_th=True,
                 activation='tanh',
                 inner_activation='sigmoid',
                 **kwargs):

        self.num_lstm = num_lstm
        self.dim_frame = dim_frame
        self.batch_size = batch_size
        self.inner_activation = activations.get(inner_activation)
        self.activation = activations.get(activation)
        self.lstmpar = par
        self.l2_decay = l2_decay
        self.use_th = use_th
        self.bn = BatchNormalization(lid, self.batch_size, self.num_lstm)
        self.bn.build()
        self.id = lid
        kwargs['input_shape'] = (self.batch_size, self.num_lstm)
        super(LSTM_Dec, self).__init__(**kwargs)
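Every example on this page resolves its activation by name through activations.get (or an aliased a.get). The internals of those modules are not shown in these excerpts; a minimal sketch of such a name-to-function registry, assuming a plain dict-backed lookup, might be:

import numpy as np

# Hypothetical dict-backed registry; the names mirror the strings used in the examples.
def linear(x):
    return x

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

_ACTIVATIONS = {'linear': linear, 'tanh': np.tanh, 'sigmoid': sigmoid}

def get(identifier):
    # Accept either a registered name or an already-callable activation.
    if callable(identifier):
        return identifier
    return _ACTIVATIONS[identifier]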
Example #2
    def __init__(self, *outshape, **kargs):
        # Output shape
        if len(outshape) == 1 and isinstance(outshape[0], tuple):
            self.__outshape = outshape[0]
        else:
            self.__outshape = outshape

        # Input shape
        self.__inshape = None

        # Resolve the activation function (default: linear)
        self.__activation = activations.get('linear')

        # Layer id within the model (the layer's index in the model)
        self.__id = 0
        # Layer name
        self.__name = '/%d-%s' % (self.__id, self.tag)

        # Read optional keyword arguments
        #print("Layer kargs:", kargs)
        if 'inshape' in kargs:
            self.__inshape = kargs['inshape']
            if not isinstance(self.__inshape, tuple):
                self.__inshape = (self.__inshape,)
            #print("------inshape:", self.__inshape)

        if 'activation' in kargs:
            self.__activation = activations.get(kargs['activation'])


        if self.__inshape is not None:
            self.init_params()
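The variadic *outshape handling above accepts both Layer(3, 4) and Layer((3, 4)). A standalone sketch of the same normalization idiom, with a hypothetical function name:

def normalize_shape(*outshape):
    # A single tuple argument is unwrapped; separate ints are kept as-is.
    if len(outshape) == 1 and isinstance(outshape[0], tuple):
        return outshape[0]
    return outshape

assert normalize_shape(3, 4) == (3, 4)
assert normalize_shape((3, 4)) == (3, 4)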
Example #3
    def __init__(self, lid, par, batch_size, num_lstm, dim_frame, l2_decay, activation='tanh', inner_activation='sigmoid', **kwargs):
        self.num_lstm = num_lstm
        self.dim_frame = dim_frame
        self.has_input_frame = True
        self.batch_size = batch_size
        self.inner_activation = activations.get(inner_activation)
        self.activation = activations.get(activation)
        self.bn = BatchNormalization(lid, self.batch_size, self.num_lstm)
        self.bn.build()
        self.lstmpar = par
        self.l2_decay = l2_decay
        self.id = lid
        kwargs['input_shape'] = (self.batch_size, self.num_lstm)
        super(LSTM_Unit, self).__init__(**kwargs)
Example #4
    def __init__(self, in_dim, name=None, init='uniform', activation='linear'):

        if name is not None:
            self.set_name(name)
        self.name = name
        self.in_dim = in_dim
        self.init = initializations.get(init)
        self.activation = activations.get(activation)
        self.init_params()
Example #5
    def __init__(self, activation='tanh'):
        self.__activation = activations.get(activation)

        self.__memories = []
        self.__forgets = []
        self.__inputs = []
        self.__mcs = []

        self.__pre_mem = None
        self.__pre_mem_grad = None
Example #6
    def __init__(self, input_dim, output_dim=128, 
        init='uniform', inner_init='orthogonal', 
        activation='tanh', inner_activation='hard_sigmoid',
        truncate_gradient=-1, weights=None, return_sequences=False):

        self.input_dim = input_dim
        self.output_dim = output_dim
        self.truncate_gradient = truncate_gradient
        self.return_sequences = return_sequences

        self.init = initializations.get(init)
        self.inner_init = initializations.get(inner_init)
        self.activation = activations.get(activation)
        self.inner_activation = activations.get(inner_activation)
        self.input = T.matrix()

        self.W_i = self.init((self.input_dim, self.output_dim))
        self.U_i = self.inner_init((self.output_dim, self.output_dim))
        self.b_i = shared_zeros((self.output_dim,))

        self.W_f = self.init((self.input_dim, self.output_dim))
        self.U_f = self.inner_init((self.output_dim, self.output_dim))
        self.b_f = shared_zeros((self.output_dim,))

        self.W_c = self.init((self.input_dim, self.output_dim))
        self.U_c = self.inner_init((self.output_dim, self.output_dim))
        self.b_c = shared_zeros((self.output_dim,))

        self.W_o = self.init((self.input_dim, self.output_dim))
        self.U_o = self.inner_init((self.output_dim, self.output_dim))
        self.b_o = shared_zeros((self.output_dim,))

        self.params = [
            self.W_i, self.U_i, self.b_i,
            self.W_c, self.U_c, self.b_c,
            self.W_f, self.U_f, self.b_f,
            self.W_o, self.U_o, self.b_o,
        ]

        if weights is not None:
            self.set_weights(weights)
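Each of the four gates (i, f, c, o) above owns a W of shape (input_dim, output_dim), a U of shape (output_dim, output_dim), and a bias of length output_dim, so the total parameter count is easy to verify. A quick worked check, assuming input_dim=32 and output_dim=128:

input_dim, output_dim = 32, 128
# W: input-to-hidden, U: hidden-to-hidden, b: bias, per gate.
per_gate = input_dim * output_dim + output_dim * output_dim + output_dim
print(4 * per_gate)  # 82432 trainable scalars across the four gates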
Example #7
    def __init__(self, input_dim, output_dim, depth=3,
        init='uniform', inner_init='orthogonal', 
        activation='sigmoid', inner_activation='hard_sigmoid',
        weights=None, truncate_gradient=-1, return_sequences=False):
        self.init = initializations.get(init)
        self.inner_init = initializations.get(inner_init)
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.truncate_gradient = truncate_gradient
        self.activation = activations.get(activation)
        self.inner_activation = activations.get(inner_activation)
        self.depth = depth
        self.return_sequences = return_sequences
        self.input = T.matrix()

        self.W = self.init((self.input_dim, self.output_dim))
        self.Us = [self.init((self.output_dim, self.output_dim)) for _ in range(self.depth)]
        self.b = shared_zeros((self.output_dim,))
        self.params = [self.W] + self.Us + [self.b]

        if weights is not None:
            self.set_weights(weights)
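Unlike the previous example, the recurrent weights here are a list of depth square matrices rather than a single U. A quick shape check, assuming output_dim=64 and depth=3, with plain numpy standing in for the initializer:

import numpy as np

output_dim, depth = 64, 3
Us = [np.random.uniform(size=(output_dim, output_dim)) for _ in range(depth)]
print(len(Us), Us[0].shape)  # 3 (64, 64)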
Example #8
    def __init__(self, input_dim, output_dim, init='uniform', activation='linear', weights=None):
        self.init = initializations.get(init)
        self.activation = activations.get(activation)
        self.input_dim = input_dim
        self.output_dim = output_dim

        self.input = T.matrix()
        self.W = self.init((self.input_dim, self.output_dim))
        self.b = shared_zeros((self.output_dim,))

        self.params = [self.W, self.b]

        if weights is not None:
            self.set_weights(weights)
Example #9
    def __init__(self,
                 in_dim,
                 out_dim,
                 name=None,
                 init='xavier',
                 activation='relu'):

        if name is not None:
            self.set_name(name)
        self.name = name
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.init = initializations.get(init)
        self.activation = activations.get(activation)
        self.init_params()
Example #10
	def __init__(self, input_n, output_n, init='glorot_uniform', activation='linear'):

		super(FullyConnected, self).__init__()
		
		self.input_n = input_n
		self.output_n = output_n
		self.init = initializations.get(init)
		self.activation = activations.get(activation)

		self.input = T.matrix()

		self.W = self.init((self.input_n, self.output_n))
		self.b = shared_zeros((self.output_n,))

		self.params = [self.W, self.b]
Example #11
    def __init__(self, activation='linear'):

        # Resolve the activation function
        self.__activation = activations.get(activation)

        # Seed for the ids of this layer's child layers
        self.__child_id_seed = 1

        # Parent layer
        self.__parent = None

        # Layer id within the model (this layer's index within its parent)
        self.__id = 0
        # Layer name
        self.__name = self.tag

        # Previous layer
        self.__prev = None
Example #12
    def __init__(self,
                 output_size,
                 input_shape,
                 activation='linear',
                 bias_type="per_node",
                 trainable=True,
                 weight_initializer="uniform",
                 bias_initializer="uniform") -> None:
        """
			The Dense layer is one of the most simplest layers. 

			Arguments:
				output_size        : int                 : The number of outputs for this layer.
				input_shape        : tuple/int      	 : The shape (excluding batch size) of the input data this layer will receive.
				activation         : Base_Activation/str : A mathematical function that generally applies a non-linear mapping to some data.
				bias_type          : str      	         : Has three settings, per_node (a bias with weights for every output), 
												         - single (one bias weight for all outputs), none (no bias is used).
				trainable          : bool     	         : If True, the vars in this layer will update based on the calculated loss of this layer W.R.T the vars.
				weight_initializer : str                 : An initializer function to set the values for the weights.
				bias_initializer   : st                  : An initializer function to set the values for the bias'.

		"""

        super().__init__('Dense')

        # Flattens the input_shape if data type is tuple.
        if isinstance(input_shape, tuple):
            input_shape = len(np.zeros(input_shape).flatten())

        self.built = False
        self.layer_shape = (output_size, input_shape)
        self.bias_type = bias_type
        self.trainable = trainable

        self.activation = a.get(activation)

        self.weights = 0
        self.bias = 0

        self.weight_initializer = weight_initializer
        self.bias_initializer = bias_initializer

        self.optimizations = {"weight": [0, 0], "bias": [0, 0]}
        self.cached_data = {}
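The tuple-flattening step above simply counts the elements of input_shape, which is equivalent to taking the product of its dimensions. A quick check, assuming an example shape of (28, 28, 1):

import numpy as np

input_shape = (28, 28, 1)
flattened = len(np.zeros(input_shape).flatten())
assert flattened == int(np.prod(input_shape)) == 784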
Example #13
    def __init__(self, nb_filter, stack_size, nb_row, nb_col, 
        init='uniform', activation='linear', weights=None, 
        image_shape=None, border_mode='valid', subsample=(1,1)):

        self.init = initializations.get(init)
        self.activation = activations.get(activation)
        self.subsample = subsample
        self.border_mode = border_mode
        self.image_shape = image_shape
        
        self.input = T.tensor4()
        self.W_shape = (nb_filter, stack_size, nb_row, nb_col)
        self.W = self.init(self.W_shape)
        self.b = shared_zeros((nb_filter,))

        self.params = [self.W, self.b]

        if weights is not None:
            self.set_weights(weights)
Example #14
    def __init__(self, layer_sizes, activation, cost, reg_lambda=0.01):
        '''
        Arguments:
            layer_sizes {list} -- Initialize the NN with the number of layers and units per layer.
            Takes a list of numbers: the length of the list is the number of layers,
            and each number in the list is the number of units in that layer.
            activation {str} -- name of the activation function, resolved via activations.get.
            cost {str} -- name of the cost function, resolved via cost_funcs.get.

        Keyword Arguments:
            reg_lambda {float} -- regularization lambda value (default: {0.01})
        '''
        self.num_layers = len(layer_sizes)
        self.layer_sizes = layer_sizes
        self.activation = activations.get(activation)
        self.reg_lambda = reg_lambda
        self.cost = cost_funcs.get(cost)

        self.biases = [np.random.randn(y, 1) for y in layer_sizes[1:]]
        self.weights = [
            np.random.randn(y, x)
            for x, y in zip(layer_sizes[:-1], layer_sizes[1:])
        ]
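The zip pairs consecutive layer sizes, so each weight matrix has shape (fan_out, fan_in). A worked example, assuming layer_sizes = [784, 30, 10]:

layer_sizes = [784, 30, 10]
shapes = [(y, x) for x, y in zip(layer_sizes[:-1], layer_sizes[1:])]
print(shapes)  # [(30, 784), (10, 30)] -- one matrix per layer-to-layer connection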
Example #15
    def load(self, layer_data) -> None:
        """
			Takes the layer_data from the model this layer belongs to, and sets all vars equal to each key in layer_data.

			Arguments:
				layer_data : dict : A dictonary of saved vars from when this layer was first built and then saved.

		"""

        self.name = layer_data["name"]
        self.built = layer_data["built"]
        self.layer_shape = tuple(layer_data["layer_shape"])
        self.bias_type = layer_data["bias_type"]
        self.trainable = layer_data["trainable"]
        self.activation = a.get(layer_data["activation"])
        self.weight_initializer = layer_data["weight_initializer"]
        self.bias_initializer = layer_data["bias_initializer"]
        self.weights = np.asarray(layer_data["weights"])
        self.bias = np.asarray(layer_data["bias"])
        self.optimizations = {
            key: np.asarray(layer_data["optimizations"][key])
            for key in layer_data["optimizations"]
        }
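The loader expects layer_data to carry one entry per attribute set above. A minimal hypothetical dict with the keys the code reads (the values are illustrative placeholders, not real saved output):

import numpy as np

layer_data = {
    "name": "Dense",
    "built": True,
    "layer_shape": [6, 784],                  # re-tupled on load
    "bias_type": "per_node",
    "trainable": True,
    "activation": "relu",                     # resolved back through a.get
    "weight_initializer": "uniform",
    "bias_initializer": "uniform",
    "weights": np.zeros((6, 784)).tolist(),   # nested lists become np arrays
    "bias": np.zeros(6).tolist(),
    "optimizations": {"weight": [0, 0], "bias": [0, 0]},
}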
Example #16
	def __init__(self, activation):
		super(Activation, self).__init__()
		self.activation = activations.get(activation)
Example #17
    def __init__(self, activation):
        self.activation = activations.get(activation)
        self.params = []
Example #18
	def __init__(self, output_size, input_shape, activation="relu", bias_type="per_node", 
				 trainable=True, filter_size=(1,1), stride=(1,1), padding=0,
				 weight_initializer="random", bias_initializer="random") -> None:

		"""
			The conv layer or convolutional layer creates a numpy array that is convolved or cross-correlated over some data.
			The conv layer also makes use of shared variables, meaning that compared to a dense layer there will be less variables.

			Arguments:
				output_size        : int                 : An int of the output size, (E.X output_size=6 returns a numpy array of size 
				                                         - [batch_size, ..., 6] "an image with 6 channels").
				input_shape        : tuple               : A tuple of the input shape, (E.X input_shape=(28, 28, 1) which in this example is a 28*28 image with 1 channel).
				activation         : Base_Activation/str : A mathematical function that generally applies a non-linear mapping to some data.
				bias_type          : str      	         : Has three settings, per_node (a bias with weights for every output), 
												         - single (one bias weight for all outputs), none (no bias is used).
				trainable          : bool     	         : If True, the vars in this layer will update based on the calculated loss of this layer W.R.T the vars.
				filter_size        : tuple/int           : A tuple of 2 values or a int specifying the height and width of each segment of data that will be convolved over.
				stride             : tuple/int           : A tuple or int of values specifying the stride for the height and width when convolving over some data.
				padding            : int                 : An int specifying the amount of padding to be added around the input data.
				weight_initializer : str                 : An initializer function to set the values for the weights.
				bias_initializer   : st                  : An initializer function to set the values for the bias'.

		"""

		super().__init__("Conv")

		# channels_first is presumably a module-level flag; it is not defined
		# in this excerpt.
		if len(input_shape) == 2:
			if channels_first:
				input_shape = (1, input_shape[0], input_shape[1])
			else:
				input_shape = (input_shape[0], input_shape[1], 1)

		if isinstance(filter_size, int):
			filter_size = (filter_size, filter_size)

		if isinstance(stride, int):
			stride = (stride, stride)


		self.built = False
		self.layer_shape = (output_size, input_shape)
		self.activation = a.get(activation)
		self.bias_type = bias_type
		self.trainable = trainable

		self.filter_size = filter_size
		self.stride = stride
		self.padding = padding

		self.weight_initializer = weight_initializer
		self.bias_initializer = bias_initializer

		self.weights = 0
		self.bias = 0

		self.optimizations = {"weight":[0,0], "bias":[0,0]}
		self.cached_data = {}

		# Put this into its own function called get_output_size():
		# calculates the output sizes for the layer.
		height_output_size = int((self.layer_shape[1][0]-self.filter_size[0] + (2*self.padding))/self.stride[0]) + 1
		width_output_size = int((self.layer_shape[1][1]-self.filter_size[1] + (2*self.padding))/self.stride[1]) + 1
		self.output_shape = (height_output_size, width_output_size, output_size)
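The two expressions above are the usual convolution output-size formula, out = (in - filter + 2 * padding) / stride + 1, applied to the height and width. A worked check, assuming a 28x28 input with a 3x3 filter, no padding, and stride 1:

in_h, in_w = 28, 28
f, p, s = 3, 0, 1
out_h = (in_h - f + 2 * p) // s + 1
out_w = (in_w - f + 2 * p) // s + 1
print(out_h, out_w)  # 26 26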