class AbsoluteValueLayer(AbstractLayer):
    """
    A layer which implements absolute value activation
    """

    def __init__(self, layerSize):
        """
        An absolute value layer can be connected to several inputs
        """

        # Properly inherit the AbstractLayer
        AbstractLayer.__init__(self)

        # An absolute value layer has an input port and an output port
        self.input = InputPort(layerSize)
        self.output = OutputPort(layerSize)

    def forward(self):
        """
        Perform a forward step - activate the net input using absolute value
        """

        self.output.setOutput(np.abs(self.input.getNetInput()))

    def backward(self):
        """
        Perform a backprop step - gradient is the sign of the net input
        """

        # BUG FIX: d|x|/dx = sign(x).  The previous code used
        # sign(output) = sign(|x|), which is 1 (or 0) everywhere and so
        # produced the wrong gradient for negative net inputs.
        self.input.setDelta(
            np.sign(self.input.getNetInput()) * self.output.getNetDelta())
class AbsoluteValueLayer(AbstractLayer):
    """
    A layer which implements absolute value activation
    """

    def __init__(self, layerSize):
        """
        An absolute value layer can be connected to several inputs
        """

        # Properly inherit the AbstractLayer
        AbstractLayer.__init__(self)

        # An absolute value layer has an input port and an output port
        self.input = InputPort(layerSize)
        self.output = OutputPort(layerSize)

    def forward(self):
        """
        Perform a forward step - activate the net input using absolute value
        """

        self.output.setOutput(np.abs(self.input.getNetInput()))

    def backward(self):
        """
        Perform a backprop step - gradient is the sign of the net input
        """

        # BUG FIX: the derivative of |x| is sign(x), the sign of the *input*.
        # Taking sign(output) = sign(|x|) yields only 0 or 1 and gives the
        # wrong gradient whenever the net input is negative.
        self.input.setDelta(
            np.sign(self.input.getNetInput()) * self.output.getNetDelta())
class TanhLayer(AbstractLayer):
    """
    A layer which implements hyperbolic tangent (tanh) activation
    """

    def __init__(self, layerSize):
        """
        A tanh layer can be connected to several inputs
        """

        # Properly inherit the AbstractLayer
        AbstractLayer.__init__(self)

        # A tanh layer has an input port and an output port
        self.input = InputPort(layerSize)
        self.output = OutputPort(layerSize)

    def forward(self):
        """
        Perform a forward step - activate the net input using tanh
        """

        # BUG FIX: the old expression (1 - e^-x) / (1 + e^-x) equals
        # tanh(x/2), not tanh(x), which is inconsistent with the
        # (1 - y**2) derivative used in backward(); it also overflows
        # (inf/inf -> nan) for large negative inputs and evaluated
        # np.exp twice.  np.tanh is correct and numerically stable.
        self.output.setOutput(np.tanh(self.input.getNetInput()))

    def backward(self):
        """
        Perform a backprop step - gradient is the derivative of tanh,
        expressed in terms of the output: d tanh(x)/dx = 1 - tanh(x)**2
        """

        self.input.setDelta(
            (1.0 - self.output.getOutput()**2) * self.output.getNetDelta())
class TanhLayer(AbstractLayer):
    """
    A layer which implements hyperbolic tangent (tanh) activation
    """

    def __init__(self, layerSize):
        """
        A tanh layer can be connected to several inputs
        """

        # Properly inherit the AbstractLayer
        AbstractLayer.__init__(self)

        # A tanh layer has an input port and an output port
        self.input = InputPort(layerSize)
        self.output = OutputPort(layerSize)

    def forward(self):
        """
        Perform a forward step - activate the net input using tanh
        """

        # BUG FIX: (1 - e^-x) / (1 + e^-x) is tanh(x/2), not tanh(x), and
        # disagrees with the (1 - y**2) gradient in backward().  It also
        # produces nan for large negative inputs (inf/inf).  np.tanh fixes
        # both problems and evaluates the exponential only once.
        self.output.setOutput(np.tanh(self.input.getNetInput()))

    def backward(self):
        """
        Perform a backprop step - gradient is the derivative of tanh,
        d tanh(x)/dx = 1 - tanh(x)**2, written in terms of the output
        """

        self.input.setDelta(
            (1.0 - self.output.getOutput()**2) * self.output.getNetDelta())
class ReluLayer(AbstractLayer):
    """
    A layer implementing the rectified linear (ReLU) activation.
    """

    def __init__(self, layerSize):
        """
        Create a ReLU layer with an input port and an output port of the
        given size.
        """

        # Initialize the AbstractLayer base class
        AbstractLayer.__init__(self)

        self.input = InputPort(layerSize)
        self.output = OutputPort(layerSize)

    def forward(self):
        """
        Forward pass - clamp all negative net input values to zero.
        """

        net_input = self.input.getNetInput()
        self.output.setOutput(np.fmax(0.0, net_input))

    def backward(self):
        """
        Backprop pass - the gradient is 1 wherever the output is positive
        and 0 everywhere else.
        """

        gradient = np.where(self.output.getOutput() > 0, 1.0, 0.0)
        self.input.setDelta(gradient * self.output.getNetDelta())
class SoftmaxLayer(AbstractLayer):
    """
    A layer implementing softmax activation over each row of the input.
    """

    def __init__(self, layerSize):
        """
        Create a softmax layer with an input port and an output port of the
        given size.
        """

        # Initialize the AbstractLayer base class
        AbstractLayer.__init__(self)

        self.input = InputPort(layerSize)
        self.output = OutputPort(layerSize)

    def forward(self):
        """
        Forward pass - exponentiate the net input, then normalize each row
        so that it sums to one.
        """

        self.output.setOutput(gpu.exp(self.input.getNetInput()))
        exponentials = self.output.getOutput()

        # Build a column vector of row sums so the division broadcasts
        # across each row
        row_totals = gpu.garray([gpu.sum(exponentials, 1)]).transpose()
        self.output.setOutput(exponentials / row_totals)

    def backward(self):
        """
        Backprop pass - the delta is passed through unchanged.  This relies
        on the softmax being paired with a loss whose delta already folds in
        the softmax Jacobian (e.g. cross-entropy) - TODO confirm against the
        objective layers used with this class.
        """

        self.input.setDelta(self.output.getNetDelta())
class ReluLayer(AbstractLayer):
    """
    A layer implementing the rectified linear (ReLU) activation.
    """

    def __init__(self, layerSize):
        """
        Create a ReLU layer with an input port and an output port of the
        given size.
        """

        # Initialize the AbstractLayer base class
        AbstractLayer.__init__(self)

        self.input = InputPort(layerSize)
        self.output = OutputPort(layerSize)

    def forward(self):
        """
        Forward pass - replace every negative net input value with zero.
        """

        activation = np.fmax(0.0, self.input.getNetInput())
        self.output.setOutput(activation)

    def backward(self):
        """
        Backprop pass - gradient is the indicator of a positive output:
        1 where the output is positive, 0 otherwise.
        """

        mask = np.where(self.output.getOutput() > 0, 1.0, 0.0)
        self.input.setDelta(mask * self.output.getNetDelta())
class SoftmaxLayer(AbstractLayer):
    """
    A layer implementing softmax activation over each row of the input.
    """

    def __init__(self, layerSize):
        """
        Create a softmax layer with an input port and an output port of the
        given size.
        """

        # Initialize the AbstractLayer base class
        AbstractLayer.__init__(self)

        self.input = InputPort(layerSize)
        self.output = OutputPort(layerSize)

    def forward(self):
        """
        Forward pass - exponentiate the net input and divide each row by
        its sum so every row forms a probability distribution.
        """

        self.output.setOutput(gpu.exp(self.input.getNetInput()))
        exponentials = self.output.getOutput()

        # Column vector of per-row sums, so the division broadcasts row-wise
        normalizer = gpu.garray([gpu.sum(exponentials, 1)]).transpose()
        self.output.setOutput(exponentials / normalizer)

    def backward(self):
        """
        Backprop pass - the delta is forwarded unchanged.  This assumes the
        softmax is combined with a loss (e.g. cross-entropy) whose delta
        already accounts for the softmax Jacobian - TODO confirm against
        the objective layers used with this class.
        """

        self.input.setDelta(self.output.getNetDelta())
class DelayLayer(AbstractLayer):
    """
    A layer that delays its input by one time step.
    """

    def __init__(self, layerSize, initialHistory):
        """
        Create a delay layer.  initialHistory provides the value emitted
        before any input has been stored.
        """

        # Initialize the AbstractLayer base class
        AbstractLayer.__init__(self)

        self.input = InputPort(layerSize)
        self.output = OutputPort(layerSize)

        # The stored activation emitted on the next forward pass; it is
        # refreshed each time step() is called.
        self.initial_history = initialHistory
        self.history = np.zeros((1, layerSize))
        self.current_step = 0

    def forward(self):
        """
        Forward pass - emit the stored history.
        """

        # On the very first time step, resize the history to match the
        # incoming data and fill it with the initial history value.
        if self.current_step == 0:
            incoming = self.input.getNetInput()
            self.history = np.zeros(incoming.shape)
            self.history[:] = self.initial_history

        self.output.setOutput(self.history)

    def backward(self):
        """
        Backprop pass - pass the delta straight through.
        """

        self.input.setDelta(self.output.getNetDelta())

    def step(self):
        """
        Advance one time step: remember the current net input so it can be
        emitted on the next forward pass.
        """

        # NOTE(review): this stores a reference to the input array, not a
        # copy - presumably upstream layers produce a fresh array each
        # step; verify that aliasing is harmless here.
        self.history = self.input.getNetInput()
        self.current_step += 1
class DelayLayer(AbstractLayer):
    """
    A layer that delays its input by one time step.
    """

    def __init__(self, layerSize, initialHistory):
        """
        Create a delay layer.  initialHistory is the value produced before
        any real input has been recorded.
        """

        # Initialize the AbstractLayer base class
        AbstractLayer.__init__(self)

        self.input = InputPort(layerSize)
        self.output = OutputPort(layerSize)

        # Stored activation that will be emitted by the next forward pass;
        # step() overwrites it with the latest input.
        self.initial_history = initialHistory
        self.history = np.zeros((1, layerSize))
        self.current_step = 0

    def forward(self):
        """
        Forward pass - produce the stored history as this layer's output.
        """

        # At time step zero the history is reshaped to match the incoming
        # data and seeded with the initial history value.
        if self.current_step == 0:
            data_shape = self.input.getNetInput().shape
            self.history = np.zeros(data_shape)
            self.history[:] = self.initial_history

        self.output.setOutput(self.history)

    def backward(self):
        """
        Backprop pass - forward the delta unchanged.
        """

        self.input.setDelta(self.output.getNetDelta())

    def step(self):
        """
        Advance one time step, capturing the current net input for the next
        forward pass.
        """

        # NOTE(review): stores a reference rather than a copy - assumed
        # safe because upstream layers rebuild their outputs each step;
        # confirm no in-place mutation occurs.
        self.history = self.input.getNetInput()
        self.current_step += 1
class HistoryLayer(AbstractLayer):
    """
    A useful internal layer for Recurrent Layers which maintains a history
    of activations.
    """

    def __init__(self, size, initialHistory=np.zeros((0, 0))):
        """
        Create a History layer
        """

        AbstractLayer.__init__(self)

        self.layerSize = size
        self.input = InputPort(self.layerSize)
        self.output = OutputPort(self.layerSize)

        # Stack of past output values: pushed by step(), popped by
        # backstep() during backpropagation through time.
        self.history = []

        # NOTE(review): initialHistory is a mutable default argument shared
        # by every instance constructed without it - confirm no caller
        # mutates it in place.
        self.output.value = np.copy(initialHistory)
        self.initialHistory = initialHistory

    def forward(self):
        """
        Do nothing. step handles this layer correctly
        """
        pass

    def backward(self):
        """
        Do nothing. backstep handles this layer correctly
        """
        pass

    def step(self):
        """
        Push the current output into the history, and propagate input forward
        """

        # NOTE(review): value[:] creates a numpy *view*, not a copy.  This
        # appears safe only because self.output.value is subsequently
        # rebound rather than mutated in place - confirm.
        self.history.append(self.output.value[:])
        self.output.value = self.input.getNetInput()

    def backstep(self):
        """
        Pop the output from the history, and propagate the delta backward
        """

        self.input.setDelta(self.output.getNetDelta())
        self.output.value = self.history.pop()

    def reset(self):
        """
        Reset the history to empty and output to initialHistory
        """

        self.history = []
        # In-place assignment: requires the current output value's shape to
        # be broadcast-compatible with initialHistory.  NOTE(review): this
        # mutates whatever array output.value currently aliases (possibly
        # an upstream layer's output) - verify against callers.
        self.output.value[:] = self.initialHistory

    def setDelta(self, delta):
        """
        Set the delta on the input layer to the provided value
        """

        self.input.setDelta(delta)
class HistoryLayer(AbstractLayer):
    """
    A useful internal layer for Recurrent Layers which maintains a history
    of activations.  (GPU variant backed by gnumpy garrays.)
    """

    def __init__(self, size, initialHistory=gpu.zeros((0,0))):
        """
        Create a History layer
        """

        AbstractLayer.__init__(self)

        self.layerSize = size
        self.input = InputPort(self.layerSize)
        self.output = OutputPort(self.layerSize)

        # Stack of past output values: pushed by step(), popped by
        # backstep() during backpropagation through time.
        self.history = []

        # NOTE(review): initialHistory is a mutable default argument shared
        # by every instance constructed without it - confirm no caller
        # mutates it in place.
        # Round-trips through numpy to obtain an independent copy of the
        # initial history on the GPU.
        self.output.value = gpu.garray(np.copy(initialHistory.as_numpy_array()))
        self.initialHistory = initialHistory

    def forward(self):
        """
        Do nothing. step handles this layer correctly
        """
        pass

    def backward(self):
        """
        Do nothing. backstep handles this layer correctly
        """
        pass

    def step(self):
        """
        Push the current output into the history, and propagate input forward
        """

        # NOTE(review): whether value[:] copies or aliases depends on
        # gnumpy's slicing semantics (which differ from numpy views) -
        # confirm before relying on either behavior.
        self.history.append(self.output.value[:])
        self.output.value = self.input.getNetInput()

    def backstep(self):
        """
        Pop the output from the history, and propagate the delta backward
        """

        self.input.setDelta(self.output.getNetDelta())
        self.output.value = self.history.pop()

    def reset(self):
        """
        Reset the history to empty and output to initialHistory
        """

        self.history = []
        # In-place assignment: requires the current output value's shape to
        # be compatible with initialHistory, and mutates whatever garray
        # output.value currently refers to - verify against callers.
        self.output.value[:] = self.initialHistory

    def setDelta(self, delta):
        """
        Set the delta on the input layer to the provided value
        """

        self.input.setDelta(delta)