Example #1
class RNNLayer:
    def __init__(self, hidden_size, input_size, activation=ReLU):
        self.hidden_size = hidden_size
        self.layer = Layer(hidden_size, [hidden_size, input_size],
                           activation=activation)

    def forward(self, inputs, hprev=None, train=False):
        if hprev is None:
            # Match the (hidden_size, 1) column shape used in backward().
            hprev = np.zeros((self.hidden_size, 1))
        acts = []
        for input_ in inputs:   # input_ avoids shadowing the builtin
            act = self.layer.forward([hprev, input_], train=train)
            hprev = act
            acts.append(act)
        return acts

    def backward(self, errors, alpha=0.01):
        dhnext = np.zeros((self.hidden_size, 1))
        errors_out = []
        # Walk the timesteps in reverse (backpropagation through time),
        # carrying the hidden-state gradient dhnext across steps.
        for error in errors[::-1]:
            dhnext, err = self.layer.backward(error + dhnext,
                                              alpha=alpha,
                                              apply=False)
            errors_out.append(err)
        return errors_out[::-1]

    def apply_gradients(self, alpha=0.01):
        self.layer.apply_gradients(alpha)
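
For context, a minimal usage sketch for the RNNLayer above. It is an illustration only: the sizes, the input sequence, and the error signals are invented, and it assumes the same Layer, ReLU, and np (NumPy) names the example relies on.

rnn = RNNLayer(hidden_size=4, input_size=3, activation=ReLU)
# A hypothetical 5-step sequence of 3x1 column-vector inputs.
inputs = [np.random.randn(3, 1) for _ in range(5)]
acts = rnn.forward(inputs, train=True)   # one hidden state per timestep

# Dummy per-timestep gradients stand in for errors from the layer above.
errors = [np.random.randn(4, 1) for _ in acts]
rnn.backward(errors, alpha=0.01)         # accumulate gradients (BPTT)
rnn.apply_gradients(alpha=0.01)          # then apply them in one step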
Example #2
 def __init__(self,
              input_size,
              window_size,
              stride,
              activation=Tanh,
              pooling=False):
     self.window_size = window_size
     self.stride = stride
     # One output unit is wired to `window_size` consecutive inputs and
     # slid across the sequence with the given stride (a 1-D convolution).
     Layer.__init__(self,
                    1, [input_size] * window_size,
                    activation=activation)
     self.pooling = pooling
Example #3
 def __init__(self,
              layer_size,
              input_sizes,
              init_function=np.random.randn,
              alpha=0.1,
              activation=Softmax):
     # Forward the constructor arguments so callers can override the
     # defaults.
     Layer.__init__(self,
                    layer_size,
                    input_sizes,
                    init_function=init_function,
                    activation=activation,
                    alpha=alpha)
Example #4
 def __init__(self, layers, input_size, cost_function):
     self.alpha = 0.1
     self.layers = []
     self.input_size = input_size
     prev = input_size
     # Each spec is (layer class, size, activation); `layer_cls` avoids
     # shadowing the Layer class itself.
     for layer_cls, size, act in layers[:-1]:
         layer = layer_cls(hidden_size=size, input_size=prev, activation=act)
         prev = size
         self.layers.append(layer)
     layer_cls, size, act = layers[-1]
     self.layers.append(layer_cls(size, [prev], activation=act))
     self.cost_function = cost_function
     self.print_parameters()
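
A sketch of how this constructor might be called. The enclosing class name (Network here) and the concrete sizes are assumptions; the layer classes and activations reuse names from the other examples on this page.

# Hypothetical spec list, one (layer class, size, activation) per layer:
# two recurrent hidden layers followed by a plain Softmax output layer.
net = Network(layers=[(RNNLayer, 64, ReLU),
                      (RNNLayer, 64, ReLU),
                      (Layer, 10, Softmax)],
              input_size=32,
              cost_function=CrossEntropy)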
Example #5
 def __init__(self, list_sizes, input_size, loss_fn=CrossEntropy):
     prev_size = input_size
     self.alpha = 0.1
     self.layers = []
     # Each spec is (layer class, size, activation).
     for layer_cls, size, act in list_sizes:
         self.layers.append(layer_cls(size, [prev_size], activation=act))
         prev_size = size
     self.loss_fn = loss_fn()
Example #6
 def forward(self, input_list, train=False):
     acts = []
     if len(input_list) < self.window_size:
         return np.array([0]).reshape((1, 1))
     # Slide the window across the sequence; stop while a full window of
     # `window_size` inputs still fits, so Layer.forward never sees a
     # truncated window.
     for i in range(0, len(input_list) - self.window_size + 1, self.stride):
         input_ = input_list[i:i + self.window_size]
         act = Layer.forward(self, input_, train=train)[0][0]
         acts.append(act)
     if self.pooling:
         # Max-pool the window activations down to a single value.
         acts = np.array([max(acts)]).reshape((1, 1))
         self.activation_stack = [acts]
     return acts
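
Examples #2 and #6 belong to the same sliding-window layer. A usage sketch, with an assumed class name (ConvLayer) and invented sizes:

# Window of 3 inputs, stride 1, max pooling enabled.
conv = ConvLayer(input_size=8, window_size=3, stride=1,
                 activation=Tanh, pooling=True)
inputs = [np.random.randn(8, 1) for _ in range(10)]
out = conv.forward(inputs)   # with pooling=True: a single 1x1 activation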
Example #7
    def load(self, filename):
        '''Load the network description stored in a YAML file.'''
        self.has_branches = False
        self.has_inputs = False
        self.has_targets = False
        self.has_scaling = False
        self.has_outputs = False
        self.has_binning = False
        self.branches = None
        self.inputs = None
        self.scaling = None
        self.binning = None
        self.outputs = None
        self.architecture = None
        with open(filename, 'r') as f:
            y = yaml.safe_load(f)
        if 'branches' in y:
            self.branches = list(y['branches'].keys())
            self.has_branches = True

        if 'input_order' in y['network']:
            self.inputs = y['network']['input_order']
            self.has_inputs = True

        if 'scaling' in y['network']:
            self.scaling = y['network']['scaling']
            self.has_scaling = True

        if 'binning' in y:
            self.binning = y['binning']
            self.has_binning = True

        if 'target_order' in y['network']:
            self.outputs = y['network']['target_order']
            self.has_targets = True

        self.arch = [Layer.from_tuple(_layer_from_yaml(y['network'][idx]))
                     for idx in y['network']['layer_access']]
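
For reference, a sketch of the parsed dict shape load expects. Only the key names come from the code above; every concrete value is invented:

y = {
    'branches': {'pt': {}, 'eta': {}},
    'binning': [0, 50, 100, 200],
    'network': {
        'input_order': ['pt', 'eta'],
        'target_order': ['signal', 'background'],
        'scaling': {'pt': 0.01, 'eta': 1.0},
        'layer_access': ['layer0', 'layer1'],
        # 'layer0', 'layer1' would hold the per-layer specs that
        # _layer_from_yaml consumes.
    },
}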
Example #8
 def __init__(self, name):
     Layer.__init__(self, name)
     self.seed_gui = SeedingGUI()
     self.range_gui = RangeGUI()
Example #9
 def __init__(self, hidden_size, input_size, activation=ReLU):
     self.hidden_size = hidden_size
     self.layer = Layer(hidden_size, [hidden_size, input_size],
                        activation=activation)
     self.update = Layer(hidden_size, [hidden_size, input_size],
                         activation=Sigmoid)
     self.reset = Layer(hidden_size, [hidden_size, input_size],
                        activation=Sigmoid)
     self.stack = []
Example #10
class GRULayer:
    def __init__(self, hidden_size, input_size, activation=ReLU):
        self.hidden_size = hidden_size
        # Candidate-state layer plus update and reset gates.
        self.layer = Layer(hidden_size, [hidden_size, input_size],
                           activation=activation)
        self.update = Layer(hidden_size, [hidden_size, input_size],
                            activation=Sigmoid)
        self.reset = Layer(hidden_size, [hidden_size, input_size],
                           activation=Sigmoid)
        self.stack = []

    def forward(self, inputs, hprev=None, train=False):
        if hprev is None:
            hprev = np.zeros((self.hidden_size, 1))
        acts = []
        for input_ in inputs:
            z = self.update.forward([hprev, input_], train=train)
            r = self.reset.forward([hprev, input_], train=train)
            # The candidate state sees the reset-gated hidden state.
            new_mem = self.layer.forward([r * hprev, input_], train=train)
            # The update gate interpolates between old state and candidate.
            act = (1 - z) * new_mem + z * hprev
            if train:
                self.stack.append((z, r, new_mem, act, hprev))
            hprev = act
            acts.append(act)
        return acts

    def backward(self, errors, alpha=0.01):
        dhnext = np.zeros((self.hidden_size, 1))
        errors_out = []
        for error in errors[::-1]:
            # Out-of-place addition, so the caller's arrays stay intact.
            error = error + dhnext
            z, r, new_mem, act, hprev = self.stack.pop()
            dhnext, err_lower = 0, 0
            d_update = (hprev - new_mem) * error
            dhnext += z * error
            d_layer = error * (1 - z)
            temp_1, temp_2 = self.layer.backward(d_layer, apply=False)
            err_lower += temp_2
            d_reset = temp_1 * hprev
            dhnext += temp_1 * r
            temp_1, temp_2 = self.reset.backward(d_reset, apply=False)
            dhnext += temp_1
            err_lower += temp_2
            temp_1, temp_2 = self.update.backward(d_update, apply=False)
            dhnext += temp_1
            err_lower += temp_2
            errors_out.append(err_lower)
        return errors_out[::-1]

    def apply_gradients(self, alpha=0.01):
        self.layer.apply_gradients(alpha)
        self.update.apply_gradients(alpha)
        self.reset.apply_gradients(alpha)
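
As with the plain RNN in Example #1, a minimal usage sketch for GRULayer; the shapes and values are invented:

gru = GRULayer(hidden_size=4, input_size=3)
inputs = [np.random.randn(3, 1) for _ in range(5)]
acts = gru.forward(inputs, train=True)   # train=True records the stack
                                         # that backward() later pops

errors = [np.random.randn(4, 1) for _ in acts]
gru.backward(errors)            # backpropagate through all three gates
gru.apply_gradients(alpha=0.01)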
Example #11
 def __init__(self, hidden_size, input_size, activation=ReLU):
     self.hidden_size = hidden_size
     self.layer = Layer(hidden_size, [hidden_size, input_size],
                        activation=activation)