Example #1
import numpy

# DenseLayer, IrisDataset and the backend module N are assumed to be importable
# from elsewhere in nuronet2; their exact import paths are not shown in this example.
from nuronet2.base import MLModel, NetworkModel, NeuralNetwork

if __name__ == "__main__":
    #fName = "/home/evander/Dropbox/data/iris/iris.data"
    fName = "C:\\Users\\Evander\\Dropbox\\data\\iris\\iris.data"
    data = IrisDataset(f_name=fName, batch_size=8, validation=0.1)

    model = NeuralNetwork()
    model.add(DenseLayer(16, activation="tanh2", input_shape=(3, )))
    model.add(DenseLayer(3, activation="softmax"))

    model.compile('rmsprop', "categorical_crossentropy", metrics=['accuracy'])
    model.fit_generator(data,
                        steps_per_epoch=17,
                        n_epochs=20,
                        n_workers=4,
                        validation_data=data,
                        validation_steps=5)

    test = numpy.argmax(model.predict(data.x_test), axis=1)
    real = data.y_test.nonzero()[1]

    print(test)
    print(real)
    g = test - real
    non = g.nonzero()[0]
    print((1. - (len(non) / float(g.shape[0]))) * 100.)

    f = N.function(
        [model.inbound_connections[0].input_tensors[0], model.targets[0]],
        model.metrics_tensors[0])
Example #2
def clip_norm(g, clip, n):
    return N.switch(N.ge(n, clip), g * clip / n, g)
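
A minimal numpy sketch of the same operation, assuming a plain array gradient (clip_norm_np and the call below are hypothetical): once the global norm n reaches the clip threshold, the gradient is rescaled by clip / n, otherwise it is returned unchanged.

import numpy

def clip_norm_np(g, clip, n):
    # rescale g by clip / n once the global norm n exceeds the threshold
    return g * clip / n if n >= clip else g

grad = numpy.array([3.0, 4.0])                                    # norm = 5.0
print(clip_norm_np(grad, clip=1.0, n=numpy.linalg.norm(grad)))    # [0.6 0.8]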
Example #3
 def get_error(self, target, output):
     diff = N.abs(
         (target - output) / N.clip(N.abs(target), N._epsilon, numpy.inf))
     return 100. * N.mean(diff)
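
This objective is a mean absolute percentage error. A rough numpy equivalent, with a hypothetical name mape and an epsilon standing in for N._epsilon to guard against division by zero:

import numpy

def mape(target, output, eps=1e-7):
    # 100 * mean(|target - output| / clip(|target|, eps, inf))
    diff = numpy.abs((target - output) /
                     numpy.clip(numpy.abs(target), eps, numpy.inf))
    return 100. * numpy.mean(diff)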
Example #4
 def _pooling_function(self, inputs, pool_size, strides, padding):
     output = N.pool2d(inputs, pool_size, strides, padding, pool_mode='max')
     return output
Example #5
 def get_error(self, target, output):
     return N.mean(N.binary_crossentropy(output, target))
Example #6
 def get_error(self, target, output):
     num = N.sum(N.square(target - output))
     denom = N.sum(N.square(target))
     return num / denom
Example #7
 def __init__(self, l1=0., l2=0.):
     self.l1 = N.cast(l1)
     self.l2 = N.cast(l2)
Example #8
 def get_error(self, target, output):
     return N.mean(N.maximum(1. - target * output, 0.))
Example #9
 def make_weights(self, shape, name):
     weights = numpy.random.RandomState(seed=self.seed).uniform(low=self.minval,
                                             high=self.maxval, size=shape)
     weights = weights.astype(N.floatx)
     return N.shared(weights, name=name)
Example #10
 def make_weights(self, shape, name):
     weights = _truncated_normal_vars(mean=self.mean, std=self.std, 
                                      shape=shape, seed=self.seed)
     return N.shared(weights, name=name)
Example #11
 def make_weights(self, shape, name):
     weights = numpy.random.RandomState(seed=self.seed).normal(self.mean,
                                                     self.std, size=shape)
     weights = weights.astype(N.floatx)
     return N.shared(weights, name=name)
Example #12
 def make_weights(self, shape, name):
     constant = self.value * numpy.ones(shape=shape, dtype=N.floatx)
     return N.shared(constant, name=name)
Example #13
 def make_weights(self, shape, name):
     return N.shared(numpy.ones(shape=shape, dtype=N.floatx), name=name)
Example #14
def clip_grad(grad, clip=0):
    if (clip > 0):
        norm = N.sqrt(sum([N.sum(g**2) for g in grad]))
        return [clip_norm(g, clip, norm) for g in grad]
    return grad
Example #15
 def regularise(self, param):
     lOne = N.sum(N.abs(param)) * self.l1
     lTwo = N.sum(N.square(param)) * self.l2
     return lOne + lTwo
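
Combined with the l1 and l2 fields set in the __init__ of Example #7, this computes the penalty l1 * sum(|param|) + l2 * sum(param**2). A hedged numpy illustration (l1_l2_penalty is a hypothetical name):

import numpy

def l1_l2_penalty(param, l1=0.01, l2=0.01):
    # l1 * sum(|param|) + l2 * sum(param ** 2), mirroring regularise() above
    return l1 * numpy.sum(numpy.abs(param)) + l2 * numpy.sum(numpy.square(param))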
Example #16
 def get_weights(self):
     return [N.get_value(w) for w in self.weights]
Example #17
def time_dist_dense(x, w, b, dropout=None,
                    input_dim=None, output_dim=None, 
                    timesteps=None, training=False):
    """
    Apply 'W.y + b' for every temporal slice y of x
    
    Inputs
    ------
        @param x : tensor holding time series data
        @param w : weight matrix
        @param b : bias vector
        @param training: whether the caller is in the training phase
        @param dropout : applies dropout to the operation
        @param input_dim: (optional) dimensionality of the input
        @param output_dim: (optional) dimensionality of the output
        @param timesteps: (optional) number of timesteps
        
    Returns
    -------
        Output tensor
    """
    if(not input_dim):
        input_dim = N.shape(x)[2]
    if(not timesteps):
        timesteps = N.shape(x)[1]
    if(not output_dim):
        output_dim = N.shape(w)[1]
    
    if(dropout is not None and 0. < dropout < 1.):
        ones = N.ones_like(N.reshape(x[:, 0, :], (-1, input_dim)))
        dropout_matrix = N.dropout(ones, dropout)
        expanded_dropout_matrix = N.repeat(dropout_matrix, timesteps)
        if(training):
            x = x * expanded_dropout_matrix
            
    #collapse the time dimension and batch dimension together
    x = N.reshape(x, (-1, input_dim))
    x = N.dot(x, w)
    x += b
    
    #reshape to 3D
    if N.backend() == 'tensorflow':
        x = N.reshape(x, N.stack([-1, timesteps, output_dim]))
        x.set_shape([None, None, output_dim])
    else:
        x = N.reshape(x, (-1, timesteps, output_dim))
    return x
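
The heart of the transform is reshape, matmul, reshape: the batch and time axes are collapsed, the dense weights are applied once, and the time axis is restored. A small numpy sketch of just that shape manipulation (dropout and symbolic shapes omitted; time_dist_dense_np is a hypothetical name):

import numpy

def time_dist_dense_np(x, w, b):
    # x: (batch, timesteps, input_dim), w: (input_dim, output_dim), b: (output_dim,)
    batch, timesteps, input_dim = x.shape
    flat = x.reshape(-1, input_dim)     # collapse batch and time axes
    out = flat.dot(w) + b               # one dense op covers every timestep
    return out.reshape(batch, timesteps, w.shape[1])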
Example #18
 def get_error(self, target, output):
     return N.mean(N.categorical_crossentropy(output, target))
Example #19
 def get_cost(self):
     w_cost = self.w_regulariser(self.W) if self.w_regulariser else N.cast(0.)
     b_cost = self.b_regulariser(self.b) if self.b_regulariser else N.cast(0.)
     h_cost = self.h_regulariser(self.H) if self.h_regulariser else N.cast(0.)
     return w_cost + b_cost + h_cost
Example #20
 def get_error(self, target, output):
     return N.mean(N.square(target - output))
Example #21
 def get_weights(self):
     params = self.weights
     return [N.get_value(param) for param in params]
Example #22
 def get_error(self, target, output):
     return N.mean(N.abs(target - output))
Example #23
    def compile(self, optimiser, loss, metrics=[], **kwargs):
        """
        Configures the model for training.
        
        Inputs
        ------
            optimizer: str (name of optimizer) or optimizer object.
                See [optimizers](/optimizers).
            loss: str (name of objective function) or objective function.
                See [objectives](/objectives).
                If the model has multiple outputs, you can use a different loss
                on each output by passing a dictionary or a list of objectives.
            kwargs: when using the Theano backend, these arguments
                are passed into N.function. Ignored for Tensorflow backend.
        """
        self.build()
        if (not isinstance(loss, list)):
            loss = [loss]
        self.optimiser = get_optimiser(optimiser)
        self.loss = loss

        if (len(loss) != len(self.outputs)):
            raise ValueError("loss should have one entry per output; "
                             "currently has {} entries ".format(len(loss)) +
                             "for {} outputs".format(len(self.outputs)))
        self.loss_functions = [get_objective(objective) for objective in loss]

        #prepare targets of model
        self.targets = []
        for i in range(len(self.outputs)):
            shape = self.outputs[i]._nuro_shape
            name = self.output_layers[i].name
            self.targets.append(
                N.variable(ndim=len(shape),
                           dtype=N.dtype(self.outputs[i]),
                           name='h' + name + '_target'))

        #prepare metrics
        self.metrics = metrics
        self.metrics_names = ['loss']
        self.metrics_tensors = []

        #compute total loss
        total_loss = None
        for i in range(len(self.outputs)):
            y_true = self.targets[i]
            y_pred = self.outputs[i]
            loss_fn = self.loss_functions[i]
            if (total_loss is None):
                total_loss = loss_fn(y_true, y_pred)
            else:
                total_loss += loss_fn(y_true, y_pred)

        #add regularisation penalties
        total_loss += self.get_cost()

        # List of same size as output_names
        output_names = [n.name for n in self.outputs]
        nested_metrics = _collect_metrics(metrics, output_names)

        def append_metric(output_num, metric_name, metric_tensor):
            if (len(output_names) > 1):
                metric_name = self.outputs[output_num].name + "_" + metric_name
            self.metrics_names.append(metric_name)
            self.metrics_tensors.append(metric_tensor)

        for i in range(len(self.outputs)):
            y_true = self.targets[i]
            y_pred = self.outputs[i]
            output_metrics = nested_metrics[i]
            for metric in output_metrics:
                if (metric == 'accuracy' or metric == 'acc'):
                    output_shape = self.outputs[i]._nuro_shape
                    acc_fn = None
                    if (output_shape[-1] == 1 or isinstance(
                            self.loss_functions[i], BinaryXEntropy)):
                        acc_fn = binary_accuracy
                    elif (isinstance(self.loss_functions[i],
                                     CategoricalXEntropy)):
                        acc_fn = categorical_accuracy
                    append_metric(i, 'acc', acc_fn(y_true, y_pred))

        #prepare gradient updates and state updates
        self.total_loss = total_loss

        # functions for train and test will
        # be created when required
        self._function_kwargs = kwargs

        self.train_function = None
        self.test_function = None
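
For a call site, see Example #1 above, where a single-output network is configured with one optimiser, one loss, and the accuracy metric:

model.compile('rmsprop', "categorical_crossentropy", metrics=['accuracy'])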
Example #24
def categorical_accuracy(y_true, y_pred):
    return N.cast(N.equal(N.argmax(y_true, axis=-1),
                          N.argmax(y_pred, axis=-1)),
                  N.floatx)
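
The metric simply compares argmax indices of the one-hot targets and the predictions. A plain numpy version for comparison (categorical_accuracy_np is a hypothetical name):

import numpy

def categorical_accuracy_np(y_true, y_pred):
    # per-sample 1.0/0.0 indicators of whether the predicted class matches the target
    matches = numpy.equal(numpy.argmax(y_true, axis=-1),
                          numpy.argmax(y_pred, axis=-1))
    return matches.astype('float32')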
Example #25
 def get_cost(self):
     return N.cast(0.)