def parse_properties(self, n_in, n_hidden, n_out, det_activations,
                     stoch_activations, stoch_n_hidden):
    self.n_hidden = np.array(n_hidden)
    self.n_out = n_out
    self.n_in = n_in
    self.stoch_n_hidden = [np.array(i) for i in stoch_n_hidden]
    self.det_activation_names = det_activations
    self.det_activation, self.det_activation_prime = parse_activations(
        det_activations)
    self.stoch_activation_names = stoch_activations
    self.stoch_activation, self.stoch_activation_prime = parse_activations(
        stoch_activations)
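# --- Usage sketch (not from the source) ----------------------------------
# A minimal illustration of the argument shapes parse_properties expects,
# assuming a network with two deterministic hidden layers and one stochastic
# sub-layer per hidden layer. The activation-name strings and the `net`
# instance are assumptions; the accepted names are whatever
# parse_activations defines.
#
#   n_in, n_hidden, n_out = 20, [50, 30], 10
#   det_activations = ['sigmoid', 'sigmoid', 'linear']
#   stoch_activations = ['sigmoid', 'sigmoid']
#   stoch_n_hidden = [[25], [15]]
#   net.parse_properties(n_in, n_hidden, n_out, det_activations,
#                        stoch_activations, stoch_n_hidden)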
def __init__(self, n_in, n_hidden, n_out, activation, bias_init, prng):
    """Defines the basics of a Vanilla Recurrent Neural Network.

    :param n_in: integer defining the number of input units.
    :param n_hidden: list of integers defining the number of hidden units per layer.
    :param n_out: integer defining the number of output units.
    :param activation: list of size len(n_hidden) + 1 defining the activation function per layer.
    :param bias_init: list with bias initialization for [layers biases] + [output bias].
    :param prng: random number generator.
    """
    self.prng = prng
    self.n_in = n_in
    assert type(self.n_in) is IntType, (
        "n_in must be an integer: {0!r}".format(self.n_in))
    assert type(n_hidden) is ListType, (
        "n_hidden must be a list: {0!r}".format(n_hidden))
    self.n_hidden = np.array(n_hidden)
    self.activation_list = activation
    assert type(self.activation_list) is ListType, (
        "activation must be a list: {0!r}".format(self.activation_list))
    assert len(self.n_hidden) + 1 == len(self.activation_list), (
        "Activation list must have len(n_hidden) + 1 values. "
        "Activation: {0!r}, n_hidden: {1!r}".format(
            self.activation_list, self.n_hidden))
    self.bias_init = bias_init
    assert type(self.bias_init) is ListType, (
        "bias initialization must be a list: {0!r}".format(self.bias_init))
    assert len(self.bias_init) == len(n_hidden) + 1, (
        "Bias initialization list must have len(n_hidden) + 1 values. "
        "Bias list: {0!r}, n_hidden: {1!r}".format(
            self.bias_init, self.n_hidden))
    self.activation = parse_activations(self.activation_list)
    self.type = 'VanillaRNN'
    self.opt = {'type': self.type, 'n_in': self.n_in,
                'n_hidden': self.n_hidden,
                'activation': self.activation_list,
                'bias_init': self.bias_init}
    self.defined = False
    logging.info('RNN loaded. Type: {0}, input layer: {1}, '
                 'hidden layers: {2}, activation: {3}'.format(
                     self.type, self.n_in, self.n_hidden,
                     self.activation_list))
    self.n_out = n_out
    self.initialize_weights()
    self.generative_network()
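# --- Construction sketch (not from the source) ----------------------------
# Hypothetical instantiation, assuming the enclosing class is exposed as
# VanillaRNN (inferred from self.type), that prng is a
# numpy.random.RandomState, and that 'sigmoid'/'linear' are names accepted
# by parse_activations. Note that activation and bias_init carry one entry
# per hidden layer plus one for the output layer, as enforced by the
# asserts above.
#
#   prng = np.random.RandomState(1234)
#   rnn = VanillaRNN(n_in=20, n_hidden=[50, 30], n_out=10,
#                    activation=['sigmoid', 'sigmoid', 'linear'],
#                    bias_init=[0.0, 0.0, 0.0], prng=prng)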
def __init__(self, n_in, n_hidden, n_out, activation, bias_init, prng):
    """Defines the basics of a Bidirectional Recurrent Neural Network.

    :param n_in: integer defining the number of input units.
    :param n_hidden: list of integers defining the number of hidden units per layer.
    :param n_out: integer defining the number of output units.
    :param activation: list of size len(n_hidden) + 1 defining the activation function per layer.
    :param bias_init: list with bias initialization for [layers biases] + [output bias].
    :param prng: random number generator.
    """
    self.prng = prng
    self.n_in = n_in
    assert type(self.n_in) is IntType, (
        "n_in must be an integer: {0!r}".format(self.n_in))
    assert type(n_hidden) is ListType, (
        "n_hidden must be a list: {0!r}".format(n_hidden))
    self.n_hidden = np.array(n_hidden)
    self.activation_list = activation
    assert type(self.activation_list) is ListType, (
        "activation must be a list: {0!r}".format(self.activation_list))
    assert len(self.n_hidden) + 1 == len(self.activation_list), (
        "Activation list must have len(n_hidden) + 1 values. "
        "Activation: {0!r}, n_hidden: {1!r}".format(
            self.activation_list, self.n_hidden))
    self.bias_init = bias_init
    assert type(self.bias_init) is ListType, (
        "bias initialization must be a list: {0!r}".format(self.bias_init))
    assert len(self.bias_init) == len(n_hidden) + 1, (
        "Bias initialization list must have len(n_hidden) + 1 values. "
        "Bias list: {0!r}, n_hidden: {1!r}".format(
            self.bias_init, self.n_hidden))
    self.activation = parse_activations(self.activation_list)
    self.n_out = n_out
    assert type(self.n_out) is IntType, (
        "n_out must be an int: {0!r}".format(self.n_out))
    self.forward_rnn = HiddenRNN(n_in, n_hidden, activation[:-1],
                                 bias_init[:-1], prng)
    self.backward_rnn = HiddenRNN(n_in, n_hidden, activation[:-1],
                                  bias_init[:-1], prng)
    self.type = 'BidirectionalRNN'
    self.opt = {'type': self.type, 'n_in': self.n_in,
                'n_hidden': self.n_hidden, 'n_out': self.n_out,
                'activation': self.activation_list,
                'bias_init': self.bias_init}
    self.initialize_weights()
    self.complete_defined = False
    self.define_complete_network()
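# --- Construction sketch (not from the source) ----------------------------
# Hypothetical instantiation, assuming the enclosing class is exposed as
# BidirectionalRNN (inferred from self.type) and that prng is a
# numpy.random.RandomState. The last activation and bias entries
# parametrize the output layer; the remaining entries are passed to the
# forward and backward HiddenRNN instances via activation[:-1] and
# bias_init[:-1].
#
#   prng = np.random.RandomState(1234)
#   brnn = BidirectionalRNN(n_in=20, n_hidden=[50, 30], n_out=10,
#                           activation=['sigmoid', 'sigmoid', 'linear'],
#                           bias_init=[0.0, 0.0, 0.0], prng=prng)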