def evaluate(self, network, verbose=False):
    """Score ``network`` on this task's input/output table.

    :param network: anything convertible to a ``NeuralNetwork``.
    :param verbose: when True, print each case's input, output and error.
    :return: dict with a single ``'fitness'`` key in (0, 1].
    """
    if not isinstance(network, NeuralNetwork):
        network = NeuralNetwork(network)
    network.make_feedforward()
    # The output node must saturate towards -1 (e.g. a tanh node);
    # feeding a large negative value checks that.
    if not network.node_types[-1](-1000) < -0.95:
        raise Exception("Network should be able to output value of -1, e.g. using a tanh node.")
    cases = list(zip(self.INPUTS, self.OUTPUTS))
    random.shuffle(cases)
    if not self.do_all:
        # Evaluate on a single randomly chosen case only.
        cases = [random.choice(cases)]
    total_err = 0.0
    for inputs, expected in cases:
        # Feed the input (bias handled by the network) and keep only
        # the trailing output nodes.
        actual = network.feed(inputs)[-len(expected):]
        diff = expected - actual
        # Deadband: errors below EPSILON count as zero.
        diff[abs(diff) < self.EPSILON] = 0
        mse = (diff ** 2).mean()
        if verbose:
            print("%r -> %r (%.2f)" % (inputs, actual, mse))
        total_err += mse
    score = 1 / (1 + np.sqrt(total_err / len(cases)))
    return {'fitness': score}
def convert(self, individual):
    """Decode ``individual``'s wavelet lists into a weight matrix over the
    substrate's connections, then wrap it in a ``NeuralNetwork``.
    """
    num_nodes = self.substrate.num_nodes
    cm = np.zeros((num_nodes, num_nodes))
    for (src, tgt), coords, conn_id, expr_id in self.substrate.get_connection_list(self.add_deltas):
        # Append a homogeneous coordinate so the matrix can encode a
        # translation (bias) as well.
        coords = np.hstack((coords, [1]))
        cm[tgt, src] = sum(
            amplitude * gabor_opt(*(np.dot(mat, coords)), sigma=sigma)
            for (amplitude, sigma, mat) in individual.wavelets[conn_id]
        )
    # Zero weights below the threshold, shift the rest towards zero by the
    # threshold, and rescale so the usable band spans the full weight range.
    cm[np.abs(cm) < self.min_weight] = 0
    cm -= np.sign(cm) * self.min_weight
    cm *= self.weight_range / (self.weight_range - self.min_weight)
    # Clip anything that still exceeds the allowed range.
    cm = np.clip(cm, -self.weight_range, self.weight_range)
    net = NeuralNetwork().from_matrix(cm, node_types=[self.node_type])
    if self.sandwich:
        net.make_sandwich()
    if self.feedforward:
        net.make_feedforward()
    return net
def evaluate(self, network, verbose=False):
    """Evaluate ``network`` against the task's INPUTS/OUTPUTS pairs.

    Returns ``{'fitness': score}`` where the score is ``1 / (1 + rmse)``.
    """
    if not isinstance(network, NeuralNetwork):
        network = NeuralNetwork(network)
    network.make_feedforward()
    # Sanity check: a large negative input should drive the output node
    # close to -1 (tanh-like saturation).
    if not network.node_types[-1](-1000) < -0.95:
        raise Exception("Network should be able to output value of -1, e.g. using a tanh node.")
    samples = list(zip(self.INPUTS, self.OUTPUTS))
    random.shuffle(samples)
    samples = samples if self.do_all else [random.choice(samples)]
    sum_sq = 0.0
    for pattern, target in samples:
        # Feed with bias; the outputs are the last len(target) activations.
        out = network.feed(pattern)
        out = out[-len(target):]
        residual = target - out
        # Errors within EPSILON are treated as exact matches.
        residual[abs(residual) < self.EPSILON] = 0
        case_err = (residual ** 2).mean()
        if verbose:
            print("%r -> %r (%.2f)" % (pattern, out, case_err))
        sum_sq += case_err
    return {'fitness': 1 / (1 + np.sqrt(sum_sq / len(samples)))}
def convert(self, individual):
    """Build a ``NeuralNetwork`` whose weights are given by summing the
    individual's Gabor wavelets at each substrate connection.
    """
    n = self.substrate.num_nodes
    cm = np.zeros((n, n))
    connections = self.substrate.get_connection_list(self.add_deltas)
    for (a, b), loc, conn_id, expr_id in connections:
        # Homogeneous/bias coordinate enables translation in the transform.
        loc = np.hstack((loc, [1]))
        total = 0.0
        for amp, sigma, mat in individual.wavelets[conn_id]:
            total += amp * gabor_opt(*(np.dot(mat, loc)), sigma=sigma)
        cm[b, a] = total
    # Rescale weights: drop sub-threshold entries, shrink the survivors by
    # the threshold, then stretch back to cover the full weight range.
    below = np.abs(cm) < self.min_weight
    cm[below] = 0
    cm -= np.sign(cm) * self.min_weight
    cm *= self.weight_range / (self.weight_range - self.min_weight)
    # Clip the highest weights into the permitted interval.
    cm = np.clip(cm, -self.weight_range, self.weight_range)
    net = NeuralNetwork().from_matrix(cm, node_types=[self.node_type])
    if self.sandwich:
        net.make_sandwich()
    if self.feedforward:
        net.make_feedforward()
    return net
def convert(self, network):
    """ Performs conversion.

        :param network: Any object that is convertible to a
            :class:`~peas.networks.NeuralNetwork`.
        :raises Exception: if the CPPN has too few inputs for this
            substrate, or if the resulting weights contain NaN/inf.
    """
    # Cast input to a neuralnetwork if it isn't
    if not isinstance(network, NeuralNetwork):
        network = NeuralNetwork(network)
    # Since Stanley mentions to "fully activate" the CPPN,
    # I assume this means it's a feedforward net, since otherwise
    # there is no clear definition of "full activation".
    # In an FF network, activating each node once leads to a stable condition.

    # Check if the network has enough inputs: one per coordinate of the
    # source and target positions, plus a bias, plus (optionally) deltas.
    required_inputs = 2 * self.substrate.dimensions + 1
    if self.add_deltas:
        required_inputs += self.substrate.dimensions
    if network.cm.shape[0] <= required_inputs:
        # BUGFIX: the message previously referenced an undefined name
        # (`cm_dims`), which raised a NameError instead of this Exception.
        raise Exception(
            "Network does not have enough inputs. Has %d, needs %d"
            % (network.cm.shape[0], required_inputs + 1))
    # Initialize connectivity matrix
    cm = np.zeros((self.substrate.num_nodes, self.substrate.num_nodes))
    for (i, j), coords, conn_id, expr_id in self.substrate.get_connection_list(self.add_deltas):
        # Optional expression gate: the connection exists only if the
        # CPPN's expression output is positive at these coordinates.
        expression = True
        if expr_id is not None:
            network.flush()
            expression = network.feed(coords, self.activation_steps)[expr_id] > 0
        if expression:
            network.flush()
            weight = network.feed(coords, self.activation_steps)[conn_id]
            cm[j, i] = weight
    # Rescale the CM: zero sub-threshold weights, shift the rest towards
    # zero by the threshold, then stretch to cover the full weight range.
    cm[np.abs(cm) < self.min_weight] = 0
    cm -= (np.sign(cm) * self.min_weight)
    cm *= self.weight_range / (self.weight_range - self.min_weight)
    # Clip highest weights
    cm = np.clip(cm, -self.weight_range, self.weight_range)
    net = NeuralNetwork().from_matrix(cm, node_types=[self.node_type])
    if self.sandwich:
        net.make_sandwich()
    if self.feedforward:
        net.make_feedforward()
    if not np.all(np.isfinite(net.cm)):
        raise Exception("Network contains NaN/inf weights.")
    return net
def convert(self, network):
    """ Performs conversion.

        :param network: Any object that is convertible to a
            :class:`~peas.networks.NeuralNetwork`.
        :raises Exception: if the CPPN has too few inputs for this
            substrate, or if the resulting weights contain NaN/inf.
    """
    # Cast input to a neuralnetwork if it isn't
    if not isinstance(network, NeuralNetwork):
        network = NeuralNetwork(network)
    # Since Stanley mentions to "fully activate" the CPPN,
    # I assume this means it's a feedforward net, since otherwise
    # there is no clear definition of "full activation".
    # In an FF network, activating each node once leads to a stable condition.

    # Check if the network has enough inputs: one per coordinate of the
    # source and target positions, plus a bias, plus (optionally) deltas.
    required_inputs = 2 * self.substrate.dimensions + 1
    if self.add_deltas:
        required_inputs += self.substrate.dimensions
    if network.cm.shape[0] <= required_inputs:
        # BUGFIX: the message previously referenced an undefined name
        # (`cm_dims`), which raised a NameError instead of this Exception.
        raise Exception(
            "Network does not have enough inputs. Has %d, needs %d"
            % (network.cm.shape[0], required_inputs + 1))
    # Initialize connectivity matrix
    cm = np.zeros((self.substrate.num_nodes, self.substrate.num_nodes))
    for (i, j), coords, conn_id, expr_id in self.substrate.get_connection_list(self.add_deltas):
        # Optional expression gate: the connection exists only if the
        # CPPN's expression output is positive at these coordinates.
        expression = True
        if expr_id is not None:
            network.flush()
            expression = network.feed(coords, self.activation_steps)[expr_id] > 0
        if expression:
            network.flush()
            weight = network.feed(coords, self.activation_steps)[conn_id]
            cm[j, i] = weight
    # Rescale the CM: zero sub-threshold weights, shift the rest towards
    # zero by the threshold, then stretch to cover the full weight range.
    cm[np.abs(cm) < self.min_weight] = 0
    cm -= (np.sign(cm) * self.min_weight)
    cm *= self.weight_range / (self.weight_range - self.min_weight)
    # Clip highest weights
    cm = np.clip(cm, -self.weight_range, self.weight_range)
    net = NeuralNetwork().from_matrix(cm, node_types=[self.node_type])
    if self.sandwich:
        net.make_sandwich()
    if self.feedforward:
        net.make_feedforward()
    if not np.all(np.isfinite(net.cm)):
        raise Exception("Network contains NaN/inf weights.")
    return net