def test_get_error():
    model = helpers.SetOutputModel([1])
    assert validation.get_error(
        model,
        numpy.array([[1]]),
        numpy.array([[0]]),
        error_func=MeanSquaredError()) == 1.0
    assert validation.get_error(
        model,
        numpy.array([[1]]),
        numpy.array([[1]]),
        error_func=MeanSquaredError()) == 0.0
    assert validation.get_error(
        model,
        numpy.array([[1]]),
        numpy.array([[0.5]]),
        error_func=MeanSquaredError()) == 0.25
    assert validation.get_error(
        model,
        numpy.array([[1], [1]]),
        numpy.array([[1], [0]]),
        error_func=MeanSquaredError()) == 0.5
    assert validation.get_error(
        model,
        numpy.array([[1], [1]]),
        numpy.array([[0.5], [0.5]]),
        error_func=MeanSquaredError()) == 0.25
def test_get_error_unusual_targets_shape():
    model = multioutputs.MultiOutputs([
        helpers.SetOutputModel([1.0]),
        helpers.SetOutputModel([1.0, 1.0, 1.0])
    ])

    assert validation.get_error(
        model, [[]], [[[1.0], [1.0, 1.0, 1.0]]],
        error_func=MeanSquaredError()) == 0.0
    assert validation.get_error(
        model, [[]], [[[1.0], [0.0, 0.0, 0.0]]],
        error_func=MeanSquaredError()) == 0.5
def __init__(self,
             attributes,
             num_outputs,
             optimizer=None,
             error_func=None,
             penalty_func=None,
             jacobian_norm_break=1e-10):
    super(RegressionModel, self).__init__()

    # Weight matrix, optimized during training
    self._weight_matrix = self._random_weight_matrix(
        self._weights_shape(attributes, num_outputs))

    # Optimizer to optimize weight_matrix
    if optimizer is None:
        optimizer = optimize.make_optimizer(
            reduce(operator.mul, self._weight_matrix.shape))
    self._optimizer = optimizer

    # Error function for training
    if error_func is None:
        error_func = MeanSquaredError()
    self._error_func = error_func

    # Penalty function for training
    self._penalty_func = penalty_func

    # Convergence criteria
    self._jacobian_norm_break = jacobian_norm_break
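A quick illustration of the parameter count passed to make_optimizer above: reduce(operator.mul, shape) is simply the number of entries in the weight matrix (numpy.prod(shape) would be equivalent). A minimal sketch:

import operator
from functools import reduce

weights_shape = (3, 4)
num_parameters = reduce(operator.mul, weights_shape)  # 3 * 4 == 12
print(num_parameters)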
def get_error(model, input_matrix, target_matrix,
              error_func=MeanSquaredError()):
    """Return mean error of model on given dataset."""
    return numpy.mean([
        error_func(model.activate(input_vec), target_vec)
        for input_vec, target_vec in zip(input_matrix, target_matrix)
    ])
def get_error(model, input_matrix, target_matrix,
              error_func=MeanSquaredError()):
    """Return mean error of model on given dataset."""
    # TODO: Activate model on matrix (once all models support it)
    return numpy.mean([
        error_func(model.activate(input_vec), target_vec)
        for input_vec, target_vec in zip(input_matrix, target_matrix)
    ])
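A self-contained sketch of what get_error computes, assuming MeanSquaredError averages squared differences over the output vector; ConstantModel and mean_squared_error are hypothetical stand-ins, not the library's classes.

import numpy

class ConstantModel(object):
    """Hypothetical model that always returns the same output vector."""
    def __init__(self, output_vec):
        self._output_vec = numpy.array(output_vec)

    def activate(self, input_vec):
        return self._output_vec

def mean_squared_error(output_vec, target_vec):
    """Stand-in for MeanSquaredError: mean squared difference over outputs."""
    return numpy.mean((numpy.asarray(output_vec) - numpy.asarray(target_vec))**2)

model = ConstantModel([1.0])
input_matrix = numpy.array([[0.0], [0.0]])
target_matrix = numpy.array([[1.0], [0.0]])

# Mean of per-sample errors, matching get_error: (0.0 + 1.0) / 2 == 0.5
per_sample_errors = [
    mean_squared_error(model.activate(input_vec), target_vec)
    for input_vec, target_vec in zip(input_matrix, target_matrix)
]
print(numpy.mean(per_sample_errors))  # 0.5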
def __init__(self,
             attributes,
             num_clusters,
             num_outputs,
             optimizer=None,
             error_func=None,
             jacobian_norm_break=1e-10,
             variance=None,
             scale_by_similarity=True,
             clustering_model=None,
             cluster_incrementally=False):
    super(RBF, self).__init__()

    # Clustering algorithm
    self._cluster_incrementally = cluster_incrementally

    if clustering_model is None:
        # TODO: Replace with k-means
        clustering_model = SOM(
            attributes,
            num_clusters,
            move_rate=0.1,
            neighborhood=2,
            neighbor_move_rate=1.0)
        clustering_model.logging = False
    self._clustering_model = clustering_model

    # Variance for gaussian
    if variance is None:
        variance = 4.0 / num_clusters
    self._variance = variance

    # Weight matrix and bias for output
    self._shape = (num_clusters, num_outputs)
    self._weight_matrix = self._random_weight_matrix(self._shape)
    self._bias_vec = self._random_weight_matrix(self._shape[1])

    # Optimizer to optimize weight_matrix
    if optimizer is None:
        optimizer = optimize.make_optimizer(
            reduce(operator.mul, self._weight_matrix.shape))
    self._optimizer = optimizer

    # Error function for training
    if error_func is None:
        error_func = MeanSquaredError()
    self._error_func = error_func

    # Convergence criteria
    self._jacobian_norm_break = jacobian_norm_break

    # Optional scaling output by total gaussian similarity
    self._scale_by_similarity = scale_by_similarity

    # For training
    self._similarity_tensor = None
def __init__(self,
             shape,
             transfers=None,
             optimizer=None,
             error_func=None,
             jacobian_norm_break=1e-10):
    super(MLP, self).__init__()

    if transfers is None:
        transfers = [ReluTransfer()
                     for _ in range(len(shape) - 2)] + [LinearTransfer()]
    elif isinstance(transfers, Transfer):
        # Treat a single given transfer as the output transfer
        transfers = [ReluTransfer()
                     for _ in range(len(shape) - 2)] + [transfers]

    if len(transfers) != len(shape) - 1:
        raise ValueError(
            'Must have exactly 1 transfer between each pair of layers, and after the output'
        )

    self._shape = shape

    self._bias_vec = self._random_weight_matrix(
        shape[1])  # Number of outputs of first layer
    self._weight_matrices = []
    self._setup_weight_matrices()
    self._transfers = transfers

    # Parameter optimization for training
    if optimizer is None:
        optimizer = optimize.make_optimizer(
            sum(
                reduce(operator.mul, weight_matrix.shape)
                for weight_matrix in self._weight_matrices))
    self._optimizer = optimizer

    # Error function for training
    if error_func is None:
        error_func = MeanSquaredError()
    self._error_func = error_func

    # Convergence criteria
    self._jacobian_norm_break = jacobian_norm_break

    # Activation vectors:
    # 1 for input, then 2 for each hidden and output layer
    # (1 for transfer, 1 for perceptron), to help with jacobian calculation
    self._weight_inputs = [None] * len(self._shape)
    self._transfer_inputs = [None] * (len(self._shape) - 1)

    self.reset()
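A hedged usage sketch for the MLP above, assuming the package exports MLP at the top level and that train accepts (input_matrix, target_matrix); the XOR dataset is constructed inline rather than taken from the library. Exact names may differ across versions.

import numpy
from learning import MLP

# XOR: 2 inputs -> 4 hidden (ReLU by default) -> 1 linear output
input_matrix = numpy.array([[0., 0.], [0., 1.], [1., 0.], [1., 1.]])
target_matrix = numpy.array([[0.], [1.], [1.], [0.]])

model = MLP((2, 4, 1))
model.train(input_matrix, target_matrix)
print(model.activate(numpy.array([1., 0.])))  # expect a value near 1.0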
def __init__(self,
             attributes,
             num_clusters,
             num_outputs,
             optimizer=None,
             error_func=None,
             variance=None,
             scale_by_similarity=True,
             pre_train_clusters=False,
             move_rate=0.1,
             neighborhood=2,
             neighbor_move_rate=1.0):
    super(RBF, self).__init__()

    # Clustering algorithm
    self._pre_train_clusters = pre_train_clusters
    self._som = SOM(
        attributes,
        num_clusters,
        move_rate=move_rate,
        neighborhood=neighborhood,
        neighbor_move_rate=neighbor_move_rate)

    # Variance for gaussian
    if variance is None:
        variance = 4.0 / num_clusters
    self._variance = variance

    # Weight matrix for output
    self._weight_matrix = self._random_weight_matrix(
        (num_clusters, num_outputs))

    # Optimizer to optimize weight_matrix
    if optimizer is None:
        optimizer = optimize.make_optimizer(
            reduce(operator.mul, self._weight_matrix.shape))
    self._optimizer = optimizer

    # Error function for training
    if error_func is None:
        error_func = MeanSquaredError()
    self._error_func = error_func

    # Optional scaling output by total gaussian similarity
    self._scale_by_similarity = scale_by_similarity

    # For training
    self._similarities = None
    self._total_similarity = None
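For intuition on the variance default above, a sketch of one common gaussian RBF similarity; the library's exact kernel may differ in detail, so treat gaussian_similarity as an illustrative assumption.

import numpy

def gaussian_similarity(input_vec, center_vec, variance):
    """One common gaussian RBF form: exp(-||x - c||^2 / variance)."""
    diff = numpy.asarray(input_vec) - numpy.asarray(center_vec)
    return numpy.exp(-diff.dot(diff) / variance)

num_clusters = 4
variance = 4.0 / num_clusters  # the default used above
print(gaussian_similarity([0.0, 0.0], [1.0, 0.0], variance))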
def __init__(self, shape, transfers=None, optimizer=None, error_func=None):
    super(MLP, self).__init__()

    if transfers is None:
        transfers = [ReluTransfer()
                     for _ in range(len(shape) - 2)] + [LinearTransfer()]
    elif isinstance(transfers, Transfer):
        # Treat a single given transfer as the output transfer
        transfers = [ReluTransfer()
                     for _ in range(len(shape) - 2)] + [transfers]

    if len(transfers) != len(shape) - 1:
        raise ValueError(
            'Must have exactly 1 transfer between each pair of layers, and after the output'
        )

    self._shape = shape

    self._weight_matrices = []
    self._setup_weight_matrices()
    self._transfers = transfers

    # Parameter optimization for training
    if optimizer is None:
        optimizer = optimize.make_optimizer(
            sum(
                reduce(operator.mul, weight_matrix.shape)
                for weight_matrix in self._weight_matrices))
    self._optimizer = optimizer

    # Error function for training
    if error_func is None:
        error_func = MeanSquaredError()
    self._error_func = error_func

    # Setup activation vectors:
    # 1 for input, then 2 for each hidden and output layer
    # (1 for transfer, 1 for perceptron); +1 for biases
    self._weight_inputs = [numpy.ones(shape[0] + 1)]
    self._transfer_inputs = []
    for size in shape[1:]:
        self._weight_inputs.append(numpy.ones(size + 1))
        self._transfer_inputs.append(numpy.zeros(size))

    self.reset()
def test_mlp_jacobian_softmax_out_mse():
    _check_jacobian(lambda s1, s2, s3: mlp.MLP(
        (s1, s2, s3),
        transfers=SoftmaxTransfer(),
        error_func=MeanSquaredError()))
def test_mlp_jacobian_lin_out_mse():
    _check_jacobian(lambda s1, s2, s3: mlp.MLP(
        (s1, s2, s3),
        transfers=mlp.LinearTransfer(),
        error_func=MeanSquaredError()))
def test_mlp_obj_and_obj_jac_match_softmax_out_mse():
    _check_obj_and_obj_jac_match(lambda s1, s2, s3: mlp.MLP(
        (s1, s2, s3),
        transfers=SoftmaxTransfer(),
        error_func=MeanSquaredError()))
def test_mlp_obj_and_obj_jac_match_lin_out_mse():
    _check_obj_and_obj_jac_match(lambda s1, s2, s3: mlp.MLP(
        (s1, s2, s3),
        transfers=mlp.LinearTransfer(),
        error_func=MeanSquaredError()))
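These tests compare analytic jacobians against numerical ones. A self-contained sketch of that pattern, using central finite differences; check_jacobian_against_finite_difference is a hypothetical helper, not the suite's _check_jacobian.

import numpy

def check_jacobian_against_finite_difference(f, jac, x, epsilon=1e-6,
                                             tolerance=1e-4):
    """Compare analytic jacobian jac(x) to central finite differences of f."""
    approx = numpy.zeros_like(x)
    for i in range(x.size):
        step = numpy.zeros_like(x)
        step[i] = epsilon
        approx[i] = (f(x + step) - f(x - step)) / (2.0 * epsilon)
    assert numpy.allclose(jac(x), approx, atol=tolerance)

# Example: f(x) = sum(x^2) has jacobian 2x
check_jacobian_against_finite_difference(
    lambda x: numpy.sum(x**2),
    lambda x: 2.0 * x,
    numpy.array([1.0, -2.0, 3.0]))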