if next_unit_weights == None: raise NpyTransferFunctionError, 'ErrorLinear cannot be used in an output unit.' # Pre-allocate the error_sum list so that we can loop on it error_sum = [] for i in range(len(next_unit_weights[0])): error_sum.append(0) # Compute the error_sum values for nexterror, weights in itertools.izip(next_unit_errors, next_unit_weights): for weight, error_sum_id in itertools.izip(weights, range(len(error_sum))): error_sum[error_sum_id] = error_sum[error_sum_id] + nexterror * weight # Multiply by the derivative of the activation function # to compute the final value errors = [] for currenterror, computed in itertools.izip(error_sum, outputs): errors.append(activation_derivative(computed) * currenterror) return errors @staticmethod def build_instance(): return ErrorLinear() # Declare the error functions to the Factory Factory.declare_instance(ErrorOutputDifference()) Factory.declare_instance(ErrorLinear())
            # Fragment: the opening of this if/else is outside this chunk;
            # indentation reconstructed -- verify against the full file.
            vector[0] = 1
        else:
            # Labels are 1-based: mark position label-1 in the vector.
            vector[label-1] = 1
        return vector

    def vector_to_label(self, vector):
        # Convert an output vector back to a 1-based label number.
        # A single-output network thresholds at 0.5 (>= .5 -> label 2,
        # otherwise label 1); with several outputs the index of the
        # maximum component wins.
        index_max = 0
        if len(vector) == 1:
            if(vector[0] >= .5):
                label = 2
            else:
                label = 1
        else:
            for index in range(1, len(vector)):
                if vector[index] > vector[index_max]:
                    index_max = index
            label = index_max + 1
        return label

    @staticmethod
    def build_instance():
        # Factory hook: return a new LabelMax instance.
        return LabelMax()


# Declare the label functions to the Factory
Factory.declare_instance(LabelMax())
output = outputs[-1][0] es = [] for eprev, weight_update in itertools.izip(eprevs, weight_update): es.append(vgamma * vlambda * eprev + weight_update * output) weights = unit.get_weights() next_weights = [] for node_weights, e in itertools.izip(weights, es): #print weight, outputnext, output, e next_node_weights = [] for weight in node_weights: next_node_weights.append(weight + valpha *(reward + vgamma * outputnext[0] - output)*e) next_weights.append(next_node_weights) user_data_out.append(es) return next_weights @staticmethod def build_instance(): return UpdateTD() # Declare the activation functions to the Update class Factory.declare_instance(UpdateBackpropagation()) Factory.declare_instance(UpdateTD())
self._set_name("me_accuracy") def compute_metric(self, data_set, data_classification): """ Compute the accuracy. """ nb_correctly_classified = 0 data_instances = data_set.get_data_instances() if len(data_instances) == 0: return 0 for data_instance_original in data_instances: label_original = data_instance_original.get_label_number() data_instance_classified = data_classification.get_data_label_by_id( data_instance_original.get_index_number()) label_classified = data_instance_classified.get_label_number() if label_classified == label_original: nb_correctly_classified += 1 return nb_correctly_classified / len(data_instances) @staticmethod def build_instance(): return MetricAccuracy() # Declare the metric functions to the Factory Factory.declare_instance(MetricAccuracy())
:Raises NpyTransferFunctionError: If name_metric_function does not correspond to a metric function. """ if interval_check < 1: raise NpyValueError, 'interval_check has to be greater or equal to 1' Factory.check_prefix(name_metric_function, Metric.prefix) metric_function = Factory.build_instance_by_name(name_metric_function) nb_iterations_current = 0 metric_value_computed = metric_value_min - 1 while nb_iterations_current < nb_iterations_max and metric_value_computed < metric_value_min: network.learn_cycles(data_set, interval_check) data_classification = network.classify_data_set(data_set) metric_value_computed = metric_function.compute_metric(data_set, data_classification) nb_iterations_current += interval_check return nb_iterations_current @staticmethod def build_instance(): return TrainSimple() # Declare the learning functions to the Factory Factory.declare_instance(TrainSimple())
            # Fragment: tail of a method whose def line is outside this
            # chunk (presumably the perceptron's positive branch -- verify).
            return 1

    @staticmethod
    def build_instance():
        # Factory hook: return a new ActivationPerceptron instance.
        return ActivationPerceptron()


class ActivationSigmoid(Activation):
    """ Sigmoid activation function """

    def __init__(self):
        Activation.__init__(self)
        # Register the name used for Factory lookup.
        self._set_name("ac_sigmoid")

    def activation_function(self, x):
        # Logistic sigmoid: 1 / (1 + e^-x), maps any real x into (0, 1).
        return 1 / (1 + math.exp(-x))

    def activation_derivative(self, x):
        # Derivative expressed in terms of the sigmoid OUTPUT:
        # s'(t) = s(t) * (1 - s(t)), so x here is assumed to be the
        # already-activated output value, not the raw input.
        return x * (1 - x)

    @staticmethod
    def build_instance():
        # Factory hook: return a new ActivationSigmoid instance.
        return ActivationSigmoid()


# Declare the activation functions to the Factory
Factory.declare_instance(ActivationLinear())
Factory.declare_instance(ActivationPerceptron())
Factory.declare_instance(ActivationSigmoid())
                # Fragment: the opening of this if/else ladder is outside
                # this chunk; indentation reconstructed -- verify in file.
                vector[0] = 0
            else:
                vector[0] = 1
        else:
            # Labels are 1-based: mark position label-1 in the vector.
            vector[label - 1] = 1
        return vector

    def vector_to_label(self, vector):
        # Convert an output vector back to a 1-based label number.
        # A single-output network thresholds at 0.5 (>= .5 -> label 2,
        # otherwise label 1); with several outputs the index of the
        # maximum component wins.
        index_max = 0
        if len(vector) == 1:
            if (vector[0] >= .5):
                label = 2
            else:
                label = 1
        else:
            for index in range(1, len(vector)):
                if vector[index] > vector[index_max]:
                    index_max = index
            label = index_max + 1
        return label

    @staticmethod
    def build_instance():
        # Factory hook: return a new LabelMax instance.
        return LabelMax()


# Declare the label functions to the Factory
Factory.declare_instance(LabelMax())
class ActivationSigmoid(Activation):
    """
    Logistic sigmoid activation function.

    Registered with the Factory under the name "ac_sigmoid".
    """

    def __init__(self):
        Activation.__init__(self)
        self._set_name("ac_sigmoid")

    def activation_function(self, x):
        """Map x into (0, 1) with the logistic function 1 / (1 + e^-x)."""
        return 1.0 / (1.0 + math.exp(-x))

    def activation_derivative(self, x):
        """
        Derivative of the sigmoid, written in terms of its output:
        for x = sigmoid(t), sigmoid'(t) = x * (1 - x).
        """
        return x * (1 - x)

    @staticmethod
    def build_instance():
        """Factory hook: build a new ActivationSigmoid."""
        return ActivationSigmoid()


# Declare the activation functions to the Factory
Factory.declare_instance(ActivationLinear())
Factory.declare_instance(ActivationPerceptron())
Factory.declare_instance(ActivationSigmoid())
# Pre-allocate the error_sum list so that we can loop on it error_sum = [] for i in range(len(next_unit_weights[0])): error_sum.append(0) # Compute the error_sum values for nexterror, weights in itertools.izip(next_unit_errors, next_unit_weights): for weight, error_sum_id in itertools.izip(weights, range(len(error_sum))): error_sum[error_sum_id] = error_sum[ error_sum_id] + nexterror * weight # Multiply by the derivative of the activation function # to compute the final value errors = [] for currenterror, computed in itertools.izip(error_sum, outputs): errors.append(activation_derivative(computed) * currenterror) return errors @staticmethod def build_instance(): return ErrorLinear() # Declare the error functions to the Factory Factory.declare_instance(ErrorOutputDifference()) Factory.declare_instance(ErrorLinear())
try: Factory.check_prefix(name_metric_function, Metric.prefix) metric_function = Factory.build_instance_by_name(name_metric_function) except NpyTransferFunctionError, e: raise NpyTransferFunctionError, e.msg nb_iterations_current = 0 metric_value_computed = metric_value_min - 1 while (nb_iterations_max == None or nb_iterations_current < nb_iterations_max) \ and metric_value_computed < metric_value_min: try: network.learn_cycles(data_set, interval_check) data_classification = network.classify_data_set(data_set) except NpyDataTypeError, e: raise NpyDataTypeError, e.msg metric_value_computed = metric_function.compute_metric(data_set, data_classification) nb_iterations_current += interval_check return nb_iterations_current @staticmethod def build_instance(): return TrainSimple() # Declare the learning functions to the Factory Factory.declare_instance(TrainSimple())
def compute_metric(self, data_set, data_classification): """ Compute the accuracy. """ nb_correctly_classified = 0 data_instances = data_set.get_data_instances() if len(data_instances) == 0: return 0 for data_instance_original in data_instances: label_original = data_instance_original.get_label_number() data_instance_classified = data_classification.get_data_label_by_id(data_instance_original.get_index_number()) label_classified = data_instance_classified.get_label_number() if label_classified == label_original: nb_correctly_classified += 1 return nb_correctly_classified / len(data_instances) @staticmethod def build_instance(): return MetricAccuracy() # Declare the metric functions to the Factory Factory.declare_instance(MetricAccuracy())
output = outputs[-1][0] es = [] for eprev, weight_update in itertools.izip(eprevs, weight_update): es.append(vgamma * vlambda * eprev + weight_update * output) weights = unit.get_weights() next_weights = [] for node_weights, e in itertools.izip(weights, es): #print weight, outputnext, output, e next_node_weights = [] for weight in node_weights: next_node_weights.append( weight + valpha * (reward + vgamma * outputnext[0] - output) * e) next_weights.append(next_node_weights) out_data.append(es) return next_weights @staticmethod def build_instance(): return UpdateTD() # Declare the activation functions to the Update class Factory.declare_instance(UpdateBackpropagation()) Factory.declare_instance(UpdateTD())