from neupy import storage
from neupy.exceptions import StopTraining

def on_epoch_end(gdnet):
    epoch = gdnet.last_epoch
    errors = gdnet.validation_errors

    if errors.previous() and errors.last() > errors.previous():
        # Load parameters and stop training
        storage.load(gdnet, 'training-epoch-{}.pickle'.format(epoch - 1))
        raise StopTraining("Training has been interrupted")
    else:
        # Save parameters after successful epoch
        storage.save(gdnet, 'training-epoch-{}.pickle'.format(epoch))
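A hedged sketch of how this callback might be attached, using the older neupy API seen in Example #4 (`epoch_end_signal`); the layer sizes and data arrays are assumptions, and `validation_errors` is only populated when validation data is passed to `train`:

from neupy import algorithms

gdnet = algorithms.GradientDescent(
    (10, 20, 1),                    # hypothetical layer sizes
    epoch_end_signal=on_epoch_end,  # callback defined above
)

# x_valid/y_valid populate gdnet.validation_errors for the callback.
gdnet.train(x_train, y_train, x_valid, y_valid, epochs=100)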
Example #2
        def on_epoch_end(network):
            # `errors`, `tempdir`, `x_test` and `y_test` come from the
            # enclosing test's scope.
            epoch = network.last_epoch
            errors[epoch] = network.prediction_error(x_test, y_test)

            if epoch == 4:
                storage.load_pickle(network.connection,
                                    os.path.join(tempdir, 'training-epoch-2'))
                raise StopTraining('Stop training process after 4th epoch')
            else:
                storage.save_pickle(
                    network.connection,
                    os.path.join(tempdir, 'training-epoch-{}'.format(epoch)))
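The callback above closes over several names from the enclosing test. A minimal sketch of that scaffolding, where everything except `os`, `storage` and `StopTraining` is an assumption reconstructed from the identifiers used:

import os
import tempfile

from neupy import algorithms, storage
from neupy.exceptions import StopTraining

tempdir = tempfile.mkdtemp()
errors = {}

# `network` and the train/test arrays would be built by the test itself.
optimizer = algorithms.GradientDescent(
    network, epoch_end_signal=on_epoch_end)
optimizer.train(x_train, y_train, epochs=10)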
Example #3
    def epoch_end(self, optimizer):
        # Early-stopping checks, run once per epoch (requires
        # `from time import time` at module level).
        self.epochsNo += 1
        currentValidationError = optimizer.errors.valid[-1]

        # Remember the epoch with the best validation error so far.
        if currentValidationError < self.minimumValidationError:
            self.minimumValidationError = currentValidationError
            self.epochSelected = self.epochsNo

        # Skip the stopping checks during the first few epochs.
        if self.epochsNo < self.epochNoCheck:
            return

        lastValidationError = optimizer.errors.valid[-2]

        # Count consecutive epochs with a rising validation error.
        if currentValidationError > lastValidationError:
            self.epochsValidationErrorIsRising += 1
        else:
            self.epochsValidationErrorIsRising = 0

        if self.epochsValidationErrorIsRising > self.maxEpochsValidationErrorIsRising:
            self.status = "Validation error rising"
            raise StopTraining("Training has been interrupted")

        if self.epochSelected > 0 and self.epochsNo - self.epochSelected > self.maxEpochsAfterLocalminimum:
            self.status = "No progress in validation error"
            raise StopTraining("Training has been interrupted")

        # Stop when the training error has been flat for 20 epochs.
        last20trainErrors = optimizer.errors.train[-20:]
        diff = max(last20trainErrors) - min(last20trainErrors)

        if diff < 1e-9:
            self.status = "Error stabilized"
            raise StopTraining("Training has been interrupted")

        # Stop when the time budget is exhausted.
        timeTaken = time() - self.timeStart

        if timeTaken > self.maximumTime:
            self.status = "Time exceeded"
            raise StopTraining("Training has been interrupted")
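The method above reads several attributes that this snippet never initializes. A plausible constructor, with a hypothetical class name and default values, might look like:

from time import time

class EarlyStoppingMonitor(object):  # hypothetical name, not in the snippet
    def __init__(self, epochNoCheck=20, maxEpochsValidationErrorIsRising=5,
                 maxEpochsAfterLocalminimum=30, maximumTime=3600):
        self.epochsNo = 0
        self.minimumValidationError = float('inf')
        self.epochSelected = 0
        self.epochsValidationErrorIsRising = 0
        self.status = "Running"
        self.timeStart = time()
        self.epochNoCheck = epochNoCheck
        self.maxEpochsValidationErrorIsRising = maxEpochsValidationErrorIsRising
        self.maxEpochsAfterLocalminimum = maxEpochsAfterLocalminimum
        self.maximumTime = maximumTime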
Example #4

from sklearn import metrics
from neupy import algorithms
from neupy.exceptions import StopTraining

def train_network(network, x_train, y_train, x_test, y_test,
                  batch_size, step):
    # Reconstructed wrapper: in the original source this body sits inside
    # an enclosing function whose exact signature is not shown here.
    def on_epoch_end(net):
        if net.errors.last() > 10:
            raise StopTraining("Training was interrupted. Error is too high.")

    mnet = algorithms.RMSProp(
        network,
        batch_size=batch_size,
        step=step,
        error='categorical_crossentropy',
        shuffle_data=True,
        epoch_end_signal=on_epoch_end,
    )

    mnet.train(x_train, y_train, epochs=50)

    score = mnet.prediction_error(x_test, y_test)

    y_predicted = mnet.predict(x_test).argmax(axis=1)
    accuracy = metrics.accuracy_score(y_test.argmax(axis=1), y_predicted)

    print("Final score: {}".format(score))
    print("Accuracy: {:.2%}".format(accuracy))

    return score
Example #5
    def one_training_update(self, X_train, y_train=None):
        # One Growing Neural Gas training pass over X_train (requires
        # `import numpy as np`, `from operator import attrgetter`,
        # `StopTraining` and the `NeuronNode` class at module level).
        graph = self.graph
        step = self.step
        neighbour_step = self.neighbour_step

        max_nodes = self.max_nodes
        max_edge_age = self.max_edge_age

        error_decay_rate = self.error_decay_rate
        after_split_error_decay_rate = self.after_split_error_decay_rate
        n_iter_before_neuron_added = self.n_iter_before_neuron_added

        # We square this value, because we deal with
        # squared distances during the training.
        min_distance_for_update = np.square(self.min_distance_for_update)

        n_samples = len(X_train)
        total_error = 0
        did_update = False

        for sample in X_train:
            nodes = graph.nodes
            weights = np.concatenate([node.weight for node in nodes])

            # Find the two neurons closest to the current sample.
            distance = np.linalg.norm(weights - sample, axis=1)
            neuron_ids = np.argsort(distance)

            closest_neuron_id, second_closest_id = neuron_ids[:2]
            closest_neuron = nodes[closest_neuron_id]
            second_closest = nodes[second_closest_id]
            total_error += distance[closest_neuron_id]

            if distance[closest_neuron_id] < min_distance_for_update:
                continue

            self.n_updates += 1
            did_update = True

            closest_neuron.error += distance[closest_neuron_id]
            closest_neuron.weight += step * (sample - closest_neuron.weight)

            # Link the two winning neurons.
            graph.add_edge(closest_neuron, second_closest)

            # Age the winner's edges; drop edges that are too old, along
            # with any neuron left disconnected, and pull the remaining
            # neighbours slightly towards the sample.
            for to_neuron in list(graph.edges_per_node[closest_neuron]):
                edge_id = graph.find_edge_id(to_neuron, closest_neuron)
                age = graph.edges[edge_id]

                if age >= max_edge_age:
                    graph.remove_edge(to_neuron, closest_neuron)

                    if not graph.edges_per_node[to_neuron]:
                        graph.remove_node(to_neuron)

                else:
                    graph.edges[edge_id] += 1
                    to_neuron.weight += neighbour_step * (
                        sample - to_neuron.weight)

            time_to_add_new_neuron = (
                self.n_updates % n_iter_before_neuron_added == 0 and
                graph.n_nodes < max_nodes)

            if time_to_add_new_neuron:
                # Insert a new neuron halfway between the neuron with the
                # largest accumulated error and its highest-error neighbour.
                nodes = graph.nodes
                largest_error_neuron = max(nodes, key=attrgetter('error'))
                neighbour_neuron = max(
                    graph.edges_per_node[largest_error_neuron],
                    key=attrgetter('error'))

                largest_error_neuron.error *= after_split_error_decay_rate
                neighbour_neuron.error *= after_split_error_decay_rate

                new_weight = 0.5 * (
                    largest_error_neuron.weight + neighbour_neuron.weight
                )
                new_neuron = NeuronNode(weight=new_weight.reshape(1, -1))

                graph.remove_edge(neighbour_neuron, largest_error_neuron)
                graph.add_node(new_neuron)
                graph.add_edge(largest_error_neuron, new_neuron)
                graph.add_edge(neighbour_neuron, new_neuron)

            # Gradually decay every neuron's accumulated error.
            for node in graph.nodes:
                node.error *= error_decay_rate

        if not did_update and min_distance_for_update != 0 and n_samples > 1:
            raise StopTraining(
                "Distance between every data sample and its closest "
                "neuron is less than {}".format(min_distance_for_update))

        return total_error / n_samples
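This method matches neupy's GrowingNeuralGas implementation. A hedged usage sketch with hypothetical data and parameter values; the keyword names mirror the attributes read above, and `n_inputs` is assumed from neupy's constructor conventions:

import numpy as np
from neupy import algorithms

data = np.random.random((1000, 2))  # hypothetical 2-D samples

gng = algorithms.GrowingNeuralGas(
    n_inputs=2,
    max_nodes=100,
    step=0.2,
    neighbour_step=0.05,
    max_edge_age=50,
    n_iter_before_neuron_added=100,
)
gng.train(data, epochs=10)
print(gng.graph.n_nodes)  # number of neurons grown during training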
Example #6
def on_epoch_end(model):
    # `goal_loss` is defined in the enclosing scope.
    if model.train_errors.last() < goal_loss:
        raise StopTraining("Training has been interrupted")
Example #7
def on_epoch_end(network):
    if network.errors.last() > 10:
        raise StopTraining("Training was interrupted. Error is to high.")
Example #8
def stop_training_after_the_5th_epoch(network):
    if network.last_epoch == 5:
        raise StopTraining("Stopped training")