Example #1
    def __init__(self, network, options=None, **kwargs):
        options = options or kwargs

        if isinstance(network, (list, tuple)):
            network = layers.join(*network)

        self.network = network

        if len(self.network.output_layers) != 1:
            n_outputs = len(network.output_layers)

            raise InvalidConnection("Connection should have one output "
                                    "layer, got {}".format(n_outputs))

        target = options.get('target')
        if target is not None and isinstance(target, (list, tuple)):
            options['target'] = tf.placeholder(tf.float32, shape=target)

        self.target = self.network.targets
        super(BaseOptimizer, self).__init__(**options)

        start_init_time = time.time()
        self.logs.message("TENSORFLOW",
                          "Initializing Tensorflow variables and functions.")

        self.variables = AttributeKeyDict()
        self.functions = AttributeKeyDict()
        # Accessing the outputs property forces the network graph to be
        # built before the training functions are compiled.
        self.network.outputs
        self.init_functions()

        self.logs.message(
            "TENSORFLOW",
            "Initialization finished successfully. It took {:.2f} seconds"
            "".format(time.time() - start_init_time))
Example #2
    def __init__(self, connection, *args, **kwargs):
        self.connection = clean_layers(connection)

        self.layers = list(self.connection)

        self.input_layer = self.layers[0]
        self.hidden_layers = self.layers[1:]
        self.output_layer = self.layers[-1]

        self.init_layers()
        super(ConstructableNetwork, self).__init__(*args, **kwargs)

        self.logs.message("THEANO", "Initializing Theano variables and "
                                    "functions.")
        start_init_time = time.time()

        self.variables = AttributeKeyDict(
            network_input=create_input_variable(
                self.input_layer, variable_name='x'
            ),
            network_output=create_output_variable(
                self.error, variable_name='y'
            ),
        )
        self.methods = AttributeKeyDict()

        self.init_variables()
        self.init_methods()

        finish_init_time = time.time()
        self.logs.message("THEANO", "Initialization finished sucessfully. "
                          "It took {:.2f} seconds"
                          "".format(finish_init_time - start_init_time))
Example #3
    def __set__(self, instance, value):
        self.validate(value)

        default_value = self.default.copy()
        default_value.update(value)
        value = default_value

        value = AttributeKeyDict(value)
        instance.__dict__[self.name] = value
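The descriptor above merges the supplied dictionary into a copy of the property's defaults and stores the result as an ``AttributeKeyDict``. A minimal, self-contained sketch of that behaviour; the ``DictProperty``/``Layer`` names and the stripped-down ``AttributeKeyDict`` are illustrative stand-ins, not the library's own classes:

class AttributeKeyDict(dict):
    """Stripped-down stand-in: exposes dictionary keys as attributes."""
    def __getattr__(self, key):
        return self[key]


class DictProperty(object):
    """Hypothetical descriptor that merges user values into defaults."""
    def __init__(self, name, default):
        self.name = name
        self.default = default

    def __set__(self, instance, value):
        merged = self.default.copy()
        merged.update(value)
        instance.__dict__[self.name] = AttributeKeyDict(merged)


class Layer(object):
    weights = DictProperty('weights', default={'init': 'uniform', 'scale': 1.0})


layer = Layer()
layer.weights = {'scale': 0.5}
print(layer.weights.init)   # 'uniform' (kept from the defaults)
print(layer.weights.scale)  # 0.5 (overridden by the user)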
Example #4
    def __init__(self, *args, **options):
        self.errors = self.train_errors = ErrorHistoryList()
        self.validation_errors = ErrorHistoryList()
        self.training = AttributeKeyDict()
        self.last_epoch = 0

        super(BaseNetwork, self).__init__(*args, **options)

        if self.verbose:
            show_network_options(self, highlight_options=options)
Example #5
    def __init__(self, *args, **kwargs):
        super(BaseAlgorithm, self).__init__(*args, **kwargs)

        self.logs.message("THEANO", "Initializing Theano variables and "
                          "functions.")
        start_init_time = time.time()

        self.variables = AttributeKeyDict()
        self.methods = AttributeKeyDict()

        self.init_input_output_variables()
        self.init_variables()
        self.init_methods()

        finish_init_time = time.time()
        self.logs.message(
            "THEANO", "Initialization finished sucessfully. "
            "It took {:.2f} seconds"
            "".format(finish_init_time - start_init_time))
Example #6
    def __new__(cls, clsname, bases, attrs):
        new_class = super(SharedDocsMeta, cls).__new__(cls, clsname,
                                                       bases, attrs)
        if new_class.__doc__ is None:
            return new_class

        class_docs = new_class.__doc__
        n_indents = find_numpy_doc_indent(class_docs)

        if n_indents is None:
            return new_class

        parameters = {}
        parent_classes = new_class.__mro__

        for parent_class in parent_classes:
            parent_docs = parent_class.__doc__

            if parent_docs is None:
                continue

            parent_name = parent_class.__name__
            parent_params = parameters[parent_name] = AttributeKeyDict()

            for name, type_, desc in iter_parameters(parent_docs):
                parent_params[name] = "{} : {}{}".format(name, type_, desc)

            for name, func_params, desc in iter_methods(parent_docs):
                parent_params[name] = ''.join([name, func_params, desc])

            doc_warns = parse_warns(parent_docs)
            if doc_warns is not None:
                parent_params['Warns'] = doc_warns

        try:
            new_class.__doc__ = new_class.__doc__.format(**parameters)
        except Exception as exception:
            exception_classname = exception.__class__.__name__
            raise SharedDocsException(
                "Can't format documentation for class `{}`. "
                "Catched `{}` exception with message: {}".format(
                    new_class.__name__,
                    exception_classname,
                    exception
                )
            )

        return new_class
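The metaclass above collects each parent's documented parameters into ``AttributeKeyDict`` instances and then fills the child docstring with ``str.format``. A small, self-contained sketch of just that formatting step; the ``{BaseNetwork.step}`` placeholder syntax and the parameter text are assumptions for illustration:

class AttributeKeyDict(dict):
    """Stripped-down stand-in: exposes dictionary keys as attributes."""
    def __getattr__(self, key):
        return self[key]


parameters = {
    'BaseNetwork': AttributeKeyDict(
        step="step : float\n        Learning rate. Defaults to ``0.1``.",
    ),
}

child_docstring = """
    Parameters
    ----------
    {BaseNetwork.step}
    """
# Attribute-style lookups in the format string reach into the AttributeKeyDict.
print(child_docstring.format(**parameters))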
Example #7
def parse_variables_from_docs(instances):
    """
    Parse NumPy-style documentation and return all the
    extracted information.

    Parameters
    ----------
    instances : list
        List of objects that have documentation.

    Returns
    -------
    dict
        Variables parsed from the documentations.
    """
    variables = {}
    # Note: We do not include the 'Examples' section, because it
    # contains the class/function name, which would be useless when
    # we inherit documentation for the new object.
    doc_sections = [
        'Warns', 'Returns', 'Yields', 'Raises', 'See Also', 'Parameters',
        'Attributes', 'Methods', 'Notes'
    ]

    if not instances:
        return variables

    for instance in instances:
        parent_docs = instance.__doc__

        if parent_docs is None:
            continue

        parent_variables = AttributeKeyDict()
        parent_variables.update(iter_parameters(parent_docs))
        parent_variables.update(iter_methods(parent_docs))

        for section_name in doc_sections:
            full_section = parse_full_section(section_name, parent_docs)

            if full_section is not None:
                parent_variables[section_name] = full_section

        parent_name = instance.__name__
        variables[parent_name] = parent_variables

    return variables
Example #8
    def test_attribute_key_dict(self):
        attrdict = AttributeKeyDict(val1='hello', val2='world')

        # Get
        self.assertEqual(attrdict.val1, 'hello')
        self.assertEqual(attrdict.val2, 'world')

        with self.assertRaises(KeyError):
            attrdict.unknown_variable

        # Set
        attrdict.new_value = 'test'
        self.assertEqual(attrdict.new_value, 'test')

        # Delete
        del attrdict.val1
        with self.assertRaises(KeyError):
            attrdict.val1
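A stripped-down implementation sketch that would pass the test above; the real class ships with the library, so this version is only an illustration of the expected behaviour:

class AttributeKeyDict(dict):
    """Dictionary that also exposes its keys as attributes."""

    def __getattr__(self, name):
        return self[name]  # raises KeyError for unknown keys, as the test expects

    def __setattr__(self, name, value):
        self[name] = value

    def __delattr__(self, name):
        del self[name]


attrdict = AttributeKeyDict(val1='hello', val2='world')
attrdict.new_value = 'test'
del attrdict.val1
print(attrdict)  # {'val2': 'world', 'new_value': 'test'}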
Example #9
    def __set__(self, instance, value):
        self.validate(value)

        if isinstance(value, init.Initializer):
            # All keys will have the same initializer
            dict_value = dict.fromkeys(self.default.keys())

            for key in dict_value:
                dict_value[key] = value

            value = dict_value

        default_value = self.default.copy()
        default_value.update(value)
        value = default_value

        value = AttributeKeyDict(value)
        instance.__dict__[self.name] = value
Example #10
    def train(self, input_train, target_train=None, input_test=None,
              target_test=None, epochs=100, epsilon=None,
              summary='table'):
        """
        Train the neural network.

        Parameters
        ----------
        input_train : array-like

        target_train : array-like or None

        input_test : array-like or None

        target_test : array-like or None

        epochs : int
            Defaults to `100`.

        epsilon : float or None
            Defaults to ``None``.
        """
        show_epoch = self.show_epoch
        logs = self.logs
        training = self.training = AttributeKeyDict()

        if epochs <= 0:
            raise ValueError("Number of epochs needs to be greater than 0.")

        if epsilon is not None and epochs <= 2:
            raise ValueError("Network should train at teast 3 epochs before "
                             "check the difference between errors")

        logging_info_about_the_data(self, input_train, input_test)
        logging_info_about_training(self, epochs, epsilon)
        logs.newline()

        if summary == 'table':
            summary = SummaryTable(
                table_builder=table.TableBuilder(
                    table.Column(name="Epoch #"),
                    table.NumberColumn(name="Train err", places=4),
                    table.NumberColumn(name="Valid err", places=4),
                    table.TimeColumn(name="Time", width=10),
                    stdout=logs.write
                ),
                network=self,
                delay_limit=1.,
                delay_history_length=10,
            )

        elif summary == 'inline':
            summary = InlineSummary(network=self)

        else:
            raise ValueError("`{}` is unknown summary type"
                             "".format(summary))

        iterepochs = create_training_epochs_iterator(self, epochs, epsilon)
        show_epoch = parse_show_epoch_property(self, epochs, epsilon)
        training.show_epoch = show_epoch

        # Storing attributes and methods in local variables prevents
        # a lot of useless __getattr__ calls in each loop iteration.
        # These variables speed up the loop when the number of
        # iterations is huge.
        training_errors = self.errors
        validation_errors = self.validation_errors
        shuffle_data = self.shuffle_data

        train_epoch = self.train_epoch
        epoch_end_signal = self.epoch_end_signal
        train_end_signal = self.train_end_signal
        on_epoch_start_update = self.on_epoch_start_update

        is_first_iteration = True
        can_compute_validation_error = (input_test is not None)
        last_epoch_shown = 0
        # Compile a Theano function that returns the eigenvalues and
        # eigenvectors of a symmetric matrix (used below to inspect the Hessian).
        symMatrix = tt.dmatrix("symMatrix")
        symEigenvalues, eigenvectors = tt.nlinalg.eig(symMatrix)
        get_Eigen = theano.function([symMatrix], [symEigenvalues, eigenvectors])
        with logs.disable_user_input():
            for epoch in iterepochs:
                validation_error = None
                epoch_start_time = time.time()
                on_epoch_start_update(epoch)

                if shuffle_data:
                    data = shuffle(*as_tuple(input_train, target_train))
                    input_train, target_train = data[:-1], data[-1]

                try:
                    train_error = train_epoch(input_train, target_train)
                    print epoch

                    # For Hessian-based algorithms, inspect the eigenvalue
                    # spectrum of the Hessian and save it to disk.
                    name = str(self)
                    if name.split('(')[0] == 'Hessian':
                        H = self.variables.hessian.get_value()
                        ev, _ = get_Eigen(H)
                        print "positive EV ", np.sum(ev > 0)
                        print "Just zero EV", np.sum(ev == 0)
                        print "Zero EV ", np.sum(ev == 0) + np.sum((ev < 0) & (ev > (np.min(ev) / 2.0)))
                        print "Neg EV ", np.sum(ev < 0)
                        print "Max EV ", np.max(ev)
                        print "Min EV ", np.min(ev)
                        np.save(str(self.itr) + '.npy', ev)
                    if can_compute_validation_error:
                        validation_error = self.prediction_error(input_test,
                                                                 target_test)

                    training_errors.append(train_error)
                    validation_errors.append(validation_error)

                    epoch_finish_time = time.time()
                    training.epoch_time = epoch_finish_time - epoch_start_time

                    if epoch % training.show_epoch == 0 or is_first_iteration:
                        summary.show_last()
                        last_epoch_shown = epoch

                    if epoch_end_signal is not None:
                        epoch_end_signal(self)

                    is_first_iteration = False

                except StopTraining as err:
                    # TODO: This notification breaks table view in terminal.
                    # I need to show it in a different way.
                    logs.message("TRAIN", "Epoch #{} stopped. {}"
                                          "".format(epoch, str(err)))
                    break

            if epoch != last_epoch_shown:
                summary.show_last()

            if train_end_signal is not None:
                train_end_signal(self)

            summary.finish()
            logs.newline()
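The instrumentation inside the loop above counts the signs of the Hessian's eigenvalues. A standalone sketch of the same bookkeeping using plain NumPy, where a random symmetric matrix stands in for the Hessian and ``numpy.linalg.eigvalsh`` replaces the compiled Theano function:

import numpy as np

rng = np.random.RandomState(0)
A = rng.randn(5, 5)
H = (A + A.T) / 2.0                  # random symmetric stand-in for the Hessian

ev = np.linalg.eigvalsh(H)           # eigenvalues of a symmetric matrix
near_zero = (ev < 0) & (ev > np.min(ev) / 2.0)   # same "zero" heuristic as above

print("positive EV {}".format(np.sum(ev > 0)))
print("zero EV     {}".format(np.sum(ev == 0) + np.sum(near_zero)))
print("negative EV {}".format(np.sum(ev < 0)))
print("max EV {:.4f}, min EV {:.4f}".format(np.max(ev), np.min(ev)))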
Example #11
    def train(self, input_train, target_train=None, input_test=None,
              target_test=None, epochs=100, epsilon=None,
              summary_type='table'):
        """ Method train neural network.

        Parameters
        ----------
        input_train : array-like
        target_train : array-like or None
        input_test : array-like or None
        target_test : array-like or None
        epochs : int
            Defaults to `100`.
        epsilon : float or None
            Defaults to ``None``.
        """

        show_epoch = self.show_epoch
        logs = self.logs
        training = self.training = AttributeKeyDict()

        if epochs <= 0:
            raise ValueError("Number of epochs needs to be greater than 0.")

        if epsilon is not None and epochs <= 2:
            raise ValueError("Network should train at teast 3 epochs before "
                             "check the difference between errors")

        if summary_type == 'table':
            logging_info_about_the_data(self, input_train, input_test)
            logging_info_about_training(self, epochs, epsilon)
            logs.newline()

            summary = SummaryTable(
                table_builder=table.TableBuilder(
                    table.Column(name="Epoch #"),
                    table.NumberColumn(name="Train err"),
                    table.NumberColumn(name="Valid err"),
                    table.TimeColumn(name="Time", width=10),
                    stdout=logs.write
                ),
                network=self,
                delay_limit=1.,
                delay_history_length=10,
            )

        elif summary_type == 'inline':
            summary = InlineSummary(network=self)

        else:
            raise ValueError("`{}` is unknown summary type"
                             "".format(summary_type))

        iterepochs = create_training_epochs_iterator(self, epochs, epsilon)
        show_epoch = parse_show_epoch_property(self, epochs, epsilon)
        training.show_epoch = show_epoch

        # Storing attributes and methods in local variables prevents
        # a lot of useless __getattr__ calls in each loop iteration.
        # These variables speed up the loop when the number of
        # iterations is huge.
        training_errors = self.errors
        validation_errors = self.validation_errors
        shuffle_data = self.shuffle_data

        train_epoch = self.train_epoch
        epoch_end_signal = self.epoch_end_signal
        train_end_signal = self.train_end_signal
        on_epoch_start_update = self.on_epoch_start_update

        is_first_iteration = True
        can_compute_validation_error = (input_test is not None)
        last_epoch_shown = 0

        with logs.disable_user_input():
            for epoch in iterepochs:
                validation_error = np.nan
                epoch_start_time = time.time()
                on_epoch_start_update(epoch)

                if shuffle_data:
                    input_train, target_train = shuffle(input_train,
                                                        target_train)
                try:
                    train_error = train_epoch(input_train, target_train)

                    if can_compute_validation_error:
                        validation_error = self.prediction_error(input_test,
                                                                 target_test)

                    training_errors.append(train_error)
                    validation_errors.append(validation_error)

                    epoch_finish_time = time.time()
                    training.epoch_time = epoch_finish_time - epoch_start_time

                    if epoch % training.show_epoch == 0 or is_first_iteration:
                        summary.show_last()
                        last_epoch_shown = epoch

                    if epoch_end_signal is not None:
                        epoch_end_signal(self)

                    is_first_iteration = False

                except StopNetworkTraining as err:
                    # TODO: This notification breaks table view in terminal.
                    # I need to show it in a different way.
                    logs.message("TRAIN", "Epoch #{} stopped. {}"
                                          "".format(epoch, str(err)))
                    break

            if epoch != last_epoch_shown:
                summary.show_last()

            if train_end_signal is not None:
                train_end_signal(self)

            summary.finish()
            logs.newline()

        logs.message("TRAIN", "Trainig finished")
Example #12
    def train(self,
              input_train,
              target_train=None,
              input_test=None,
              target_test=None,
              epochs=100,
              epsilon=None):
        """ Method train neural network.

        Parameters
        ----------
        input_train : array-like
        target_train : array-like or None
        input_test : array-like or None
        target_test : array-like or None
        epochs : int
            Defaults to `100`.
        epsilon : float or None
        """

        show_epoch = self.show_epoch
        logs = self.logs
        training = self.training = AttributeKeyDict()

        if epochs <= 0:
            raise ValueError("Number of epochs needs to be greater than 0.")

        if epsilon is not None and epochs <= 2:
            raise ValueError("Network should train at teast 3 epochs before "
                             "check the difference between errors")

        logging_info_about_the_data(self, input_train, input_test)
        logging_info_about_training(self, epochs, epsilon)
        logs.write("")

        iterepochs = create_training_epochs_iterator(self, epochs, epsilon)
        show_epoch = parse_show_epoch_property(self, epochs, epsilon)
        training.show_epoch = show_epoch

        epoch_summary = show_epoch_summary(self)
        next(epoch_summary)

        # Storing attributes and methods in local variables prevents
        # a lot of useless __getattr__ calls in each loop iteration.
        # These variables speed up the loop when the number of
        # iterations is huge.
        errors = self.errors
        validation_errors = self.validation_errors
        shuffle_data = self.shuffle_data

        train_epoch = self.train_epoch
        epoch_end_signal = self.epoch_end_signal
        train_end_signal = self.train_end_signal
        on_epoch_start_update = self.on_epoch_start_update

        is_first_iteration = True
        can_compute_validation_error = (input_test is not None)
        last_epoch_shown = 0

        with logs.disable_user_input():
            for epoch in iterepochs:
                epoch_start_time = time.time()
                on_epoch_start_update(epoch)

                if shuffle_data:
                    input_train, target_train = shuffle(
                        input_train, target_train)
                try:
                    train_error = train_epoch(input_train, target_train)

                    if can_compute_validation_error:
                        validation_error = self.prediction_error(
                            input_test, target_test)
                        validation_errors.append(validation_error)

                    # It's important that we store the training error
                    # only after the validation error has been stored.
                    errors.append(train_error)

                    epoch_finish_time = time.time()
                    training.epoch_time = epoch_finish_time - epoch_start_time

                    if epoch % training.show_epoch == 0 or is_first_iteration:
                        next(epoch_summary)
                        last_epoch_shown = epoch

                    if epoch_end_signal is not None:
                        epoch_end_signal(self)

                    is_first_iteration = False

                except StopNetworkTraining as err:
                    # TODO: This notification breaks the table view in the
                    # terminal. I need to show it in a different way. Maybe
                    # I can send it to the generator using the ``throw`` method.
                    logs.message(
                        "TRAIN", "Epoch #{} stopped. {}"
                        "".format(epoch, str(err)))

            if epoch != last_epoch_shown:
                next(epoch_summary)

            if train_end_signal is not None:
                train_end_signal(self)

            epoch_summary.close()

        logs.message("TRAIN", "Trainig finished")
Example #13
    def train(self,
              input_train,
              target_train=None,
              input_test=None,
              target_test=None,
              epochs=100,
              epsilon=None,
              summary='table'):
        """
        Train the neural network.

        Parameters
        ----------
        input_train : array-like

        target_train : array-like or None

        input_test : array-like or None

        target_test : array-like or None

        epochs : int
            Defaults to `100`.

        epsilon : float or None
            Defaults to ``None``.
        """
        show_epoch = self.show_epoch
        logs = self.logs
        training = self.training = AttributeKeyDict()

        if epochs <= 0:
            raise ValueError("Number of epochs needs to be greater than 0.")

        if epsilon is not None and epochs <= 2:
            raise ValueError("Network should train at teast 3 epochs before "
                             "check the difference between errors")

        logging_info_about_the_data(self, input_train, input_test)
        logging_info_about_training(self, epochs, epsilon)
        logs.newline()

        if summary == 'table':
            summary = SummaryTable(
                columns=['Epoch', 'Train err', 'Valid err', 'Time'],
                network=self,
            )

        elif summary == 'inline':
            summary = InlineSummary(network=self)

        else:
            raise ValueError("`{}` is unknown summary type" "".format(summary))

        iterepochs = create_training_epochs_iterator(self, epochs, epsilon)
        show_epoch = parse_show_epoch_property(self, epochs, epsilon)
        training.show_epoch = show_epoch
        training.epoch_time = 0

        # Storing attributes and methods in local variables prevents
        # a lot of useless __getattr__ calls in each loop iteration.
        # These variables speed up the loop when the number of
        # iterations is huge.
        training_errors = self.errors
        validation_errors = self.validation_errors
        shuffle_data = self.shuffle_data

        train_epoch = self.train_epoch
        epoch_end_signal = self.epoch_end_signal
        train_end_signal = self.train_end_signal
        on_epoch_start_update = self.on_epoch_start_update

        is_first_iteration = True
        can_compute_validation_error = (input_test is not None)
        last_epoch_shown = 0

        with logs.disable_user_input():
            for epoch in iterepochs:
                validation_error = None
                epoch_start_time = time.time()
                on_epoch_start_update(epoch)

                if shuffle_data:
                    data = shuffle(*as_tuple(input_train, target_train))
                    input_train, target_train = data[:-1], data[-1]

                    if len(input_train) == 1:
                        input_train = input_train[0]

                try:
                    train_error = train_epoch(input_train, target_train)

                    if can_compute_validation_error:
                        validation_error = self.prediction_error(
                            input_test, target_test)

                    training_errors.append(train_error)
                    validation_errors.append(validation_error)

                    epoch_finish_time = time.time()
                    training.epoch_time = epoch_finish_time - epoch_start_time

                    if epoch % training.show_epoch == 0 or is_first_iteration:
                        summary.show_last()
                        last_epoch_shown = epoch

                    if epoch_end_signal is not None:
                        epoch_end_signal(self)

                    is_first_iteration = False

                except StopTraining as err:
                    summary.finish()
                    logs.message(
                        "TRAIN", "Epoch #{} stopped. {}"
                        "".format(epoch, str(err)))
                    break

            if epoch != last_epoch_shown:
                summary.show_last()

            if train_end_signal is not None:
                train_end_signal(self)

            summary.finish()
            logs.newline()
Example #14
    def train(self, input_train, target_train=None, input_test=None,
              target_test=None, epochs=100, epsilon=None,
              summary_type='table'):
        """
        Train the neural network.

        Parameters
        ----------
        input_train : array-like
        target_train : array-like or None
        input_test : array-like or None
        target_test : array-like or None
        epochs : int
            Defaults to `100`.
        epsilon : float or None
            Defaults to ``None``.
        """
        show_epoch = self.show_epoch
        logs = self.logs
        training = self.training = AttributeKeyDict()

        if epochs <= 0:
            raise ValueError("Number of epochs needs to be greater than 0.")

        if epsilon is not None and epochs <= 2:
            raise ValueError("Network should train at teast 3 epochs before "
                             "check the difference between errors")

        if summary_type == 'table':
            logging_info_about_the_data(self, input_train, input_test)
            logging_info_about_training(self, epochs, epsilon)
            logs.newline()

            summary = SummaryTable(
                table_builder=table.TableBuilder(
                    table.Column(name="Epoch #"),
                    table.NumberColumn(name="Train err"),
                    table.NumberColumn(name="Valid err"),
                    table.TimeColumn(name="Time", width=10),
                    stdout=logs.write
                ),
                network=self,
                delay_limit=1.,
                delay_history_length=10,
            )

        elif summary_type == 'inline':
            summary = InlineSummary(network=self)

        else:
            raise ValueError("`{}` is unknown summary type"
                             "".format(summary_type))

        iterepochs = create_training_epochs_iterator(self, epochs, epsilon)
        show_epoch = parse_show_epoch_property(self, epochs, epsilon)
        training.show_epoch = show_epoch

        # Storing attributes and methods in local variables prevents
        # a lot of useless __getattr__ calls in each loop iteration.
        # These variables speed up the loop when the number of
        # iterations is huge.
        training_errors = self.errors
        validation_errors = self.validation_errors
        shuffle_data = self.shuffle_data

        train_epoch = self.train_epoch
        epoch_end_signal = self.epoch_end_signal
        train_end_signal = self.train_end_signal
        on_epoch_start_update = self.on_epoch_start_update

        is_first_iteration = True
        can_compute_validation_error = (input_test is not None)
        last_epoch_shown = 0


        # Compile a Theano function that returns the eigenvalues and
        # eigenvectors of a symmetric matrix (used to inspect the Hessian
        # after every epoch).
        symMatrix = tt.dmatrix("symMatrix")
        symEigenvalues, eigenvectors = tt.nlinalg.eig(symMatrix)
        get_Eigen = theano.function([symMatrix], [symEigenvalues, eigenvectors])

        # Bookkeeping for the plots below. Note that `epsilon` reuses the name
        # of the (already consumed) stopping-criterion argument.
        epsilon = []    # training error per epoch
        alpha = []      # number of negative Hessian eigenvalues per epoch
        alpha0 = []     # number of exactly-zero Hessian eigenvalues per epoch
        with logs.disable_user_input():
            for epoch in iterepochs:
                validation_error = None
                epoch_start_time = time.time()
                on_epoch_start_update(epoch)

                if shuffle_data:
                    input_train, target_train = shuffle(input_train,
                                                        target_train)
                try:
                    train_error = train_epoch(input_train, target_train)
                    H = self.variables.hessian.get_value()
                    ev, _ = get_Eigen(H)
                    if can_compute_validation_error:
                        validation_error = self.prediction_error(input_test,
                                                                 target_test)
                    epsilon.append(train_error)
                    alpha.append(numpy.sum(ev < 0))
                    alpha0.append(numpy.sum(ev == 0))
                    
                    training_errors.append(train_error)
                    validation_errors.append(validation_error)

                    epoch_finish_time = time.time()
                    training.epoch_time = epoch_finish_time - epoch_start_time

                    if epoch % training.show_epoch == 0 or is_first_iteration:
                        summary.show_last()
                        last_epoch_shown = epoch

                    if epoch_end_signal is not None:
                        epoch_end_signal(self)

                    is_first_iteration = False

                except StopNetworkTraining as err:
                    # TODO: This notification breaks table view in terminal.
                    # I need to show it in a different way.
                    logs.message("TRAIN", "Epoch #{} stopped. {}"
                                          "".format(epoch, str(err)))
                    break

            if epoch != last_epoch_shown:
                summary.show_last()

            if train_end_signal is not None:
                train_end_signal(self)

            summary.finish()
            logs.newline()
            # Plot the eigenvalue counts against the training error and
            # save the figures as EPS files.
            plt.plot(alpha, epsilon, 'r')
            plt.plot(alpha0, epsilon, 'b')
            plt.xlabel('alpha')
            plt.ylabel('epsilon')

            # Collect the output of `print self.connection` in a variable and
            # keep only its first line for use in the file names.
            capture = StringIO()
            capture.truncate(0)
            save_stdout = sys.stdout
            sys.stdout = capture
            print self.connection
            sys.stdout = save_stdout
            connection_name = capture.getvalue().split('\n')[0]
            class_name = self.class_name()

            filename = connection_name + '---' + class_name + '-alpha-epsilon.eps'
            plt.savefig(filename, format='eps', dpi=1000)

            plt.plot(iterepochs, epsilon)
            plt.xlabel('iterepochs')
            plt.ylabel('epsilon')
            filename = connection_name + '---' + class_name + '-epsilon-iterepochs.eps'
            plt.savefig(filename, format='eps', dpi=1000)