コード例 #1
ファイル: training.py プロジェクト: paulfun92/simplelearn
    def __init__(self, inputs, input_iterator, epoch_callbacks):
        '''
        Parameters
        ----------

        inputs: sequence of theano.gof.Variables
          Symbols for the outputs of the input_iterator.

        input_iterator: simplelearn.data.DataIterator
          Yields tuples of validation set batches, such as (values, labels).

        epoch_callbacks: Sequence of EpochCallbacks.
        '''

        #
        # Checks inputs
        #

        assert_is_instance(inputs, Sequence)
        assert_all_is_instance(inputs, theano.gof.Variable)

        assert_is_instance(input_iterator, DataIterator)
        assert_true(input_iterator.next_is_new_epoch())

        assert_is_instance(epoch_callbacks, Sequence)
        assert_greater(len(epoch_callbacks), 0)
        assert_all_is_instance(epoch_callbacks, EpochCallback)

        #
        # Sets members
        #

        self._input_iterator = input_iterator

        symbols_to_compute = []
        update_pairs = OrderedDict()

        for epoch_callback in epoch_callbacks:
            if isinstance(epoch_callback, IterationCallback):
                callback_symbols = [node.output_symbol for node
                                    in epoch_callback.nodes_to_compute]
                symbols_to_compute.extend(callback_symbols)
                update_pairs.update(epoch_callback.update_pairs)

        if len(update_pairs) > 0:
            warnings.warn("Are you sure you meant to pass IterationCallbacks "
                          "with update pairs to a ValidationCallback? "
                          "ValidationCallbacks are generally supposed to "
                          "operate without side-effects.")

        if any(not isinstance(c, IterationCallback) for c in epoch_callbacks):
            warnings.warn("It's rare to pass in a non-IterationCallback to "
                          "ValidationCallback. Did you mean to do this?")

        self._epoch_callbacks = epoch_callbacks

        self._update_function = theano.function(inputs,
                                                symbols_to_compute,
                                                updates=update_pairs)
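
A minimal driver sketch for the validation object above (hypothetical; not part of simplelearn). It reuses only the iterator/compiled-function pattern that the train() listings further down this page also follow:

    def run_validation(validation_callback, num_batches):
        # Feed each validation batch through the compiled Theano function;
        # the flat output list is then sliced per IterationCallback, exactly
        # as in the train() loops below.
        for _ in range(num_batches):
            batch = validation_callback._input_iterator.next()  # e.g. (values, labels)
            outputs = validation_callback._update_function(*batch)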
コード例 #2
    def __init__(self,
                 inputs,
                 input_iterator,
                 parameter_updaters,
                 parameters,
                 callbacks,
                 theano_function_mode=None):

        '''
        Parameters
        ----------

        inputs: sequence of Nodes.
          Symbols for the outputs of the input_iterator.
          These should come from input_iterator.make_input_nodes()

        input_iterator: simplelearn.data.DataIterator
          Yields tuples of training set batches, such as (values, labels).

        parameter_updaters: sequence of ParameterUpdaters
          Updaters for the corresponding elements of <parameters>.

        parameters: sequence of theano.tensor.sharedvar.SharedVariables
          The parameters this trainer modifies to lower the cost.

        callbacks: Sequence of EpochCallbacks
          This includes subclasses like IterationCallback &
          ParameterUpdater. One of these callbacks must throw a StopTraining
          exception for the training to halt.

        theano_function_mode: theano.compile.Mode
          Optional. The 'mode' argument to pass to theano.function().
          An example: pylearn2.devtools.nan_guard.NanGuard()
        '''

        #
        # sanity-checks the arguments.
        #

        assert_all_is_instance(inputs, Node)
        assert_is_instance(input_iterator, DataIterator)
        assert_true(input_iterator.next_is_new_epoch())

        for (input,
             iterator_input) in safe_izip(inputs,
                                          input_iterator.make_input_nodes()):
            assert_equal(input.output_format, iterator_input.output_format)

        assert_equal(len(callbacks),
                     len(frozenset(callbacks)),
                     "There were duplicate callbacks.")

        assert_all_is_instance(callbacks, EpochCallback)

        #
        # Sets members
        #

        self._inputs = inputs
        self._input_iterator = input_iterator
        self._theano_function_mode = theano_function_mode
        self.epoch_callbacks = list(callbacks)
        self._train_called = False
        self.parameter_updaters = parameter_updaters
        self.parameters = parameters
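
The duplicate-callback check above compares len(callbacks) with len(frozenset(callbacks)), which only requires the callbacks to be hashable. A self-contained illustration of the idiom:

    class Callback(object):  # stand-in for EpochCallback
        pass

    a, b = Callback(), Callback()
    assert len([a, b]) == len(frozenset([a, b]))        # all unique
    assert len([a, b, a]) != len(frozenset([a, b, a]))  # duplicate detected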
コード例 #3
    def __init__(self,
                inputs,
                input_iterator,
                parameters,
                old_parameters,
                parameter_updaters,
                iterator_full_gradient,
                epoch_callbacks,
                theano_function_mode=None):


        #
        # sanity-checks the arguments.
        #

        assert_all_is_instance(inputs, Node)
        assert_is_instance(input_iterator, DataIterator)
        assert_true(input_iterator.next_is_new_epoch())

        for (input,
             iterator_input) in safe_izip(inputs,
                                          input_iterator.make_input_nodes()):
            assert_equal(input.output_format, iterator_input.output_format)

        assert_equal(len(epoch_callbacks),
                     len(frozenset(epoch_callbacks)),
                     "There were duplicate callbacks.")

        assert_all_is_instance(epoch_callbacks, EpochCallback)


        #
        # Sets members
        #

        self._input_iterator = input_iterator
        self._parameters = tuple(parameters)
        self._old_parameters = tuple(old_parameters)
        self._parameter_updaters = tuple(parameter_updaters)
        self._theano_function_mode = theano_function_mode
        self._inputs = tuple(inputs)

        input_symbols = [i.output_symbol for i in self._inputs]

        self.epoch_callbacks = tuple(epoch_callbacks)

        self._train_called = False

        self.new_epoch = True
        self.method = self._parameter_updaters[0].method
        self.update_function = self._compile_update_function(input_symbols)
        self.full_gradient_function = \
            self._compile_full_gradient_update_function(input_symbols)

        self.full_gradient_iterator = iterator_full_gradient
        total_size_dataset = self.full_gradient_iterator.dataset.tensors[0].shape[0]
        batch_size = self.full_gradient_iterator.batch_size
        self.batches_in_epoch_full = total_size_dataset // batch_size
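
batches_in_epoch_full is later used as a loop count, so it must be an integer; floor division guarantees this under both Python 2 and Python 3. A quick check with illustrative numbers:

    total_size_dataset, batch_size = 50000, 128
    batches = total_size_dataset // batch_size  # trailing partial batch dropped
    assert batches == 390 and isinstance(batches, int)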
コード例 #4
    def __init__(
        self, inputs, parameters, gradient, learning_rate, training_iterator, scalar_loss, batch_size, epoch_callbacks
    ):

        #
        # sanity-checks the arguments.
        #

        assert_all_is_instance(inputs, Node)
        assert_is_instance(training_iterator, DataIterator)
        # assert_true(training_iterator.next_is_new_epoch())

        """
            for (input,
                 iterator_input) in safe_izip(inputs,
                                              training_iterator.make_input_nodes()):
                assert_equal(input.output_format, iterator_input.output_format)
            """

        assert_equal(len(epoch_callbacks), len(frozenset(epoch_callbacks)), "There were duplicate callbacks.")

        assert_all_is_instance(epoch_callbacks, EpochCallback)

        #
        # Sets members
        #

        self.parameters = parameters
        self.training_iterator = training_iterator
        self.learning_rate = learning_rate

        input_symbols = [i.output_symbol for i in inputs]

        self.epoch_callbacks = tuple(epoch_callbacks)

        self._train_called = False

        self.gradient_function = theano.function(input_symbols, gradient)
        self.loss_function = theano.function(input_symbols, scalar_loss)

        self.new_epoch = True
        #        self.method = self._parameter_updaters[0].method

        total_size_dataset = self.training_iterator.dataset.tensors[0].shape[0]
        self.batches_in_epoch = total_size_dataset // batch_size

        batch_size_for_calculation = self.training_iterator.batch_size
        assert_less_equal(batch_size_for_calculation, batch_size)

        self.calculating_gradient_steps = batch_size // batch_size_for_calculation
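
calculating_gradient_steps implies the gradient of one logical batch is accumulated over several smaller sub-batches. A hedged sketch of such a driver loop (gradient_function and the iterator follow the listing above; the loop itself is not from the source):

    import numpy

    def averaged_gradient(gradient_function, iterator, steps):
        # Average the gradient over `steps` sub-batches that individually fit
        # in memory; for a mean loss this matches the big-batch gradient.
        total = None
        for _ in range(steps):
            sub_batch = iterator.next()
            grad = numpy.asarray(gradient_function(*sub_batch))
            total = grad if total is None else total + grad
        return total / float(steps)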
コード例 #5
ファイル: __init__.py プロジェクト: SuperElectric/poselearn
def load_h5_saveables(group, sequence, **kwargs):
    '''
    Loads the states of a sequence of H5Saveables.

    The elements' names will be expected to be '0', '1', etc.
    '''
    assert_all_is_instance(sequence, H5Saveable)
    assert_is_instance(group, h5py.Group)
    assert_equal(len(group), len(sequence))

    assert_equal(group.attrs['class'], 'A sequence of H5Saveables')

    for i, element in enumerate(sequence):
        element.load_from_h5(group[str(i)], **kwargs)
コード例 #6
ファイル: __init__.py プロジェクト: SuperElectric/poselearn
def save_h5_saveables(sequence, group):
    '''
    Saves the states of a sequence of H5Saveables to an empty group.

    The elements will be added to the group in order with names '0', '1', etc.
    '''
    assert_all_is_instance(sequence, H5Saveable)
    assert_is_instance(group, h5py.Group)
    assert_equal(len(group), 0)

    group.attrs['class'] = 'A sequence of H5Saveables'

    for i, element in enumerate(sequence):
        element.save_to_h5(group.create_group(str(i)))
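
A round-trip sketch for the two helpers above (hypothetical: my_saveables stands for any sequence of concrete H5Saveable objects):

    import h5py

    with h5py.File('states.h5', 'w') as h5:
        save_h5_saveables(my_saveables, h5.create_group('states'))

    with h5py.File('states.h5', 'r') as h5:
        load_h5_saveables(h5['states'], my_saveables)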
コード例 #7
ファイル: memmap_dataset.py プロジェクト: imclab/simplelearn
def make_memmap_file(path, num_examples, tensor_names, tensor_formats):
    '''
    Allocates a memmap file on disk. Overwrites if necessary.

    Parameters
    ----------

    path: str
      Path to file.

    num_examples: int
      # of examples in this dataset.

    tensor_names: Iterable of strings
      Names of the tensors in this dataset.

    tensor_formats: Iterable of simplelearn.format.DenseFormats
      Formats of the tensors. MemmapDataset requires that the batch axis be the
      first axis.
    '''
    assert_is_instance(path, basestring)
    assert_greater(num_examples, 0)
    assert_equal(len(tensor_names), len(tensor_formats))

    assert_all_is_instance(tensor_names, basestring)
    assert_all_is_instance(tensor_formats, DenseFormat)

    # We store datasets in a single structured array, so the batch axis must
    # be the first axis for all tensors.
    for tensor_format in tensor_formats:
        assert_equal(tensor_format.axes.index('b'), 0)

    dtype_dict = {
        'names': tensor_names,
        'formats': [(fmt.dtype, fmt.shape[1:]) for fmt in tensor_formats],
        'titles': [fmt.axes for fmt in tensor_formats]}

    memmap_file = numpy.lib.format.open_memmap(path,
                                               mode='w+',
                                               dtype=dtype_dict,
                                               shape=(num_examples, ))

    return memmap_file
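
A usage sketch, assuming the DenseFormat constructor shown in the make_h5_file docstring below (axes/shape/dtype keywords, batch axis 'b' first with -1 as its placeholder size):

    fmt_images = DenseFormat(axes=('b', '0', '1'), shape=(-1, 28, 28), dtype='uint8')
    fmt_labels = DenseFormat(axes=('b',), shape=(-1,), dtype='uint8')

    memmap = make_memmap_file('/tmp/dataset.npy', 60000,
                              ['images', 'labels'],
                              [fmt_images, fmt_labels])
    memmap['images'][0] = 0  # fill rows with real data, then memmap.flush()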
コード例 #8
ファイル: __init__.py プロジェクト: SuperElectric/poselearn
def __check_arg_types(input_node,
                      yaml_dict,
                      use_dropout,
                      numpy_rng,
                      theano_rng,
                      output_list,
                      output_size=None):
    assert_is_instance(input_node, Node)
    assert_is_instance(yaml_dict, dict)
    assert_is_instance(use_dropout, bool)
    assert_is_instance(numpy_rng, numpy.random.RandomState)

    if use_dropout:
        assert_is_instance(theano_rng, RandomStreams)
    else:
        assert_true(theano_rng is None or isinstance(theano_rng, RandomStreams))

    assert_is_instance(output_list, list)
    assert_all_is_instance(output_list, Node)

    if output_size is not None:
        assert_integer(output_size)
コード例 #9
    def train(self):
        '''
        Runs training until a StopTraining exception is raised.

        Training runs indefinitely until one of self.epoch_callbacks raises
        a StopTraining exception.
        '''

        if self._train_called:
            raise RuntimeError("train() has already been called on this %s. "
                               "Re-running train() risks inadvertently "
                               "carrying over implicit state from the "
                               "previous training run, such as the direction "
                               "of parameter updates (via the momentum "
                               "term), or the internal state of the Monitors "
                               "or EpochCallbacks. Instead, instantiate a new "
                               "copy of this %s and run train() on that." %
                               (type(self), type(self)))

        self._train_called = True

        if len(self.epoch_callbacks) == 0:
            raise RuntimeError("self.epoch_callbacks is empty, so Sgd will "
                               "iterate through the training data forever. "
                               "Please add an EpochCallback that will throw a "
                               "StopTraining exception at some point.")

        assert_all_is_instance(self.epoch_callbacks, EpochCallback)

        #
        # End sanity checks
        #

        # Overlaps with self.epoch_callbacks
        iteration_callbacks = [e for e in self.epoch_callbacks
                               if (isinstance(e, IterationCallback) and
                                   not isinstance(e, EpochTimer2))]

        try:

            for epoch_callback in self.epoch_callbacks:
                epoch_callback.on_start_training()

            # Set initial parameters for SemiSGD:
            # max_stochastic_steps_per_epoch = max # of stochastic steps per epoch
            # v = lower bound on the constant of the strongly convex loss function
            # stochastic_steps = # of stochastic steps taken in an epoch, calculated geometrically.

            total_size_dataset = self._input_iterator.dataset.tensors[0].shape[0]
            batch_size = self._input_iterator.batch_size
            self.stochastic_steps = total_size_dataset // batch_size
            epoch_counter = 1

            if self.method == 'S2GD':
                max_stochastic_steps_per_epoch = total_size_dataset // batch_size
                v = 0.05
                learning_rate = self._parameter_updaters[0].learning_rate.get_value()
                # Normalization constant for the geometric distribution over
                # the number of stochastic steps in an epoch:
                prob_sum = 0.0
                for t in range(1, max_stochastic_steps_per_epoch + 1):
                    prob_sum += pow(1 - v * learning_rate,
                                    max_stochastic_steps_per_epoch - t)

            while True:

                if self.method == 'S2GD':
                    # Sample the number of stochastic steps for this epoch
                    # from the truncated geometric distribution:
                    cumulative_prob = 0.0
                    rand = numpy.random.uniform(0, 1)
                    for t in range(1, max_stochastic_steps_per_epoch + 1):
                        prob = pow(1 - v * learning_rate,
                                   max_stochastic_steps_per_epoch - t) / prob_sum
                        cumulative_prob += prob
                        if rand < cumulative_prob:
                            self.stochastic_steps = t
                            break

                # Run the semi-stochastic gradient descent main loop
                for t in range(self.stochastic_steps):
                    # Now take a step:
                    all_callback_outputs = self.semi_sgd_step(epoch_counter)

                    # calls iteration_callbacks' on_iteration() method, passing
                    # in their output values, if any.
                    output_index = 0
                    for iteration_callback in iteration_callbacks:
                        num_outputs = len(iteration_callback.nodes_to_compute)
                        new_output_index = output_index + num_outputs

                        assert_less_equal(new_output_index,
                                          len(all_callback_outputs))

                        outputs = \
                            all_callback_outputs[output_index:new_output_index]

                        iteration_callback.on_iteration(outputs)

                        output_index = new_output_index

                    assert_equal(output_index, len(all_callback_outputs))

                    # if we've iterated through an epoch, call epoch_callbacks'
                    # on_epoch() methods.
                    #if self._input_iterator.next_is_new_epoch():
                for epoch_callback in self.epoch_callbacks:
                    x = epoch_callback.on_epoch()

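                # Forward the last epoch callback's output to its first
                # sub-callback (presumably a logging/stopping hook).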
                self.epoch_callbacks[-1].callbacks[0](x, None)
                self.new_epoch = True
                epoch_counter += 1

        except StopTraining as exception:
            if exception.status == 'ok':
                print("Training halted normally with message: {}".format(
                    exception.message))
                return
            else:
                raise
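
The S2GD step-count sampling in the loop above, as a standalone vectorized sketch (illustrative; M is max_stochastic_steps_per_epoch, v the strong-convexity lower bound, h the learning rate):

    import numpy

    def sample_stochastic_steps(M, v, h, rng=numpy.random):
        # P(t) is proportional to (1 - v*h)**(M - t), for t = 1..M.
        t_values = numpy.arange(1, M + 1)
        probs = (1.0 - v * h) ** (M - t_values)
        probs /= probs.sum()
        return rng.choice(t_values, p=probs)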
コード例 #10
    def train(self):
        """
        Runs training until a StopTraining exception is raised.

        Training runs indefinitely until one of self.epoch_callbacks raises
        a StopTraining exception.
        """

        if self._train_called:
            raise RuntimeError(
                "train() has already been called on this %s. "
                "Re-running train() risks inadvertently "
                "carrying over implicit state from the "
                "previous training run, such as the direction "
                "of parameter updates (via the momentum "
                "term), or the internal state of the Monitors "
                "or EpochCallbacks. Instead, instantiate a new "
                "copy of this %s and run train() on that." % (type(self), type(self))
            )

        self._train_called = True

        if len(self.epoch_callbacks) == 0:
            raise RuntimeError(
                "self.epoch_callbacks is empty, so Sgd will "
                "iterate through the training data forever. "
                "Please add an EpochCallback that will throw a "
                "StopTraining exception at some point."
            )

        assert_all_is_instance(self.epoch_callbacks, EpochCallback)

        #
        # End sanity checks
        #

        # Overlaps with self.epoch_callbacks
        iteration_callbacks = [c for c in self.epoch_callbacks if isinstance(c, IterationCallback)]

        try:

            for epoch_callback in self.epoch_callbacks:
                epoch_callback.on_start_training()

            while True:

                for _ in range(self.batches_in_epoch):

                    self.SGD_step()

                print(" ")

                # Epoch callbacks after epoch
                for epoch_callback in self.epoch_callbacks:
                    x = epoch_callback.on_epoch()

                self.epoch_callbacks[-1].callbacks[0](x, None)

        except StopTraining as exception:
            if exception.status == "ok":
                print("Training halted normally with message: {}".format(exception.message))
                return
            else:
                raise
コード例 #11
    def train(self):
        '''
        Runs training until a StopTraining exception is raised.

        Training runs indefinitely until one of self.epoch_callbacks raises
        a StopTraining exception.
        '''

        if self._train_called:
            raise RuntimeError("train() has already been called on this %s. "
                               "Re-running train() risks inadvertently "
                               "carrying over implicit state from the "
                               "previous training run, such as the direction "
                               "of parameter updates (via the momentum "
                               "term), or the internal state of the Monitors "
                               "or EpochCallbacks. Instead, instantiate a new "
                               "copy of this %s and run train() on that." %
                               (type(self), type(self)))

        self._train_called = True

        if len(self.epoch_callbacks) + len(self._monitors) == 0:
            raise RuntimeError("self._monitors and self.epoch_callbacks are "
                               "both empty, so this will "
                               "iterate through the training data forever. "
                               "Please add an EpochCallback or "
                               "Monitor that will throw a "
                               "StopTraining exception at some point.")

        assert_all_is_instance(self.epoch_callbacks, EpochCallback)

        #
        # End sanity checks
        #

        update_function = self._compile_update_function()

        # Monitors are updated per batch; monitors and epoch callbacks are
        # both notified at epoch boundaries.
        all_callbacks = tuple(self._monitors) + tuple(self.epoch_callbacks)

        try:
            for epoch_callback in self.epoch_callbacks:
                epoch_callback.on_start_training()

            while True:

                # gets batch of data
                cost_arguments = self._input_iterator.next()

                # fprop-bprop, updates parameters
                # pylint: disable=star-args
                outputs = update_function(*cost_arguments)

                # updates monitors
                output_index = 0
                for monitor in self._monitors:
                    new_output_index = (output_index +
                                        len(monitor.monitored_values))
                    assert_less_equal(new_output_index, len(outputs))
                    monitored_values = outputs[output_index:new_output_index]

                    monitor.on_batch(cost_arguments, monitored_values)

                    output_index = new_output_index

                # calls epoch callbacks, if we've iterated through an epoch
                if self._input_iterator.next_is_new_epoch():
                    for callback in all_callbacks:
                        callback.on_epoch()

        except StopTraining as exception:
            if exception.status == 'ok':
                print("Stopped training with message: %s" % exception.message)
                return
            else:
                raise
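
The per-batch output-slicing bookkeeping used in this and several other train() listings, in isolation: each monitor or callback consumes a contiguous slice of the flat output list, and the slices must tile it exactly (illustrative values):

    outputs = [0.25, 0.9, 3]       # flat outputs of the update function
    outputs_per_callback = [2, 1]  # e.g. a 2-value monitor and a 1-value one
    index = 0
    for num in outputs_per_callback:
        chunk = outputs[index:index + num]  # hand `chunk` to the callback
        index += num
    assert index == len(outputs)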
コード例 #12
    def __init__(self,
                inputs,
                parameters,
                gradient,
                learning_rate,
                training_iterator,
                validation_iterator,
                scalar_loss,
                armijo,
                tangent,
                batch_size,
                epoch_callbacks,
                param_shapes=None):


        #
        # sanity-checks the arguments.
        #

        assert_all_is_instance(inputs, Node)
        assert_is_instance(training_iterator, DataIterator)
        #assert_true(training_iterator.next_is_new_epoch())

        '''
        for (input,
             iterator_input) in safe_izip(inputs,
                                          training_iterator.make_input_nodes()):
            assert_equal(input.output_format, iterator_input.output_format)
        '''

        assert_equal(len(epoch_callbacks),
                     len(frozenset(epoch_callbacks)),
                     "There were duplicate callbacks.")

        assert_all_is_instance(epoch_callbacks, EpochCallback)


        #
        # Sets members
        #

        self.armijo = armijo
        self.tangent = tangent
        self.parameters = parameters
        self.training_iterator = training_iterator
        self.validation_iterator = validation_iterator
        self.learning_rate = learning_rate

        input_symbols = [i.output_symbol for i in inputs]

        self.epoch_callbacks = tuple(epoch_callbacks)

        self._train_called = False

        self.classification_errors = numpy.asarray([])

        self.gradient_function = theano.function(input_symbols, gradient)

        '''
        output_symbols = []

        iteration_callbacks = [e for e in self.epoch_callbacks
                               if isinstance(e, IterationCallback)]

        for iteration_callback in iteration_callbacks:
            for node_to_compute in iteration_callback.nodes_to_compute:
                output_symbols.append(node_to_compute.output_symbol)

        self.function_outputs = theano.function(input_symbols, output_symbols)

        self.full_training_iterator = training_set.iterator(iterator_type='sequential',
                                                        loop_style='divisible',
                                                        batch_size=50000)
        '''

        '''
        self.full_training_iterator = training_set.iterator(iterator_type='sequential',
                                                        loop_style='divisible',
                                                        batch_size=10000)

        self.training_iterator2 = training_set.iterator(iterator_type='sequential',
                                                        loop_style='divisible',
                                                        batch_size=1000)
        '''

        self.loss_function = theano.function(input_symbols, scalar_loss)

        self.new_epoch = True
#        self.method = self._parameter_updaters[0].method

        self.param_shapes = param_shapes
        # Initialize saved variables:
        self.y = []
        self.s = []
        self.rho = []
        self.grad_log = []
        self.k = 0  # counter

        '''
        total_size_dataset_full = self.full_training_iterator.dataset.tensors[0].shape[0]
        batch_size_full =self.full_training_iterator.batch_size
        self.batches_in_epoch_full_gradient = total_size_dataset_full / batch_size_full
        '''

        total_size_dataset = self.training_iterator.dataset.tensors[0].shape[0]
        self.batches_in_epoch = total_size_dataset // batch_size

        batch_size_for_calculation = self.training_iterator.batch_size
        assert_less_equal(batch_size_for_calculation, batch_size)

        self.calculating_gradient_steps = batch_size // batch_size_for_calculation

        total_size_validation_dataset = self.validation_iterator.dataset.tensors[0].shape[0]
        batch_size_validation = self.validation_iterator.batch_size
        self.batches_in_epoch_validation = total_size_validation_dataset // batch_size_validation

        if self.armijo:
            self.function_value = theano.function(input_symbols, scalar_loss)
            self.validation_function_value_log = []
            validation_function_value = self.get_validation_function_value()
            self.validation_function_value_log.append(validation_function_value)
コード例 #13
    def train(self):
        '''
        Runs training until a StopTraining exception is raised.

        Training runs indefinitely until one of self.epoch_callbacks raises
        a StopTraining exception.
        '''

        if self._train_called:
            raise RuntimeError("train() has already been called on this %s. "
                               "Re-running train() risks inadvertently "
                               "carrying over implicit state from the "
                               "previous training run, such as the direction "
                               "of parameter updates (via the momentum "
                               "term), or the internal state of the Monitors "
                               "or EpochCallbacks. Instead, instantiate a new "
                               "copy of this %s and run train() on that." %
                               (type(self), type(self)))

        self._train_called = True

        if len(self.epoch_callbacks) == 0:
            raise RuntimeError("self.epoch_callbacks is empty, so Sgd will "
                               "iterate through the training data forever. "
                               "Please add an EpochCallback that will throw a "
                               "StopTraining exception at some point.")


        assert_all_is_instance(self.epoch_callbacks, EpochCallback)

        #
        # End sanity checks
        #

        update_function = self._compile_update_function()
        update_function2 = self._compile_update_function2()

        # Overlaps with self.epoch_callbacks
        iteration_callbacks = [e for e in self.epoch_callbacks
                               if (isinstance(e, IterationCallback) and
                                   not isinstance(e, EpochTimer2))]

        try:
            for epoch_callback in self.epoch_callbacks:
                epoch_callback.on_start_training()

            in_batch_counter = 0
            iteration_counter = 0
            while True:

                if in_batch_counter == 0:
                    cost_arguments = self._input_iterator.next()
                    all_callback_outputs = update_function2(*cost_arguments)
                else:
                    all_callback_outputs = update_function(*cost_arguments)

                if in_batch_counter > 2:
                    # get new batch of data
                    in_batch_counter = -1
                    self.parameter_updater.previous_gradient.set_value(
                        numpy.zeros(self.parameter_updater.param_length,
                                    dtype=theano.config.floatX))
                    self.parameter_updater.previous_direction.set_value(
                        numpy.zeros(self.parameter_updater.param_length,
                                    dtype=theano.config.floatX))

                '''
                print(self.parameter_updater.previous_direction.get_value())
                print(numpy.dot(self.parameter_updater.previous_direction.get_value(),self.parameter_updater.previous_direction.get_value()))
                print(self.parameter_updater.previous_gradient.get_value())
                '''
                in_batch_counter += 1
                iteration_counter += 1

                # calls iteration_callbacks' on_iteration() method, passing
                # in their output values, if any.
                output_index = 0
                for iteration_callback in iteration_callbacks:
                    num_outputs = len(iteration_callback.nodes_to_compute)
                    new_output_index = output_index + num_outputs

                    assert_less_equal(new_output_index,
                                      len(all_callback_outputs))

                    outputs = \
                        all_callback_outputs[output_index:new_output_index]

                    iteration_callback.on_iteration(outputs)

                    output_index = new_output_index

                assert_equal(output_index, len(all_callback_outputs))

                # if we've iterated through an epoch, call epoch_callbacks'
                # on_epoch() methods.
                #if self._input_iterator.next_is_new_epoch():
                if iteration_counter >= 500:
                    for epoch_callback in self.epoch_callbacks:
                        x = epoch_callback.on_epoch()

                    self.epoch_callbacks[-1].callbacks[0](x, None)
                    iteration_counter = 0

        except StopTraining as exception:
            if exception.status == 'ok':
                print("Training halted normally with message: {}".format(
                    exception.message))
                return
            else:
                raise
コード例 #14
    def train(self):
        '''
        Runs training until a StopTraining exception is raised.

        Training runs indefinitely until one of self.epoch_callbacks raises
        a StopTraining exception.
        '''

        if self._train_called:
            raise RuntimeError("train() has already been called on this %s. "
                               "Re-running train() risks inadvertently "
                               "carrying over implicit state from the "
                               "previous training run, such as the direction "
                               "of parameter updates (via the momentum "
                               "term), or the internal state of the Monitors "
                               "or EpochCallbacks. Instead, instantiate a new "
                               "copy of this %s and run train() on that." %
                               (type(self), type(self)))

        self._train_called = True

        if len(self.epoch_callbacks) == 0:
            raise RuntimeError("self.epoch_callbacks is empty, so Sgd will "
                               "iterate through the training data forever. "
                               "Please add an EpochCallback that will throw a "
                               "StopTraining exception at some point.")

        assert_all_is_instance(self.epoch_callbacks, EpochCallback)

        #
        # End sanity checks
        #

        # Overlaps with self.epoch_callbacks
        iteration_callbacks = [c for c in self.epoch_callbacks
                               if isinstance(c, IterationCallback)]

        try:

            for epoch_callback in self.epoch_callbacks:
                epoch_callback.on_start_training()

            epoch_counter = 0
            while True:

                for _ in range(self.batches_in_epoch):

                    self.LBFGS_step()
                    #self.learning_rate = self.learning_rate * 0.9


                '''
                self.validation_function_value = self.get_validation_function_value()
                #self.validation_function_value_log.append(validation_function_value)

                for epoch_callback in self.epoch_callbacks:
                    epoch_callback.on_epoch()

                for _ in range(batches_in_epoch):
                    self.LBFGS_step()
                '''

                '''
                cost_arguments = self.full_training_iterator.next()
                all_callback_outputs = self.function_outputs(*cost_arguments)

                output_index = 0
                for iteration_callback in iteration_callbacks:
                    num_outputs = len(iteration_callback.nodes_to_compute)
                    new_output_index = output_index + num_outputs

                    assert_less_equal(new_output_index,
                                      len(all_callback_outputs))

                    outputs = \
                        all_callback_outputs[output_index:new_output_index]

                    iteration_callback.on_iteration(outputs)

                    output_index = new_output_index

                assert_equal(output_index, len(all_callback_outputs))
                '''
                print(" ")

                #new_validation_value = self.get_validation_function_value()
                #print("The new validation value is: ", new_validation_value)
                '''
                cost_args = self.full_training_iterator.next()
                print(self.loss_function(*cost_args))
                '''

                # if we've iterated through an epoch, call epoch_callbacks'
                # on_epoch() methods.
                #if self.training_iterator.next_is_new_epoch():
                for epoch_callback in self.epoch_callbacks:
                    x = epoch_callback.on_epoch()

                self.epoch_callbacks[-1].callbacks[0](x, None)

                print("Batch size used was: ", self.batch_size)
                print("Batch size for calculation was: ", self.batch_size_for_calculation)

                # Double batch-size:
                #if not (epoch_counter > 2 and epoch_counter < 6):
                self.batch_size = 2 * self.batch_size

                if self.batch_size >= 8000:
                    self.batch_size = 6250

                '''
                elif self.batch_size >= 50000:
                    self.batch_size == 50000
                    self.armijo = True
                '''

                self.batches_in_epoch = self.total_size_dataset // self.batch_size

                self.batch_size_for_calculation = self.training_iterator.batch_size
                assert_less_equal(self.batch_size_for_calculation, self.batch_size)

                self.calculating_gradient_steps = \
                    self.batch_size // self.batch_size_for_calculation

                # Reset statistics before the new batch is used:
                self.y = []
                self.s = []
                self.rho = []
                self.grad_log = []
                self.k = 0  # counter
                self.current_batch = []
                self.current_batch_counter = 0

                '''
                for _ in range(batches_in_epoch):
                    self.SGD_step_armijo()

                for epoch_callback in self.epoch_callbacks:
                    epoch_callback.on_epoch()
                '''
                epoch_counter += 1


        except StopTraining as exception:
            if exception.status == 'ok':
                print("Training halted normally with message: {}".format(
                    exception.message))
                return
            else:
                raise
コード例 #15
    def __init__(self,
                 inputs,
                 parameters,
                 gradient,
                 learning_rate,
                 training_iterator,
                 validation_iterator,
                 scalar_loss,
                 armijo,
                 tangent,
                 method,
                 batch_size,
                 epoch_callbacks,
                 param_shapes=None):


        #
        # sanity-checks the arguments.
        #

        assert_all_is_instance(inputs, Node)
        assert_is_instance(training_iterator, DataIterator)
        #assert_true(training_iterator.next_is_new_epoch())

        '''
        for (input,
             iterator_input) in safe_izip(inputs,
                                          training_iterator.make_input_nodes()):
            assert_equal(input.output_format, iterator_input.output_format)
        '''

        assert_equal(len(epoch_callbacks),
                     len(frozenset(epoch_callbacks)),
                     "There were duplicate callbacks.")

        assert_all_is_instance(epoch_callbacks, EpochCallback)

        #
        # Sets members
        #

        self.armijo = armijo
        self.tangent = tangent
        self.parameters = parameters
        self.training_iterator = training_iterator
        self.validation_iterator = validation_iterator
        self.learning_rate = learning_rate
        self.method = method

        input_symbols = [i.output_symbol for i in inputs]
        self.epoch_callbacks = tuple(epoch_callbacks)

        self._train_called = False

        self.classification_errors = numpy.asarray([])
        self.gradient_function = theano.function(input_symbols, gradient)
        self.loss_function = theano.function(input_symbols, scalar_loss)

        self.new_epoch = True
#        self.method = self._parameter_updaters[0].method

        self.param_shapes = param_shapes
        # Initialize saved variables:
        self.k = 0  # counter

        '''
        total_size_dataset_full = self.full_training_iterator.dataset.tensors[0].shape[0]
        batch_size_full =self.full_training_iterator.batch_size
        self.batches_in_epoch_full_gradient = total_size_dataset_full / batch_size_full
        '''

        total_size_dataset = self.training_iterator.dataset.tensors[0].shape[0]
        self.batches_in_epoch = total_size_dataset // batch_size

        batch_size_for_calculation = self.training_iterator.batch_size
        assert_less_equal(batch_size_for_calculation, batch_size)

        self.calculating_gradient_steps = batch_size // batch_size_for_calculation

        total_size_validation_dataset = self.validation_iterator.dataset.tensors[0].shape[0]
        batch_size_validation = self.validation_iterator.batch_size
        self.batches_in_epoch_validation = total_size_validation_dataset // batch_size_validation

        if self.armijo:
            self.function_value = theano.function(input_symbols, scalar_loss)
        #self.validation_function_value_log = []
        #validation_function_value = self.get_validation_function_value()
        #self.validation_function_value_log.append(validation_function_value)

        self.previous_gradient = 0
        self.previous_direction = 0
        self.current_batch = None
        self.current_batch_counter = 0
コード例 #16
    def __init__(self,
                inputs,
                parameters,
                old_parameters,
                gradient,
                learning_rate,
                training_iterator,
                training_set,
                scalar_loss,
                epoch_callbacks):


        #
        # sanity-checks the arguments.
        #

        assert_all_is_instance(inputs, Node)
        assert_is_instance(training_iterator, DataIterator)
        #assert_true(training_iterator.next_is_new_epoch())

        '''
        for (input,
             iterator_input) in safe_izip(inputs,
                                          training_iterator.make_input_nodes()):
            assert_equal(input.output_format, iterator_input.output_format)
        '''

        assert_equal(len(epoch_callbacks),
                     len(frozenset(epoch_callbacks)),
                     "There were duplicate callbacks.")

        assert_all_is_instance(epoch_callbacks, EpochCallback)


        #
        # Sets members
        #

        self.parameters = parameters
        self.old_parameters = old_parameters
        self.training_iterator = training_iterator
        self.learning_rate = learning_rate

        input_symbols = [i.output_symbol for i in inputs]

        self.epoch_callbacks = tuple(epoch_callbacks)

        self._train_called = False

        self.classification_errors = numpy.asarray([])

        self.gradient_function = theano.function(input_symbols, gradient)

        '''
        output_symbols = []

        iteration_callbacks = [e for e in self.epoch_callbacks
                               if isinstance(e, IterationCallback)]

        for iteration_callback in iteration_callbacks:
            for node_to_compute in iteration_callback.nodes_to_compute:
                output_symbols.append(node_to_compute.output_symbol)

        self.function_outputs = theano.function(input_symbols, output_symbols)

        self.full_training_iterator = training_set.iterator(iterator_type='sequential',
                                                        loop_style='divisible',
                                                        batch_size=50000)
        '''
        self.full_training_iterator = training_set.iterator(
            iterator_type='sequential',
            loop_style='divisible',
            batch_size=50000)
        self.loss_function = theano.function(input_symbols, scalar_loss)

        self.new_epoch = True
#        self.method = self._parameter_updaters[0].method

        # Initialize saved variables:
        self.rho = []
        self.y = []
        self.s = []
        self.grad_log = []
        self.k = 0  # counter
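
The y, s, rho lists initialized above are the standard L-BFGS curvature-pair history (s = parameter step, y = gradient change, rho = 1/(y·s)). For reference, a minimal two-loop recursion such a history drives; this is the textbook algorithm, not the source's LBFGS_step():

    import numpy

    def lbfgs_direction(grad, s_list, y_list, rho_list):
        # Two-loop recursion: map the gradient to a quasi-Newton direction.
        q = grad.copy()
        alphas = []
        for s, y, rho in zip(reversed(s_list), reversed(y_list),
                             reversed(rho_list)):
            alpha = rho * numpy.dot(s, q)
            q -= alpha * y
            alphas.append(alpha)
        if s_list:
            # gamma = (s.y)/(y.y) scales the initial inverse Hessian.
            q *= (numpy.dot(s_list[-1], y_list[-1]) /
                  numpy.dot(y_list[-1], y_list[-1]))
        for (s, y, rho), alpha in zip(zip(s_list, y_list, rho_list),
                                      reversed(alphas)):
            beta = rho * numpy.dot(y, q)
            q += (alpha - beta) * s
        return -q  # descent direction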
コード例 #17
    def train(self):
        '''
        Runs training until a StopTraining exception is raised.

        Training runs indefinitely until one of self.epoch_callbacks raises
        a StopTraining exception.
        '''

        if self._train_called:
            raise RuntimeError("train() has already been called on this %s. "
                               "Re-running train() risks inadvertently "
                               "carrying over implicit state from the "
                               "previous training run, such as the direction "
                               "of parameter updates (via the momentum "
                               "term), or the internal state of the Monitors "
                               "or EpochCallbacks. Instead, instantiate a new "
                               "copy of this %s and run train() on that." %
                               (type(self), type(self)))

        self._train_called = True

        if len(self.epoch_callbacks) == 0:
            raise RuntimeError("self.epoch_callbacks is empty, so Sgd will "
                               "iterate through the training data forever. "
                               "Please add an EpochCallback that will throw a "
                               "StopTraining exception at some point.")


        assert_all_is_instance(self.epoch_callbacks, EpochCallback)

        #
        # End sanity checks
        #

        update_function = self._compile_update_function()

        # Overlaps with self.epoch_callbacks
        iteration_callbacks = [e for e in self.epoch_callbacks
                               if (isinstance(e, IterationCallback) and
                                   not isinstance(e, EpochTimer2))]

        try:
            for epoch_callback in self.epoch_callbacks:
                epoch_callback.on_start_training()

            iteration_number = 0

            while True:

                '''
                for parameter_updater in self.parameter_updaters:
                    parameter_updater.epoch_number += 1
                '''

                iteration_number += 1

                # gets batch of data
                cost_arguments = self._input_iterator.next()

                # fprop-bprops, updates parameters, computes callback outputs.
                # pylint: disable=star-args
                all_callback_outputs = update_function(*cost_arguments)

                if iteration_number == 1 or iteration_number % 5000 == 0:
                    for parameter_updater in self.parameter_updaters:
                        parameter_updater.averaged_param.set_value(
                            parameter_updater.parameter.get_value())
                        parameter_updater.iteration_number = 1.0

                '''
                # calls iteration_callbacks' on_iteration() method, passing
                # in their output values, if any.
                output_index = 0
                for iteration_callback in iteration_callbacks:
                    num_outputs = len(iteration_callback.nodes_to_compute)
                    new_output_index = output_index + num_outputs

                    assert_less_equal(new_output_index,
                                      len(all_callback_outputs))

                    outputs = \
                        all_callback_outputs[output_index:new_output_index]

                    iteration_callback.on_iteration(outputs)

                    output_index = new_output_index

                assert_equal(output_index, len(all_callback_outputs))
                '''

                # if we've iterated through an epoch, call epoch_callbacks'
                # on_epoch() methods.
                if self._input_iterator.next_is_new_epoch():

                    # Sets parameters to the averages:
                    for parameter_updater in self.parameter_updaters:
                        parameter_updater.parameter_temp.set_value(
                            parameter_updater.parameter.get_value())
                        parameter_updater.parameter.set_value(
                            parameter_updater.averaged_param.get_value())


                    for epoch_callback in self.epoch_callbacks:
                        x = epoch_callback.on_epoch()

                    self.epoch_callbacks[-1].callbacks[0](x, None)


                    # Set parameters back:
                    for parameter_updater in self.parameter_updaters:
                        parameter_updater.parameter.set_value(
                            parameter_updater.parameter_temp.get_value())


        except StopTraining as exception:
            if exception.status == 'ok':
                print("Training halted normally with message: {}".format(
                    exception.message))
                return
            else:
                raise
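
The parameter swap around the epoch callbacks above (evaluate with the Polyak-averaged weights, then restore the running weights) as a small helper sketch, assuming updater objects with parameter and averaged_param shared variables as in the listing:

    def with_averaged_parameters(parameter_updaters, evaluate):
        # Swap in the averaged weights, run the evaluation, swap back.
        saved = [u.parameter.get_value() for u in parameter_updaters]
        for u in parameter_updaters:
            u.parameter.set_value(u.averaged_param.get_value())
        result = evaluate()
        for u, value in zip(parameter_updaters, saved):
            u.parameter.set_value(value)
        return result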
コード例 #18
ファイル: h5_dataset.py プロジェクト: imclab/simplelearn
def make_h5_file(path,
                 partition_names,
                 partition_sizes,
                 tensor_names,
                 tensor_formats):
    '''
    Creates a h5py.File with groups that can be wrapped by H5Dataset.

    Usage
    -----

    h5_file = make_h5_file(file_path, p_names, p_sizes, t_names, t_formats)
      1: Call this function to create a h5py.File object
      2: Fill the h5py.File's data tensors with appropriate data.
      3: Close the h5py.File, then re-open it using H5Dataset,
         a read-only dataset interface.

    Parameters
    ----------
    path: str
      Path to the '.h5' file to create. Must lie under
      simplelearn.data.data_path.

    partition_names: Sequence
      Names of the sub-datasets, e.g. ['train', 'test'].
      May only contain alphanumeric characters and underscores, as
      load_h5_dataset() uses these names as NamedTuple names.

    partition_sizes: Sequence
      Number of examples in each sub-dataset, e.g. [50000, 10000] for
      MNIST.

    tensor_names: Sequence
      Names of the data tensors, e.g. ['images', 'labels']. Each
      sub-tensor uses the same tensor_names.

    tensor_formats: Sequence
      The DenseFormats of the data tensors, e.g. (for MNIST):
      [DenseFormat(axes=['b', '0', '1'], shape=[-1, 28, 28], dtype='uint8'),
       DenseFormat(axes=['b'], shape=[-1], dtype='uint8')]

    The example parameter values above would create an h5py.File
    with the following hierarchical structure:

    h5py.File/
      'partition_names': an h5py.Dataset of strings, ['train', 'test']
      'tensor_names': an h5py.Dataset of strings, ['images', 'labels']
      'partitions': an h5py.Group with the following members:
        'train': an h5py.Group, with the following members:
          'images': an h5py.Dataset tensor, with shape given by
                    partition_sizes[0] and tensor_formats[0].
          'labels': an h5py.Dataset tensor, with shape given by
                    partition_sizes[0] and tensor_formats[1].
        'test': an h5py.Group, with the following members:
          'images': an h5py.Dataset tensor, with shape given by
                    partition_sizes[1] and tensor_formats[0].
          'labels': an h5py.Dataset tensor, with shape given by
                    partition_sizes[1] and tensor_formats[1].
    '''

    assert_is_instance(path, basestring)
    assert_equal(os.path.splitext(path)[1], '.h5')
    absolute_path = os.path.abspath(path)
    assert_true(absolute_path.startswith(simplelearn.data.data_path),
                ("{} is not a subdirectory of simplelearn.data.data_path "
                 "{}").format(absolute_path, simplelearn.data.data_path))

    assert_all_is_instance(partition_names, basestring)
    assert_equal(len(frozenset(partition_names)), len(partition_names))
    for partition_name in partition_names:
        for char in partition_name:
            if not (char.isalnum() or char == "_"):
                raise ValueError("Partition name {} must contain only "
                                 "alphanumeric characters or "
                                 "underscores.".format(partition_name))

    assert_all_integer(partition_sizes)
    assert_all_greater_equal(partition_sizes, 0)

    assert_all_is_instance(tensor_names, basestring)
    assert_equal(len(frozenset(tensor_names)), len(tensor_names))

    assert_all_is_instance(tensor_formats, DenseFormat)
    for tensor_format in tensor_formats:
        assert_in('b', tensor_format.axes)

    # Done sanity-checking args

    h5_file = h5py.File(absolute_path, mode='w')

    # Add ordered lists of tensor/partition names, since h5py.Group.keys()
    # can't be trusted to list group members in the order that they were
    # added in.

    def add_ordered_names(list_name, names, group):
        '''
        Adds a list of names to a group, as a h5py.Dataset of strings.
        '''
        max_name_length = max([len(n) for n in names])
        string_dtype = 'S{}'.format(max_name_length)
        result = group.create_dataset(list_name,
                                      (len(names), ),
                                      dtype=string_dtype)
        for n, name in enumerate(names):
            result[n] = name

    # Not sure if storing partition order is necessary, but why not.
    add_ordered_names('partition_names', partition_names, h5_file)

    # Storing tensor order is definitely necessary.
    add_ordered_names('tensor_names', tensor_names, h5_file)

    partitions = h5_file.create_group('partitions')

    for partition_name, partition_size in safe_izip(partition_names,
                                                    partition_sizes):
        partition = partitions.create_group(partition_name)

        for tensor_name, tensor_format in safe_izip(tensor_names,
                                                    tensor_formats):
            tensor_shape = list(tensor_format.shape)
            tensor_shape[tensor_format.axes.index('b')] = partition_size

            # fletcher32: checksum against data corruption with tiny overhead.
            # http://docs.h5py.org/en/latest/high/dataset.html#fletcher32-filter
            tensor = partition.create_dataset(tensor_name,
                                              tensor_shape,
                                              tensor_format.dtype,
                                              fletcher32=True)

            # Label the tensor axes by their axis names in fmt.
            for index, axis in enumerate(tensor_format.axes):
                tensor.dims[index].label = axis

    return h5_file
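
A usage sketch following the docstring's MNIST-style example (hypothetical values; the path must resolve under simplelearn.data.data_path):

    import os
    import simplelearn.data

    fmts = [DenseFormat(axes=['b', '0', '1'], shape=[-1, 28, 28], dtype='uint8'),
            DenseFormat(axes=['b'], shape=[-1], dtype='uint8')]

    h5 = make_h5_file(os.path.join(simplelearn.data.data_path, 'mnist.h5'),
                      ['train', 'test'],
                      [50000, 10000],
                      ['images', 'labels'],
                      fmts)
    h5['partitions']['train']['images'][...] = 0  # fill with real data
    h5.close()  # then re-open read-only through H5Dataset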
コード例 #19
    def train(self):
        '''
        Runs training until a StopTraining exception is raised.

        Training runs indefinitely until one of self.epoch_callbacks raises
        a StopTraining exception.
        '''

        if self._train_called:
            raise RuntimeError("train() has already been called on this %s. "
                               "Re-running train() risks inadvertently "
                               "carrying over implicit state from the "
                               "previous training run, such as the direction "
                               "of parameter updates (via the momentum "
                               "term), or the internal state of the Monitors "
                               "or EpochCallbacks. Instead, instantiate a new "
                               "copy of this %s and run train() on that." %
                               (type(self), type(self)))

        self._train_called = True

        if len(self.epoch_callbacks) == 0:
            raise RuntimeError("self.epoch_callbacks is empty, so Sgd will "
                               "iterate through the training data forever. "
                               "Please add an EpochCallback that will throw a "
                               "StopTraining exception at some point.")

        assert_all_is_instance(self.epoch_callbacks, EpochCallback)

        #
        # End sanity checks
        #

        # Overlaps with self.epoch_callbacks
        iteration_callbacks = [c for c in self.epoch_callbacks
                               if isinstance(c, IterationCallback)]

        try:

            for epoch_callback in self.epoch_callbacks:
                epoch_callback.on_start_training()

            while True:

                for _ in range(self.batches_in_epoch):
                    self.LBFGS_step()

                '''
                self.validation_function_value = self.get_validation_function_value()
                #self.validation_function_value_log.append(validation_function_value)

                for epoch_callback in self.epoch_callbacks:
                    epoch_callback.on_epoch()

                for _ in range(batches_in_epoch):
                    self.LBFGS_step()
                '''

                '''
                cost_arguments = self.full_training_iterator.next()
                all_callback_outputs = self.function_outputs(*cost_arguments)

                output_index = 0
                for iteration_callback in iteration_callbacks:
                    num_outputs = len(iteration_callback.nodes_to_compute)
                    new_output_index = output_index + num_outputs

                    assert_less_equal(new_output_index,
                                      len(all_callback_outputs))

                    outputs = \
                        all_callback_outputs[output_index:new_output_index]

                    iteration_callback.on_iteration(outputs)

                    output_index = new_output_index

                assert_equal(output_index, len(all_callback_outputs))
                '''
                print(" ")
                '''
                cost_args = self.full_training_iterator.next()
                print(self.loss_function(*cost_args))
                '''

                # if we've iterated through an epoch, call epoch_callbacks'
                # on_epoch() methods.
                #if self.training_iterator.next_is_new_epoch():
                for epoch_callback in self.epoch_callbacks:
                    x = epoch_callback.on_epoch()

                self.epoch_callbacks[-1].callbacks[0](x, None)

                '''
                for _ in range(batches_in_epoch):
                    self.SGD_step_armijo()

                for epoch_callback in self.epoch_callbacks:
                    epoch_callback.on_epoch()
                '''


        except StopTraining as exception:
            if exception.status == 'ok':
                print("Training halted normally with message: {}".format(
                    exception.message))
                return
            else:
                raise
コード例 #20
    def __init__(self,
                 inputs,
                 input_iterator,
                 parameters,
                 parameter_updaters,
                 epoch_callbacks,
                 theano_function_mode=None):

        '''
        Parameters
        ----------

        inputs: sequence of Nodes.
          Symbols for the outputs of the input_iterator.
          These should come from input_iterator.make_input_nodes()

        input_iterator: simplelearn.data.DataIterator
          Yields tuples of training set batches, such as (values, labels).

        parameters: sequence of theano.tensor.sharedvar.SharedVariables
          What this trainer modifies to lower the cost. These are typically
          model weights, though they could also be inputs (e.g. for optimizing
          input images).

        parameter_updaters: sequence of SgdParameterUpdaters
          updaters for the corresponding elements in <parameters>.
          These are defined using the loss function to be minimized.

        epoch_callbacks: sequence of EpochCallbacks
          One of these must throw a StopTraining exception for the training to
          halt.

        theano_function_mode: theano.compile.Mode
          Optional. The 'mode' argument to pass to theano.function().
          An example: pylearn2.devtools.nan_guard.NanGuard()
        '''

        #
        # sanity-checks the arguments.
        #

        assert_all_is_instance(inputs, Node)
        assert_is_instance(input_iterator, DataIterator)
        assert_true(input_iterator.next_is_new_epoch())

        for (input,
             iterator_input) in safe_izip(inputs,
                                          input_iterator.make_input_nodes()):
            assert_equal(input.output_format, iterator_input.output_format)

        assert_equal(len(epoch_callbacks),
                     len(frozenset(epoch_callbacks)),
                     "There were duplicate callbacks.")

        assert_all_is_instance(epoch_callbacks, EpochCallback)

        #
        # Sets members
        #

        self._inputs = inputs
        self._input_iterator = input_iterator
        self._theano_function_mode = theano_function_mode
        self.epoch_callbacks = list(epoch_callbacks)
        self._train_called = False