Example #1
def make_time_units_string(time):
    """
    This takes a time (in seconds) and converts it to an easy-to-read format with the appropriate units.

    :param time: the time to make into a string (in seconds)
    :type time: int or float

    :return: an easy-to-read string representation of the time
    :rtype: str
    """
    # Show the time with appropriate units.
    if time < 1:
        return trunc(time * 1000) + " milliseconds"
    elif time < 60:
        return trunc(time) + " seconds"
    elif time < 3600:
        return trunc(time / 60) + " minutes"
    else:
        return trunc(time / 3600) + " hours"
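A minimal, self-contained usage sketch for the function above. Note that trunc must return a string here (its result is concatenated with " milliseconds", " seconds", etc.), so it cannot be math.trunc; the string-truncating stand-in below is an assumption made only so the sketch runs on its own.

import time

def trunc(value, length=8):
    # Assumed stand-in: truncate the string form of a value to a few characters.
    # (math.trunc would return a number and break the string concatenation above.)
    return str(value)[:length]

def make_time_units_string(seconds):
    # Same logic as the example above; the parameter is renamed so it does not
    # shadow the imported time module.
    if seconds < 1:
        return trunc(seconds * 1000) + " milliseconds"
    elif seconds < 60:
        return trunc(seconds) + " seconds"
    elif seconds < 3600:
        return trunc(seconds / 60) + " minutes"
    else:
        return trunc(seconds / 3600) + " hours"

start = time.time()
time.sleep(0.25)
print(make_time_units_string(time.time() - start))  # e.g. "250.1234 milliseconds"
print(make_time_units_string(90))                   # "1.5 minutes"
print(make_time_units_string(7200))                 # "2.0 hours"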
Example #2
def make_time_units_string(time):
    """
    This takes a time (in seconds) and converts it to an easy-to-read format with the appropriate units.

    :param time: the time to make into a string (in seconds)
    :type time: int or float

    :return: an easy-to-read string representation of the time
    :rtype: str
    """
    # Show the time with appropriate units.
    if time < 1:
        return trunc(time*1000)+" milliseconds"
    elif time < 60:
        return trunc(time)+" seconds"
    elif time < 3600:
        return trunc(time/60)+" minutes"
    else:
        return trunc(time/3600)+" hours"
Example #3
def make_time_units_string(time):
    """
    This takes a time (in seconds) and converts it to an easy-to-read format with the appropriate units.

    Parameters
    ----------
    time : int or float
        The time to make into a string (in seconds). Normally from computing differences in time.time().

    Returns
    -------
    str
        An easy-to-read string representation of the time, e.g. "x hours", "x minutes", etc.
    """
    # Show the time with appropriate units.
    if time < 1:
        return trunc(time * 1000) + " milliseconds"
    elif time < 60:
        return trunc(time) + " seconds"
    elif time < 3600:
        return trunc(time / 60) + " minutes"
    else:
        return trunc(time / 3600) + " hours"
Example #4
def make_time_units_string(time):
    """
    This takes a time (in seconds) and converts it to an easy-to-read format with the appropriate units.

    Parameters
    ----------
    time : int or float
        The time to make into a string (in seconds). Normally from computing differences in time.time().

    Returns
    -------
    str
        An easy-to-read string representation of the time, e.g. "x hours", "x minutes", etc.
    """
    # Show the time with appropriate units.
    if time < 1:
        return trunc(time*1000)+" milliseconds"
    elif time < 60:
        return trunc(time)+" seconds"
    elif time < 3600:
        return trunc(time/60)+" minutes"
    else:
        return trunc(time/3600)+" hours"
Example #5
    def _perform_one_epoch(self, f_learn):
        self.epoch_counter += 1
        t = time.time()
        log.info('EPOCH %s', str(self.epoch_counter))

        # set the noise switches on for training function! (this is where things like dropout happen)
        if len(self.noise_switches) > 0:
            log.debug("Turning on %s noise switches", str(len(self.noise_switches)))
            switch_vals = [switch.get_value() for switch in self.noise_switches]
            [switch.set_value(1.) for switch in self.noise_switches]

        # train
        train_costs = []
        train_monitors = {key: [] for key in self.monitors.keys()}
        for batch_start, batch_end in self.train_batches:
            train_costs.append(f_learn(batch_start, batch_end))
            self.call_monitors(monitor_function=self.train_monitor_function,
                               monitors_dict=train_monitors,
                               inputs=[batch_start, batch_end])
        log.info('Train cost: %s', trunc(numpy.mean(train_costs, 0)))
        if len(self.monitors.keys()) > 0:
            log.info('Train monitors: %s',
                     str({key: numpy.mean(value, 0) for key, value in train_monitors.items()}))


        # set the noise switches off for valid and test sets! we assume unseen data is noisy anyway :)
        if len(self.noise_switches) > 0:
            log.debug("Turning off %s noise switches", str(len(self.noise_switches)))
            [switch.set_value(0.) for switch in self.noise_switches]
        # valid
        if self.dataset.hasSubset(VALID) and len(self.monitors.keys()) > 0:
            valid_monitors = {key: [] for key in self.monitors.keys()}
            for batch_start, batch_end in self.valid_batches:
                self.call_monitors(monitor_function=self.valid_monitor_function,
                                   monitors_dict=valid_monitors,
                                   inputs=[batch_start, batch_end])
            log.info('Valid monitors: %s',
                     str({key: numpy.mean(value, 0) for key, value in valid_monitors.items()}))

        #test
        if self.dataset.hasSubset(TEST) and len(self.monitors.keys()) > 0:
            test_monitors = {key: [] for key in self.monitors.keys()}
            for batch_start, batch_end in self.test_batches:
                self.call_monitors(monitor_function=self.test_monitor_function,
                                   monitors_dict=test_monitors,
                                   inputs=[batch_start, batch_end])
            log.info('Test monitors: %s', str({key: numpy.mean(value, 0) for key, value in test_monitors.items()}))

        # check for early stopping on train costs
        cost = numpy.sum(train_costs)
        if cost < self.best_cost * self.early_stop_threshold:
            self.patience = 0
            self.best_cost = cost
            # save the parameters that made it the best
            self.best_params = get_shared_values(self.params)
        else:
            self.patience += 1

        # check for stopping either from n_epochs or from threshold/patience
        stop = False
        if self.epoch_counter >= self.n_epoch:
            log.info("Stopping (reached max number of epochs)...")
            stop = True
        if self.patience >= self.early_stop_length:
            log.info("Stopping early (reached stop threshold)...")
            stop = True

        timing = time.time() - t
        self.times.append(timing)

        log.info('time: ' + make_time_units_string(timing))

        log.info('remaining time: ' +
                 make_time_units_string((self.n_epoch - self.epoch_counter) * numpy.mean(self.times)))

        if (self.epoch_counter % self.save_frequency) == 0:
            #save params
            self.model.save_params('trained_epoch_' + str(self.epoch_counter) + '.pkl')

        # ANNEAL!
        if not stop:
            if hasattr(self, 'learning_rate_decay') and self.learning_rate_decay:
                self.learning_rate_decay.decay()
            if hasattr(self, 'momentum_decay') and self.momentum_decay:
                self.momentum_decay.decay()
            for decay_param in self.model.get_decay_params():
                decay_param.decay()

        # reset the switches
        if len(self.noise_switches) > 0:
            [switch.set_value(val) for switch, val in zip(self.noise_switches, switch_vals)]

        # return whether or not to stop this epoch
        return stop
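The early-stopping bookkeeping above is independent of the training machinery: reset the patience counter and remember the best parameters whenever the summed train cost drops below best_cost * early_stop_threshold, otherwise count one more epoch of patience, and stop once patience reaches early_stop_length. A stand-alone sketch of just that rule (the EarlyStopper wrapper is hypothetical):

class EarlyStopper(object):
    # Hypothetical wrapper around the threshold/patience rule used in _perform_one_epoch.
    def __init__(self, threshold=0.995, patience_limit=3):
        self.threshold = threshold            # cost must beat best_cost * threshold to count as progress
        self.patience_limit = patience_limit  # epochs without progress allowed before stopping
        self.best_cost = float("inf")
        self.patience = 0

    def update(self, cost):
        # Record this epoch's cost and return True when training should stop.
        if cost < self.best_cost * self.threshold:
            self.patience = 0                 # real improvement: reset patience, keep the new best
            self.best_cost = cost
        else:
            self.patience += 1                # no meaningful improvement this epoch
        return self.patience >= self.patience_limit

stopper = EarlyStopper()
for epoch, cost in enumerate([10.0, 9.0, 8.9, 8.89, 8.88, 8.88], start=1):
    if stopper.update(cost):
        print("stopping early at epoch", epoch)  # stops at epoch 6
        break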
Example #6
    def _perform_one_epoch(self):
        self.epoch_counter += 1
        t = time.time()
        log.info('EPOCH %s', str(self.epoch_counter))

        #train
        train_costs = []
        train_monitors = {key: [] for key in self.monitors.keys()}
        for x, y in self.iterator(self.dataset, datasets.TRAIN,
                                  self.batch_size, self.minimum_batch_size,
                                  self.rng):
            if self.unsupervised:
                train_costs.append(self.f_learn(x))
                for key in self.monitors.keys():
                    monitor_function = self.monitors[key]
                    train_monitors[key].append(monitor_function(x))
            else:
                train_costs.append(self.f_learn(x, y))
                for key in self.monitors.keys():
                    monitor_function = self.monitors[key]
                    train_monitors[key].append(monitor_function(x, y))
        log.info('Train cost: %s', trunc(numpy.mean(train_costs, 0)))
        if len(self.monitors.keys()) > 0:
            log.info(
                'Train monitors: %s',
                str({
                    key: numpy.mean(value, 0)
                    for key, value in train_monitors.items()
                }))

        #valid
        if self.dataset.hasSubset(
                datasets.VALID) and len(self.monitors.keys()) > 0:
            valid_monitors = {key: [] for key in self.monitors.keys()}
            for x, y in self.iterator(self.dataset, datasets.VALID,
                                      self.batch_size, self.minimum_batch_size,
                                      self.rng):
                if self.unsupervised:
                    for key in self.monitors.keys():
                        monitor_function = self.monitors[key]
                        valid_monitors[key].append(monitor_function(x))
                else:
                    for key in self.monitors.keys():
                        monitor_function = self.monitors[key]
                        valid_monitors[key].append(monitor_function(x, y))
            log.info(
                'Valid monitors: %s',
                str({
                    key: numpy.mean(value, 0)
                    for key, value in valid_monitors.items()
                }))

        #test
        if self.dataset.hasSubset(
                datasets.TEST) and len(self.monitors.keys()) > 0:
            test_monitors = {key: [] for key in self.monitors.keys()}
            for x, y in self.iterator(self.dataset, datasets.TEST,
                                      self.batch_size, self.minimum_batch_size,
                                      self.rng):
                if self.unsupervised:
                    for key in self.monitors.keys():
                        monitor_function = self.monitors[key]
                        test_monitors[key].append(monitor_function(x))
                else:
                    for key in self.monitors.keys():
                        monitor_function = self.monitors[key]
                        test_monitors[key].append(monitor_function(x, y))
            log.info(
                'Test monitors: %s',
                str({
                    key: numpy.mean(value, 0)
                    for key, value in test_monitors.items()
                }))

        # check for early stopping on train costs
        cost = numpy.sum(train_costs)
        if cost < self.best_cost * self.early_stop_threshold:
            self.patience = 0
            self.best_cost = cost
            # save the parameters that made it the best
            self.best_params = get_shared_values(self.params)
        else:
            self.patience += 1

        if self.epoch_counter >= self.n_epoch or self.patience >= self.early_stop_length:
            log.info("Stopping early...")
            self.STOP = True

        timing = time.time() - t
        self.times.append(timing)

        log.info('time: ' + make_time_units_string(timing))

        log.info('remaining time: ' +
                 make_time_units_string((self.n_epoch - self.epoch_counter) *
                                        numpy.mean(self.times)))

        if (self.epoch_counter % self.save_frequency) == 0:
            #save params
            self.model.save_params('trained_epoch_' + str(self.epoch_counter) +
                                   '.pkl')

        # ANNEAL!
        if hasattr(self, 'learning_rate_decay'):
            self.learning_rate_decay.decay()
        if hasattr(self, 'momentum_decay'):
            self.momentum_decay.decay()
        for decay_param in self.model.get_decay_params():
            decay_param.decay()
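This older version calls each monitor function explicitly for every batch and then reports the mean over batches. The accumulate-then-average pattern in isolation (the monitors and batches below are made up for illustration):

import numpy

# Hypothetical monitors: name -> callable evaluated on every batch.
monitors = {
    "mse": lambda batch: float(numpy.mean(batch ** 2)),
    "mean": lambda batch: float(numpy.mean(batch)),
}
batches = [numpy.array([1.0, 2.0]), numpy.array([3.0, 4.0])]

# Collect one value per batch for every monitor...
monitor_values = {name: [] for name in monitors}
for batch in batches:
    for name, monitor_function in monitors.items():
        monitor_values[name].append(monitor_function(batch))

# ...then report the per-monitor mean over batches, as the epoch loop does.
print({name: float(numpy.mean(values)) for name, values in monitor_values.items()})
# {'mse': 7.5, 'mean': 2.5}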
Example #7
    def _perform_one_epoch(self, f_learn, plot=None):
        """
        Performs a single training iteration with the given learn function.
        """
        self.epoch_counter += 1
        t = time.time()
        log.info('EPOCH %s', str(self.epoch_counter))

        # set the noise switches on for training function! (this is where things like dropout happen)
        switch_vals = []
        if len(self.noise_switches) > 0 and (self.valid_flag or self.test_flag or self.epoch_counter == 1):
            log.debug("Turning on %s noise switches", str(len(self.noise_switches)))
            switch_vals = [switch.get_value() for switch in self.noise_switches]
            [switch.set_value(1.) for switch in self.noise_switches]

        # train
        train_costs = []
        train_monitors = {key: [] for key in self.train_monitors_dict.keys()}
        for batch_start, batch_end in self.train_batches:
            _outs = raise_to_list(f_learn(batch_start, batch_end))
            train_costs.append(_outs[0])
            # handle any user defined monitors
            if len(train_monitors) > 0:
                current_monitors = zip(self.train_monitors_dict.keys(), _outs[1:])
                for name, val in current_monitors:
                    train_monitors[name].append(val)

        # get the mean values for the batches
        mean_train = numpy.mean(train_costs, 0)
        current_mean_monitors = {key: numpy.mean(vals, 0) for key, vals in train_monitors.items()}
        # log the mean values!
        log.info('Train cost: %s', trunc(mean_train))
        if len(current_mean_monitors) > 0:
            log.info('Train monitors: %s', str(current_mean_monitors))
        # send the values to their outservices
        if self.train_outservice:
            self.train_outservice.write(mean_train, TRAIN)
        for name, service in self.train_monitors_outservice_dict.items():
            if name in current_mean_monitors and service:
                service.write(current_mean_monitors[name], TRAIN)
        # if there is a plot, also send them over!
        if plot:
            current_mean_monitors.update({TRAIN_COST_KEY: mean_train})
            plot.update_plots(epoch=self.epoch_counter, monitors=current_mean_monitors)

        # set the noise switches off for valid and test sets! we assume unseen data is noisy anyway :)
        if len(self.noise_switches) > 0 and (self.valid_flag or self.test_flag):
            log.debug("Turning off %s noise switches", str(len(self.noise_switches)))
            [switch.set_value(0.) for switch in self.noise_switches]

        # valid
        if self.valid_flag:
            valid_monitors = {key: [] for key in self.valid_monitors_dict.keys()}
            for batch_start, batch_end in self.valid_batches:
                _outs = raise_to_list(self.valid_monitor_function(batch_start, batch_end))
                current_monitors = zip(self.valid_monitors_dict.keys(), _outs)
                for name, val in current_monitors:
                    valid_monitors[name].append(val)

            # get the mean values for the batches
            current_mean_monitors = {key: numpy.mean(vals, 0) for key, vals in valid_monitors.items()}
            # log the mean values!
            log.info('Valid monitors: %s', str(current_mean_monitors))
            # send the values to their outservices
            for name, service in self.valid_monitors_outservice_dict.items():
                if name in current_mean_monitors and service:
                    service.write(current_mean_monitors[name], VALID)
            # if there is a plot, also send them over!
            if plot:
                plot.update_plots(epoch=self.epoch_counter, monitors=current_mean_monitors)

        #test
        if self.test_flag:
            test_monitors = {key: [] for key in self.test_monitors_dict.keys()}
            for batch_start, batch_end in self.test_batches:
                _outs = raise_to_list(self.test_monitor_function(batch_start, batch_end))
                current_monitors = zip(self.test_monitors_dict.keys(), _outs)
                for name, val in current_monitors:
                    test_monitors[name].append(val)

            # get the mean values for the batches
            current_mean_monitors = {key: numpy.mean(vals, 0) for key, vals in test_monitors.items()}
            # log the mean values!
            log.info('Test monitors: %s', str(current_mean_monitors))
            # send the values to their outservices
            for name, service in self.test_monitors_outservice_dict.items():
                if name in current_mean_monitors and service:
                    service.write(current_mean_monitors[name], TEST)
            # if there is a plot, also send them over!
            if plot:
                plot.update_plots(epoch=self.epoch_counter, monitors=current_mean_monitors)

        # check for early stopping on train costs
        cost = numpy.sum(train_costs)
        if cost < self.best_cost * self.early_stop_threshold:
            self.patience = 0
            self.best_cost = cost
            # save the parameters that made it the best
            self.best_params = get_shared_values(self.params)
        else:
            self.patience += 1

        # check for stopping either from n_epochs or from threshold/patience
        stop = False
        if self.epoch_counter >= self.n_epoch:
            log.info("Stopping (reached max number of epochs)...")
            stop = True
        if self.patience >= self.early_stop_length:
            log.info("Stopping early (reached stop threshold)...")
            stop = True

        timing = time.time() - t
        self.times.append(timing)

        log.info('time: ' + make_time_units_string(timing))

        log.debug('remaining time: ' +
                  make_time_units_string((self.n_epoch - self.epoch_counter) * numpy.mean(self.times)))

        if (self.epoch_counter % self.save_frequency) == 0:
            #save params
            self.model.save_params('trained_epoch_' + str(self.epoch_counter) + '.pkl')

        # ANNEAL!
        if not stop:
            # perform the appropriate decay on the decay functions/parameters for this optimizer and model
            for decay_param in self.get_decay_params():
                decay_param.decay()

        # reset the switches
        if len(self.noise_switches) > 0:
            [switch.set_value(val) for switch, val in zip(self.noise_switches, switch_vals)]

        # return whether or not to stop this epoch
        return stop
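The noise-switch handling in this version follows a save/flip/restore pattern: remember the current switch values, set them to 1.0 while training so dropout-style noise is active, set them to 0.0 before the validation/test passes, and put the saved values back at the end of the epoch. A minimal sketch of that pattern, with a plain Switch class standing in for the Theano shared variables (the class and its starting values are hypothetical):

class Switch(object):
    # Hypothetical stand-in for a Theano shared variable with get_value/set_value.
    def __init__(self, value):
        self._value = value

    def get_value(self):
        return self._value

    def set_value(self, value):
        self._value = value

noise_switches = [Switch(0.5), Switch(0.3)]

# 1) remember the current values and turn noise ON for training
switch_vals = [s.get_value() for s in noise_switches]
for s in noise_switches:
    s.set_value(1.0)
# ... run the f_learn training batches here ...

# 2) turn noise OFF before the valid/test monitor passes
for s in noise_switches:
    s.set_value(0.0)
# ... run the valid/test monitor functions here ...

# 3) restore the original values at the end of the epoch
for s, val in zip(noise_switches, switch_vals):
    s.set_value(val)
print([s.get_value() for s in noise_switches])  # [0.5, 0.3]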