Example #1
    def on_epoch_end(self, epoch, logs={}):
        filepath = self.filepath.format(epoch=epoch, **logs)
        if self.save_best_only:
            current = logs.get(self.monitor)
            if current is None:
                warnings.warn("Can save best model only with %s available, skipping." % (self.monitor), RuntimeWarning)
            else:
                if current < self.best:
                    if self.verbose > 0:
                        print("Epoch %05d: %s improved from %0.5f to %0.5f, saving model to %s"
                              % (epoch, self.monitor, self.best, current, filepath))
                    self.best = current
                    self.model.save_weights(filepath, overwrite=True)

                    # XD: re-evaluate on the train and validation sets whenever a
                    # new best checkpoint is saved, and stash the results on the model.
                    from examples.pm25.test import test_model  # XD
                    train_res = test_model(self.model, dataset='train', show_details=False)
                    valid_res = test_model(self.model, dataset='valid')
                    self.model.epoch = epoch + 1
                    self.model.train_result = train_res
                    self.model.valid_result = valid_res
                else:
                    if self.verbose > 0:
                        print("Epoch %05d: %s did not improve" % (epoch, self.monitor))

        else:
            if self.verbose > 0:
                print("Epoch %05d: saving model to %s" % (epoch, filepath))
            self.model.save_weights(filepath, overwrite=True)
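
The callback above customizes the "save best only" behaviour of a checkpoint callback: it saves weights only when the monitored metric improves, then re-evaluates with the project-specific test_model helper. A minimal sketch of how such a checkpoint is normally attached to training, using the stock keras.callbacks.ModelCheckpoint and the Keras 1.x-era API (nb_epoch, overwrite) that this snippet appears to target; the toy model, data, and filepath pattern below are illustrative only:

import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.callbacks import ModelCheckpoint

# Toy data and model, purely illustrative.
X = np.random.rand(200, 10)
y = np.random.rand(200, 1)
model = Sequential()
model.add(Dense(1, input_dim=10))
model.compile(optimizer='sgd', loss='mse')

# Save weights only when the monitored validation loss improves,
# mirroring the save_best_only branch of on_epoch_end above.
checkpoint = ModelCheckpoint('weights.{epoch:02d}-{val_loss:.4f}.hdf5',
                             monitor='val_loss',
                             save_best_only=True,
                             verbose=1)

model.fit(X, y, validation_split=0.2, nb_epoch=10, batch_size=32,
          callbacks=[checkpoint])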
Example #2
    def on_epoch_end(self, epoch, logs={}):
        filepath = self.filepath.format(epoch=epoch, **logs)
        if self.save_best_only:
            current = logs.get(self.monitor)
            if current is None:
                warnings.warn(
                    "Can save best model only with %s available, skipping." %
                    (self.monitor), RuntimeWarning)
            else:
                if current < self.best:
                    if self.verbose > 0:
                        print(
                            "Epoch %05d: %s improved from %0.5f to %0.5f, saving model to %s"
                            % (epoch, self.monitor, self.best, current,
                               filepath))
                    self.best = current
                    self.model.save_weights(filepath, overwrite=True)

                    #XD
                    from examples.pm25.test import test_model  #XD
                    train_res = test_model(self.model,
                                           dataset='train',
                                           show_details=False)
                    valid_res = test_model(self.model, dataset='valid')
                    self.model.epoch = epoch + 1
                    self.model.train_result = train_res
                    self.model.valid_result = valid_res
                else:
                    if self.verbose > 0:
                        print("Epoch %05d: %s did not improve" %
                              (epoch, self.monitor))

        else:
            if self.verbose > 0:
                print("Epoch %05d: saving model to %s" % (epoch, filepath))
            self.model.save_weights(filepath, overwrite=True)
Example #3
    def _fit(self, f, ins, out_labels=[], batch_size=128, nb_epoch=100, verbose=1, callbacks=[],
             val_f=None, val_ins=None, monitor_f=None, monitor_labels=[], shuffle=True, metrics=[]):
        '''
            Abstract fit function for f(*ins). Assumes that f returns a list, labelled by out_labels.
        '''
        do_validation = False
        if val_f and val_ins:
            do_validation = True
            if verbose:
                print("Train on %d samples, validate on %d samples" % (len(ins[0]), len(val_ins[0])))
        #XD
        do_monitoring = False
        if monitor_f:
            do_monitoring = True

        nb_train_sample = len(ins[0])
        index_array = np.arange(nb_train_sample)

        history = cbks.History()
        if verbose:
            callbacks = [history, cbks.BaseLogger()] + callbacks
        else:
            callbacks = [history] + callbacks
        callbacks = cbks.CallbackList(callbacks)

        callbacks._set_model(self)
        callbacks._set_params({
            'batch_size': batch_size,
            'nb_epoch': nb_epoch,
            'nb_sample': nb_train_sample,
            'verbose': verbose,
            'do_validation': do_validation,
            'metrics': metrics,
        })
        callbacks.on_train_begin()

        #XD
        from examples.pm25.test import test_model  #XD
#        from examples.pm25.test_pm25 import data, targets, targets_mean  #XD
        print('Before training:')
        test_model(self, dataset='train')
        test_model(self, dataset='valid')
#        print(self.layers[-1].U_c.get_value(), self.layers[-1].U_f.get_value(), self.layers[-1].b_f.get_value())
#        val_outs = self._test_loop(val_f, val_ins, batch_size=batch_size, verbose=0)
#        if type(val_outs) != list:
#            val_outs = [val_outs]
#        # same labels assumed
#        epoch_logs = {}
#        for l, o in zip(out_labels, val_outs):
#            epoch_logs['val_' + l] = o
#        print(epoch_logs)
                            
        self.stop_training = False
        for epoch in range(nb_epoch):
            callbacks.on_epoch_begin(epoch)
            if shuffle == 'batch':
                index_array = batch_shuffle(index_array, batch_size)
            elif shuffle:
                np.random.shuffle(index_array)

            batches = make_batches(nb_train_sample, batch_size)
            for batch_index, (batch_start, batch_end) in enumerate(batches):
                batch_ids = index_array[batch_start:batch_end]
                try:
                    ins_batch = slice_X(ins, batch_ids)
                except TypeError as err:
                    raise Exception('TypeError while preparing batch. '
                                    'If using HDF5 input data, pass shuffle="batch".\n')

                batch_logs = {}
                batch_logs['batch'] = batch_index
                batch_logs['size'] = len(batch_ids)
                callbacks.on_batch_begin(batch_index, batch_logs)
                outs = f(*ins_batch)
                if type(outs) != list:
                    outs = [outs]
                for l, o in zip(out_labels, outs):
                    batch_logs[l] = o

                callbacks.on_batch_end(batch_index, batch_logs)

                epoch_logs = {}
                if batch_index == len(batches) - 1:  # last batch
                    # validation
                    if do_validation:
                        # replace with self._evaluate
                        val_outs = self._test_loop(val_f, val_ins, batch_size=batch_size, verbose=0)
                        if type(val_outs) != list:
                            val_outs = [val_outs]
                        # same labels assumed
                        for l, o in zip(out_labels, val_outs):
                            epoch_logs['val_' + l] = o
                            
                    # monitoring XD
#                    if do_monitoring:
#                        monitor_logs = OrderedDict()
#                        monitor_outs = self._test_loop(monitor_f, ins, batch_size=batch_size, verbose=0, shuffle=True)
#                        if type(monitor_outs) != list:
#                            monitor_outs = [monitor_outs]
#                        for l, o in zip(monitor_labels, monitor_outs):
#                            monitor_logs['train_' + l] = o
#                        monitor_val_outs = self._test_loop(monitor_f, val_ins, batch_size=batch_size, verbose=0, shuffle=True)
#                        if type(monitor_val_outs) != list:
#                            monitor_val_outs = [monitor_val_outs]
#                        for l, o in zip(monitor_labels, monitor_val_outs):
#                            monitor_logs['val_' + l] = o
#                        print(monitor_logs)

            callbacks.on_epoch_end(epoch, epoch_logs)
            if self.stop_training:
                break

        callbacks.on_train_end()
        return history
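
The _fit loop above drives training through the callback protocol: on_train_begin once, then per epoch on_epoch_begin, per batch on_batch_begin/on_batch_end, validation results injected into epoch_logs on the last batch, and finally on_epoch_end and on_train_end. A minimal framework-free sketch of that dispatch pattern follows; make_batches is reimplemented here for illustration (in the snippet it comes from the Keras training internals), and the callback class and training step are placeholders, not part of the original code:

import numpy as np

def make_batches(size, batch_size):
    # Split indices 0..size-1 into (start, end) slices of at most batch_size.
    nb_batch = int(np.ceil(size / float(batch_size)))
    return [(i * batch_size, min(size, (i + 1) * batch_size))
            for i in range(nb_batch)]

class PrintingCallback(object):
    # Stand-in for cbks.CallbackList: just prints what _fit would report.
    def on_train_begin(self): print('train begin')
    def on_epoch_begin(self, epoch): pass
    def on_batch_end(self, batch, logs): pass
    def on_epoch_end(self, epoch, logs): print('epoch %d: %s' % (epoch, logs))
    def on_train_end(self): print('train end')

def fit_loop(train_step, nb_sample, batch_size, nb_epoch, callback):
    index_array = np.arange(nb_sample)
    callback.on_train_begin()
    for epoch in range(nb_epoch):
        callback.on_epoch_begin(epoch)
        np.random.shuffle(index_array)           # shuffle=True branch
        batches = make_batches(nb_sample, batch_size)
        epoch_logs = {}
        for batch_index, (start, end) in enumerate(batches):
            batch_ids = index_array[start:end]
            loss = train_step(batch_ids)         # plays the role of f(*ins_batch)
            callback.on_batch_end(batch_index,
                                  {'loss': loss, 'size': len(batch_ids)})
            if batch_index == len(batches) - 1:  # last batch: fill epoch-level logs
                epoch_logs['loss'] = loss
        callback.on_epoch_end(epoch, epoch_logs)
    callback.on_train_end()

# Example usage with a dummy training step that just returns the batch size.
fit_loop(lambda ids: float(len(ids)), nb_sample=10, batch_size=4,
         nb_epoch=2, callback=PrintingCallback())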