def _test_loop(self, f, ins, batch_size=128, verbose=0):
    '''Abstract method to loop over some data in batches.

    Returns the scalar outputs of `f`, averaged over all samples
    (each batch is weighted by its size).
    '''
    nb_sample = len(ins[0])
    outs = []
    if verbose == 1:
        progbar = Progbar(target=nb_sample)
    batches = make_batches(nb_sample, batch_size)
    index_array = np.arange(nb_sample)
    for batch_index, (batch_start, batch_end) in enumerate(batches):
        batch_ids = index_array[batch_start:batch_end]
        ins_batch = slice_X(ins, batch_ids)

        batch_outs = f(ins_batch)
        if isinstance(batch_outs, list):
            if batch_index == 0:
                # one accumulator per output
                for _ in batch_outs:
                    outs.append(0.)
            for i, batch_out in enumerate(batch_outs):
                # weight each batch by its size so a smaller
                # final batch does not skew the average
                outs[i] += batch_out * len(batch_ids)
        else:
            if batch_index == 0:
                outs.append(0.)
            outs[0] += batch_outs * len(batch_ids)

        if verbose == 1:
            progbar.update(batch_end)
    for i in range(len(outs)):
        outs[i] /= nb_sample
    return outs
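# Illustrative sketch, not part of the original source: the aggregation that
# _test_loop performs can be reproduced standalone. Each batch output is
# weighted by the batch size before being summed, and the sums are divided by
# the total sample count at the end, so a smaller final batch does not skew
# the average. `eval_fn` below is a hypothetical stand-in for the compiled
# test function `f`.
import numpy as np


def weighted_batch_average(eval_fn, nb_sample, batch_size):
    # accumulate size-weighted sums of each scalar output, then normalize
    sums = None
    for start in range(0, nb_sample, batch_size):
        end = min(start + batch_size, nb_sample)
        outs = eval_fn(start, end)  # e.g. [loss, accuracy]
        if sums is None:
            sums = [0.] * len(outs)
        for i, out in enumerate(outs):
            sums[i] += out * (end - start)
    return [s / nb_sample for s in sums]


if __name__ == '__main__':
    # loss is 1.0 on the first 100 samples and 0.0 on the remaining 30,
    # so the exact mean over 130 samples is 100 / 130 ~= 0.769
    def eval_fn(start, end):
        losses = np.concatenate([np.ones(100), np.zeros(30)])[start:end]
        return [float(losses.mean())]

    print(weighted_batch_average(eval_fn, nb_sample=130, batch_size=128))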
def _predict_loop(self, f, ins, batch_size=128, verbose=0):
    '''Abstract method to loop over some data in batches.

    Returns a list of prediction arrays, one per model output,
    with one row per input sample.
    '''
    nb_sample = len(ins[0])
    outs = []
    if verbose == 1:
        progbar = Progbar(target=nb_sample)
    batches = make_batches(nb_sample, batch_size)
    index_array = np.arange(nb_sample)
    for batch_index, (batch_start, batch_end) in enumerate(batches):
        batch_ids = index_array[batch_start:batch_end]
        ins_batch = slice_X(ins, batch_ids)

        batch_outs = f(ins_batch)
        if not isinstance(batch_outs, list):
            batch_outs = [batch_outs]
        if batch_index == 0:
            # preallocate the output arrays from the first batch's shapes
            for batch_out in batch_outs:
                shape = (nb_sample,) + batch_out.shape[1:]
                outs.append(np.zeros(shape))
        for i, batch_out in enumerate(batch_outs):
            outs[i][batch_start:batch_end] = batch_out
        if verbose == 1:
            progbar.update(batch_end)
    return outs
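# Illustrative sketch, not part of the original source: the stitching pattern
# used by _predict_loop. The output array is preallocated from the first
# batch's per-sample shape, and each batch writes its predictions into the
# corresponding slice, so the result has one row per input sample regardless
# of the batch size. `predict_fn` is a hypothetical stand-in for the compiled
# prediction function `f`.
import numpy as np


def batched_predict(predict_fn, X, batch_size=128):
    nb_sample = len(X)
    out = None
    for start in range(0, nb_sample, batch_size):
        end = min(start + batch_size, nb_sample)
        batch_out = predict_fn(X[start:end])
        if out is None:
            # preallocate using the per-sample output shape of the first batch
            out = np.zeros((nb_sample,) + batch_out.shape[1:])
        out[start:end] = batch_out
    return out


if __name__ == '__main__':
    X = np.random.rand(300, 4)
    preds = batched_predict(lambda x: x.sum(axis=1, keepdims=True), X)
    assert preds.shape == (300, 1)
    assert np.allclose(preds[:, 0], X.sum(axis=1))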
class BaseLogger(Callback):
    '''Callback that prints events to the standard output.

    This callback is automatically applied to every Keras model
    (it is the basis of the verbosity modes in models).
    '''
    def on_train_begin(self, logs={}):
        self.verbose = self.params['verbose']
        self.nb_epoch = self.params['nb_epoch']

    def on_epoch_begin(self, epoch, logs={}):
        if self.verbose:
            print('Epoch %d/%d' % (epoch + 1, self.nb_epoch))
            self.progbar = Progbar(target=self.params['nb_sample'],
                                   verbose=self.verbose)
        self.seen = 0
        self.totals = {}

    def on_batch_begin(self, batch, logs={}):
        if self.seen < self.params['nb_sample']:
            self.log_values = []

    def on_batch_end(self, batch, logs={}):
        batch_size = logs.get('size', 0)
        self.seen += batch_size

        for k, v in logs.items():
            if k in self.totals:
                self.totals[k] += v * batch_size
            else:
                self.totals[k] = v * batch_size

        for k in self.params['metrics']:
            if k in logs:
                self.log_values.append((k, logs[k]))

        # skip progbar update for the last batch;
        # will be handled by on_epoch_end
        if self.verbose and self.seen < self.params['nb_sample']:
            self.progbar.update(self.seen, self.log_values)

    def on_epoch_end(self, epoch, logs={}):
        for k in self.params['metrics']:
            if k in self.totals:
                self.log_values.append((k, self.totals[k] / self.seen))
            if k in logs:
                self.log_values.append((k, logs[k]))
        if self.verbose:
            self.progbar.update(self.seen, self.log_values)
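# Illustrative sketch, not part of the original source: the bookkeeping behind
# BaseLogger's epoch-level metric display. Each batch adds value * batch_size
# to the running totals and batch_size to the number of samples seen; dividing
# at the end of the epoch gives the exact per-sample mean even when the last
# batch is smaller than the others. The batch logs below are hypothetical.
if __name__ == '__main__':
    totals = {}
    seen = 0
    batch_logs = [{'size': 128, 'loss': 0.50},
                  {'size': 128, 'loss': 0.30},
                  {'size': 4, 'loss': 0.10}]
    for logs in batch_logs:
        batch_size = logs.get('size', 0)
        seen += batch_size
        for k, v in logs.items():
            if k != 'size':
                totals[k] = totals.get(k, 0.) + v * batch_size
    # (0.50 * 128 + 0.30 * 128 + 0.10 * 4) / 260 ~= 0.3954
    print('loss: %.4f' % (totals['loss'] / seen))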