def fit(self, trX, trY, n_iter=1):
    out_shape = self.model[-1].out_shape
    n = 0.
    t = time()
    costs = []
    for e in range(n_iter):
        epoch_costs = []
        for xmb, ymb in self.iterator.iterXY(trX, trY):
            c = self._train(xmb, ymb)
            epoch_costs.append(c)
            n += len(ymb)
            if self.verbose >= 2:
                n_per_sec = n / (time() - t)
                n_left = len(trY) * n_iter - n
                time_left = n_left / n_per_sec
                sys.stdout.write("\rIter %d Seen %d samples Avg cost %0.4f Examples per second %d Time left %d seconds" % (e, n, np.mean(epoch_costs[-250:]), n_per_sec, time_left))
                sys.stdout.flush()
        costs.extend(epoch_costs)

        n_per_sec = n / (time() - t)
        n_left = len(trY) * n_iter - n
        time_left = n_left / n_per_sec
        status = "Iter %d Seen %d samples Avg cost %0.4f Examples per second %d Time elapsed %d seconds" % (e, n, np.mean(epoch_costs[-250:]), n_per_sec, time() - t)
        if self.verbose >= 2:
            sys.stdout.write("\r" + status)
            sys.stdout.flush()
            sys.stdout.write("\n")
        elif self.verbose == 1:
            print status
    return costs
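# For reference, the throughput and time-remaining figures printed above reduce to
# a few lines of rate arithmetic. A minimal standalone sketch of the same
# computation; the names below are illustrative only and not part of the class.
from time import time, sleep

start = time()
seen = 0.
total = 10000.  # stands in for len(trY) * n_iter

sleep(0.01)  # stands in for the time spent training on one minibatch
seen += 64   # minibatch of 64 examples processed
examples_per_sec = seen / (time() - start)
seconds_left = (total - seen) / examples_per_sec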
def fit(self, trX, trY, batch_size=64, n_epochs=1, len_filter=LenFilter(),
        snapshot_freq=1, path=None):
    """Train model on given training examples and return the list of costs after each minibatch is processed.

    Args:
      trX (list) -- Inputs
      trY (list) -- Outputs
      batch_size (int, optional) -- number of examples in a minibatch (default 64)
      n_epochs (int, optional) -- number of epochs to train for (default 1)
      len_filter (object, optional) -- object to filter training examples by length (default LenFilter())
      snapshot_freq (int, optional) -- number of epochs between saving model snapshots (default 1)
      path (str, optional) -- prefix of path where model snapshots are saved.
        If None, no snapshots are saved (default None)

    Returns:
      list -- costs of model after processing each minibatch
    """
    if len_filter is not None:
        trX, trY = len_filter.filter(trX, trY)
    trY = standardize_targets(trY, cost=self.cost)

    n = 0.
    stats = []
    t = time()
    costs = []
    for e in range(n_epochs):
        epoch_costs = []
        for xmb, ymb in self.iterator.iterXY(trX, trY):
            c = self._train(xmb, ymb)
            epoch_costs.append(c)
            n += len(ymb)
            if self.verbose >= 2:
                n_per_sec = n / (time() - t)
                n_left = len(trY) - n % len(trY)
                time_left = n_left / n_per_sec
                sys.stdout.write("\rEpoch %d Seen %d samples Avg cost %0.4f Time left %d seconds" % (e, n, np.mean(epoch_costs[-250:]), time_left))
                sys.stdout.flush()
        costs.extend(epoch_costs)

        status = "Epoch %d Seen %d samples Avg cost %0.4f Time elapsed %d seconds" % (e, n, np.mean(epoch_costs[-250:]), time() - t)
        if self.verbose >= 2:
            sys.stdout.write("\r" + status)
            sys.stdout.flush()
            sys.stdout.write("\n")
        elif self.verbose == 1:
            print status

        if path and e % snapshot_freq == 0:
            save(self, "{0}.{1}".format(path, e))
    return costs
def fit(self, trX, trY, batch_size=64, n_epochs=1, len_filter=LenFilter(),
        snapshot_freq=1, path=None, callback=None, callback_freq=1):
    """Train model on given training examples and return the list of costs after each minibatch is processed.

    Args:
      trX (list) -- Inputs
      trY (list) -- Outputs
      batch_size (int, optional) -- number of examples in a minibatch (default 64)
      n_epochs (int, optional) -- number of epochs to train for (default 1)
      len_filter (object, optional) -- object to filter training examples by length (default LenFilter())
      snapshot_freq (int, optional) -- number of epochs between saving model snapshots (default 1)
      path (str, optional) -- prefix of path where model snapshots are saved.
        If None, no snapshots are saved (default None)
      callback (callable, optional) -- function called with the current epoch number every
        callback_freq epochs; it may raise RNN.EarlyStoppingException to stop training early (default None)
      callback_freq (int, optional) -- number of epochs between callback invocations (default 1)

    Returns:
      list -- costs of model after processing each minibatch
    """
    if len_filter is not None:
        trX, trY = len_filter.filter(trX, trY)
    trY = standardize_targets(trY, cost=self.cost)

    n = 0.
    stats = []
    t = time()
    costs = []
    for e in range(n_epochs):
        epoch_costs = []
        for xmb, ymb in self.iterator.iterXY(trX, trY):
            if not self.is_padseq_output:
                c = self._train(xmb, ymb)
            else:
                # Padded-sequence targets arrive together with their pad sizes
                # so the cost can be computed on the unpadded positions.
                ymb, padsizes = ymb
                c = self._train(xmb, ymb, padsizes)
            epoch_costs.append(c)
            n += len(ymb)
            if self.verbose >= 2:
                n_per_sec = n / (time() - t)
                n_left = len(trY) - n % len(trY)
                time_left = n_left / n_per_sec
                sys.stdout.write("\rEpoch %d Seen %d samples Avg cost %0.4f Time left %d seconds" % (e, n, np.mean(epoch_costs[-250:]), time_left))
                sys.stdout.flush()
        costs.extend(epoch_costs)

        status = "Epoch %d Seen %d samples Avg cost %0.4f Time elapsed %d seconds" % (e, n, np.mean(epoch_costs[-250:]), time() - t)
        if self.verbose >= 2:
            sys.stdout.write("\r" + status)
            sys.stdout.flush()
            sys.stdout.write("\n")
        elif self.verbose == 1:
            print status

        if path and e % snapshot_freq == 0:
            save(self, "{0}.{1}".format(path, e))

        if callback is not None and e % callback_freq == 0:
            try:
                callback(e)
            except RNN.EarlyStoppingException:
                break
    return costs
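# A sketch of how this final version of fit() might be driven from calling code,
# including the early-stopping hook it supports. The model object, the evaluate()
# helper, and the data variables (trX, trY, vaX, vaY) are hypothetical stand-ins,
# not part of the code above; only fit()'s signature and RNN.EarlyStoppingException
# come from the method itself.
best = {'cost': float('inf'), 'bad_epochs': 0}

def stop_when_stalled(epoch):
    cost = evaluate(model, vaX, vaY)  # hypothetical validation helper
    if cost < best['cost']:
        best['cost'], best['bad_epochs'] = cost, 0
    else:
        best['bad_epochs'] += 1
    if best['bad_epochs'] >= 3:  # three epochs without improvement
        raise RNN.EarlyStoppingException()

costs = model.fit(trX, trY,
                  n_epochs=20,
                  snapshot_freq=5,
                  path='snapshots/model',  # writes snapshots/model.0, snapshots/model.5, ...
                  callback=stop_when_stalled,
                  callback_freq=1)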