import numpy as np
import gnumpy as gp
# draw_slices and progress are assumed to be provided by the surrounding
# package (batch iteration and progress display helpers).


def map(X, batch_size, map_func, caption="", force_output_type=None):
    """Applies map_func to X in batches of batch_size rows and assembles the
    per-batch results into one output array. The output is allocated as numpy
    or gnumpy, either as forced by force_output_type or following the type of
    X. Note that this helper shadows the built-in map within its module."""
    # choose the array module for the output
    if force_output_type is not None:
        if force_output_type == 'gnumpy':
            xp = gp
        elif force_output_type == 'numpy':
            xp = np
        else:
            assert False, "force_output_type must be either numpy or gnumpy"
    else:
        if isinstance(X, gp.garray):
            xp = gp
        else:
            xp = np

    # total number of batches, rounded up because the last batch may be smaller
    n_batches = (X.shape[0] + batch_size - 1) // batch_size

    ms = None
    for b, x in enumerate(draw_slices(X, batch_size, kind='sequential',
                                      samples_are='rows', stop=True,
                                      last_batch_may_be_smaller=True)):
        progress.status(b, n_batches, caption)
        m = map_func(x)
        if ms is None:
            # allocate the output on the first batch: one entry per sample of
            # X, with the remaining dimensions taken from the mapped batch
            assert 1 <= m.ndim <= 4, "%d dimensions are not supported" % m.ndim
            ms = xp.zeros((X.shape[0],) + m.shape[1:])
        # write the mapped batch into its slice of the output
        ms[b * batch_size:(b + 1) * batch_size] = m
    progress.done()
    return ms
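# A minimal usage sketch (not from the original source; the array and the
# lambda below are hypothetical). map_func is assumed to accept a batch of
# rows and to return one output row per input row:
#
#   X = np.random.randn(1000, 20)
#   Y = map(X, batch_size=100, map_func=lambda batch: batch ** 2,
#           caption="squaring")
#   # Y has shape (1000, 20) and holds the elementwise squares of X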
def predict_multicurve(self, predictor, force, skin, valid, what='skin'):
    """Predicts the skin curves from force (or the force curves from skin,
    selected by what) for all samples, feeding the predictor one trimmed curve
    at a time. If the predictor returns a tuple, its second element is
    interpreted as per-step confidences (1d) or per-step probabilities (2d)
    and returned alongside the predictions."""
    assert what in ['skin', 'force']

    # output array for the predicted modality
    if what == 'skin':
        predicted = np.zeros(skin.shape)
    else:
        predicted = np.zeros(force.shape)
    predicted_conf = np.zeros(predicted.shape)
    using_conf = False
    predicted_prob = None
    using_prob = False

    for smpl in range(force.shape[1]):
        status(smpl, force.shape[1], "Predicting")

        # restrict the sample's curves to their valid time steps
        if skin.ndim == 3:
            f, s = self.trim_to_valid(force[:, smpl], skin[:, :, smpl],
                                      valid[:, smpl])
        else:
            f, s = self.trim_to_valid(force[:, smpl], skin[:, smpl],
                                      valid[:, smpl])

        # predict skin from force, or force from skin
        if what == 'skin':
            pp = predictor(f)
        else:
            pp = predictor(s)

        if isinstance(pp, tuple):
            # predictor returned (prediction, confidences or probabilities)
            predicted[0:pp[0].shape[0], smpl] = pp[0]
            if pp[1].ndim == 1:
                predicted_conf[0:pp[1].shape[0], smpl] = pp[1]
                using_conf = True
            elif pp[1].ndim == 2:
                if predicted_prob is None:
                    predicted_prob = np.zeros((pp[1].shape[0],
                                               predicted.shape[0],
                                               predicted.shape[1]))
                predicted_prob[:, 0:pp[1].shape[1], smpl] = pp[1]
                using_prob = True
        else:
            predicted[0:pp.shape[0], smpl] = pp
    done()

    if using_prob:
        return predicted, predicted_prob
    elif using_conf:
        return predicted, predicted_conf
    else:
        return predicted
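# A hypothetical usage sketch (instance name, array shapes and models are
# assumptions, not from the original source): force and skin hold one curve
# per sample along their last axis, valid marks the usable time steps, and the
# predictor maps one trimmed force curve to a skin curve (or vice versa). With
# a predictor that also returns confidences, a second array is returned:
#
#   pred = ds.predict_multicurve(skin_model, force, skin, valid, what='skin')
#   pred, conf = ds.predict_multicurve(skin_model_with_conf, force, skin,
#                                      valid, what='skin')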
def add(self, iter, pars, trn_loss, val_loss, tst_loss):
    # keep track of best results so far
    if val_loss < self.best_val_loss - self.min_improvement:
        self.best_iter = iter
        self.best_val_loss = val_loss
        self.best_tst_loss = tst_loss
        if isinstance(pars, gp.garray):
            self.best_pars = gp.garray(pars, copy=True)
        else:
            self.best_pars = np.copy(pars)
        self.last_val_improvement = iter

    # termination criteria
    if (self.max_missed_val_improvements is not None and
            iter - self.last_val_improvement > self.max_missed_val_improvements):
        self.should_terminate = True
    if self.min_iters is not None and iter < self.min_iters:
        self.should_terminate = False
    if self.desired_loss is not None and val_loss <= self.desired_loss:
        self.should_terminate = True
    if self.max_iters is not None and iter >= self.max_iters:
        self.should_terminate = True

    # store current losses
    self.history = np.hstack((self.history,
                              [[iter], [trn_loss], [val_loss], [tst_loss]]))

    # display progress
    if self.show_progress:
        progress.status(iter, caption=
            "training: %9.5f validation: %9.5f (best: %9.5f) test: %9.5f" %
            (trn_loss, val_loss, self.best_val_loss, tst_loss))

    # termination by user
    if get_key() == "q":
        print
        print "Termination by user."
        self.should_terminate = True
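# A hypothetical usage sketch (tracker construction, optimizer_step and the
# loss functions are assumptions, not from the original source): add() is
# called once per training iteration, and the loop stops as soon as one of
# the termination criteria fires:
#
#   it = 0
#   while not tracker.should_terminate:
#       pars = optimizer_step(pars)
#       tracker.add(it, pars, trn_loss(pars), val_loss(pars), tst_loss(pars))
#       it += 1
#   best_pars = tracker.best_pars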