def evaluate(self, Z, Y):
    """Return the mean loss over (Z, Y) plus the regularization term."""
    n, loss = 0, 0.
    minibatch_size = np.min((10000, Z.shape[0]))
    for (Zb, Yb) in minibatches(minibatch_size, Z, Y, shuffle=False):
        n_ = Zb.shape[0]
        loss_ = self.loss_func(Zb, Yb)
        # Running (sample-weighted) average of the per-minibatch losses.
        loss = (n / (n + n_)) * loss + (n_ / (n + n_)) * loss_
        n += n_
    return loss + self.reg_func()
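# The `evaluate` variants and `fit` below iterate over a `minibatches`
# helper that is defined outside this section. A minimal sketch of the
# assumed behavior (this implementation is an illustration, not the
# original helper):
import numpy as np

def minibatches(minibatch_size, X, Y, shuffle=False):
    # Yield (Xb, Yb) chunks of at most `minibatch_size` rows, optionally
    # walking the rows in a shuffled order.
    idx = np.arange(X.shape[0])
    if shuffle:
        np.random.shuffle(idx)
    for start in range(0, X.shape[0], minibatch_size):
        batch = idx[start:start + minibatch_size]
        yield X[batch], Y[batch]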
def evaluate(self, Z, Y):
    """Return the mean loss and the accuracy over (Z, Y)."""
    n, nc, loss = 0, 0, 0.
    minibatch_size = np.min((10000, Z.shape[0]))
    for (Zb, Yb) in minibatches(minibatch_size, Z, Y, shuffle=False):
        (loss_, n_, nc_) = self.__calc_accuracy(Zb, Yb)
        # Running (sample-weighted) average of the per-minibatch losses.
        loss = (n / (n + n_)) * loss + (n_ / (n + n_)) * loss_
        n += n_
        nc += nc_
    acc = nc / n
    return loss, acc
def evaluate(self, Z, Y, eval_metric=None):
    """Return the mean loss and the accuracy (or `eval_metric`) over (Z, Y)."""
    n, nc, loss = 0, 0, 0.
    minibatch_size = np.min((10000, Z.shape[0]))
    preds = []
    for (Zb, Yb) in tqdm(minibatches(minibatch_size, Z, Y, shuffle=False),
                         desc='evaluate'):
        (loss_, n_, nc_, pred_proba) = self.__calc_accuracy(Zb, Yb)
        preds.append(pred_proba)
        # Running (sample-weighted) average of the per-minibatch losses.
        loss = (n / (n + n_)) * loss + (n_ / (n + n_)) * loss_
        n += n_
        nc += nc_
    acc = nc / n
    preds = np.vstack(preds)
    if eval_metric is not None:
        # Replace the default accuracy with the user-supplied metric.
        acc = eval_metric(Y, preds)
    return loss, acc
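# `eval_metric` may be any callable of the form metric(Y, preds), where
# `preds` stacks the per-minibatch prediction probabilities. A hypothetical
# example, assuming Y is a vector of integer class labels and `preds` has
# one column per class:
def top1_accuracy(Y, preds):
    # Fraction of rows whose highest-probability class matches the label.
    return np.mean(np.argmax(preds, axis=1) == Y)

# Usage (the model name is illustrative):
# loss, acc = model.evaluate(Z, Y, eval_metric=top1_accuracy)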
def evaluate_eta(self, X, Y, eta):
    """Train for `eval_iters` updates with learning rate `eta` and return
    the resulting loss. Parameters and optimizer state are restored
    afterwards, so the call leaves the model unchanged."""
    self.save_params()
    self.optimizer.set_eta(eta)
    n_iters = 0
    eval_f = True
    while eval_f:
        for (Xb, Yb) in minibatches(self.minibatch_size, X, Y, shuffle=True):
            if n_iters >= self.eval_iters:
                eval_f = False
                break
            self.optimizer.update_func(Xb, Yb)
            n_iters += 1
    val = self.evaluate(X, Y)[0]
    self.load_params()
    self.optimizer.reset_func()
    return val
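# A hypothetical grid search for the learning rate built on `evaluate_eta`:
# each candidate is tried for `eval_iters` updates from the current
# parameters, and the one with the lowest loss is kept. The function name
# and the candidate grid are assumptions for illustration.
def search_eta(model, X, Y, candidates=(0.3, 0.1, 0.03, 0.01)):
    # Return the candidate learning rate with the lowest evaluated loss.
    return min(candidates, key=lambda eta: model.evaluate_eta(X, Y, eta))

# model.optimizer.set_eta(search_eta(model, X, Y))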
def fit(self, X, Y, max_epoch, Xv=None, Yv=None, early_stop=-1,
        use_best_param=False):
    """
    Run the algorithm for up to (max_epoch) epochs on training data X.

    Arguments
    ---------
    X : Numpy array. Training data.
    Y : Numpy array. Training labels.
    max_epoch : Integer. Maximum number of epochs.
    Xv : Numpy array. Validation data.
    Yv : Numpy array. Validation labels.
    early_stop : Integer. Stop if the training loss has not improved
        for this many epochs (disabled when <= 0).
    use_best_param : Boolean. Restore the parameters with the best
        validation loss after training.
    """
    logger.log(
        self.log_level,
        '{0:<5}{1:^26}{2:>5}'.format('-' * 5, 'Training classifier',
                                     '-' * 5))
    best_val_loss = 1e+10
    best_val_acc = 0.
    best_param = None
    best_val_epoch = 0
    best_epoch = 0
    val_results = []
    total_time = 0.
    best_loss = 1e+10
    monitor = Xv is not None
    self.save_params()
    success = False
    init_train_loss, init_train_acc = self.evaluate(X, Y)
    while success is False:
        success = True
        for e in range(max_epoch):
            stime = time.time()
            for (Xb, Yb) in minibatches(self.minibatch_size, X, Y,
                                        shuffle=True):
                self.optimizer.update_func(Xb, Yb)
            etime = time.time()
            total_time += etime - stime
            train_loss, train_acc = self.evaluate(X, Y)
            # Halve the learning rate and restart if training diverged.
            if np.isnan(train_loss) or np.isinf(train_loss) \
                    or (2 * init_train_loss + 1) <= train_loss:
                eta = self.optimizer.get_eta() / 2.
                self.optimizer.set_eta(eta)
                success = False
                self.load_params()
                self.optimizer.reset_func()
                logger.log(self.log_level,
                           'the learning process diverged')
                logger.log(
                    self.log_level,
                    'retrain a model with a smaller learning rate: {0}'.
                    format(eta))
                break
            logger.log(
                self.log_level,
                'epoch: {0:4}, time: {1:>13.1f} sec'.format(e, total_time))
            logger.log(
                self.log_level,
                'train_loss: {0:5.4f}, train_acc: {1:4.3f}'.format(
                    train_loss, train_acc))
            if monitor:
                val_loss, val_acc = self.evaluate(Xv, Yv)
                logger.log(
                    self.log_level,
                    'val_loss  : {0:5.4f}, val_acc  : {1:4.3f}'.format(
                        val_loss, val_acc))
                val_results.append(({'epoch': e + 1}, val_loss, val_acc))
                # Track the parameters with the best validation loss.
                if val_loss < best_val_loss:
                    best_val_epoch = e + 1
                    best_val_loss = val_loss
                    best_val_acc = val_acc
                    best_param = self.get_params(real_f=True)
            # Early stopping on the training loss.
            if train_loss < 0.999 * best_loss:
                best_loss = train_loss
                best_epoch = e
            if early_stop > 0 and e - best_epoch >= early_stop:
                success = True
                break
    if monitor and use_best_param:
        # Skip the restore when the final epoch was already the best one.
        if best_val_epoch < max_epoch:
            self.set_params(best_param)
    return val_results
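# A hypothetical training call with validation monitoring; `clf`, the data
# splits, and the numbers are illustrative assumptions, not part of the
# original code:
#
# val_results = clf.fit(X_train, Y_train, max_epoch=100,
#                       Xv=X_val, Yv=Y_val,
#                       early_stop=10, use_best_param=True)
# for info, val_loss, val_acc in val_results:
#     print(info['epoch'], val_loss, val_acc)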
def fit(self, X, Y, max_epoch, early_stop=-1):
    """
    Run the algorithm for up to (max_epoch) epochs on training data X.

    Arguments
    ---------
    X : Numpy array. Training data.
    Y : Numpy array. Training labels.
    max_epoch : Integer. Maximum number of epochs.
    early_stop : Integer. Stop if the training loss has not improved
        for this many epochs (disabled when <= 0).
    """
    logger.log(
        self.log_level,
        '{0:<5}{1:^26}{2:>5}'.format('-' * 5, 'Training regressor',
                                     '-' * 5))
    total_time = 0.
    self.__save_param()
    success = False
    init_train_loss = self.evaluate(X, Y)
    best_loss = 1e+10
    best_epoch = 0
    while success is False:
        success = True
        for e in range(max_epoch):
            stime = time.time()
            for (Xb, Yb) in minibatches(self.minibatch_size, X, Y,
                                        shuffle=True):
                self.optimizer.update_func(Xb, Yb)
            etime = time.time()
            total_time += etime - stime
            train_loss = self.evaluate(X, Y)
            # Halve the learning rate and restart if training diverged.
            if np.isnan(train_loss) or np.isinf(train_loss) \
                    or (2 * init_train_loss + 1) <= train_loss:
                eta = self.optimizer.get_eta() / 2.
                self.optimizer.set_eta(eta)
                success = False
                self.__load_param()
                self.optimizer.reset_func()
                logger.log(self.log_level,
                           'the learning process diverged')
                logger.log(
                    self.log_level,
                    'retrain a model with a smaller learning rate: {0}'.
                    format(eta))
                break
            logger.log(
                self.log_level,
                'epoch: {0:4}, time: {1:>13.1f} sec'.format(e, total_time))
            logger.log(self.log_level,
                       'train_loss: {0:5.4f}'.format(train_loss))
            # Early stopping on the training loss.
            if train_loss < 0.999 * best_loss:
                best_loss = train_loss
                best_epoch = e
            if early_stop > 0 and e - best_epoch >= early_stop:
                success = True
                break
    return None