Example #1
import numpy as np
from numpy import ndarray
from time import time
from typing import Callable, Optional

# Tensor, BatchIterator and Optimizer come from the snippet's own autodiff
# library; score_test is Example #3 below.
def learn(in_data: ndarray, out_data: ndarray, test_in_data: ndarray,
          test_out_data: ndarray, model_func: Callable[[Tensor, Tensor, bool], Tensor],
          loss_func: Callable[[Tensor, Tensor, bool], Tensor], optimizer: Optimizer,
          score_func: Optional[Callable[[ndarray, ndarray], float]] = None,
          batch_size: int = 100, epoch_number: int = 100):
    # Cast everything to float32 once, so all Tensors share the same dtype.
    input_data = in_data.astype(np.float32)
    output_data = out_data.astype(np.float32)
    test_input_data = test_in_data.astype(np.float32)
    test_output_data = test_out_data.astype(np.float32)
    train_loss_values = []
    test_loss_values = []
    test_score_values = []
    start = time()
    for i in range(epoch_number):
        bit = BatchIterator(input_data, output_data, batch_size)
        epoch_loss = 0
        for b_in, b_out in bit:
            x = Tensor(b_in)
            y = Tensor(b_out)
            model = model_func(x, y, True)   # forward pass in train mode
            loss = loss_func(y, model, True)
            epoch_loss += loss.data[0] / input_data.shape[0]
            optimizer.step(loss)             # backprop and parameter update
        if score_func is not None:
            test_loss, err_ratio = score_test(test_input_data, test_output_data,
                                              model_func, loss_func, score_func)
        else:
            err_ratio = 'N/A'
            test_loss = 'N/A'
        train_loss_values.append(epoch_loss)
        test_loss_values.append(test_loss)
        test_score_values.append(err_ratio)
        print("Epoch {0} train-loss: {1}, test-loss: {2}, score: {3}%".format(
            i, epoch_loss, test_loss, err_ratio))
    end = time()
    print("Execution time: {0}s".format(end - start))
    return train_loss_values, test_loss_values, test_score_values
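
A sketch of how learn might be wired up. SGD, LinearLayer, mse and error_ratio are assumed names for illustration; they are not shown on this page:

optimizer = SGD(learning_rate=0.01)        # assumed optimizer class
layer = LinearLayer((784, 10), optimizer)  # assumed layer class
train_losses, test_losses, scores = learn(
    x_train, y_train, x_test, y_test,
    model_func=lambda x, y, train: layer.compute(x, train),
    loss_func=lambda y, pred, train: mse(y, pred),  # assumed loss helper
    optimizer=optimizer,
    score_func=error_ratio,   # assumed scorer, sketched under Example #3
    batch_size=64, epoch_number=20)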
Example #2
    def step(self, loss: Tensor):
        loss.calc_gradients()    # backpropagate from the loss node
        self.t += 1              # global step counter for bias correction
        for i in range(len(self.parameters)):
            # Momentum: exponential running average of the raw gradients.
            self.m[i] = running_avg(self.m[i], self.beta, self.parameters[i].grad)
            # Optional bias correction compensates the zero initialisation of m.
            m_hat = (running_avg_bias_correction(self.m[i], self.beta, self.t)
                     if self.bias_correction else self.m[i])
            self.parameters[i].data -= self.learning_rate * m_hat
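
Neither helper is shown on this page. Implementations consistent with the call sites above (and with the batch-norm layer in Example #5, which reuses running_avg) could look like this sketch:

def running_avg(avg, beta, value):
    # Exponentially weighted average: avg_t = beta * avg_(t-1) + (1 - beta) * value
    return beta * avg + (1.0 - beta) * value

def running_avg_bias_correction(avg, beta, t):
    # Undo the bias toward zero of the first steps: avg_t / (1 - beta^t)
    return avg / (1.0 - beta ** t)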
Example #3
def score_test(input_data, out_data, model_func, loss_func, score_func):
    y = Tensor(out_data)
    # Forward pass in inference mode (train flag False), matching the
    # three-argument model_func signature used by learn().
    actual = model_func(Tensor(input_data), y, False)
    loss = loss_func(y, actual, False)
    err_ratio = score_func(actual.data, out_data)
    return loss.data[0] / input_data.shape[0], err_ratio
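
score_func maps the raw prediction array and the target array to a percentage. For one-hot classification targets, an error_ratio consistent with that contract could be (an illustrative sketch, not the library's code):

def error_ratio(pred: np.ndarray, target: np.ndarray) -> float:
    # Percentage of rows whose predicted argmax class differs from the target's.
    return 100.0 * float((pred.argmax(axis=1) != target.argmax(axis=1)).mean())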
Example #4
    def __init__(self, dim: Tuple[int, int], optimizer: Optional[Optimizer] = None):
        self.dim = dim
        # Small random weights, zero bias; diff=True marks both as trainable.
        self.w = Tensor(np.random.normal(0, 0.01, dim).astype(np.float32), diff=True)
        self.b = Tensor(np.zeros((1, dim[1]), dtype=np.float32), diff=True)
        self.parameters = [self.w, self.b]
        if optimizer is not None:
            self.optimizer = optimizer
            self.optimizer.add_parameters(self.parameters)
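
A note on the choices here: the weights start from a zero-mean normal with standard deviation 0.01, which keeps the initial pre-activations small, and the bias is a single zero row of shape (1, dim[1]) that broadcasts over the batch. Registering self.parameters with the optimizer is what lets the step() methods shown in the other examples update w and b in place.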
Example #5
    def compute(self, tensor_in: Tensor, train_mode=False):
        if train_mode:
            # Normalise with the current batch statistics and accumulate
            # running averages for later use at inference time.
            m = Tensor(mean(tensor_in.data), diff=False)
            v = Tensor(var(tensor_in.data, m.data), diff=False)
            self.ravg_mean = running_avg(self.ravg_mean, 0.9, m.data)
            self.ravg_var = running_avg(self.ravg_var, 0.9, v.data)
        else:
            # Inference: fall back to the accumulated running statistics.
            m = Tensor(self.ravg_mean)
            v = Tensor(self.ravg_var)
        # Batch normalisation: scale and shift the standardised input.
        x = (tensor_in - m) / op.sqrt(v + self.epsilon)
        y = self.gamma * x + self.beta
        return y
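
mean and var are not shown on this page either; per-feature batch statistics matching their call sites would be (a sketch):

def mean(data: np.ndarray) -> np.ndarray:
    # Per-feature mean over the batch dimension.
    return data.mean(axis=0, keepdims=True)

def var(data: np.ndarray, m: np.ndarray) -> np.ndarray:
    # Per-feature variance around the supplied mean.
    return ((data - m) ** 2).mean(axis=0, keepdims=True)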
Example #6
    def __init__(self, dim: Tuple[int, ...], optimizer: Optimizer):
        self.dim = dim
        # gamma (scale) starts at 1 and beta (shift) at 0, so the layer is
        # initially an identity transform of the normalised input.
        self.gamma = Tensor(np.expand_dims(np.ones(dim, dtype=np.float32), axis=0),
                            diff=True)
        self.beta = Tensor(np.expand_dims(np.zeros(dim, dtype=np.float32), axis=0),
                           diff=True)
        self.ravg_mean = np.zeros(dim, dtype=np.float32)
        self.ravg_var = np.zeros(dim, dtype=np.float32)
        self.epsilon = 1e-7
        # Register both learnable parameters with the optimizer.
        optimizer.add_parameters([self.gamma, self.beta])
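
Typical use of this layer together with the compute method from Example #5; BatchNorm is an assumed class name, not shown in the snippets, and h stands for an activations Tensor:

bn = BatchNorm((128,), optimizer)           # assumed class name
h_train = bn.compute(h, train_mode=True)    # batch stats, updates running averages
h_eval = bn.compute(h, train_mode=False)    # frozen running statistics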
Example #7
    def step(self, loss: Tensor):
        loss.calc_gradients()    # backpropagate from the loss node
        self.t += 1
        for i in range(len(self.parameters)):
            # RMSProp-style running average of the squared gradients.
            self.v[i] = running_avg_squared(self.v[i], self.beta,
                                            self.parameters[i].grad)
            v_hat = (running_avg_bias_correction(self.v[i], self.beta, self.t)
                     if self.bias_correction else self.v[i])
            # Step each parameter by the gradient scaled by the root of v_hat.
            self.parameters[i].data -= grad_square_delta(
                self.learning_rate, self.parameters[i].grad, v_hat, self.epsilon)
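
running_avg_squared and grad_square_delta are also undefined here; sketches consistent with the calls above (bias correction was sketched under Example #2):

def running_avg_squared(avg, beta, grad):
    # v_t = beta * v_(t-1) + (1 - beta) * grad^2
    return beta * avg + (1.0 - beta) * grad ** 2

def grad_square_delta(learning_rate, grad, v_hat, epsilon):
    # Per-parameter update: lr * grad / (sqrt(v_hat) + epsilon)
    return learning_rate * grad / (np.sqrt(v_hat) + epsilon)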
Example #8
def dropout(tensor_in: Tensor, keep_prob: float = 0.5, train_mode=False):
    if train_mode:
        # Inverted dropout: zero units with probability 1 - keep_prob and
        # rescale survivors by 1 / keep_prob, so the expected activation is
        # unchanged and no extra scaling is needed at inference time.
        mask_data = (binomial(keep_prob, tensor_in.shape()) / keep_prob).astype(np.float32)
        mask = Tensor(mask_data, diff=False)
        return tensor_in * mask
    else:
        return tensor_in
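
binomial presumably draws the keep/drop mask; a definition matching its call site, as a sketch:

def binomial(keep_prob: float, shape) -> np.ndarray:
    # 1 with probability keep_prob, 0 otherwise.
    return np.random.binomial(1, keep_prob, size=shape).astype(np.float32)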
Example #9
    def step(self, loss: Tensor):
        loss.calc_gradients()    # backpropagate from the loss node
        for p in self.parameters:
            # Plain gradient descent: p <- p - lr * dL/dp
            p.data -= self.learning_rate * p.grad
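
Compared with Examples #2 and #7, this is vanilla stochastic gradient descent: the same calc_gradients() backward pass, followed by a raw learning-rate step with no running averages or bias correction.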