def evaluate(model, X_test, Y_test, eval_size, batch_size):
    """Evaluate ``model`` on the first ``N_test`` test samples.

    Args:
        model: Callable model with ``no_grad()`` / ``require_grad()`` toggles.
        X_test: Test inputs, indexable along axis 0.
        Y_test: Integer class labels aligned with ``X_test``.
        eval_size: Number of samples to evaluate, or ``None`` for all of them.
        batch_size: Mini-batch size used while iterating the test set.

    Returns:
        Tuple ``(mean_loss, accuracy)`` as Python floats.

    Raises:
        ValueError: If ``eval_size`` exceeds the number of available samples.
    """
    N_test = X_test.shape[0] if eval_size is None else eval_size

    if N_test > X_test.shape[0]:
        raise ValueError('Test size can be no larger than {}'.format(
            X_test.shape[0]))

    # Disable gradient tracking during evaluation.
    model.no_grad()

    # TODO(beam2d): make chx.array(0, dtype=...) work
    total_loss = chx.zeros((), dtype=chx.float32)
    num_correct = chx.zeros((), dtype=chx.int64)
    for i in range(0, N_test, batch_size):
        x = X_test[i:min(i + batch_size, N_test)]
        t = Y_test[i:min(i + batch_size, N_test)]

        y = model(x)
        # BUG FIX: weight the (mean) batch loss by the actual slice length,
        # not by `batch_size` — the last mini-batch is smaller whenever
        # N_test % batch_size != 0, which previously inflated mean_loss.
        total_loss += compute_loss(y, t) * x.shape[0]
        num_correct += (y.argmax(axis=1).astype(t.dtype) == t).astype(
            chx.int32).sum()

    # Re-enable gradient tracking for subsequent training steps.
    model.require_grad()

    mean_loss = float(total_loss) / N_test
    accuracy = int(num_correct) / N_test
    return mean_loss, accuracy
def new_linear_params(n_in, n_out):
    """Create fresh parameters for a linear layer.

    Returns a ``(W, b)`` pair: a ``(n_out, n_in)`` float32 weight matrix
    drawn from a Gaussian scaled by ``1/sqrt(n_in)``, and a zero bias of
    length ``n_out``.
    """
    # Initialization is done in NumPy.
    # TODO(beam2d): not supported in chx
    weight = np.random.randn(n_out, n_in).astype(np.float32)
    # TODO(beam2d): not supported in chx
    weight /= np.sqrt(n_in)

    bias = chx.zeros(n_out, dtype=chx.float32)
    return chx.array(weight), bias
def forward_chainerx(self, inputs):
    """Compute the forward pass in ChainerX.

    When one of the acceptable error types is raised, the error is
    recorded on the instance and a dummy (empty) output array is
    returned instead of real outputs.  The recorded error is examined
    in `check_forward_outputs`, and also in
    `_create_test_entry_function` to skip backward/double-backward
    tests.
    """
    tolerated = self.__get_accept_errors()

    try:
        result = self.forward_xp(inputs, chainerx)
    except tolerated as err:
        # Keep the detected error for later inspection and hand back a
        # dummy output so the caller still receives a tuple of arrays.
        self.__forward_error_chainerx = err
        dummy = chainerx.zeros((0, ), 'float32')
        return (dummy,)

    self.__forward_error_chainerx = 'ok'
    return result
def forward_chainerx(self, inputs):
    """Run the ChainerX forward computation for *inputs*.

    If an acceptable error occurs it is stored on the instance and a
    dummy empty array is returned in place of real outputs.  Detected
    errors are checked in `check_forward_outputs`, and also in
    `_create_test_entry_function` to skip backward/double-backward
    tests.
    """
    acceptable = self.__get_accept_errors()

    try:
        outs = self.forward_xp(inputs, chainerx)
    except acceptable as error:
        # Record the error; the caller still gets a tuple of arrays.
        self.__forward_error_chainerx = error
        return (chainerx.zeros((0,), 'float32'),)

    # Success: mark the forward pass as clean.
    self.__forward_error_chainerx = 'ok'
    return outs
def test_zeros_with_device(device):
    """zeros() with an explicit device matches the default-device result."""
    explicit = chainerx.zeros((2,), 'float32', device)
    default = chainerx.zeros((2,), 'float32')

    chainerx.testing.assert_array_equal_ex(explicit, default)
    array_utils.check_device(explicit, device)
def __init__(self, n_in, n_out):
    """Set up an (n_in, n_out) weight matrix and a zero bias of length n_out."""
    # Gaussian weights scaled by 1/sqrt(n_in); built with NumPy, then
    # converted to a ChainerX array.
    weight = np.random.randn(n_in, n_out).astype(np.float32)
    weight /= np.sqrt(n_in)

    self.W = chx.array(weight)
    self.b = chx.zeros((n_out, ), dtype=chx.float32)
def __init__(self, size, dtype=chx.float32):
    """Allocate per-channel parameter arrays of length *size*.

    Running mean/variance start at zero, scale (gamma) at one, and
    shift (beta) at zero.
    """
    param_shape = (size,)

    self.avg_mean = chx.zeros(param_shape, dtype)
    self.avg_var = chx.zeros(param_shape, dtype)
    self.gamma = chx.ones(param_shape, dtype)
    self.beta = chx.zeros(param_shape, dtype)
def __init__(self, n_in, n_out):
    """Create layer parameters: scaled-Gaussian weights and a zero bias."""
    # NumPy is used for the random draw; the result is handed to ChainerX.
    w = np.random.randn(n_in, n_out).astype(np.float32)
    w /= np.sqrt(n_in)
    self.W = chx.array(w)

    self.b = chx.zeros((n_out,), dtype=chx.float32)