Example #1
def test_getquantile():
    x = np.linspace(0, 1, 100)
    y = getquantile(x, lower=0.25, upper=0.5)
    assert (np.equal(y.mean(), 0.25))

    y = getquantile(x, lower=0.25, upper=0.5, return_indices=True)
    assert (np.all(np.equal(y, np.arange(25, 50))))
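For context, a minimal sketch of a getquantile consistent with this test (the tested library's implementation may differ; np.quantile and the half-open interval are assumptions):

import numpy as np

def getquantile_sketch(x, lower=0.25, upper=0.75, return_indices=False):
    # Boolean mask for values between the empirical `lower` and `upper`
    # quantiles of x (half-open, so exactly a quarter of a uniform grid
    # lands between the 0.25 and 0.5 quantiles, as asserted above).
    lo, hi = np.quantile(x, [lower, upper])
    mask = np.logical_and(np.greater_equal(x, lo), np.less(x, hi))
    return np.where(mask)[0] if return_indices else mask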
Example #2
def test_batch_softmax():
    X = np.random.randint(1, 10, size=(10, 5))
    p0 = np.stack([softmax(x_) for x_ in X])
    p1 = batch_softmax(X, axis=1)
    p0_0 = np.stack([softmax(x_) for x_ in X.T])
    p1_0 = batch_softmax(X, axis=0)
    assert np.all(np.equal(p0, p1))
    assert np.all(np.equal(p0_0.round(4), p1_0.round(4).T))
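A minimal sketch of a batch_softmax that would satisfy both asserts, assuming the usual max-shifted stable softmax (the actual implementation may differ):

import numpy as np

def batch_softmax_sketch(X, axis=1):
    # Numerically stable softmax along `axis`: subtract the max before
    # exponentiating, then normalize so each slice sums to one.
    z = X - X.max(axis=axis, keepdims=True)
    e = np.exp(z)
    return e / e.sum(axis=axis, keepdims=True)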
Example #3
def test_bms_verbose():
    X = np.random.uniform(0, 200, size=(100, 10))
    pxp, xp, bor, q_m, alpha, f0, f1, niter = bms(X, verbose=True)
    assert np.equal(pxp.sum().round(5), 1)
    assert np.equal(xp.sum().round(5), 1)
    assert (0 <= bor <= 1)
    assert np.all(np.equal(q_m.sum(1).round(5), 1))
    assert np.all(np.greater(alpha, 0))
    assert np.greater_equal(f0, 0)
    assert np.greater_equal(f1, 0)
    assert np.greater(niter, 0)
Example #4
    def evaluate(self, y_pred):

        accuracy = (np.sum(np.equal(self.y_test, y_pred).astype(float))
                    / self.y_test.size)
        return accuracy
Example #5
def test_sigmoid():
    x = np.linspace(-2, 2, 100)
    y = sigmoid(x)
    yidx = np.argsort(y)
    xidx = np.argsort(x)
    assert (np.all(np.logical_and(np.greater_equal(y, 0),
                                  np.less_equal(y, 1))))
    assert (np.all(np.equal(yidx, xidx)))
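For reference, a one-line sigmoid sketch with both properties the test checks, boundedness in (0, 1) and monotonicity (assuming the library uses the standard logistic function):

import numpy as np

def sigmoid_sketch(x):
    # logistic function: values in (0, 1), strictly increasing
    return 1.0 / (1.0 + np.exp(-x))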
Example #6
def test_batch_transform():
    X = np.random.normal(0, 5, size=(10, 2))
    Y = batch_transform(X, [sigmoid, stable_exp])
    assert (np.all(np.equal(X.shape, Y.shape)))
    assert (np.all(np.logical_and(np.greater_equal(Y[:, 0], 0),
                                  np.less_equal(Y[:, 0], 1))))
    # column 1 went through stable_exp, so it must be non-negative
    assert (np.all(np.greater_equal(Y[:, 1], 0)))
Example #7
def test_reduce_then_tile():
    X = np.random.randint(1, 10, size=(10, 5))
    p0 = np.stack([softmax(x_) for x_ in X])
    max_x = reduce_then_tile(X, np.max, axis=1)
    exp_x = np.exp(X - max_x)
    sum_exp_x = reduce_then_tile(exp_x, np.sum, axis=1)
    p1 = exp_x / sum_exp_x
    assert np.all(np.equal(p0, p1))
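A minimal sketch of the reduce_then_tile helper this test assumes: reduce along an axis, then tile the result back to the input's shape (the real helper may instead rely on broadcasting):

import numpy as np

def reduce_then_tile_sketch(X, f, axis=1):
    # Apply a reduction (e.g. np.max, np.sum) along `axis`, keeping the
    # dimension, then tile it back to X.shape for elementwise use.
    reduced = f(X, axis=axis, keepdims=True)
    reps = [1] * X.ndim
    reps[axis] = X.shape[axis]
    return np.tile(reduced, reps)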
Example #8
def accuracies(params):
    # unpack logged parameters
    optimized_c_l_r, optimized_w = unpack_params(params)
    predicted_train_classes = np.argmax(
        bayespredict(optimized_c_l_r, optimized_w, train_images), axis=0)
    predicted_test_classes = np.argmax(
        bayespredict(optimized_c_l_r, optimized_w, test_images), axis=0)
    # compute real classes
    real_train_classes = np.argmax(train_labels, axis=1)
    real_test_classes = np.argmax(test_labels, axis=1)
    # compute accuracy
    train_accuracy = np.average(
        np.equal(predicted_train_classes, real_train_classes).astype(float))
    test_accuracy = np.average(
        np.equal(predicted_test_classes, real_test_classes).astype(float))
    # output accuracy
    return train_accuracy, test_accuracy
Example #9
    def loss_func(self, w):
        prob = self.prob_func(w, self.train_x_)
        loss = np.array([0.0] * self.train_x_.shape[0])
        for c in range(len(self.classes_)):
            loss += np.equal(self.train_y_, c) * prob[:, c]
        score = -np.sum(np.log(loss))
        if self.penalty_ == "l1":  # Lasso
            score += self.alpha_ * np.sum(np.abs(w))
        elif self.penalty_ == "l2":  # Ridge
            # ===FIXME===
            # It is unclear why the scale factor of the L2 penalty is 3.0:
            # by the usual definition it should be 0.5, but reproducing
            # scikit-learn's values requires 3.0 (perhaps to curb
            # overfitting?).
            score += 3.0 * self.alpha_ * np.mean(w**2)
        return score
Example #10
    def update(self, x, u, r, x_, u_):
        """ Computes value function updates and their derivatives for the Q-learning model """

        # ELIGIBILITY TRACE
        # Reset derivatives if eligibility trace was reset at start of trial
        if np.all(np.equal(self.etrace, 0)):
            self.d_etrace['discount_factor'] = np.zeros(self.etrace.shape)
            self.d_etrace['trace_decay'] = np.zeros(self.etrace.shape)

        # Compute derivatives
        self.d_etrace['discount_factor'] = self.trace_decay * (
            self.etrace +
            self.discount_factor * self.d_etrace['discount_factor'])
        self.d_etrace['trace_decay'] = self.discount_factor * (
            self.etrace + self.trace_decay * self.d_etrace['trace_decay'])

        # Update trace
        self.etrace = np.einsum(
            'a,s->as', u,
            x) + self.discount_factor * self.trace_decay * self.etrace

        # REWARD PREDICTION ERROR
        # Compute derivatives
        dmaxQx_ = grad.max(self.Qx(x_))
        d_rpe_Q = self.discount_factor * np.outer(dmaxQx_, x_) - np.outer(u, x)
        d_rpe_learningrate = np.sum(self.dQ['learning_rate'] * d_rpe_Q)
        d_rpe_discount = np.sum(
            self.dQ['discount_factor'] * d_rpe_Q) + self.Qmax(x_)
        d_rpe_tracedecay = np.sum(self.dQ['trace_decay'] * d_rpe_Q)

        # Compute RPE
        rpe = r + self.discount_factor * self.Qmax(x_) - self.uQx(u, x)
        self.rpe.append(rpe)

        # Q PARAMETERS
        # Compute derivatives
        self.dQ['learning_rate'] += (
            rpe + self.learning_rate * d_rpe_learningrate) * self.etrace
        self.dQ['discount_factor'] += self.learning_rate * (
            d_rpe_discount * self.etrace +
            rpe * self.d_etrace['discount_factor'])
        self.dQ['trace_decay'] += self.learning_rate * (
            d_rpe_tracedecay * self.etrace +
            rpe * self.d_etrace['trace_decay'])

        # Update value function
        self.Q += self.learning_rate * rpe * self.etrace
Example #11
def energy(params, positions, cell, strain=np.zeros((3, 3))):
    """Compute the energy of a Lennard-Jones system.

    Parameters
    ----------

    params : dictionary of parameters.
      Defaults to {'sigma': 1.0, 'epsilon': 1.0}

    positions : array of floats. Shape = (natoms, 3)

    cell: array of unit cell vectors. Shape = (3, 3)

    strain: array of strains to apply to cell. Shape = (3, 3)

    Returns
    -------
    energy : float
    """

    sigma = params.get('sigma', 1.0)
    epsilon = params.get('epsilon', 1.0)

    rc = 3 * sigma

    e0 = 4 * epsilon * ((sigma / rc)**12 - (sigma / rc)**6)

    strain_tensor = np.eye(3) + strain
    cell = np.dot(strain_tensor, cell.T).T
    positions = np.dot(strain_tensor, positions.T).T

    r2 = get_distances(positions, cell, rc, 0.01)[0]**2

    zeros = np.equal(r2, 0.0)
    adjusted = np.where(zeros, np.ones_like(r2), r2)

    c6 = np.where((r2 <= rc**2) & (r2 > 0.0), (sigma**2 / adjusted)**3,
                  np.zeros_like(r2))
    c6 = np.where(zeros, np.zeros_like(r2), c6)
    energy = -e0 * (c6 != 0.0).sum()
    c12 = c6**2
    energy += np.sum(4 * epsilon * (c12 - c6))

    # get_distances double counts the interactions, so we divide by two.
    return energy / 2
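A hypothetical smoke test for energy, assuming the get_distances helper from Example #19 below is also in scope: an isolated pair near the Lennard-Jones minimum r = 2**(1/6)*sigma, in a box large enough that periodic images fall outside the cutoff.

import numpy as np

cell = 10.0 * np.eye(3)
positions = np.array([[0.0, 0.0, 0.0],
                      [2**(1 / 6), 0.0, 0.0]])
# Expect roughly -epsilon = -1, offset slightly by the cutoff shift e0.
print(energy({'sigma': 1.0, 'epsilon': 1.0}, positions, cell))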
Example #12
def get_mask_i_float(i, n):
    """Create a 1D array of zeros with one element at one, with floating type.

    Parameters
    ----------
    i : int
        Index of the non-zero element.
    n : int
        Length of the created array.

    Returns
    -------
    mask_i_float : array-like, shape=[n,]
        1D array of zeros except at index i, where it is one
    """
    range_n = arange(n)
    i_float = cast(array([i]), int32)[0]
    mask_i = equal(range_n, i_float)
    mask_i_float = cast(mask_i, float32)
    return mask_i_float
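The arange/cast/equal calls here appear to come from a backend-abstraction layer. For reference, a pure-NumPy equivalent (an illustration, not the library's code):

import numpy as np

def get_mask_i_float_np(i, n):
    # length-n float32 one-hot mask with a single 1.0 at index i
    return np.equal(np.arange(n), i).astype(np.float32)

assert np.array_equal(get_mask_i_float_np(2, 5),
                      np.array([0., 0., 1., 0., 0.], dtype=np.float32))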
Example #13
def evaluate(y_test, y_pred):
    # note: this is the fraction of correct predictions (accuracy),
    # not an error rate, despite the name in the original source
    accuracy = (np.sum(np.equal(y_test, y_pred).astype(float))
                / y_test.size)
    return accuracy
Example #14
def test_bitflip():
    x = np.array([0, 1, 0, 1, 0])
    y = np.array([1, 0, 1, 0, 1])
    assert (np.all(np.equal(y, bitflip(x))))
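A one-liner consistent with this test (assuming bitflip operates on 0/1 arrays):

import numpy as np

def bitflip_sketch(x):
    # flip each bit of a binary array: 0 -> 1, 1 -> 0
    return 1 - np.asarray(x)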
Example #15
        inputs.shape[1],  # <-- num features
        categories,
    )
    
    num_epochs = 100

    print('loss initially: ',
          loss(params, inputs=inputs, targets=inputs,
               channels_indexed=list(idx_map.values()),
               labels_indexed=labels_indexed, hps=hps))

    acc = []
    for epoch in range(num_epochs):
        gradients = loss_grad(params, inputs=inputs, targets=inputs,
                              channels_indexed=list(idx_map.values()),
                              labels_indexed=labels_indexed, hps=hps)
        params = update_params(params, gradients, hps['lr'])

        acc.append(
            np.mean(
                np.equal(
                    predict(params, inputs=inputs, targets=inputs,
                            channels_indexed=list(idx_map.values()),
                            hps=hps),
                    labels_indexed,
                )
            )
        )

    print('loss after training: ',
          loss(params, inputs=inputs, targets=inputs,
               channels_indexed=list(idx_map.values()),
               labels_indexed=labels_indexed, hps=hps))

    # print(np.round(params['input'][1]['weights'], 3))

    print(
        'response:\n',
        predict(params, inputs=inputs, targets=inputs,
                channels_indexed=list(idx_map.values()), hps=hps))
Example #16
def get_mask_i_float(i, n):
    range_n = arange(n)
    i_float = cast(array([i]), int32)[0]
    mask_i = equal(range_n, i_float)
    mask_i_float = cast(mask_i, float32)
    return mask_i_float
Example #17
                                inputs=data['inputs'],
                                targets=data['one_hot_targets'],
                                hps=hps)
        params = update_params(params, gradients_0, hps['learning_rates'])

    for epoch in range(num_epochs - 1):
        # for epoch in range(100):
        gradients_1 = loss_grad(params,
                                inputs=data['inputs'],
                                targets=data['one_hot_targets'],
                                hps=hps)
        hps['learning_rates'] = update_lr(gradients_0, gradients_1,
                                          hps['learning_rates'],
                                          hps['hyper_learning_rate'])
        params = update_params(params, gradients_1, hps['learning_rates'])

        gradients_0 = copy.deepcopy(gradients_1)

    print(
        'loss after training: ',
        loss(params,
             inputs=data['inputs'],
             targets=data['one_hot_targets'],
             hps=hps))

    print(
        np.mean(
            np.equal(
                np.argmax(forward(params, inputs=data['inputs'], hps=hps)[-1],
                          axis=1), data['labels_indexed'])))
Example #18
def test_make_onehot():
    rng = np.random.RandomState(23)
    X = rng.multinomial(1, pvals=[0.5, 0.5], size=20)
    y = np.argmax(X, axis=1)
    G, _ = make_onehot(y)
    assert (np.all(np.equal(X, G)))
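A minimal sketch of a make_onehot consistent with this test; the second return value is a guess (the test discards it), here taken to be the sorted unique labels:

import numpy as np

def make_onehot_sketch(y):
    # one-hot encode integer labels against their sorted unique values
    labels = np.unique(y)
    onehot = np.equal(y[:, None], labels[None, :]).astype(float)
    return onehot, labels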
Example #19
def get_distances(positions,
                  cell,
                  cutoff_distance,
                  skin=0.01,
                  strain=np.zeros((3, 3))):
  """Get distances to atoms in a periodic unitcell.

    Parameters
    ----------

    positions: atomic positions. array-like (natoms, 3)
    cell: unit cell. array-like (3, 3)
    cutoff_distance: Maximum distance to get neighbor distances for. float
    skin: A tolerance for the cutoff_distance. float
    strain: array-like (3, 3)

    Returns
    -------

    distances : an array of dimension (atom_i, atom_j, distance) The shape is
    (natoms, natoms, nunitcells) where nunitcells is the total number of unit
    cells required to tile the space to be sure all neighbors will be found. The
    atoms that are outside the cutoff distance are zeroed.

    offsets : integer unit-cell offset vectors used to build the periodic
    images. Shape = (ncells, 3)

    """
  positions = np.array(positions)
  cell = np.array(cell)
  strain_tensor = np.eye(3) + strain
  cell = np.dot(strain_tensor, cell.T).T
  positions = np.dot(strain_tensor, positions.T).T

  inverse_cell = np.linalg.inv(cell)
  num_repeats = cutoff_distance * np.linalg.norm(inverse_cell, axis=0)

  fractional_coords = np.dot(positions, inverse_cell) % 1
  mins = np.min(np.floor(fractional_coords - num_repeats), axis=0)
  maxs = np.max(np.ceil(fractional_coords + num_repeats), axis=0)

  # Now we generate a set of cell offsets
  v0_range = np.arange(mins[0], maxs[0])
  v1_range = np.arange(mins[1], maxs[1])
  v2_range = np.arange(mins[2], maxs[2])

  xhat = np.array([1, 0, 0])
  yhat = np.array([0, 1, 0])
  zhat = np.array([0, 0, 1])

  v0_range = v0_range[:, None] * xhat[None, :]
  v1_range = v1_range[:, None] * yhat[None, :]
  v2_range = v2_range[:, None] * zhat[None, :]

  offsets = (
      v0_range[:, None, None] + v1_range[None, :, None] +
      v2_range[None, None, :])

  offsets = np.int_(offsets.reshape(-1, 3))
  # Now we have a vector of unit cell offsets (offset_index, 3)
  # We convert that to cartesian coordinate offsets
  cart_offsets = np.dot(offsets, cell)

  # we need to offset each coord by each offset.
  # This array is (atom_index, offset, 3)
  shifted_cart_coords = positions[:, None] + cart_offsets[None, :]

  # Next, we subtract each position from the array of positions
  # (atom_i, atom_j, positionvector, 3)
  pv = shifted_cart_coords - positions[:, None, None]

  # This is the distance squared
  # (atom_i, atom_j, distance_ij)
  d2 = np.sum(pv**2, axis=3)

  # The gradient of sqrt is nan at r=0, so we do this round about way to
  # avoid that.
  zeros = np.equal(d2, 0.0)
  adjusted = np.where(zeros, np.ones_like(d2), d2)
  d = np.where(zeros, np.zeros_like(d2), np.sqrt(adjusted))

  distances = np.where(d < (cutoff_distance + skin), d, np.zeros_like(d))
  return distances, offsets
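A hypothetical smoke test for this helper, with the cutoff chosen so that only the nearest periodic images of a single atom in a unit cube survive:

import numpy as np

# All distances are to the atom's own periodic images: 1.0 to the six
# face neighbors and sqrt(2) to the twelve edge neighbors.
d, offsets = get_distances([[0.5, 0.5, 0.5]], np.eye(3), 1.5)
print(np.unique(np.round(d[d > 0], 3)))  # expect [1.0, 1.414]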
Example #20
def test_shuffle():
    x = np.eye(3)
    y = shuffle(x)
    assert (np.equal(np.sum(y), 3))
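A shuffle sketch consistent with the test, assuming shuffle permutes rows; any row permutation of np.eye(3) preserves the total sum of 3:

import numpy as np

def shuffle_sketch(x, seed=None):
    # return the rows of x in a random order
    rng = np.random.default_rng(seed)
    return rng.permutation(x)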
Example #21
    def evaluate(self, y_test, y_pred):
        accuracy = (np.sum(np.equal(y_test, y_pred).astype(float))
                    / y_test.size)
        return accuracy
Example #22
    params = build_params(
        inputs.shape[1],  # <-- num features
        hps['num_hidden_nodes'],
        labels.shape[1])

    num_epochs = 1000

    print('loss initially: ',
          loss(params, inputs=inputs, targets=labels, hps=hps))

    import matplotlib.pyplot as plt

    for epoch in range(num_epochs):
        gradients = loss_grad(params, inputs=inputs, targets=labels, hps=hps)
        params = update_params(params, gradients, hps['lr'])

    print('loss after training: ',
          loss(params, inputs=inputs, targets=labels, hps=hps))

    print('predictions:',
          np.argmax(forward(params, inputs=inputs, hps=hps)[-1], axis=1))
    print('labels:', idx_labels)

    print(
        'accuracy:',
        np.mean(
            np.equal(
                np.argmax(forward(params, inputs=inputs, hps=hps)[-1], axis=1),
                idx_labels)))
Example #23
def eq(a: Numeric, b: Numeric):
    return anp.equal(a, b)
Example #24
def prediction_accuracy(params, images, labels):
    prediction = np.argmax(np.einsum('skd,nd->snk', params, images),axis=-1)
    target = np.argmax(labels,axis=-1)
    return np.sum(np.equal(prediction,target))/(prediction.shape[0]*prediction.shape[1])
Example #25
def test_I():
    x = np.ones(5)
    assert np.all(np.equal(x, I(x)))
Example #26
def test_logsumexp():
    x = np.arange(5)
    assert np.equal(logsumexp(x), scipy_logsumexp(x))
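For reference, a stable log-sum-exp sketch that agrees with scipy's logsumexp on inputs like this:

import numpy as np

def logsumexp_sketch(x):
    # factor out the max so the exponentials cannot overflow:
    # log(sum(exp(x))) = m + log(sum(exp(x - m)))
    m = np.max(x)
    return m + np.log(np.sum(np.exp(x - m)))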
Example #27
def test_sign():
    x = np.array([0, 1, 0, 1, 0])
    y = np.array([-1, 1, -1, 1, -1])
    assert (np.all(np.equal(y, sign(x))))
Example #28
def test_signinv():
    y = np.array([0, 1, 0, 1, 0])
    x = np.array([-1, 1, -1, 1, -1])
    assert (np.all(np.equal(y, signinv(x))))
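Read together, Examples #27 and #28 pin down a bijection between {0, 1} and {-1, 1}; a minimal pair of sketches consistent with both tests (the library's definitions may be more general):

import numpy as np

def sign_sketch(x):
    # map {0, 1} -> {-1, 1}
    return 2 * np.asarray(x) - 1

def signinv_sketch(x):
    # inverse map {-1, 1} -> {0, 1}
    return (np.asarray(x) + 1) // 2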
Example #29
def test_transform():
    x = np.array([0, 0, -10, 0, 55])
    x_ = transform(x, [sigmoid, tanh, relu, stable_exp, I])
    y = np.array([0.5, 0, 0, 1, 55])
    assert np.all(np.equal(x_.flatten(), y))
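A guess at the transform helper's contract, consistent with the expected output above: apply the i-th function to the i-th element (the real helper may also handle matrices, hence the .flatten() in the assert):

import numpy as np

def transform_sketch(x, funcs):
    # elementwise: apply funcs[i] to x[i]
    return np.array([f(v) for f, v in zip(funcs, x)], dtype=float)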
Example #30
def test_scale_data():
    x = np.arange(10).reshape(-1, 1)
    x = np.outer(x, np.ones(5))
    x = scale_data(x, with_mean=True, with_var=True)
    assert (np.all(np.equal(np.var(x, 0), np.ones(x.shape[1]))))
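A minimal scale_data sketch matching the flags used here: center columns and scale them to unit variance, which makes np.var(x, 0) a vector of ones as asserted:

import numpy as np

def scale_data_sketch(x, with_mean=True, with_var=True):
    x = np.asarray(x, dtype=float)
    if with_mean:
        x = x - x.mean(axis=0)  # center each column
    if with_var:
        std = x.std(axis=0)
        x = x / np.where(std == 0, 1.0, std)  # unit variance, guard zeros
    return x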
Example #31
        'num_hidden_nodes': 20,
        'hidden_activation': np.tanh,
        'channel_activation': utils.relu,
        'output_activation': utils.softmax,
    }

    params = build_params(
        inputs.shape[1],  # <-- num features
        hps['num_hidden_nodes'],
        categories,
        weight_range=hps['wr'])

    num_epochs = 4000

    print('loss initially: ',
          loss(params, inputs=inputs, targets=one_hot_targets, hps=hps))
    for epoch in range(num_epochs):
        gradients = loss_grad(params,
                              inputs=inputs,
                              targets=one_hot_targets,
                              hps=hps)
        params = update_params(params, gradients, hps['lr'])

    print('loss after training: ',
          loss(params, inputs=inputs, targets=one_hot_targets, hps=hps))
    print(
        np.mean(
            np.equal(
                np.argmax(forward(params, inputs=inputs, hps=hps)[-1], axis=1),
                labels_indexed)))