Example #1
# For debugging, generate a small synthetic dataset of random data
if debug:
    input_size = 8 * 8
    input_data = np.random.randn(input_size, 100)
    labels = np.random.randint(num_classes, size=100)
else:
    input_size = 28 * 28
    input_data = images

# Randomly initialise theta
theta = 0.005 * np.random.randn(num_classes * input_size)


##======================================================================
## STEP 2: Implement softmax_cost
#
#  Implement softmax_cost in softmax.py (called below as softmax.softmax_cost).

(cost, grad) = softmax.softmax_cost(theta, num_classes, input_size, lambda_, input_data, labels)
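
# A minimal sketch of what softmax_cost might compute here, assuming data holds
# one example per column, labels are integer class indices, theta is flattened
# row-major, and lambda_ is the weight-decay strength (the snippet itself calls
# the implementation in the softmax module):
def softmax_cost_sketch(theta, num_classes, input_size, lambda_, data, labels):
    theta = theta.reshape(num_classes, input_size)
    m = data.shape[1]
    scores = theta.dot(data)                     # (num_classes, m) class scores
    scores -= scores.max(axis=0)                 # shift for numerical stability
    probs = np.exp(scores) / np.exp(scores).sum(axis=0)
    indicator = np.zeros((num_classes, m))
    indicator[labels, np.arange(m)] = 1          # one-hot ground-truth matrix
    cost = -np.sum(indicator * np.log(probs)) / m + 0.5 * lambda_ * np.sum(theta ** 2)
    grad = -(indicator - probs).dot(data.T) / m + lambda_ * theta
    return cost, grad.flatten()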

##======================================================================
## STEP 3: Gradient checking
#
#  As with any learning algorithm, you should always check that your
#  gradients are correct before learning the parameters.
#
if debug:
    J = lambda x: softmax.softmax_cost(x, num_classes, input_size, lambda_, input_data, labels)

    num_grad = gradient.compute_gradient(J, theta)

    # Use this to visually compare the gradients side by side
    print(num_grad, grad)
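
# compute_gradient is assumed here to be a standard central-difference check;
# a minimal sketch (hypothetical helper, assuming J returns (cost, grad) as in
# the lambda above):
def numerical_gradient_sketch(J, theta, epsilon=1e-4):
    num_grad = np.zeros_like(theta)
    for i in range(theta.size):
        e = np.zeros_like(theta)
        e[i] = epsilon
        # central-difference approximation of dJ/dtheta_i
        num_grad[i] = (J(theta + e)[0] - J(theta - e)[0]) / (2 * epsilon)
    return num_grad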
Example #2
# Here, we create a synthetic dataset of random data for testing
debug = False
if debug:
    input_size = 8 * 8
    input_data = np.random.randn(input_size, 100)
    labels = np.random.randint(n_classes, size=100)

# Randomly initialise theta
theta = 0.005 * np.random.randn(n_classes * input_size)
"""
STEP 2: Implement softmax_cost

  Implement softmax_cost in softmax_cost.py.
"""

cost, grad = softmax_cost(theta, n_classes, input_size, lambda_, input_data,
                          labels)
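
# A quick sanity check (assuming the gradient comes back flattened in the same
# layout as theta):
assert np.ndim(cost) == 0           # the cost should be a scalar
assert grad.shape == theta.shape    # the gradient matches the parameter vector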
"""
STEP 3: Gradient checking

  As with any learning algorithm, you should always check that your
  gradients are correct before learning the parameters.
"""

if debug:

    # Now we can use it to check the cost function and derivative calculations
    # for softmax regression.
    J = lambda theta: softmax_cost(theta, n_classes, input_size, lambda_,
                                   input_data, labels)[0]
    numgrad = compute_numerical_gradient(J, theta)
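
    # One common check is the relative difference between the analytic and
    # numerical gradients; it should be very small (roughly 1e-9 or less)
    # if softmax_cost is correct:
    diff = np.linalg.norm(numgrad - grad) / np.linalg.norm(numgrad + grad)
    print(diff)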
Example #3
debug = False
if debug:
    input_size = 8 * 8
    input_data = np.random.randn(input_size, 100)
    labels = np.random.randint(n_classes, size=100)

# Randomly initialise theta
theta = 0.005 * np.random.randn(n_classes * input_size)

"""
STEP 2: Implement softmax_cost

  Implement softmax_cost in softmax_cost.py.
"""

cost, grad = softmax_cost(theta, n_classes, input_size, lambda_, input_data, labels)

"""
STEP 3: Gradient checking

  As with any learning algorithm, you should always check that your
  gradients are correct before learning the parameters.
"""

if debug:

    # Now we can use it to check the cost function and derivative calculations
    # for softmax regression.
    J = lambda theta: softmax_cost(theta, n_classes, input_size, lambda_,
                                   input_data, labels)[0]
    numgrad = compute_numerical_gradient(J, theta)
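
    # The two gradients can also be compared element-wise; with a correct
    # softmax_cost they should agree to within a tight tolerance:
    print(np.max(np.abs(numgrad - grad)))
    print(np.allclose(numgrad, grad, atol=1e-8))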