Example #1
def black_scholes(S, X, T, R, V):
    # Black-Scholes prices for a European call and put.
    # S: spot price, X: strike, T: time to expiry (years),
    # R: risk-free rate, V: volatility.
    sqrt_t = np.sqrt(T)
    d1 = (np.log(S / X) + (R + 0.5 * V * V) * T) / (V * sqrt_t)
    d2 = d1 - V * sqrt_t
    cnd_d1 = cnd(d1)
    cnd_d2 = cnd(d2)
    exp_rt = np.exp(-R * T)
    call_result = S * cnd_d1 - X * exp_rt * cnd_d2
    put_result = X * exp_rt * (1.0 - cnd_d2) - S * (1.0 - cnd_d1)
    return call_result, put_result
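The pricer calls a cumulative-normal helper cnd that the example never defines. A minimal sketch of that helper, assuming cnd means the standard normal CDF and using SciPy's erf (both assumptions, not part of the original example):

import numpy as np
from scipy.special import erf

def cnd(d):
    # Standard normal CDF: Phi(d) = 0.5 * (1 + erf(d / sqrt(2)))
    return 0.5 * (1.0 + erf(d / np.sqrt(2.0)))

With that in place, black_scholes(S=100.0, X=95.0, T=1.0, R=0.05, V=0.2) returns the call and put prices as a pair.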
def test():
    xn = np.array([[1, 2, 3], [4, 5, 6]])
    x = lg.array(xn)

    # Elementwise unary ufuncs on arrays should match NumPy
    assert np.allclose(np.sin(xn), lg.sin(x))
    assert np.allclose(np.cos(xn), lg.cos(x))
    assert np.allclose(np.sqrt(xn), lg.sqrt(x))
    assert np.allclose(np.exp(xn), lg.exp(x))
    assert np.allclose(np.log(xn), lg.log(x))
    assert np.allclose(np.absolute(xn), lg.absolute(x))

    y = lg.tanh(x)
    yn = np.tanh(xn)
    assert np.allclose(y, yn)

    # The same ufuncs should also accept Python scalars
    assert np.allclose(lg.cos(0.5), np.cos(0.5))
    assert np.allclose(lg.sqrt(0.5), np.sqrt(0.5))
    assert np.allclose(lg.sin(0.5), np.sin(0.5))
    assert np.allclose(lg.exp(2), np.exp(2))
    assert np.allclose(lg.log(2), np.log(2))
    assert lg.absolute(-3) == 3

    # Ufuncs composed with slicing and arithmetic (logistic sigmoid)
    np.random.seed(42)
    an = np.random.randn(1, 3, 16)
    bn = 1.0 / (1.0 + np.exp(-an[0, :, :]))
    a = lg.array(an)
    b = 1.0 / (1.0 + lg.exp(-a[0, :, :]))
    assert np.allclose(b, bn)

    return
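These tests compare NumPy against a drop-in module aliased as lg, but none of the examples show their imports. A plausible preamble for running them, assuming lg refers to Legate's NumPy-compatible module (an inference from the alias, not stated in the source; any NumPy-compatible array library would serve):

import numpy as np
import legate.numpy as lg  # assumption: the lg alias is not shown in the original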
Example #3
def test():
    # lg.log should agree with np.log element-for-element
    npa = np.array([1, np.e, np.e**2])
    a = lg.array(npa)
    assert np.array_equal(lg.log(a), np.log(npa))
    return
Example #4
def log_likelihood(features, target, weights):
    # Bernoulli log-likelihood for logistic regression:
    # sum_i [ y_i * s_i - log(1 + exp(s_i)) ], where s = features @ weights
    scores = np.dot(features, weights)
    return np.sum(target * scores - np.log(1.0 + np.exp(scores)))
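For large positive scores, np.exp(scores) overflows before the log is taken. A sketch of the same quantity computed stably with np.logaddexp (a rewrite for illustration, not the original example's code):

import numpy as np

def log_likelihood_stable(features, target, weights):
    # log(1 + exp(s)) == logaddexp(0, s), evaluated without overflow
    scores = np.dot(features, weights)
    return np.sum(target * scores - np.logaddexp(0.0, scores))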
Example #5
def run_lstm(
    file_name,
    H_size,
    T_steps,
    max_iters,
    learning_rate,
    weight_sd,
    dump,
    timing,
):
    with open(file_name, "r") as f:
        data = f.read()
        chars = list(set(data))
        data_size, X_size = len(data), len(chars)
        print("data has %d characters, %d unique" % (data_size, X_size))
        char_to_idx = {ch: i for i, ch in enumerate(chars)}

    z_size = H_size + X_size  # Size of concatenate(H, X) vector

    parameters = Parameters(H_size, X_size, z_size, weight_sd)

    # Exponential moving average of the loss, initialized to the loss of
    # a model that predicts uniformly at random over X_size characters
    smooth_loss = -np.log(1.0 / X_size) * T_steps

    pointer = 0

    start = datetime.datetime.now()

    for iteration in range(max_iters):
        # Reset
        if pointer + T_steps >= len(data) or iteration == 0:
            g_h_prev = np.zeros((H_size, 1))
            g_C_prev = np.zeros((H_size, 1))
            pointer = 0

        inputs = [char_to_idx[ch] for ch in data[pointer : pointer + T_steps]]
        targets = [
            char_to_idx[ch] for ch in data[pointer + 1 : pointer + T_steps + 1]
        ]

        loss, g_h_prev, g_C_prev = forward_backward(
            inputs,
            targets,
            g_h_prev,
            g_C_prev,
            T_steps,
            H_size,
            X_size,
            parameters,
        )
        smooth_loss = smooth_loss * 0.999 + loss * 0.001

        # Report status every dump iterations
        if iteration % dump == 0:
            update_status(iteration, smooth_loss)

        update_parameters(learning_rate, parameters)

        pointer += T_steps
    update_status(max_iters, smooth_loss)

    stop = datetime.datetime.now()
    delta = stop - start
    total = delta.total_seconds() * 1000.0
    if timing:
        print("Elapsed Time: " + str(total) + " ms")
    return total
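A hypothetical invocation for orientation; every value below is invented, and Parameters, forward_backward, update_parameters, and update_status must be defined as in the neighboring examples:

run_lstm(
    file_name="input.txt",  # hypothetical training corpus
    H_size=100,             # hidden/cell state size
    T_steps=25,             # truncated-BPTT unroll length
    max_iters=10000,
    learning_rate=0.1,
    weight_sd=0.1,          # std-dev for weight initialization
    dump=100,               # status-report interval
    timing=True,
)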
Example #6
def forward_backward(
    inputs, targets, h_prev, C_prev, T_steps, H_size, X_size, parameters
):
    # Per-time-step caches of activations and outputs
    x_s, z_s, f_s, i_s = {}, {}, {}, {}
    C_bar_s, C_s, o_s, h_s = {}, {}, {}, {}
    v_s, y_s = {}, {}

    # Values at t - 1
    h_s[-1] = np.copy(h_prev)
    C_s[-1] = np.copy(C_prev)

    loss = 0
    # Loop through time steps
    assert len(inputs) == T_steps
    for t in range(len(inputs)):
        x_s[t] = np.zeros((X_size, 1))
        x_s[t][inputs[t]] = 1  # Input character

        (
            z_s[t],
            f_s[t],
            i_s[t],
            C_bar_s[t],
            C_s[t],
            o_s[t],
            h_s[t],
            v_s[t],
            y_s[t],
        ) = forward(
            x_s[t], h_s[t - 1], C_s[t - 1], H_size, X_size, parameters
        )  # Forward pass

        loss += -np.log(y_s[t][targets[t], 0])  # Cross-entropy loss at time step t

    clear_gradients(parameters)

    dh_next = np.zeros_like(h_s[0])  # dh from the next time step
    dC_next = np.zeros_like(C_s[0])  # dC from the next time step

    for t in reversed(range(len(inputs))):
        # Backward pass
        dh_next, dC_next = backward(
            target=targets[t],
            dh_next=dh_next,
            dC_next=dC_next,
            C_prev=C_s[t - 1],
            H_size=H_size,
            X_size=X_size,
            z=z_s[t],
            f=f_s[t],
            i=i_s[t],
            C_bar=C_bar_s[t],
            C=C_s[t],
            o=o_s[t],
            h=h_s[t],
            v=v_s[t],
            y=y_s[t],
            p=parameters,
        )

    clip_gradients(parameters)

    return loss, h_s[len(inputs) - 1], C_s[len(inputs) - 1]
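Each step's loss is the negative log-probability that the softmax output assigns to the correct character, which is what cross-entropy against a one-hot target reduces to. A minimal sketch with made-up numbers:

import numpy as np

y_t = np.array([[0.1], [0.7], [0.2]])   # hypothetical softmax output, shape (X_size, 1)
target_t = 1                            # index of the correct next character
step_loss = -np.log(y_t[target_t, 0])   # == -log(0.7), accumulated into loss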
Example #7
def test():
    # Scalar inputs: lg.log should agree with np.log value-for-value
    a = [1, np.e, np.e**2]
    for x in a:
        assert np.array_equal(lg.log(x), np.log(x))
    return