Code example #1
File: fwdProp.py  Project: pransil/ossl-oneShot
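
Note: these snippets rely on their files' module-level imports (presumably numpy as np, plus the project's own utils, modelDefinition, and modelMods modules, none of which are shown in this listing).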
def forward_prop_one_layer(model, cache, layer):
    """
    :param model:
    :param cache:
    :param layer: target layer, activation going from layer-1 to layer
    :return:
    """

    W, b, Lm, Ln, G, Wc, Mean, Var = utils.get_model_WbLGWcMV(model, layer)
    Am, An, Zm, Zn = utils.get_cache_AmAmZmZn(cache, layer)

    if W.size == 0:  # Start building from zero! ToDo - generalize for n layers
        memory = True
        model = modelDefinition.add_unit(model, layer, memory, Am, d_index=0)
        W, b, Lm, Ln, G, Wc, Mean, Var = utils.get_model_WbLGWcMV(model, layer)

    Zn = np.dot(W, Am) + b.T  # ToDo - confirm the bias orientation (b is stored as a (1, Ln) row)
    An = Zn  # ToDo - review: linear activation for now; sigmoid disabled below
    #An = utils.sigmoid(Zn)

    d = Am.shape[1]
    assert Zn.shape == (Ln, d)
    assert An.shape == (Ln, d)
    model = utils.set_model_WbLGWcMV(model, layer, W, b, Lm, Ln, G, Wc, Mean,
                                     Var)
    cache = utils.set_cache_AmAnZmZn(cache, layer, Am, An, Zm, Zn)

    return model, cache, An
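
As a shape sanity check, the linear step above can be reproduced in plain NumPy (a minimal sketch with made-up sizes, not a call into the repo):

import numpy as np

Lm, Ln, d = 4, 3, 5            # previous-layer units, this-layer units, batch size
W = np.random.randn(Ln, Lm)    # one weight row per unit in this layer
b = np.zeros((1, Ln))          # bias kept as a (1, Ln) row, hence the b.T above
Am = np.random.randn(Lm, d)    # activations arriving from layer-1

Zn = np.dot(W, Am) + b.T       # (Ln, d) plus a broadcast (Ln, 1) column
An = Zn                        # linear activation, as in the snippet
assert Zn.shape == (Ln, d)
assert An.shape == (Ln, d)
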
Code example #2
def back_prop_one_layer(model, cache, layer, error, margin):
    """
    Arguments:
    model   -- python dictionary containing weights, biases, and other model state
    cache   -- python dictionary containing the cached activations ("Z1", "A1", "Z2", "A2")
    layer   -- back-propagate from this layer (Ln) to the previous one (Lm)

    Returns:
    grads -- python dictionary containing the gradients with respect to each parameter
    """
    W, b, Lm, Ln, G, Wc, Mean, Var = utils.get_model_WbLGWcMV(model, layer)
    Am, An, Zm, Zn = utils.get_cache_AmAmZmZn(cache, layer)
    d = float(Am.shape[1])  # number of data samples

    # Backward propagation: calculate this layer's gradients dW and db
    dZn = -error
    target = An > (1 - margin)
    dZn_boosted = boost_dZ(dZn, target)

    # dZ1 = np.dot(W2.T, dZ2) * (1 - np.power(A1, 2))  # keep: needed later for multi-layer backprop
    dW_boosted = (1 / d) * np.dot(dZn_boosted, Am.T)
    db = (1 / d) * np.sum(dZn, axis=1, keepdims=True)
    db = db.T

    assert dW_boosted.shape == W.shape
    assert db.shape == b.shape
    assert dZn_boosted.shape == Zn.shape

    grads = {"dW1": dW_boosted, "db1": db}

    return grads
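
The gradient shapes follow from the matrix dimensions alone. A self-contained sketch, with a hypothetical stand-in for the repo's boost_dZ (whose real definition is not shown in this listing):

import numpy as np

Ln, Lm, d = 3, 4, 5
An = np.random.rand(Ln, d)
Am = np.random.rand(Lm, d)
error = np.random.randn(Ln, d)
margin = 0.1

dZn = -error
target = An > (1 - margin)                       # winners: activations within margin of 1
dZn_boosted = np.where(target, 2.0 * dZn, dZn)   # hypothetical stand-in for boost_dZ

dW = (1 / d) * np.dot(dZn_boosted, Am.T)               # (Ln, Lm), same shape as W
db = ((1 / d) * np.sum(dZn, axis=1, keepdims=True)).T  # (1, Ln), same shape as b
assert dW.shape == (Ln, Lm)
assert db.shape == (1, Ln)
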
Code example #3
def adjust_units(model, target, A):
    """
    Adjust the weights of each unit to the center of its cluster
    Inputs
    model       - The network model structure
    target      - Array with 1 where a win occurs
    A           - Activation for this layer, for the latest batch
    """
    layer = 1  # ToDo - generalize
    W, b, Lm, Ln, G, Wc, Mean, Var = utils.get_model_WbLGWcMV(model, layer)
    # Kill units with too few wins; defer the deletions so the unit indices
    # and the W/b snapshot written back below stay consistent
    wins_per_unit = np.sum(target, axis=1)
    min_win_rate = 0.3
    to_kill = []
    for u in range(Ln):
        win_rate = float(wins_per_unit[u]) / target.shape[1]
        if win_rate <= min_win_rate:
            to_kill.append(u)
        mean, var = find_cluster_stats(target, A, index=u)
        if Mean.size == 0:
            Mean = mean.reshape((1, mean.size))
            Var = var.reshape((1, var.size))
        else:
            Mean = np.vstack((Mean, mean))
            Var = np.vstack((Var, var))

    model = utils.set_model_WbLGWcMV(model, layer, W, b, Lm, Ln, G, Wc,
                                     Mean, Var)
    # Delete in descending index order so earlier kills don't shift later ones
    for u in reversed(to_kill):
        model = modelMods.kill_unit(model, layer, u)
    #find_cluster_drift(W, b, G, Mean)
    return model
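
The win-rate test reduces to a row sum over the 0/1 target matrix; a toy illustration with invented data:

import numpy as np

target = np.array([[1, 0, 1, 1],   # unit 0 wins on 3 of 4 samples
                   [0, 0, 1, 0]])  # unit 1 wins on 1 of 4
wins_per_unit = np.sum(target, axis=1)         # [3, 1]
win_rates = wins_per_unit / target.shape[1]    # [0.75, 0.25]
print(win_rates <= 0.3)                        # [False  True] -> unit 1 gets killed
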
Code example #4
def add_unit(model, layer, memory=False, Am=None, d_index=None):
    """
    Arguments:
        model   - Model dict
        layer   - Layer the new unit goes onto
        memory  - Add a memory unit if True
        Am      - np.array(Lm, d) - Activation from the previous layer; Am=X when building in L1
                  the pattern being 'memorized' is Am[..., d_index], all rows, one column
        d_index - The column from Am to use (incoming data/activation)
    Returns:
        Updated model
    """
    W, b, Lm, Ln, G, Wc, Mean, Var = utils.get_model_WbLGWcMV(model, layer)

    if W.size == 0:  # Start building from zero!
        model = genesis(model, Am)
        Ln = 1
    else:
        assert layer >= 1  # Don't create units on input layer, L0
        np.random.seed(2)  # So we can get consistent results when debugging
        W, b, Lm, Ln, G, Wc, Mean, Var = utils.get_model_WbLGWcMV(model, layer)

        if memory:
            W_new, b_new, G = memorize_input(Am, G, d_index)
        else:
            W_new, b_new = create_non_memory_unit(Lm)  # ToDo - does this need G init = 0?

        Ln += 1
        W = np.vstack((W, W_new))  # Stack the new row onto Wn
        b = np.append(b, b_new)
        b = b.reshape((1, Ln))
        model = utils.set_model_WbLGWcMV(model, layer, W, b, Lm, Ln, G, Wc,
                                         Mean, Var)

    #model = modelMods.adjust_food_when_adding_unit(model, Ln)
    return model
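
Appending a unit is just stacking a new row onto W and extending b; a plain-NumPy sketch of that shape bookkeeping (sizes invented):

import numpy as np

Lm, Ln = 4, 2
W = np.random.randn(Ln, Lm)
b = np.zeros((1, Ln))

W_new = np.random.randn(1, Lm)              # one new row of weights
b_new = 0.0
Ln += 1
W = np.vstack((W, W_new))                   # now (3, Lm)
b = np.append(b, b_new).reshape((1, Ln))    # np.append flattens, so reshape back to (1, 3)
assert W.shape == (Ln, Lm) and b.shape == (1, Ln)
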
Code example #5
def kill_unit(model, layer, u):
    """
    Delete a unit (u) by removing: that row from the W, G and Wc (win count) matrices, the bias, updating Ln.
    Return the updated model
    """
    W, b, Lm, Ln, G, Wc, Mean, Var = utils.get_model_WbLGWcMV(model, layer)
    W = np.delete(W, u, axis=0)
    b = np.delete(b, u)
    Ln -= 1
    b = b.reshape(1, Ln)
    G = np.delete(G, u, axis=0)
    Wc = np.delete(Wc, u, axis=0)

    model = utils.set_model_WbLGWcMV(model, layer, W, b, Lm, Ln, G, Wc, Mean,
                                     Var)
    return model
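
np.delete with axis=0 drops the unit's row from each matrix, while the axis-less call on b flattens first; a toy run:

import numpy as np

W = np.arange(12.0).reshape(3, 4)    # 3 units, 4 inputs each
b = np.array([[0.1, 0.2, 0.3]])
u = 1
W = np.delete(W, u, axis=0)          # rows 0 and 2 remain
b = np.delete(b, u).reshape(1, 2)    # flattened by np.delete, so reshape back to a row
assert W.shape == (2, 4) and b.shape == (1, 2)
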
Code example #6
def genesis(model, X):
    """
    Create the very first node
    model   - model parameters
    X       - input data
    Return: - model (updated)
    """
    np.random.seed(2)  # So we can get consistent results when debugging

    W1, b1, L0, L1, G1, Wc1, Mean, Var = utils.get_model_WbLGWcMV(model, 1)
    L0 = X.shape[0]
    W1, b1 = create_non_memory_unit(L0)
    # Overwrite the random unit with one that 'memorizes' the input
    W1, b1, G1 = memorize_input(X, G1)
    L1 += 1
    layer = 1
    model = utils.set_model_WbLGWcMV(model, layer, W1, b1, L0, L1, G1, Wc1,
                                     Mean, Var)
    return model
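
memorize_input is not shown in this listing; one plausible reading of 'memorizing' an input, sketched here as a guess at the intent rather than the repo's actual implementation, is to copy an input column into the unit's weight row:

import numpy as np

X = np.random.rand(4, 3)          # 4 input features, 3 samples
x = X[:, 0]                       # the column to memorize
W1 = x.reshape(1, -1)             # weight row set to the pattern itself
b1 = np.zeros((1, 1))
response = np.dot(W1, X) + b1.T   # strongest for columns aligned with x
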
Code example #7
def update_parameters(model, grads, d, learning_rate=0.02):
    """
    Arguments:
    model -- python dictionary containing your parameters
    grads -- python dictionary containing your gradients
    d     --    Number of data samples

    Returns:
    model -- with updated W and b
    """

    layer = 1  # ToDo - generalize
    W, b, Lm, Ln, G, Wc, Mean, Var = utils.get_model_WbLGWcMV(model, layer)

    dW = grads['dW1']  # Todo - generalize
    db = grads['db1']

    # Update rule for each parameter
    W = W - learning_rate * dW
    b = b - (learning_rate * db) / (d * 100)  # ToDo - ad-hoc extra damping on the bias step; revisit

    model = utils.set_model_WbLGWcMV(model, layer, W, b, Lm, Ln, G, Wc, Mean,
                                     Var)
    return model
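
The update itself is plain gradient descent, with an extra ad-hoc damping factor on the bias; a toy run of the rule with invented numbers:

import numpy as np

W = np.array([[1.0, 2.0]])
b = np.array([[0.5]])
grads = {"dW1": np.array([[0.1, -0.2]]), "db1": np.array([[0.05]])}
d, learning_rate = 10, 0.02

W = W - learning_rate * grads["dW1"]                # [[0.998  2.004]]
b = b - (learning_rate * grads["db1"]) / (d * 100)  # [[0.499999]]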