Example #1
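The learn method should validate its arguments and raise ValueError rather than segfault on malformed input: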
def test_possible_segfaults_rprop():
    # Passing invalid arguments must raise ValueError instead of crashing
    import numpy as np
    from ann import rpropnetwork
    net = rpropnetwork(2, 0, 1)

    X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=float)
    Y = np.array([[0], [1], [1], [0]], dtype=float)

    failed = False
    try:
        net.learn(0, Y)
    except ValueError:
        failed = True
    assert failed

    failed = False
    try:
        net.learn(X, 0)
    except ValueError:
        failed = True
    assert failed

    failed = False
    try:
        net.learn(0, 0)
    except ValueError:
        failed = True
    assert failed
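The same checks can be written more compactly with pytest.raises — a minimal sketch, assuming pytest is available in the test environment:

def test_possible_segfaults_rprop_pytest():
    import numpy as np
    import pytest
    from ann import rpropnetwork

    net = rpropnetwork(2, 0, 1)
    X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=float)
    Y = np.array([[0], [1], [1], [0]], dtype=float)

    # Each malformed argument pair should raise ValueError
    for bad_args in [(0, Y), (X, 0), (0, 0)]:
        with pytest.raises(ValueError):
            net.learn(*bad_args)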
Example #2
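This test wires a 2-8-1 network by hand, trains it on XOR with the mean-squared-error function, and asserts that the truth table is reproduced: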
def test_rpropnetwork_mse_xor():
    import numpy as np
    from ann import rpropnetwork

    net = rpropnetwork(2, 8, 1)
    # Wire the network by hand: one row per unit in the weight matrix
    # (inputs + hidden + outputs + bias)
    l = net.input_count + net.hidden_count + net.output_count + 1
    weights = net.weights.ravel()
    conns = net.connections.ravel()
    act = net.activation_functions
    # Stop before the last row; the output unit is configured below
    for i in range(l - 1):
        # connect hidden units to the two inputs and the bias
        weights[l * i:l * i + 3] = np.random.normal()
        conns[l * i:l * i + 3] = 1
        conns[l * i + i] = 1
        act[i] = net.TANH

    # Output unit: connect it to every unit and the bias
    weights[l * (l - 1):] = np.random.normal(size=l)
    conns[l * (l - 1):] = 1
    act[l - 1] = net.LOGSIG

    net.weights = weights
    net.connections = conns
    net.activation_functions = act


    xor_in, xor_out = getXOR()

    net.max_error = 0.001
    net.max_epochs = 1000

    net.error_function = net.ERROR_MSE

    net.learn(xor_in, xor_out)

    print("\nResults")
    for val in xor_in:
        print("In:", val, " out:", net.output(val))
    for val in xor_in:
        if sum(val) != 1:
            assert net.output(val) < 0.1, "XOR solution doesn't work"
        else:
            assert net.output(val) > 0.9, "XOR solution doesn't work"
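getXOR is a helper defined elsewhere in the test module. Judging from the arrays used in Example #1, it presumably returns the XOR truth table — a hypothetical reconstruction:

def getXOR():
    # Hypothetical helper: the XOR truth table as float arrays,
    # matching the shapes that net.learn expects.
    import numpy as np
    xor_in = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=float)
    xor_out = np.array([[0], [1], [1], [0]], dtype=float)
    return xor_in, xor_out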
Example #3
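This helper benchmarks how long training takes on synthetic data of a given shape: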
def time_learn(rows, cols):
    """Return the time elapsed to train for 1000 epochs on the data"""
    import time
    from ann import rpropnetwork
    from ann.utils import connect_feedforward

    # get_test_data is a helper defined elsewhere in the benchmark module
    x, y = get_test_data(rows, cols)

    net = rpropnetwork(x.shape[1], 8, 1)
    connect_feedforward(net)

    # maxError = 0 is unreachable, so training runs the full maxEpochs
    net.maxError = 0
    net.maxEpochs = 1000

    # Time it
    start = time.time()
    net.learn(x, y)
    # Final time
    elapsed = time.time() - start

    return elapsed
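A hypothetical sweep over dataset sizes; the row counts here are made up, and get_test_data is assumed to be importable:

for rows in (100, 500, 1000):
    print(rows, "rows:", time_learn(rows, 5), "seconds")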
Example #4
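This regression test checks that feeding a row as a numpy array and as a Python list yields identical outputs: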
def test_output():
    """A numpy array must give the same output as a Python list"""

    import numpy as np
    from ann import rpropnetwork
    from ann.utils import connect_feedforward

    data = np.random.normal(size=(10, 6))
    np.random.shuffle(data)

    incols = list(range(2, 6))
    outcols = [1, 0]

    net = rpropnetwork(len(incols), 3, len(outcols))
    connect_feedforward(net)

    for row in data[:3, incols]:
        np_out = net.output(row)
        py_out = net.output(list(row))

        for n, p in zip(np_out, py_out):
            assert n == p
Example #5
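This function trains a two-output network on censored survival data and compares the concordance index and deviation before and after training: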
def rpropnetwork_surv(datafunc, inputcount, censfrac, errorfunc):
    '''
    Train on censored survival data. datafunc must return the dataset;
    errorfunc selects the error function to train with.
    '''
    import numpy as np
    from ann import rpropnetwork, cindex as get_C_index

    net = rpropnetwork(inputcount, 8, 2)

    # Wire the network by hand: one row per unit in the weight matrix
    # (inputs + hidden + outputs + bias)
    l = net.input_count + net.hidden_count + net.output_count + 1
    weights = net.weights.ravel()
    conns = net.connections.ravel()
    act = net.activation_functions
    # Stop before the last row; the output rows are configured below
    for i in range(l - 1):
        # connect hidden units to the inputs and bias
        weights[l * i:l * i + (net.input_count + 1)] = np.random.normal(
            size=(net.input_count + 1))
        conns[l * i:l * i + (net.input_count + 1)] = 1
        act[i] = net.TANH

    # Output: connect the first output unit to every unit and the bias;
    # both output units get linear activation
    weights[l * (l - 2):l * (l - 1)] = np.random.normal(size=l)
    conns[l * (l - 2):l * (l - 1)] = 1
    act[l - 2:] = net.LINEAR

    net.weights = weights
    net.connections = conns
    net.activation_functions = act

    net.max_error = 0.01
    net.max_epochs = 100
    net.error_function = errorfunc

    surv_in, surv_out = datafunc(net.input_count, 100, censfrac)

    # fraction of censored samples (event indicator == 0)
    censcount = len(surv_out[surv_out[:, 1] == 0])
    frac = censcount / len(surv_out)

    preds_before = np.zeros((len(surv_in), 2))
    olddev = 0
    for i, (val, target) in enumerate(zip(surv_in, surv_out)):
        preds_before[i] = net.output(val)
        if target[1] > 0:
            olddev += (target[0] - net.output(val)[0])**2

    olddev = np.sqrt(olddev / len(surv_out))
    cindex_before = get_C_index(surv_out, preds_before[:, 0])

    net.learn(surv_in, surv_out)

    preds_after = np.zeros((len(surv_out), 2))
    newdev = 0
    for i, (val, target) in enumerate(zip(surv_in, surv_out)):
        preds_after[i] = net.output(val)
        if target[1] > 0:
            newdev += (target[0] - net.output(val)[0])**2

    cindex_after = get_C_index(surv_out, preds_after[:, 0])
    newdev = np.sqrt(newdev / len(surv_out))

    stats = """
    censfrac: {:.2f},
    cindex: {:.3f} -> {:.3f},
    deviation: {:.3f} -> {:.3f}""".format(censfrac,
                                          cindex_before, cindex_after,
                                          olddev, newdev)
    msg = "Expected {} to change differently: " + stats

    # These assertions fail probabilistically, so they are left disabled
    #assert newdev < olddev, msg.format("deviation")
    #assert cindex_before < cindex_after, msg.format("cindex")
    assert net.activation_functions[-2] == net.LINEAR, \
        "Incorrect activation function on the output units"