def test_shape_of_weights():
    """Outputs must be identical whether weights are assigned as a matrix
    or as a flat vector — shape should not matter."""
    from ann import matrixnetwork

    net = matrixnetwork(2, 3, 1)
    patterns = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=float)

    # Fully connect the network and give it random weights.
    net.connections[:] = 1
    net.weights = np.random.normal(size=net.weights.shape)
    out_matrix = np.array([net.output(p) for p in patterns])

    # Re-assign the very same weights as a flat vector; outputs must match.
    net.weights = net.weights.ravel()
    out_flat = np.array([net.output(p) for p in patterns])

    assert np.all(out_matrix == out_flat), "Shape should not matter"
def test_wrapper_segfault():
    """Assigning a bogus scalar to any network property must raise
    (ValueError or AttributeError) instead of crashing in output()."""
    from ann import matrixnetwork

    net = matrixnetwork(2, 0, 1)

    def expect_rejection(attr, restore, message):
        # Try the invalid assignment followed by an output() call; one of
        # them must raise before native code can segfault.
        raised = False
        try:
            setattr(net, attr, 1)
            net.output([1, -1])
        except (ValueError, AttributeError):
            raised = True
        # Restore a valid value (evaluated lazily, after the failed set)
        # so subsequent cases operate on a sane network.
        setattr(net, attr, restore())
        assert raised, message

    expect_rejection("weights", lambda: net._weights + 1,
                     "Incorrect weight value should not work")
    expect_rejection("connections", lambda: net._connections + 1,
                     "Incorrect connection should not work")
    expect_rejection("activation_functions",
                     lambda: net._activation_functions + 1,
                     "Incorrect actfuncs should not work")
    expect_rejection("dropout_probabilities",
                     lambda: net._dropout_probabilities - 0.5,
                     "Incorrect dropout should not work")
# NOTE(review): duplicate definition — this shadows the earlier
# test_shape_of_weights, so pytest only collects this copy. Consider
# deleting one of the two.
def test_shape_of_weights():
    """The network's output must not depend on whether its weights were
    set from a 2-D matrix or from the equivalent flat vector."""
    from ann import matrixnetwork

    net = matrixnetwork(2, 3, 1)
    xor_inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=float)

    # Enable every connection, then randomize the weight matrix.
    net.connections[:] = 1
    net.weights = np.random.normal(size=net.weights.shape)

    shaped_outputs = []
    for row in xor_inputs:
        shaped_outputs.append(net.output(row))
    shaped = np.array(shaped_outputs)

    # Same weights, flattened — outputs must be unchanged.
    net.weights = net.weights.ravel()
    flat_outputs = []
    for row in xor_inputs:
        flat_outputs.append(net.output(row))
    flat = np.array(flat_outputs)

    assert np.all(shaped == flat), "Shape should not matter"
def test_possible_segfaults_matrix():
    """Writing scalar garbage to the private backing attributes, or feeding
    output() a scalar, must raise ValueError rather than segfault."""
    from ann import matrixnetwork

    net = matrixnetwork(2, 0, 1)

    bad_operations = (
        lambda: setattr(net, "_weights", 1),
        lambda: setattr(net, "_connections", 1),
        lambda: setattr(net, "_activation_functions", 1),
        lambda: setattr(net, "_dropout_probabilities", 1),
        lambda: net.output(0),
    )

    for attempt in bad_operations:
        raised = False
        try:
            attempt()
        except ValueError:
            raised = True
        assert raised
def test_matrixnetwork():
    """End-to-end exercise of matrixnetwork: default state, property
    round-trips (weights/connections/dropout/activation functions), a
    hand-wired XOR solution, and a pickle round-trip."""
    from ann import matrixnetwork

    net = matrixnetwork(2, 2, 1)

    # Default start is zero: nothing connected yet, so every output is 0.
    xor_in, xor_out = getXOR()
    for val in xor_in:
        assert net.output(val) == 0, "Expected zero output as default"

    # Total unit count: inputs + hidden + outputs + 1 (bias).
    length = net.input_count + net.hidden_count + net.output_count + 1

    # Weights: a length x length matrix, all zero by default.
    assert len(net.weights.ravel()) == length**2, \
        "Wrong length of weight vector"
    assert np.all(net.weights == 0), "Expected weights to equal zero"
    weights = net.weights.ravel()
    weights[:] = 1.0  # fill in place (same effect as the old index loop)
    net.weights = weights
    assert np.all(net.weights == 1.0), "Expected weights to equal 1"

    # Connections: same shape as weights, all zero by default.
    assert len(net.connections.ravel()) == length**2, \
        "Wrong length of conns vector"
    assert np.all(net.connections == 0), "Expected conns to equal zero"
    conns = net.connections.ravel()
    conns[:] = 1
    net.connections = conns
    assert np.all(net.connections == 1), "Expected conns to equal 1"

    # Dropout probabilities default to 1 (keep everything) and round-trip.
    assert np.all(net.dropout_probabilities == 1), \
        "Expected default dropout probs to be 1"
    dp = net.dropout_probabilities
    dp[:] = 0.5
    net.dropout_probabilities = dp
    assert np.all(net.dropout_probabilities == 0.5), \
        "Expected dropout probs to be 0.5"
    # Set back to one again.
    dp[:] = 1.0
    net.dropout_probabilities = dp
    assert np.all(net.dropout_probabilities == 1), \
        "Expected dropout probs to be 1"

    # Activation functions: one per unit, LOGSIG by default.
    assert len(net.activation_functions) == length, \
        "Wrong length of funcs vector"
    # FIX: message previously said "LINEAR" although the check is LOGSIG.
    assert np.all(net.activation_functions == net.LOGSIG), \
        "Expected funcs to be LOGSIG"
    actfuncs = net.activation_functions
    actfuncs[:] = net.TANH
    net.activation_functions = actfuncs
    assert np.all(net.activation_functions == net.TANH), \
        "Expected actfuncs to be TANH"

    # Fully connected with unit weights: outputs must now be nonzero.
    for val in xor_in:
        assert net.output(val) != 0, "Expected some output"

    # Solve XOR by hand: clear all connections, then wire feed-forward.
    conns[:] = 0
    net.connections = conns
    from ann.utils import connect_feedforward
    connect_feedforward(net)

    # We care only about the two hidden neurons and the output neuron.
    actfuncs[3] = net.LOGSIG
    actfuncs[4] = net.LOGSIG
    actfuncs[5] = net.LINEAR
    net.activation_functions = actfuncs

    # Hand-crafted XOR weights; row i holds unit i's incoming weights
    # (index 2 presumably addresses the bias — matches the wiring above).
    weights[:] = 0
    weights[3 * length + 0] = -60
    weights[3 * length + 1] = 60
    weights[3 * length + 2] = -30
    weights[4 * length + 0] = 60
    weights[4 * length + 1] = -60
    weights[4 * length + 2] = -30
    weights[(length - 1) * length + 3] = 1
    weights[(length - 1) * length + 4] = 1
    net.weights = weights

    # FIX: use length instead of the hard-coded 6 (length == 6 here).
    print(net.connections.reshape((length, length)))
    print(net.weights.reshape((length, length)))
    print(net.activation_functions)

    for val in xor_in:
        print("In:", val, " out:", net.output(val))
        if sum(val) != 1:
            assert net.output(val) < 0.1, "xor solution doesnt work"
        else:
            assert net.output(val) > 0.9, "xor solution doesnt work"

    # Severing every connection must silence the network again.
    conns[:] = 0
    net.connections = conns
    for val in xor_in:
        assert net.output(val) == 0, "no conns should mean zero output!"

    # Pickle round-trip must preserve the full network state.
    import pickle
    state = pickle.dumps(net, -1)
    net2 = pickle.loads(state)
    assert np.all(net.weights == net2.weights), "weights don't match"
    assert np.all(net.connections == net2.connections), "conns don't match"
    assert np.all(net.activation_functions == net2.activation_functions), \
        "functions don't match"
# NOTE(review): duplicate definition — this shadows the earlier
# test_matrixnetwork, so pytest only collects this copy. Consider
# deleting one of the two.
def test_matrixnetwork():
    """End-to-end exercise of matrixnetwork: default state, property
    round-trips (weights/connections/dropout/activation functions), a
    hand-wired XOR solution, and a pickle round-trip."""
    from ann import matrixnetwork

    net = matrixnetwork(2, 2, 1)

    # Default start is zero: nothing connected yet, so every output is 0.
    xor_in, xor_out = getXOR()
    for val in xor_in:
        assert net.output(val) == 0, "Expected zero output as default"

    # Total unit count: inputs + hidden + outputs + 1 (bias).
    length = net.input_count + net.hidden_count + net.output_count + 1

    # Weights: a length x length matrix, all zero by default.
    assert len(net.weights.ravel()) == length**2, \
        "Wrong length of weight vector"
    assert np.all(net.weights == 0), "Expected weights to equal zero"
    weights = net.weights.ravel()
    weights[:] = 1.0  # fill in place (same effect as the old index loop)
    net.weights = weights
    assert np.all(net.weights == 1.0), "Expected weights to equal 1"

    # Connections: same shape as weights, all zero by default.
    assert len(net.connections.ravel()) == length**2, \
        "Wrong length of conns vector"
    assert np.all(net.connections == 0), "Expected conns to equal zero"
    conns = net.connections.ravel()
    conns[:] = 1
    net.connections = conns
    assert np.all(net.connections == 1), "Expected conns to equal 1"

    # Dropout probabilities default to 1 (keep everything) and round-trip.
    assert np.all(net.dropout_probabilities == 1), \
        "Expected default dropout probs to be 1"
    dp = net.dropout_probabilities
    dp[:] = 0.5
    net.dropout_probabilities = dp
    assert np.all(net.dropout_probabilities == 0.5), \
        "Expected dropout probs to be 0.5"
    # Set back to one again.
    dp[:] = 1.0
    net.dropout_probabilities = dp
    assert np.all(net.dropout_probabilities == 1), \
        "Expected dropout probs to be 1"

    # Activation functions: one per unit, LOGSIG by default.
    assert len(net.activation_functions) == length, \
        "Wrong length of funcs vector"
    # FIX: message previously said "LINEAR" although the check is LOGSIG.
    assert np.all(net.activation_functions == net.LOGSIG), \
        "Expected funcs to be LOGSIG"
    actfuncs = net.activation_functions
    actfuncs[:] = net.TANH
    net.activation_functions = actfuncs
    assert np.all(net.activation_functions == net.TANH), \
        "Expected actfuncs to be TANH"

    # Fully connected with unit weights: outputs must now be nonzero.
    for val in xor_in:
        assert net.output(val) != 0, "Expected some output"

    # Solve XOR by hand: clear all connections, then wire feed-forward.
    conns[:] = 0
    net.connections = conns
    from ann.utils import connect_feedforward
    connect_feedforward(net)

    # We care only about the two hidden neurons and the output neuron.
    actfuncs[3] = net.LOGSIG
    actfuncs[4] = net.LOGSIG
    actfuncs[5] = net.LINEAR
    net.activation_functions = actfuncs

    # Hand-crafted XOR weights; row i holds unit i's incoming weights
    # (index 2 presumably addresses the bias — matches the wiring above).
    weights[:] = 0
    weights[3 * length + 0] = -60
    weights[3 * length + 1] = 60
    weights[3 * length + 2] = -30
    weights[4 * length + 0] = 60
    weights[4 * length + 1] = -60
    weights[4 * length + 2] = -30
    weights[(length - 1) * length + 3] = 1
    weights[(length - 1) * length + 4] = 1
    net.weights = weights

    # FIX: use length instead of the hard-coded 6 (length == 6 here).
    print(net.connections.reshape((length, length)))
    print(net.weights.reshape((length, length)))
    print(net.activation_functions)

    for val in xor_in:
        print("In:", val, " out:", net.output(val))
        if sum(val) != 1:
            assert net.output(val) < 0.1, "xor solution doesnt work"
        else:
            assert net.output(val) > 0.9, "xor solution doesnt work"

    # Severing every connection must silence the network again.
    conns[:] = 0
    net.connections = conns
    for val in xor_in:
        assert net.output(val) == 0, "no conns should mean zero output!"

    # Pickle round-trip must preserve the full network state.
    import pickle
    state = pickle.dumps(net, -1)
    net2 = pickle.loads(state)
    assert np.all(net.weights == net2.weights), "weights don't match"
    assert np.all(net.connections == net2.connections), "conns don't match"
    assert np.all(net.activation_functions == net2.activation_functions), \
        "functions don't match"