# --- Esempio n. 1 (Example 1) ---
# Build several noisy copies of `signal` (a 100-sample waveform defined
# earlier, out of view) with different noise amplitudes, then train a
# two-layer network to reconstruct the clean signal from noisy input.
signalNoise6 = signal + noise

# A negative coefficient only flips the noise sign; the amplitude is |coef|.
noise = -2.5 * np.random.normal(0, 1, 100)
signalNoise7 = signal + noise

noise = -1 * np.random.normal(0, 1, 100)
signalNoise8 = signal + noise

noise = 0.25 * np.random.normal(0, 1, 100)
signalNoise9 = signal + noise

random.seed(16)  # fixed seed so the random initial weights are reproducible
layer1 = NeuronLayer(200, 100)  # hidden layer: 200 neurons fed by 100 samples
layer2 = NeuronLayer(100, 200)  # output layer: 100 neurons fed by the 200 above
# print(signalNoise1)
neural_network = NeuralNetwork([layer1, layer2], learning_rate=0.5)
# Each training input row is one noisy realization of the same signal.
training_set_inputs = array([
    signalNoise1, signalNoise2, signalNoise3, signalNoise4, signalNoise5,
    signalNoise6, signalNoise7, signalNoise8, signalNoise9
])
# print(np.shape(training_set_inputs))
# Every row has the same target: the clean signal.
training_set_outputs = array(
    [signal, signal, signal, signal, signal, signal, signal, signal, signal])
# print(np.shape(training_set_outputs))

neural_network.train(training_set_inputs, training_set_outputs, 40000)
# NOTE(review): `signalNoise` (no index) is not defined in this chunk —
# presumably created in an earlier cell; confirm it is the intended test input.
outputs = neural_network.think(signalNoise)
plt.subplot(4, 1, 4)
plt.plot(time, outputs[len(outputs) - 1])  # plot the final layer's output

plt.title('Sine wave after AI')
# --- Esempio n. 2 (Example 2) ---
# XOR demo: train a 2-2-1 multilayer perceptron on the four XOR patterns,
# then query it on two individual inputs and print the network's answer.
from modules.neural_network import NeuralNetwork, NeuronLayer
from numpy import array, random

if __name__ == "__main__":
    # Fixed seed -> reproducible initial weights.
    random.seed(1)

    hidden_layer = NeuronLayer(2, 2)   # 2 neurons, each reading 2 inputs
    output_layer = NeuronLayer(1, 2)   # 1 neuron reading the 2 hidden outputs
    net = NeuralNetwork([hidden_layer, output_layer])

    print("Iniciando com pesos randomicos")
    net.print_weights()

    xor_inputs = array([[0, 0], [0, 1], [1, 0], [1, 1]])
    xor_targets = array([[0, 1, 1, 0]]).T  # XOR labels as a column vector

    net.train(xor_inputs, xor_targets, 60000)
    print("pesos após o treino")
    net.print_weights()

    print("Considerando uma nova situação [0, 0] -> 0: ")
    outputs = net.think(array([0, 0]))
    print("Saida:\n")
    print(outputs[-1])  # activation of the final layer

    print("Considerando uma nova situação [1, 0] -> 1: ")
    outputs = net.think(array([1, 0]))
    print("Saida:\n")
    print(outputs[-1])
# --- Esempio n. 3 (Example 3) ---
    }, 
    # NOTE(review): the head of this training list is above this chunk and
    # out of view.  The visible samples label [1,0] -> 0 and [1,1] -> 1,
    # which is NOT standard XOR — confirm the intended truth table against
    # the probe labels printed below.
    {
        'inp': [1,0],
        'out': [0]
    }, 
    {
        'inp': [1,1],
        'out': [1]
    },
    # Duplicate sample — harmless with random.choice, it just doubles the
    # probability of drawing [1,1] during training.
    {
        'inp': [1,1],
        'out': [1]
    } 
]

# Train a 2-2-1 network by drawing random samples from `training_data`
# (defined above), then print the network's raw output for two probes.
nn = NeuralNetwork(2, 2, 1)

for _ in range(500_000):
    sample = random.choice(training_data)
    nn.train(sample['inp'], sample['out'])

print('O xor 0 (1): ')
nn.feedforward([0, 0]).print()
print('\n')

print('O xor 1 (0): ')
nn.feedforward([0, 1]).print()
print('\n')
# --- Esempio n. 4 (Example 4) ---
# XOR demo for a [2, 2, 1] feed-forward network trained via .fit().
from modules.neural_network import NeuralNetwork
import numpy as np

# Create XOR dataset (the original comment said "OR", but the targets
# below are 0, 1, 1, 0 — that is XOR, not OR).
X = np.array([
    [0,0],
    [0,1],
    [1,0],
    [1,1]
])

# XOR labels: 1 exactly when the two inputs differ.
y = np.array([
    [0],
    [1],
    [1],
    [0]
])

# 2 inputs, one 2-neuron hidden layer, 1 output; alpha is the learning rate.
nn = NeuralNetwork([2,2,1], alpha=0.5)
print("[INFO] Training the network...")
nn.fit(X,y,epochs=20000)
print("[INFO] Testing the network...")
for (x,target) in zip(X,y):
    # The network emits a continuous value; threshold at 0.5 for the class.
    pred = nn.predict(x)[0][0]
    step = 1 if pred > 0.5 else 0
    print("[INFO] data={}, ground-truth={}, pred={}, step={}".format(x, target[0], pred, step))
# --- Esempio n. 5 (Example 5) ---
# XOR with a deeper 2-4-3-1 network using ReLU activations and a small
# learning rate; otherwise the same demo flow as the sigmoid example.
from modules.neural_network import NeuralNetwork, NeuronLayer
from numpy import array, random
from modules.math_functions import math_functions

if __name__ == "__main__":
    # Fixed seed -> reproducible initial weights.
    random.seed(16)

    first_hidden = NeuronLayer(4, 2)   # 4 neurons fed by the 2 inputs
    second_hidden = NeuronLayer(3, 4)  # 3 neurons fed by the 4 above
    output_layer = NeuronLayer(1, 3)   # single output neuron
    net = NeuralNetwork([first_hidden, second_hidden, output_layer],
                        activation=math_functions.relu,
                        delta=math_functions.dRelu,
                        learning_rate=0.0001)

    print("Iniciando com pesos randomicos")
    net.print_weights()

    xor_inputs = array([[0, 0], [0, 1], [1, 0], [1, 1]])
    xor_targets = array([[0, 1, 1, 0]]).T  # XOR labels as a column vector

    # The tiny learning rate is compensated by many more iterations.
    net.train(xor_inputs, xor_targets, 1000000)
    print("Pesos após o treino")
    net.print_weights()

    print("Considerando uma nova situação [0, 0] -> 0: ")
    outputs = net.think(array([0, 0]))
    print("Saida:\n")
    print(outputs[-1])  # activation of the final layer

    print("Considerando uma nova situação [1, 0] -> 1: ")
    outputs = net.think(array([1, 0]))
    print("Saida:\n")
    print(outputs[-1])
# --- Esempio n. 6 (Example 6) ---
    # CLI: data file path plus plotting/training options.
    parser = argparse.ArgumentParser()
    parser.add_argument("data", help="data per line, [x[0],x[1],label] data separated by tab")
    parser.add_argument("-z", "--zview", action="store_true")
    parser.add_argument("-e", "--epoch", type=int, default=1000)
    # --no-activate stores False into args.activate (store_false action).
    parser.add_argument("--no-activate", dest="activate", action="store_false")
    args = parser.parse_args()

    data = Data(args.data)

    # add axis
    # NOTE(review): `plt` here exposes add_sub_ax/quiver_main/plot_main,
    # which are not matplotlib.pyplot APIs — presumably a project wrapper
    # module imported above this chunk; confirm.
    if args.zview:
        plt.add_sub_ax()


    #parameter: yita = learning ratio, w = initial weight
    nn = NeuralNetwork(2, data.labels[1], activate=args.activate)
    frames = [plt.quiver_main(nn.w, color="magenta")]

    # set initial plot
    plt.plot_main(data.x1, data.x2)
    plt.quiver_main(data.x1.mean(axis=1), color="pink")
    if args.zview:
        update_sub(nn, data)

    # this is animation
    def update_plot(i, nn, frames):
        # One animation step: train on the full dataset, then remove the
        # previously drawn artists so the next frame can redraw them.
        nn.update(data.x_all, data.y_all)

        # update fig
        while frames:
            frames.pop().remove()
from sklearn import datasets

# Load the scikit-learn handwritten-digits dataset (NOT the full 28x28 MNIST;
# load_digits returns the 8x8 UCI digits) and apply min/max scaling so pixel
# intensities lie in [0, 1].  Each image is an 8x8 = 64-dim feature vector.
digits = datasets.load_digits()
data = digits.data.astype('float')
data = (data - data.min()) / (data.max() - data.min())
print('[INFO]: Samples={}, Dimension={}'.format(data.shape[0], data.shape[1]))

# Construct the training and testing splits (75/25, fixed random_state for
# reproducibility).
# NOTE(review): train_test_split, LabelBinarizer, classification_report and
# NeuralNetwork are used below but not imported in this chunk — presumably
# imported earlier; confirm.
(trainX, testX, trainY, testY) = train_test_split(data,
                                                  digits.target,
                                                  test_size=0.25,
                                                  random_state=42)

# Convert the integer labels to one-hot vectors
trainY = LabelBinarizer().fit_transform(trainY)
testY = LabelBinarizer().fit_transform(testY)

# Train the network: 64 inputs, two hidden layers (32, 16), 10 output classes
print('[INFO]: Training....')
nn = NeuralNetwork([trainX.shape[1], 32, 16, 10])
print('[INFO]: {}'.format(nn))
nn.fit(trainX, trainY, epochs=1000)

# Test the network: argmax recovers the predicted class index
print('[INFO]: Testing....')
predictions = nn.predict(testX)
predictions = predictions.argmax(axis=1)
print(classification_report(testY.argmax(axis=1), predictions))
# Hyper-parameters
sequence_length = 28
input_size = 300     # presumably the word-embedding dimension — TODO confirm
hidden_size = 50
num_layers = 2
# 'toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate'
num_classes = 6
batch_size = 25
num_epochs = 2
learning_rate = 0.003

# Prefer GPU when available.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(device)

# Load and pre-process the raw training CSV.
dataset_df = pd.read_csv(DATA_TRAIN_FILE)
pre_process = PreProcessStage(dataset_df)
pre_process.pre_process_dataset()

# Build the embedding matrix restricted to the dataset's vocabulary.
embedding_model = WordEmbedding(WORD2VEC_FILE_SLIM)
embedding_matrix = embedding_model.load_from_vocabulary(pre_process.word_2_idx)

model = NeuralNetwork(hidden_size, embedding_matrix, num_classes)

# NOTE(review): CrossEntropyLoss expects a single class index per sample;
# with six independent toxicity flags a multi-label loss such as
# BCEWithLogitsLoss is more usual — confirm how labels are encoded.
loss_function = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

# Training loop (its body continues past this chunk).  NOTE(review):
# `input` shadows the Python builtin — consider renaming it where the
# full loop body is defined.
for epoch in range(num_epochs):
    for input, label in zip(pre_process.input_data, pre_process.labels):
        print(input)
        print(label)