Example #1
import sys
import time

import numpy as np
import skimage.measure
import torch

import nn     # project-local model helpers (assumed)
import paint  # project-local drawing GUI (assumed)


def main(argv):
    model = nn.load(argv[1])
    gui = paint.gui()
    print("past init")
    while True:
        # Wake on each 2-second boundary of the monotonic clock.
        time.sleep(2 - time.monotonic() % 2)
        image = gui.get_image()
        data = np.array(image).astype(np.float32)
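        # Downsample by averaging non-overlapping 10x10 pixel blocks.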
        data = skimage.measure.block_reduce(data,
                                            block_size=(10, 10),
                                            func=np.mean)
        data = torch.from_numpy(data)
        data = data[None, None, :, :]  # correct the number of dimensions
        print(data.size())
        nn.predict(model, data)
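
# Standard script entry point (not part of the original snippet).
if __name__ == "__main__":
    main(sys.argv)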
Example #2
def update(words, gold_tags, gold_arclabels, gold_tree, i):
    global x_train, y_train, x_test, y_test
    buffer = words
    stack = []
    pdt = [0]*len(words)
    #tags = self.tagger.update(words, gold_tags)
    #arc_tags = self.arc_tagger.update(words, gold_arclabels)  # TODO: This is wrong, currently only looking at words, not pdt and words
    x = 0
    while True:
        g_move = gold_move(x, stack, pdt, gold_tree)
        if g_move is None:
            break
        #feature = features(words,tags,x,stack,pdt)
        if i == 0:
            y_train.append(g_move)
        else:
            y_test.append(g_move)

        dummy = nn.predict(buffer, stack, pdt, x, gold_tags, gold_arclabels)
        if i == 0:
            x_train.append(dummy)
        else:
            x_test.append(dummy)
        x, stack, pdt = move(x, stack, pdt, g_move)

    return pdt
Example #3
def fitness_func(solution, sol_idx):
    global GANN_instance, data_inputs, data_outputs

    predictions = nn.predict(last_layer=GANN_instance.population_networks[sol_idx],
                             data_inputs=data_inputs)
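    # Fitness = classification accuracy (%) of this solution's network.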
    correct_predictions = numpy.where(predictions == data_outputs)[0].size
    solution_fitness = (correct_predictions/data_outputs.size)*100

    return solution_fitness
Example #4
    def test_predicts(self):
        np.random.seed(1)
        X_assess = np.random.randn(2, 3)
        parameters = {'W1': np.array([[-0.00615039,  0.0169021 ],
                                      [-0.02311792,  0.03137121],
                                      [-0.0169217 , -0.01752545],
                                      [ 0.00935436, -0.05018221]]),
                      'W2': np.array([[-0.0104319 , -0.04019007,  0.01607211,  0.04440255]]),
                      'b1': np.array([[-8.97523455e-07],
                                      [ 8.15562092e-06],
                                      [ 6.04810633e-07],
                                      [-2.54560700e-06]]),
                      'b2': np.array([[9.14954378e-05]])}
        preds = nn.predict(parameters, X_assess)
        self.assertEqual(np.mean(preds), 0.6666666666666666)
Example #5
def get_acc(nn, X, Y):
    """
    Calculates the accuracy of a neural network 
    
    Parameters:
        - nn: the neural network
        - X: the design matrix with shape (n, m), where n is the number
            of features and m is the number of examples
        - Y: the labels as an array with shape (m,), where m is the
            number of examples
        
    Returns:
        - acc: the accuracy nn gets on predicting the labels
    """
    m = X.shape[1]
    Y_hat = nn.predict(X)
    acc = np.sum(Y == Y_hat) / m
    return acc
Example #6
    return X, y
train_set_x, train_set_y = prepare_data(train_images)
test_set_x, test_set_y = prepare_data(test_images)

# Flatten each image into one column per example and scale pixels to [0, 1].
train_set_x_flatten = train_set_x.reshape(train_set_x.shape[0], ROWS*COLS*CHANNELS).T
test_set_x_flatten = test_set_x.reshape(test_set_x.shape[0], -1).T
train_set_x = train_set_x_flatten / 255
test_set_x = test_set_x_flatten / 255

NN = nn.model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations=10000, learning_rate=0.003, print_loss=True)
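# Single-image sanity check: load as grayscale, resize, flatten, predict.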
im = cv2.imread('test/dog1.jpg', 0)  # 0 = load as grayscale
im = cv2.resize(im, (ROWS, COLS))
test = im.reshape(1, ROWS*COLS).T
pred = nn.predict(NN["w"], NN["b"], test)
print(pred)
learning_rates = [0.001, 0.01, 0.005]
models = {}
for i in learning_rates:
    print("learning rate is:", i)
    models[i] = nn.model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations=10000, learning_rate=i, print_loss=True)
    print("---------------------------------------------------------")


for i in learning_rates:
    plt.plot(np.squeeze(models[i]["loss"]), label=str(models[i]["learning_rate"]))
plt.ylabel('Loss')
plt.xlabel("iterations (hundreds)")
legend = plt.legend(loc='upper center', shadow=True)
frame = legend.get_frame()
plt.show()  # render the learning-rate comparison plot
Example #7
import cv2
import numpy as np

import nn  # project-local digit classifier (assumed)

# The mouse callback was truncated in the original; this is an assumed minimal
# reconstruction that draws while the left button is held.
drawing = False
last_x, last_y = 0, 0

def draw_line(event, x, y, flags, param):
    global drawing, last_x, last_y
    if event == cv2.EVENT_LBUTTONDOWN:
        drawing = True
        last_x, last_y = x, y
    elif event == cv2.EVENT_MOUSEMOVE and drawing:
        cv2.line(img, (last_x, last_y), (x, y), color=(255), thickness=75)
        last_x, last_y = x, y
    elif event == cv2.EVENT_LBUTTONUP:
        drawing = False

img = np.zeros((600,600,1), np.uint8)
cv2.namedWindow('draw')
cv2.setMouseCallback('draw',draw_line)


print('  _   _ _    _ __  __ ____  ______ _____    _____  ______ _______ ______ _____ _______ _____ ____  _   _ ')
print(' | \ | | |  | |  \/  |  _ \|  ____|  __ \  |  __ \|  ____|__   __|  ____/ ____|__   __|_   _/ __ \| \ | |')
print(' |  \| | |  | | \  / | |_) | |__  | |__) | | |  | | |__     | |  | |__ | |       | |    | || |  | |  \| |')
print(' | . ` | |  | | |\/| |  _ <|  __| |  _  /  | |  | |  __|    | |  |  __|| |       | |    | || |  | | . ` |')
print(' | |\  | |__| | |  | | |_) | |____| | \ \  | |__| | |____   | |  | |___| |____   | |   _| || |__| | |\  |')
print(' |_| \_|\____/|_|  |_|____/|______|_|  \_\ |_____/|______|  |_|  |______\_____|  |_|  |_____\____/|_| \_|')
print('---------------------------------------------------------------------------------------------------------')
print('[Enter] Predict Number | [C]lear | [Q]uit')
while True:
    cv2.imshow('draw',img)
    key = cv2.waitKey(1)

    if key == ord('q'):
        break
    if key == ord('c'):
        img = np.zeros((600,600,1), np.uint8)
    if key == 13:  # 13 = Enter
        image = cv2.resize(img,(28,28))
        prediction = nn.predict(image)
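        # argmax over the class scores gives the predicted digit.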
        max_index = np.argmax(prediction)
        print("It's a "+'\033[1m'+str(max_index)+'\033[0m')

cv2.destroyAllWindows()
Example #8
import random

import nn  # module providing the NeuralNetwork class (assumed project-local)

NeuralNetwork = nn.NeuralNetwork

# Note: this rebinds the name `nn` from the module to a network instance.
nn = NeuralNetwork(2, 2, 1)

training = [
    {
        'inputs': [1, 0],
        'targets': [1]
    },
    {
        'inputs': [0, 1],
        'targets': [1]
    },
    {
        'inputs': [1, 1],
        'targets': [0]
    },
    {
        'inputs': [0, 0],
        'targets': [0]
    },
]

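# Train on randomly sampled XOR examples; 100 iterations is a quick demo.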
for i in range(100):
    data = training[random.randint(0, 3)]
    nn.train(data['inputs'], data['targets'])

answer = nn.predict([1, 0])
print(answer)
Example #9
    def predict(self):
        self.set_status("Predicting...")
        self.prediction = nn.predict(self.model, WAVE_OUTPUT_FILENAME)
        self.string_prediction.set(COMMANDS[self.prediction])
        self.set_status("Prediction calculated")
Example #10
# Assumed loop header (the snippet begins mid-loop): `tweets`, the word lists
# `positive`/`negative`, and the counters pos/neg/neut/c are defined earlier.
for tw in tweets:
    tw = re.sub(r'[^\w\s]', '', tw)
    tw = tw.split()
    posCnt = 0
    negCnt = 0
    small = []
    for word in tw:
        word = word.strip('\'"?,.!')
        word = word.lower()
        if word in positive:
            posCnt += 1
        if word in negative:
            negCnt += 1
    small.append(posCnt)
    small.append(negCnt)

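    # Two-element feature vector: [positive word count, negative word count].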
    ans = nn.predict(small)
    if ans >= 0.5:
        pos = pos + 1
    elif ans <= -0.4:
        neg = neg + 1
    else:
        neut = neut + 1
    print(ans)

print("\n\n")
posper = (pos * 100) / c
negper = (neg * 100) / c
neutper = (neut * 100) / c

print("Positive Tweets Percentage is: {}%".format(posper))
print("Negative Tweets Percentage is: {}%".format(negper))
Example #11
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_moons, make_blobs

from nn import build_model
from nn import plot_decision_boundary
from nn import predict
from nn import build_model_691

np.random.seed(0)
X, y = make_moons(200, noise=0.20)
plt.scatter(X[:, 0], X[:, 1], s=40, c=y,
            cmap=plt.cm.Spectral)  #shows original scatter
plt.figure(figsize=(16, 32))
hidden_layer_dimensions = [1, 2, 3, 4]
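# Train one model per hidden-layer size and plot its decision boundary.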
for i, nn_hdim in enumerate(hidden_layer_dimensions):
    plt.subplot(2, 2, i + 1)
    plt.title('HiddenLayerSize%d' % nn_hdim)
    model = build_model(X, y, nn_hdim)
    plot_decision_boundary(lambda x: predict(model, x), X, y)
plt.show()

np.random.seed(0)
X, y = make_blobs(n_samples=100, centers=3, n_features=2, random_state=0)
plt.scatter(X[:, 0], X[:, 1], s=40, c=y, cmap=plt.cm.Spectral)
plt.figure(figsize=(16, 32))
hidden_layer_dimensions = [1, 2, 3, 4]
for i, nn_hdim in enumerate(hidden_layer_dimensions):
    plt.subplot(2, 2, i + 1)
    plt.title('HiddenLayerSize%d' % nn_hdim)
    model = build_model_691(X, y, nn_hdim)
    plot_decision_boundary(lambda x: predict(model, x), X, y)
plt.show()
Example #12
# Assumed constants (defined elsewhere in the original script); the values
# below are illustrative placeholders only.
num_of_examples = 100
num_of_train = 80
num_of_test = num_of_examples - num_of_train
num_of_features = 2

num_iterations = 100
alpha = 1

if __name__ == '__main__':
    sample_data = utils.linear_sep_data(num_of_examples)
    train_data = sample_data[:num_of_train]
    test_data = sample_data[num_of_train:]
    plt.plot(train_data.T[0], train_data.T[1], 'ro')
    plt.show()
    # X, y = utils.load_train_set(num_of_examples)
    X = train_data.T[:2]
    y = train_data.T[2]
    X_test = test_data.T[:2]
    y_test = test_data.T[2]
    X = X.reshape((2, num_of_train))
    y = y.reshape((1, num_of_train))
    X_test = X_test.reshape((2, num_of_test))
    y_test = y_test.reshape((1, num_of_test))
    # X  = utils.normalize_features(X)
    # X = X.T
    # y = y.reshape((1, num_of_examples))
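    # Zero-initialised W and b; gradient descent then fits what appears to be
    # a single-layer (logistic-regression-style) model.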
    W, b = nn.zero_initializer(num_of_features)
    optimal_params, costs = nn.gradient_descent_optimizer(
        X, y, W, b, alpha, num_iterations)
    W_opt = optimal_params[0]
    b_opt = optimal_params[1]
    print(costs)
    y_pred = nn.predict(W_opt, b_opt, X_test)
    print('real labels', y_test)
    print('pred labels', y_pred)
Example #13
        if not snake.alive:
            continue

        # pygame.draw.rect(screen, (255, 0, 0), (int(food[n].x) - block // 2, int(food[n].y) - block // 2, int(block), int(block)), 1)

        # if keys[pygame.K_SPACE]:
        #     drawLine(snake, screen)

        if keys[pygame.K_q]:
            play = False

        if keys[pygame.K_x]:
            print('mutate_rate : ', rate)

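        # Distance sensors feed the network; its output picks the snake's move.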
        inp = snake.processDistances(food, walls, screen)
        outputs = predict(inp, snake.brain)

        # a = inp.T
        # a = a[0]

        # print(a[0], a[3], a[6], a[9], a[12], a[15], a[18], a[21])

        # if outputs == 0:
        #     snake.move(Vector2(0, -1))

        # elif outputs == 1:
        #     snake.move(Vector2(1, 0))

        # elif outputs == 2:
        #     snake.move(Vector2(0, 1))
Example #14
    def reply(self, received_data):
        global GANN_instance, data_inputs, data_outputs, model
        if isinstance(received_data, dict):
            if ("data" in received_data
                    and "subject" in received_data):
                subject = received_data["subject"]
                print("Client's Message Subject is {subject}.".format(
                    subject=subject))
                self.kivy_app.label.text = "Client's Message Subject is {subject}".format(
                    subject=subject)

                print("Replying to the Client.")
                self.kivy_app.label.text = "Replying to the Client"
                if subject == "echo":
                    if model is None:
                        data = {"subject": "model", "data": GANN_instance}
                    else:
                        predictions = nn.predict(last_layer=model,
                                                 data_inputs=data_inputs)
                        error = numpy.sum(numpy.abs(predictions -
                                                    data_outputs))
                        # A client may send a model whose error is already 0.0;
                        # in that case the model needs no further changes.
                        if error == 0:
                            data = {"subject": "done", "data": None}
                        else:
                            data = {"subject": "model", "data": GANN_instance}

                    try:
                        response = pickle.dumps(data)
                    except BaseException as e:
                        print("Error Encoding the Message: {msg}.\n".format(
                            msg=e))
                        self.kivy_app.label.text = "Error Encoding the Message"
                elif subject == "model":
                    try:
                        GANN_instance = received_data["data"]
                        best_model_idx = received_data["best_solution_idx"]

                        best_model = GANN_instance.population_networks[
                            best_model_idx]
                        if model is None:
                            model = best_model
                        else:
                            predictions = nn.predict(last_layer=model,
                                                     data_inputs=data_inputs)

                            error = numpy.sum(
                                numpy.abs(predictions - data_outputs))

                            # A client may send a model whose error is already 0.0;
                            # in that case the model needs no further changes.
                            if error == 0:
                                data = {"subject": "done", "data": None}
                                response = pickle.dumps(data)
                                return

                            self.model_averaging(model, best_model)

                        # print(best_model.trained_weights)
                        # print(model.trained_weights)

                        predictions = nn.predict(last_layer=model,
                                                 data_inputs=data_inputs)
                        print("Model Predictions: {predictions}".format(
                            predictions=predictions))

                        error = numpy.sum(numpy.abs(predictions -
                                                    data_outputs))
                        print("Prediction Error = {error}".format(error=error))
                        self.kivy_app.label.text = "Prediction Error = {error}".format(
                            error=error)

                        if error != 0:
                            data = {"subject": "model", "data": GANN_instance}
                            response = pickle.dumps(data)
                        else:
                            data = {"subject": "done", "data": None}
                            response = pickle.dumps(data)

                    except BaseException as e:
                        print("Error Decoding the Client's Data: {msg}.\n".
                              format(msg=e))
                        self.kivy_app.label.text = "Error Decoding the Client's Data"
                else:
                    response = pickle.dumps("Response from the Server")

                try:
                    self.connection.sendall(response)
                except BaseException as e:
                    print("Error Sending Data to the Client: {msg}.\n".format(
                        msg=e))
                    self.kivy_app.label.text = "Error Sending Data to the Client: {msg}".format(
                        msg=e)

            else:
                print(
                    "The received dictionary from the client must have the 'subject' and 'data' keys available. The existing keys are {d_keys}."
                    .format(d_keys=received_data.keys()))
                self.kivy_app.label.text = "Error Parsing Received Dictionary"
        else:
            print(
                "A dictionary is expected to be received from the client but {d_type} received."
                .format(d_type=type(received_data)))
            self.kivy_app.label.text = "A dictionary is expected but {d_type} received.".format(
                d_type=type(received_data))
Example #15
    model = 'lr'
    model = 'nn'  # the last assignment wins; reorder to run the 'lr' branch

# Neural Network
    if model == 'nn':

      (X, y) = get_numpy_data(train_data, model_features, my_output, with_constant=False, output_factor=1)
      y = y.reshape(y.shape[0], 1)  # make sure y has 2 dimensions

      iterations = 10
      neurons = [X.shape[1], 1]

      weights = nn.neural_network_with_shapes(X, y, neurons, iterations=iterations, verbose=True, plot=False)

      (test_X, test_y) = get_numpy_data(test_data, model_features, my_output)
      test_predictions = nn.predict(test_X, weights)

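      # Compare train vs. test fit via R^2 and average prediction error.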
      train_r2 = regression.rsquared(nn.predict(X, weights), y)
      test_r2 = regression.rsquared(test_predictions, test_y)

      train_rss = regression.avg_prediction_error(nn.predict(X, weights), y)
      test_rss  = regression.avg_prediction_error(test_predictions, test_y)

      print(weights)
      print('Train r2: {}, test r2: {}'.format(train_r2, test_r2))
      print('train rss: {}, test rss: {}'.format(train_rss, test_rss))

# Linear Regression
    elif model == 'lr':
Example #16
import MNB
import argparse
#import lsh
from sys import argv
import warnings

# Used below but not imported in the original snippet (assumed project-local):
import run
import nn
import bayes

warnings.filterwarnings("ignore")

if __name__ == '__main__':
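    # argv positions 2/4/6 suggest a flag-style invocation, e.g.
    #   main.py -t <test_data> -l <test_label> -d <dataset>   (assumed)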
    print('Welcome to the world of high and low dimensions!')
    # The entire code should be able to run from this file!
    test_data = argv[2]
    test_label = argv[4]
    data = argv[6]
    if data == "dolphins":
        run.predict_dolphins(test_data, test_label)
        nn.predict("../data/dolphins/dolphins.csv", "../data/dolphins/dolphins_label.csv", test_data, test_label)
        bayes.predictNB("../data/dolphins/dolphins.csv", "../data/dolphins/dolphins_label.csv", test_data, test_label)
        #lsh.run("dolphins")
    elif data == "twitter":
        MNB.predict(test_data, test_label)
        #run.predict_twitter(test_data,test_label)
        #lsh.run("twitter")
        nn.predict_twitter(test_data, test_label)
    elif data == "pubmed":
        bayes.predictNB("../data/pubmed/pubmed.csv", "../data/pubmed/pubmed_label.csv", test_data, test_label)
        #lsh.run("pubmed")
        run.predict_pubmed(test_data, test_label)
        nn.predict("../data/pubmed/pubmed.csv", "../data/pubmed/pubmed_label.csv", test_data, test_label)
Example #17
def main():
    parser = argparse.ArgumentParser(description="Command Line Parser")
    parser.add_argument("--train-data",
                        dest="train_data",
                        default='$',
                        help="Give path to the training data set.")
    parser.add_argument("--test-data",
                        dest="test_data",
                        required=True,
                        help="Give path to the testing data set.")
    parser.add_argument("--dataset",
                        dest="dataset",
                        required=True,
                        help="Give Name of data set.")
    parser.add_argument("--configuration",
                        dest="configuration",
                        default='$',
                        help="Give structure of Neural Net.")
    args = parser.parse_args()
    train_data = args.train_data
    test_data = args.test_data
    dataset = args.dataset
    configuration = args.configuration
    if configuration != '$':
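        # e.g. a --configuration value of "[64,32]" becomes the list [64, 32]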
        configuration = list(map(int, configuration.strip('[]').split(',')))
    #Test mode
    if train_data == '$':

        test_filename = test_data
        testdata = list()
        if dataset == 'MNIST':
            num_classes = 10

            with open("../mem/weight_mnist.pkl", 'r') as saved:
                w = pkl.load(saved)
            with open("../mem/bias_mnist.pkl", 'r') as saved:
                b = pkl.load(saved)
            with open("../mem/configuration_mnist.pkl", 'r') as saved:
                nn_structure = pkl.load(saved)

            files = os.listdir(test_filename)
            for f in files:
                #print "Reading File", f
                file_path = os.path.join(test_filename, f)
                img = PIL.Image.open(file_path).convert("L")
                matrix = np.array(img)
                shape = matrix.shape
                reshaped_matrix = np.reshape(matrix, [
                    shape[0] * shape[1],
                ])
                testdata.append(reshaped_matrix)
                e = float(files.index(f) + 1) / len(files)
            sys.stdout.write('{}% File Reading Completed .....\r'.format(
                int(e * 100)))
            test_scaler = StandardScaler()
            testdata = test_scaler.fit_transform(np.array(testdata))
            n_components = 64
            pca_test = PCA(n_components)
            testdata = pca_test.fit_transform(testdata)

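            # Note: the scaler and PCA are refit on the test files rather than
            # reusing the training-time transforms.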
            predicted_y = nn.predict(np.array(testdata), w, b,
                                     len(nn_structure))
            print "Prediction is: ", predicted_y
            print len(nn_structure)

        else:
            num_classes = 2

            with open("../mem/weight_catdog.pkl", 'r') as saved:
                w = pkl.load(saved)
            with open("../mem/bias_catdog.pkl", 'r') as saved:
                b = pkl.load(saved)
            with open("../mem/configuration_catdog.pkl", 'r') as saved:
                nn_structure = pkl.load(saved)

            files = os.listdir(test_filename)
            for f in files:
                #print "Reading File", f
                file_path = os.path.join(test_filename, f)
                img = PIL.Image.open(file_path).convert("L")
                matrix = np.array(img)
                shape = matrix.shape
                reshaped_matrix = np.reshape(matrix, [
                    shape[0] * shape[1],
                ])
                testdata.append(reshaped_matrix)
                e = float(files.index(f) + 1) / len(files)
            sys.stdout.write('{}% File Reading Completed .....\r'.format(
                int(e * 100)))
            test_scaler = StandardScaler()
            testdata = test_scaler.fit_transform(np.array(testdata))
            n_components = 64
            pca_test = PCA(n_components)
            testdata = pca_test.fit_transform(testdata)
            predicted_y = nn.predict(np.array(testdata), w, b,
                                     len(nn_structure))
            predicted = ['cat' if x == 0 else 'dog' for x in predicted_y]
            print "Prediction is:", predicted

    else:
        train_filename = train_data
        test_filename = test_data
        raw_data, raw_labels, num_classes = nn.read_data(train_filename)
        #nn_structure = configuration
        testdata = list()
        if dataset == 'MNIST':
            #configuration = [64]+configuration+[10]
            #print configuration
            nn_structure = [64] + configuration + [10]
            scaler = StandardScaler()
            scaled_data = scaler.fit_transform(raw_data)
            print "Scaling Completed ....."
            n_components = 64
            pca = PCA(n_components)
            data = pca.fit_transform(scaled_data)
            print "Dimensionaly Reduced ....."
            labels = np.array(list(map(int, raw_labels)))
            onehot_labels = nn.one_hot(labels, num_classes)
            w, b, cost_list = nn.train(nn_structure, data, onehot_labels)

            with open("../mem/weight_mnist.pkl", 'w') as saver:
                pkl.dump(w, saver)

            with open("../mem/bias_mnist.pkl", 'w') as saver:
                pkl.dump(b, saver)

            with open("../mem/configuration_mnist.pkl", 'w') as saver:
                pkl.dump(nn_structure, saver)

            #num_classes = 10
            files = os.listdir(test_filename)
            for f in files:
                #print "Reading File", f
                file_path = os.path.join(test_filename, f)
                img = PIL.Image.open(file_path).convert("L")
                matrix = np.array(img)
                shape = matrix.shape
                reshaped_matrix = np.reshape(matrix, [
                    shape[0] * shape[1],
                ])
                testdata.append(reshaped_matrix)
                e = float(files.index(f) + 1) / len(files)
            sys.stdout.write('{}% File Reading Completed .....\r'.format(
                int(e * 100)))
            test_scaler = StandardScaler()
            testdata = test_scaler.fit_transform(np.array(testdata))
            n_components = 64
            pca_test = PCA(n_components)
            testdata = pca_test.fit_transform(testdata)
            predicted_y = nn.predict(np.array(testdata), w, b,
                                     len(nn_structure))
            print "Prediction is:", predicted_y

        else:
            nn_structure = [64] + configuration + [2]
            scaler = StandardScaler()
            scaled_data = scaler.fit_transform(raw_data)
            print "Scaling Completed ....."
            n_components = 64
            pca = PCA(n_components)
            data = pca.fit_transform(scaled_data)
            print "Dimensionaly Reduced ....."
            print "Cat is class 0 and Dog is class 1 ....."
            labels = np.array([0 if x == 'cat' else 1 for x in raw_labels])
            onehot_labels = nn.one_hot(labels, num_classes)
            w, b, cost_list = nn.train(nn_structure, data, onehot_labels)

            with open("../mem/weight_catdog.pkl", 'w') as saver:
                pkl.dump(w, saver)

            with open("../mem/bias_catdog.pkl", 'w') as saver:
                pkl.dump(b, saver)

            with open("../mem/configuration_catdog.pkl", 'w') as saver:
                pkl.dump(nn_structure, saver)

            files = os.listdir(test_filename)
            for f in files:
                #print "Reading File", f
                file_path = os.path.join(test_filename, f)
                img = PIL.Image.open(file_path).convert("L")
                matrix = np.array(img)
                shape = matrix.shape
                reshaped_matrix = np.reshape(matrix, [
                    shape[0] * shape[1],
                ])
                testdata.append(reshaped_matrix)
                e = float(files.index(f) + 1) / len(files)
            sys.stdout.write('{}% File Reading Completed .....\r'.format(
                int(e * 100)))
            test_scaler = StandardScaler()
            testdata = test_scaler.fit_transform(np.array(testdata))
            n_components = 64
            pca_test = PCA(n_components)
            testdata = pca_test.fit_transform(testdata)
            predicted_y = nn.predict(np.array(testdata), w, b,
                                     len(nn_structure))
            predicted = ['cat' if x == 0 else 'dog' for x in predicted_y]
            print "Prediction is:", predicted
Example #18
                                with_constant=False,
                                output_factor=1)
        y = y.reshape(y.shape[0], 1)  # make sure y has 2 dimensions

        iterations = 10
        neurons = [X.shape[1], 1]

        weights = nn.neural_network_with_shapes(X,
                                                y,
                                                neurons,
                                                iterations=iterations,
                                                verbose=True,
                                                plot=False)

        (test_X, test_y) = get_numpy_data(test_data, model_features, my_output)
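        # Evaluate the learned weights on held-out data.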
        test_predictions = nn.predict(test_X, weights)

        train_r2 = regression.rsquared(nn.predict(X, weights), y)
        test_r2 = regression.rsquared(test_predictions, test_y)

        train_rss = regression.avg_prediction_error(nn.predict(X, weights), y)
        test_rss = regression.avg_prediction_error(test_predictions, test_y)

        print(weights)
        print('Train r2: {}, test r2: {}'.format(train_r2, test_r2))
        print('train rss: {}, test rss: {}'.format(train_rss, test_rss))

# Linear Regression
    elif model == 'lr':
Example #19
                    # Assumed opening (the snippet begins mid-comprehension):
                    batchImagePixels = [
                        traindata[i]
                        for i in range(batchstartIndex, batchendIndex)
                    ]
                    batchImageLabels = [
                        trainlabel[i]
                        for i in range(batchstartIndex, batchendIndex)
                    ]
                    X = np.asarray(batchImagePixels, dtype=None, order=None)
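                    # Build one-hot label rows of width opdim for this batch.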
                    y = []
                    for i in range(len(batchImageLabels)):
                        labellist = [0 for _ in range(opdim)]
                        labellist[int(batchImageLabels[i])] = 1
                        y.append(labellist)
                    Y = np.asarray(y, dtype=None, order=None)
                    weights = nn.train(model, X, Y, weights, learning_rate)
                    batchstartIndex = batchendIndex
                    batchendIndex = batchstartIndex + batchsize
            X_test = np.asarray(testdata, dtype=None, order=None)
            accuracyOfMyCode, f1_score_macro, f1_score_micro = nn.predict(
                X_test, testlabel, weights)
            print("Test Accuracy ", accuracyOfMyCode)
            f.write("Test Accuracy " + str(accuracyOfMyCode))
            f.write("\n")
            print("Test F1 Score(Macro) ", f1_score_macro)
            f.write("Test F1 Score(Macro) " + str(f1_score_macro))
            f.write("\n")
            print("Test F1 Score(Micro) ", f1_score_micro)
            f.write("Test F1 Score(Micro) " + str(f1_score_micro))
            f.write("\n")
    f.close()
Example #20
# Flask view function (the route decorator sits outside this snippet).
def restest():
    one = request.form.get('gender')
    two = int(request.form.get('age'))
    three = request.form.get('location')
    four = request.form.get('dia')
    five = request.form.get('head')
    six = request.form.get('odour')
    seven = request.form.get('sym')
    eight = request.form.get('fever')
    nine = request.form.get('dis')
    ten = request.form.get('bp')
    ele = request.form.get('trv')

    j = [one, two, three, four, five, six, seven, eight, nine, ten]
    states = [
        "Tamil Nadu", "Maharashtra", "Delhi", "Gujarat", "Uttar Pradesh",
        "Rajasthan", "Madhya Pradesh", "West Bengal", "Karnataka", "Bihar",
        "Haryana", "Andhra Pradesh", "Jammu and Kashmir", "Telangana",
        "Odisha", "Assam", "Punjab", "Kerala", "Jharkhand", "Uttarakhand",
        "Chhattisgarh", "Tripura", "Himachal Pradesh", "Goa", "Manipur",
        "Puducherry", "Ladakh", "Nagaland", "Mizoram", "Arunachal Pradesh",
        "Meghalaya", "Andaman and Nicobar Islands", "Sikkim", "Lakshadweep",
        "New York", "New Jersey", "California", "Illinois", "Massachusetts",
        "Texas", "Pennsylvania", "Michigan", "Florida", "Maryland", "Georgia",
        "Virginia", "Connecticut", "Louisiana", "North Carolina", "Ohio",
        "Indiana", "Arizona", "Minnesota", "Colorado", "Tennessee",
        "Washington", "Wisconsin", "lowa", "Alabama", "Mississippi",
        "South Carolina", "Rhode island", "Nebraska", "Missouri", "Utah",
        "Kansas", "Kentucky", "Delaware", "New Mexico", "Arkansas",
        "Washington,D.c", "Nevada", "Oklahoma", "South Dakota",
        "New Hampshire", "Oregon", "Puerto Rico", "Idaho", "North Dakota",
        "Maine", "West Virginia", "Vermont", "Wyoming", "Hawai", "Montana",
        "Alaska", "Guam", "U.S.Virgin islands", "Northern Mariana islands",
        "American Samoa"
    ]
    cases = [
        "38716", "97648", "34687", "22032", "12088", "12076", "10241", "9768",
        "6245", "5983", "5968", "5429", "4574", "4320", "3498", "3464", "2969",
        "2327", "1599", "1411", "1398", "913", "470", "417", "366", "157",
        "135", "128", "102", "61", "44", "38", "14", "0", "381000", "166000",
        "133000", "131766", "104000", "83409", "77780", "64998", "66602",
        "59136", "54242", "51721", "44092", "43492", "40791", "39266", "37623",
        "31264", "29763", "28183", "24375", "22484", "21641", "2,015", "19614",
        "18109", "15228", "14991", "14611", "13767", "12864", "10867", "10315",
        "9773", "9367", "9549", "9016", "8935", "6676", "5095", "4876", "4576",
        "3935", "2975", "2625", "2512", "2071", "1042", "903", "653", "525",
        "487", "169", "73", "22", "0"
    ]

    # cough audio classification
    le = LabelEncoder()
    prob = nn.predict("new_file.ogg", "trained_cnn.h5")
    print(prob)

    # video-based vitals; a[0] is treated as heart rate (bpm) below
    a = bpmrpm()

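    # Hand-tuned risk score: symptom/demographic weights scaled by x, plus a
    # test component from the cough probability and heart-rate range.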
    x = 1.90
    total = 0
    if two > 50:
        total = total + 2
    if one == 'male':
        total += 1.2
    if ten == 'yes':
        total += 2
    if nine == 'yes':
        total += 3
    if four == 'yes':
        total += 3
    threee = states.index(three)
    count = int(cases[threee])
    if count < 1000:
        total += 2
    elif count < 3000:
        total += 3
    elif count < 10000:
        total += 4
    else:
        total += 5

    if ele == 'yes':
        total += 3
    if five == 'yes':
        total += 1
    if seven == 'yes':
        total += 2
    if six == 'no':
        total += 2
    if eight == 'yes':
        total += 2
    test = 0
    total = total * x
    if a[0] < 60 or a[0] > 100:
        test += 100
    if prob > 80:
        test += 100
    elif prob > 60:
        test += 75
    elif prob > 50:
        test += 50
    test = test / 4

    total = total + test
    print(test, total)

    cur = mysql.connection.cursor()
    cur.execute(
        'INSERT INTO record(name,email,cough,bpm,resp,risk) VALUES (%s,%s,%s,%s,%s,%s);',
        (
            acc,
            email1,
            prob,
            a[0],
            a[1],
            total,
        ))
    mysql.connection.commit()
    cur.close()

    return render_template("testresult.html",
                           cou=prob,
                           res=a[1],
                           bp=a[0],
                           risk=total,
                           email=email1,
                           name=acc)
Example #21
import numpy as np
import nn

Xtr = np.array([[0, 0], [0, 1], [1, 0], [1, 1]]).T
Ytr = np.array([[0], [1], [1], [0]]).T

ld = [2, 1]

lrate = 10

num_iterations = 10000

#print(Ytr.shape[1])

parameters = nn.model(X=Xtr,
                      Y=Ytr,
                      nn_architecture=ld,
                      start_learning_rate=lrate,
                      num_iterations=num_iterations,
                      learning_decay=0.01)

#parameters = nn.initialize_parameters(ld)

#print(nn.forward_propagation(Xtr, parameters)[0])

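# Query the trained network on all four XOR inputs.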
nn.predict([[0], [0]], parameters)
nn.predict([[1], [0]], parameters)
nn.predict([[0], [1]], parameters)
nn.predict([[1], [1]], parameters)
Example #22
        #val_dataset = val_dataset.batch(batch_size)

        model = nn.create_cnn(num_labels)

        # train model
        print("Training..")
        #nn.trainloop(X_train, y_train, model)
        nn.train(model, X_train, X_test, y_train, y_test, "trained_cnn.h5")

        # compute test loss and accuracy
        test_loss, test_accuracy = nn.compute(X_test, y_test, "trained_cnn.h5")
        print("Test loss", test_loss)
        print("Test accuracy", test_accuracy)

        # predicting using trained model with any test file in dataset
        nn.predict("dataset/001 - Dog bark/1-30226-A.ogg", le,
                   "trained_cnn.h5")

    elif sys.argv[1] == "mlp":

        #convert into numpy array
        X, y, le = get_numpy_array(features_df)

        # split into training and testing data
        X_train, X_test, y_train, y_test = get_train_test(X, y)
        num_labels = y.shape[1]

        # create model architecture
        model = nn.create_mlp(num_labels)

        # train model
        print("Training..")
Example #23
        X_test = np.expand_dims(X_test, axis=2)

        # create model architecture
        model = nn.create_cnn(num_labels)

        # train model
        print("Training..")
        nn.train(model, X_train, X_test, y_train, y_test, "trained_cnn.h5")

        # compute test loss and accuracy
        test_loss, test_accuracy = nn.compute(X_test, y_test, "trained_cnn.h5")
        print("Test loss", test_loss)
        print("Test accuracy", test_accuracy)

        # predicting using trained model with any test file in dataset
        nn.predict("sample_wav/new_file_Chainsaw.wav", le, "trained_cnn.h5")

    elif sys.argv[1] == "mlp":

        #convert into numpy array
        X, y, le = get_numpy_array(features_df)

        # split into training and testing data
        X_train, X_test, y_train, y_test = get_train_test(X, y)
        num_labels = y.shape[1]

        # create model architecture
        model = nn.create_mlp(num_labels)

        # train model
        print("Training..")
Example #24
# Flask view (imports and decorator were outside the snippet); an assumed
# minimal setup would be:
#   from flask import Flask, request, jsonify
#   app = Flask(__name__)
#   @app.route("/predict", methods=["POST"])
def add_message():
    content = request.json
    res, prob = predict(content["image"])
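    # .item()/.tolist() convert tensor/array outputs to JSON-serialisable values.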
    return jsonify({"res": res.item(), "probabilities": prob.tolist()})