Code Example #1
def predizioni(imgpath):
    # Run the trained model and each of the classifiers below on the same image.
    utility.test(imgpath, model, pose_name)
    utility.test_classificatore(imgpath, gnb, pose_name)
    utility.test_classificatore(imgpath, classifierKNN, pose_name)
    utility.test_classificatore(imgpath, extra_clf, pose_name)
    utility.test_classificatore(imgpath, rfl, pose_name)
    utility.test_classificatore(imgpath, svclassifier, pose_name)
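The classifier objects passed to utility.test_classificatore are module-level globals that the snippet does not show. Judging by their names they are most likely scikit-learn estimators trained earlier in the script; a hypothetical setup consistent with the calls (every constructor argument below is an assumption) could be:

from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import ExtraTreesClassifier, RandomForestClassifier
from sklearn.svm import SVC

# Hypothetical construction of the globals referenced in predizioni();
# the original script fits them elsewhere before this function runs.
gnb = GaussianNB()
classifierKNN = KNeighborsClassifier(n_neighbors=5)
extra_clf = ExtraTreesClassifier(n_estimators=100)
rfl = RandomForestClassifier(n_estimators=100)
svclassifier = SVC(kernel='rbf')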
Code Example #2
def main():
    user_args = get_args()

    class_labels, train_data, test_data, valid_data = utility.load_img(user_args.data_dir)
    model = utility.load_pretrained_model(user_args.arch, user_args.hidden_units)

    criterion = nn.NLLLoss()
    optimizer = optim.Adam(model.classifier.parameters(), lr=user_args.learning_rate)
    utility.train(model, user_args.learning_rate, criterion, train_data, valid_data, user_args.epochs, user_args.gpu)
    utility.test(model, test_data, user_args.gpu)
    model.to('cpu')

    # Save checkpoint for prediction
    utility.save_checkpoint({
                    'arch': user_args.arch,
                    'state_dict': model.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'hidden_units': user_args.hidden_units,
                    'class_labels': class_labels
                }, user_args.save_dir)
    print('Saved checkpoint!')
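get_args() is not included in this snippet. A minimal sketch, assuming argparse and only the attributes that main() reads (data_dir, arch, hidden_units, learning_rate, epochs, gpu, save_dir); the defaults are placeholders, not necessarily the original values:

import argparse

def get_args():
    # Hypothetical parser exposing exactly the attributes used in main().
    parser = argparse.ArgumentParser(description='Train an image classifier')
    parser.add_argument('data_dir', help='directory containing the training images')
    parser.add_argument('--arch', default='vgg19', help='torchvision architecture to build on')
    parser.add_argument('--hidden_units', type=int, default=1024, help='hidden units in the classifier head')
    parser.add_argument('--learning_rate', type=float, default=0.001, help='Adam learning rate')
    parser.add_argument('--epochs', type=int, default=10, help='number of training epochs')
    parser.add_argument('--gpu', action='store_true', help='train on the GPU if available')
    parser.add_argument('--save_dir', default='./checkpoint.pth', help='where to write the checkpoint')
    return parser.parse_args()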
Code Example #3
criterion = nn.MSELoss()
optimizer_rnn = torch.optim.Adam(rnn.parameters())
optimizer_gru = torch.optim.Adam(gru.parameters())
optimizer_lstm = torch.optim.Adam(lstm.parameters())

print('Training started at:', time_start)

while (model_selector_rnn.keep_training or model_selector_gru.keep_training
       or model_selector_lstm.keep_training):

    if model_selector_rnn.keep_training:
        rnn_loss.append([
            train(x_tr, y_tr, batch_size, optimizer_rnn, criterion, rnn,
                  False),
            validate(x_va, y_va, batch_size, criterion, rnn, False),
            test(x_te, y_te, batch_size, criterion, rnn, False)
        ])

        rnn_time = str(datetime.datetime.now() - time_start)
        model_selector_rnn.update(rnn_loss[-1][1], n_epochs)

    if model_selector_gru.keep_training:
        gru_loss.append([
            train(x_tr, y_tr, batch_size, optimizer_gru, criterion, gru,
                  False),
            validate(x_va, y_va, batch_size, criterion, gru, False),
            test(x_te, y_te, batch_size, criterion, gru, False)
        ])

        gru_time = str(datetime.datetime.now() - time_start)
        model_selector_gru.update(gru_loss[-1][1], n_epochs)
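The model_selector_* objects are created outside this excerpt. A minimal sketch of an early-stopping helper that matches how they are used here (a keep_training flag plus update(validation_loss, n_epochs)), assuming patience-based selection on the validation loss:

class ModelSelector:
    # Hypothetical early-stopping helper; the real implementation is not shown on this page.
    def __init__(self, patience=10, max_epochs=500):
        self.patience = patience
        self.max_epochs = max_epochs
        self.best_loss = float('inf')
        self.epochs_since_improvement = 0
        self.keep_training = True

    def update(self, validation_loss, n_epochs):
        # Stop once the validation loss has stalled for `patience` epochs
        # or the epoch budget is exhausted.
        if validation_loss < self.best_loss:
            self.best_loss = validation_loss
            self.epochs_since_improvement = 0
        else:
            self.epochs_since_improvement += 1
        if (self.epochs_since_improvement >= self.patience
                or n_epochs >= self.max_epochs):
            self.keep_training = False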
Code Example #4
import utility as u
u.greet()
u.test('for testing')
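The utility module itself never appears on this page. A hypothetical version with just enough in it to satisfy this example (and the print(utility.test("Hello")) call in example #7 below):

# utility.py -- hypothetical contents
def greet():
    print("Hello from the utility module!")

def test(message):
    # Return a string so callers can print the result.
    return "utility.test received: {}".format(message)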
Code Example #5
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 30 03:46:34 2020

@author: Yann
"""

import renanet
import numpy as np
import graph
import utility

rena = renanet.NeuralNet(400, 40, 10)

X,C = utility.chargerFichiersManuscrits(r'/home/yann/Desktop/bourdinyrichrobi/data_chamilo/Data/DigitTrain_%d.mat')
X_test,C_test = utility.chargerFichiersManuscrits(r'/home/yann/Desktop/bourdinyrichrobi/data_chamilo/Data/DigitTest_%d.mat')

#rena.load("ch.npy")
rena.learn(X,C)

utility.test(rena, X_test, C_test, mode='max')
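utility.test is only ever called in these snippets, never defined. One plausible reading of the call above, where mode='max' compares the argmax of the network output against a one-hot target, is sketched below; this is an assumption, not the author's implementation:

import numpy as np

def test(net, X, C, mode='max'):
    # Hypothetical sketch: report the accuracy of `net` on the test set (X, C).
    correct = 0
    for x, c in zip(X, C):
        y = np.ravel(net(x))        # assumes the network is callable on a single sample
        c = np.ravel(c)
        if mode == 'max':           # multi-class: compare argmax of output and one-hot target
            ok = np.argmax(y) == np.argmax(c)
        else:                       # binary: threshold a single output at 0.5
            ok = (y[0] >= 0.5) == (c[0] >= 0.5)
        correct += int(ok)
    accuracy = correct / len(X)
    print("Accuracy: {:.2%}".format(accuracy))
    return accuracy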
Code Example #6
"""

import renanet
import numpy as np
import graph
import utility

rena = renanet.NeuralNet(2, 1)

# X = np.array([[0,0.2],[1.72,0.32],[0.98,1.26],[-2,1],[-0.68,2.58],
#               [-1.76,-0.74],[1.02,-1.52],[-0.34,-2.76],[0,-1],[-3.06,-0.32]])
# C = np.array([[1],[1],[1],[1],[1],[0],[0],[0],[0],[0]])

X = utility.lectureMatData(
    r'D:\Yann\Desktop\bourdinyrichrobi\data_chamilo\DataSimulation\DataTrain_2Classes_Perceptron.mat',
    nomColonne='data')
C = utility.lectureMatData(
    r'D:\Yann\Desktop\bourdinyrichrobi\data_chamilo\DataSimulation\DataTrain_2Classes_Perceptron.mat',
    nomColonne='c')

X_test = utility.lectureMatData(
    r'D:\Yann\Desktop\bourdinyrichrobi\data_chamilo\DataSimulation\DataTest_2Classes_Perceptron.mat',
    nomColonne='dataTest')
C_test = utility.lectureMatData(
    r'D:\Yann\Desktop\bourdinyrichrobi\data_chamilo\DataSimulation\DataTest_2Classes_Perceptron.mat',
    nomColonne='cTest')

rena.learn(X, C)
utility.show(rena, X_test, C_test, utility.test(rena, X_test, C_test))

# X,C = utility.chargerFichiersManuscrits(r'D:\Yann\Desktop\bourdinyrichrobi\data_chamilo\Data\DigitTrain_%d.mat')
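utility.lectureMatData is not shown either; given how it is called (a .mat file path plus a nomColonne variable name), it presumably wraps scipy.io.loadmat. A hypothetical one-liner version:

import scipy.io

def lectureMatData(chemin, nomColonne):
    # Hypothetical sketch: load the .mat file and return the named variable as an array.
    return scipy.io.loadmat(chemin)[nomColonne]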
Code Example #7
import utility
import shopping.shopping_cart
from shopping.more_shopping.import_demo import printer
from shopping.more_shopping.import_demo import printer2 as imported_printer


if __name__ == "__main__":
    print(utility)
    print(utility.test("Hello"))

    print(shopping.shopping_cart.buy("Jacket"))

    print(printer("Hello from import demo!"))

    print(imported_printer("Hello from imported printer!"))
Code Example #8
                c.append(1)
            X.append(x)
            C.append(c)
    X = np.array(X)
    C = np.array(C)
    return X, C


def rente(net, X, C):
    # Simulate flat betting: stake 1€ on the predicted outcome of every match
    # and collect stake * odds whenever the prediction is correct.
    mise_par_pari = 1.0
    compte = 0.0
    for i in range(len(X)):
        compte -= mise_par_pari
        y = 0 if net(X[i]) < 0.5 else 1
        if C[i] == y:
            # The last two feature columns hold the bookmaker odds for outcomes 0 and 1.
            cote = X[i][-2] if y == 0 else X[i][-1]
            compte += cote * mise_par_pari
    return compte


#rena.load("ch.npy")
X, C = readData('2017.csv')
_X, _C = readData('2018.csv')
X, C = np.concatenate([X, _X]), np.concatenate([C, _C])
_X, _C = readData('2019.csv')
X, C = np.concatenate([X, _X]), np.concatenate([C, _C])
rena.learn(X, C)
X_test, C_test = readData('2020.csv')
utility.test(rena, X_test, C_test)
print("En misant 1€ par pari, on gagne {:.2f}€".format(
    rente(rena, X_test, C_test)))
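The bookkeeping in rente() can be checked in isolation: every bet costs the stake, and a correct prediction pays back stake times the odds stored in the sample's last two columns. A small self-contained check with a stubbed network (all names below are made up for the illustration):

import numpy as np

# One sample whose last two columns are the bookmaker odds for outcomes 0 and 1.
X_demo = np.array([[0.3, 0.7, 2.0, 1.8]])
C_demo = np.array([1])                   # the true outcome is class 1

def stub_net(x):
    return 0.9                           # always predicts class 1 (output >= 0.5)

# The 1€ stake is spent, the bet is won, so the payout is 1€ * 1.8:
print(rente(stub_net, X_demo, C_demo))   # -> 0.8, i.e. a 0.80€ profit on a 1€ stake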
Code Example #9
            lstm_loss.append([train(x_tr,
                                    y_tr,
                                    batch_size,
                                    optimizer_lstm,
                                    criterion,
                                    lstm,
                                    args.cuda),
                             validate(x_va,
                                      y_va,
                                      batch_size,
                                      criterion,
                                      lstm,
                                      args.cuda),
                             test(x_te,
                                  y_te,
                                  batch_size,
                                  criterion,
                                  lstm,
                                  args.cuda)])

            lstm_time = str(datetime.datetime.now()-time_start)
            model_selector_lstm.update(lstm_loss[-1][1], n_epochs)

        n_epochs += 1

#        s1 = pandas.Series([n_epochs, rnn_loss[-1][0], rnn_loss[-1][1],
#                            rnn_loss[-1][2], rnn_time, i])
#        s2 = pandas.Series([n_epochs, gru_loss[-1][0], gru_loss[-1][1],
#                            gru_loss[-1][2], gru_time, i])
        s3 = pandas.Series([n_epochs, lstm_loss[-1][0], lstm_loss[-1][1],
                            lstm_loss[-1][2], lstm_time, i])
Code Example #10
args.add_argument('--save_dir', dest="save_dir", action="store", default="./checkpoint.pth", help='save a trained model to this directory')
args.add_argument('--learning_rate', dest="learning_rate", action="store", type=float, default=0.01, help='learning rate')
args.add_argument('--epochs', dest="epochs", action="store", type=int, default=10, help='epochs')
args.add_argument('--arch', dest="arch", action="store", default="vgg19", type=str, help='select a network architecture')
args.add_argument('--hidden_units', dest="hidden_units", action="store", type=int, default=1024, help='hidden nodes')

args = args.parse_args()
data_dir = args.data_dir
save_dir = args.save_dir
lr = args.learning_rate
arch = args.arch
hidden_units = args.hidden_units
gpu = args.gpu
epochs = args.epochs
checkpoint = args.checkpoint_path

import json
with open('cat_to_name.json', 'r') as f:
    flower_to_name = json.load(f)
flower_species = len(flower_to_name)

image_datasets, dataloaders = u.loader(data_dir)
model = u.network(arch, gpu, hidden_units)
criterion, optimizer = u.optimizing(model, lr)

# Let's train
model = u.train(model, './ex_model.pth', epochs, optimizer, dataloaders, criterion, gpu)

# Let's test
u.test(dataloaders, model, criterion, gpu)
u.saver(arch, image_datasets, save_dir, model, lr)