Example #1
def main():
    train_dir = "images/train"
    test_dir = "images/test"
    lr = 1e-3  # learning rate 0.001
    IMG_SIZE = 200

    experiment_no = "1"
    model_name = "LikeNotlike-{}.model".format(experiment_no)
    model_path = "models/" + model_name

    train_data = dataLoader.load_data(train_dir, IMG_SIZE)
    test_data = dataLoader.load_data(test_dir, IMG_SIZE)
    model = create_model(IMG_SIZE, lr)
    network = ClassificationNetwork(model, IMG_SIZE, model_path)
    network.fit_model(train_data, test_data)
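The snippet above relies on a dataLoader.load_data(directory, img_size) helper that is not shown. A minimal sketch of what such a loader might look like, assuming a flat directory of images whose label is encoded in the filename; the directory layout, the PIL dependency, and the "like"-prefix naming scheme are assumptions, not part of the original code:

import os
import numpy as np
from PIL import Image

def load_data(directory, img_size):
    # Load every image in `directory`, resize it to img_size x img_size,
    # and pair it with a label derived from the filename (assumed convention).
    data = []
    for name in os.listdir(directory):
        img = Image.open(os.path.join(directory, name)).convert("L")
        img = img.resize((img_size, img_size))
        label = 1 if name.lower().startswith("like") else 0  # hypothetical naming scheme
        data.append([np.asarray(img), label])
    return data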
Example #2
def watcher():
    print "Debug: Watcher started ..."
    try:
        ## Full code for main parent
        res, payloadArr = load_data()
        if not res:
            raise Exception

        # --------- Now for the Rabbit --------- #
        connection = pika.BlockingConnection(
            pika.ConnectionParameters('localhost'))
        channel = connection.channel()
        channel.queue_declare(queue=config['queue'])

        print "array of payloads ", payloadArr
        for payload in payloadArr:
          channel.basic_publish(exchange='',
                                routing_key=config['queue'],
                                body=json.dumps(payload))

        connection.close()
        # -------------------------------------- #
        return True
    except Exception:
        return False
    finally:
        pass     # cleanup
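The watcher above only publishes messages. For completeness, a minimal pika consumer that drains the same queue could look like the sketch below, using the pika 1.x consume API; the queue name is meant to be the same config['queue'] value, and the callback is purely illustrative:

import json
import pika

def consume(queue_name):
    # Connect to the same local broker the watcher publishes to.
    connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
    channel = connection.channel()
    channel.queue_declare(queue=queue_name)

    def on_message(ch, method, properties, body):
        payload = json.loads(body)
        print("received payload:", payload)

    channel.basic_consume(queue=queue_name,
                          on_message_callback=on_message,
                          auto_ack=True)
    channel.start_consuming()
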
Example #3
import tensorflow as tf
from tensorflow import keras as ks
import h5py
import dataLoader
import numpy as np
from matplotlib import pyplot as plt

x_train, y_train, x_test, y_test = dataLoader.load_data()

plt.imshow(x_train[8])
plt.show()

x_train = x_train[:, :, :, np.newaxis]
x_test = x_test[:, :, :, np.newaxis]

model = ks.models.load_model("pqModel.hdf5")

# predict = model.predict(x_test)
# predict = model.predict(x_train[10].reshape(1, 200, 200))
# predict = model.predict(x_train[8].reshape(1, 200, 200, 1))
predict = model.predict(x_test)

print("predict: ", predict)

Example #4
# import matplotlib.pyplot as plt
# import os

# Retrieve the parameters from config.ini
data_interface = DataInterface()

# Read the config file, section [GanMnist]
param_liste = data_interface.read_conf('config.ini', 'GanMnist')
param_desc_disc_liste = data_interface.read_conf('config_algo_descente.ini',
                                                 'Param de desc du disc')
param_desc_gen_liste = data_interface.read_conf('config_algo_descente.ini',
                                                'Param de desc du gen')

# Initialize the data for training
training_images, training_labels, _, _ = dataLoader.load_data(
    param_liste['file'][0], param_liste['dataset'][0])

number_exp = param_liste['number_exp'][0]

for exp in range(number_exp):

    print("Lancement de l'experience n°", exp)

    param = data_interface.extract_param(param_liste, exp)

    param_desc_disc = data_interface.extract_param(param_desc_disc_liste, exp)
    param_desc_gen = data_interface.extract_param(param_desc_gen_liste, exp)
    numbers_to_draw = param['numbers_to_draw']

    # Keep only the 'numbers_to_draw' digits from the config in the dataset
    not_right_nb = []
Example #5
import dataLoader
import FeatureBuilder
if __name__ == '__main__':
    data = dataLoader.load_data()
    train_x, train_y, test_x, test_y = FeatureBuilder.build_feature(data)
Example #6
def prepare_data(X, A, *args):

    X /= X.sum(1)
    X = np.expand_dims(X, axis=0)
    A = preprocess_adj(A, symmetric=True).todense()
    A = np.expand_dims(A, axis=0)  # add a batch dimension
    args = [np.expand_dims(a, axis=0) for a in args]

    return X, A, args


if __name__ == '__main__':

    # data
    X, A, y = load_data()  # (N,D)mat, (N,N)sp_mat, (N,cls)mat
    y_train, y_val, y_test, idx_train, idx_val, idx_test, train_mask = get_splits(
        y)  # arr & range objects
    X, A, y_lst = prepare_data(X, A, y, train_mask)
    y, train_mask = y_lst
    n_classes = y_train.shape[-1]

    def ce_on_train(y_true, y_pred):
        mask = K.constant(train_mask, dtype='float32')  # (b,N)
        ce = categorical_crossentropy(y_true, y_pred)  # (b,N): class axis is reduced
        return ce * mask

    # model
    model = GCN(n_classes=n_classes)
    model.compile(optimizer=Adam(lr=0.01), loss=ce_on_train)
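The custom loss above works by multiplying the per-node cross-entropy by the training mask, so only labelled training nodes contribute to the gradient. A minimal NumPy illustration of that masking idea, with made-up shapes, not part of the snippet above:

import numpy as np

per_node_ce = np.array([[0.8, 1.2, 0.3, 2.0]])  # cross-entropy per node, shape (b, N)
train_mask = np.array([[1.0, 0.0, 1.0, 0.0]])   # 1.0 marks a training node

masked_ce = per_node_ce * train_mask            # loss is zeroed on non-training nodes
print(masked_ce)                                # [[0.8 0.  0.3 0. ]]
print(masked_ce.sum() / train_mask.sum())       # mean loss over training nodes only -> 0.55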
Example #7
from brain.network import Network
import dataLoader
from engine import Engine
from dataInterface import DataInterface
from errorGraphs import ErrorGraphs

data_interface = DataInterface('Mnist_debug')

param = data_interface.read_conf()
data_interface.rename(param['nom_dossier_resultats'])
param_algo_descente = data_interface.read_conf('config_algo_descente.ini',
                                               'Parametres de descente')

# Load the data for training
training_images, training_labels, testing_images, testing_labels = \
    dataLoader.load_data(param['file'], param['dataset'])

# Training image configuration
training_size = param['training_size']

# Test image configuration
testing_size = param['testing_size']

# Load the training management parameters
nb_exp = param['nb_exp']
test_period = param['test_period']
randomize_learning_set = param['randomize_learning_set']

# Load the functions used
error_fun = param['error_fun']
Example #8
import tensorflow as tf
from tensorflow import keras as ks
import h5py
import imageTransfer
import dataLoader
import numpy as np
from matplotlib import pyplot as plt
from PIL import Image

use_TestSet = True

model = ks.models.load_model("pqModel.hdf5")

if use_TestSet:
    x_train_ori, y_train, x_test_ori, y_test = dataLoader.load_data()

    x_train = x_train_ori[:, :, :, np.newaxis]
    x_test = x_test_ori[:, :, :, np.newaxis]

    predict = model.predict(x_test)

else:
    source = ""
    dest = ""
    imageTransfer.image_Transfer(source, dest)

    source2 = ""
    img_ori = dataLoader.image_loader(source2)

    img = img_ori[:, :, :, np.newaxis]
Example #9
def train_GRAM(seqFile='seqFile.txt',
               labelFile='labelFile.txt',
               treeFile='tree.txt',
               embFile='embFile.txt',
               outFile='out.txt',
               inputDimSize=100,
               numAncestors=100,
               embDimSize=100,
               hiddenDimSize=200,
               attentionDimSize=200,
               max_epochs=100,
               L2=0.,
               numClass=26679,
               batchSize=100,
               dropoutRate=0.5,
               logEps=1e-8,
               verbose=True,
               ignore_level=0):
    options = locals().copy()
    # leavesList and ancestorsList carry the category information for every disease code
    leavesList = []
    ancestorsList = []
    for i in range(5, 0, -1):
        leaves, ancestors = build_tree(treeFile + '.level' + str(i) + '.pk')
        leavesList.append(leaves)
        ancestorsList.append(ancestors)

    print('Building the model ... ')
    gram = GRAM(inputDimSize, numAncestors, embDimSize, hiddenDimSize,
                attentionDimSize, numClass, dropoutRate, embFile)
    # if torch.cuda.device_count() > 1:
    #     print("Let's use", torch.cuda.device_count(), "GPUs!")
    #     # dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...] on 3 GPUs
    #     gram = nn.DataParallel(gram)
    gram.to(device)
    # gram.train()
    print(list(gram.state_dict()))
    loss_fn = CrossEntropy()
    loss_fn.to(device)

    print('Constructing the optimizer ... ')
    optimizer = torch.optim.Adadelta(gram.parameters(), lr=1, weight_decay=L2)

    print('Loading data ... ')
    trainSet, validSet, testSet = load_data(seqFile,
                                            labelFile,
                                            test_ratio=0.15,
                                            valid_ratio=0.1)
    print('Data length:', len(trainSet[0]))
    n_batches = int(np.ceil(float(len(trainSet[0])) / float(batchSize)))
    val_batches = int(np.ceil(float(len(validSet[0])) / float(batchSize)))
    test_batches = int(np.ceil(float(len(testSet[0])) / float(batchSize)))

    print('Optimization start !!')
    # setting the tensorboard
    loss_writer = SummaryWriter('{}/{}'.format(outFile + 'TbLog', 'Loss'))
    acc_writer = SummaryWriter('{}/{}'.format(outFile + 'TbLog', 'Acc'))
    # test_writer = SummaryWriter('{}/{}'.format(outFile+'TbLog', 'Test'))

    logFile = outFile + '.log'
    bestTrainCost = 0.0
    bestValidCost = 100000.0
    bestTestCost = 0.0
    bestTrainAcc = 0.0
    bestValidAcc = 0.0
    bestTestAcc = 0.0
    epochDuration = 0.0
    bestEpoch = 0
    # set the random seed for test
    random.seed(seed)
    # with torchsnooper.snoop():
    for epoch in range(max_epochs):
        iteration = 0
        cost_vec = []
        acc_vec = []
        startTime = time.time()
        gram.train()
        for index in random.sample(range(n_batches), n_batches):
            optimizer.zero_grad()
            batchX = trainSet[0][index * batchSize:(index + 1) * batchSize]
            batchY = trainSet[1][index * batchSize:(index + 1) * batchSize]
            x, y, mask, lengths = padMatrix(batchX, batchY, options)
            x = torch.from_numpy(x).to(device).float()
            mask = torch.from_numpy(mask).to(device).float()
            # print('x,', x.size())
            y_hat = gram(x, mask, leavesList, ancestorsList)
            # print('y_hat', y_hat.size())
            y = torch.from_numpy(y).float().to(device)
            # print('y', y.size())
            lengths = torch.from_numpy(lengths).float().to(device)
            # print(y.size(), y_hat.size())
            loss, acc = loss_fn(y_hat, y, lengths)
            loss.backward()
            optimizer.step()
            if iteration % 100 == 0 and verbose:
                buf = 'Epoch:%d, Iteration:%d/%d, Train_Cost:%f, Train_Acc:%f' % (
                    epoch, iteration, n_batches, loss, acc)
                print(buf)
            cost_vec.append(loss.item())
            acc_vec.append(acc)
            iteration += 1
        duration_optimize = time.time() - startTime
        gram.eval()
        cost = np.mean(cost_vec)
        acc = np.mean(acc_vec)
        startTime = time.time()
        with torch.no_grad():
            # calculate the loss and acc of valid dataset
            cost_vec = []
            acc_vec = []
            for index in range(val_batches):
                validX = validSet[0][index * batchSize:(index + 1) * batchSize]
                validY = validSet[1][index * batchSize:(index + 1) * batchSize]
                val_x, val_y, mask, lengths = padMatrix(
                    validX, validY, options)
                val_x = torch.from_numpy(val_x).float().to(device)
                mask = torch.from_numpy(mask).float().to(device)
                val_y_hat = gram(val_x, mask, leavesList, ancestorsList)
                val_y = torch.from_numpy(val_y).float().to(device)
                lengths = torch.from_numpy(lengths).float().to(device)
                valid_cost, valid_acc = loss_fn(val_y_hat, val_y, lengths)
                cost_vec.append(valid_cost.item())
                acc_vec.append(valid_acc)
            valid_cost = np.mean(cost_vec)
            valid_acc = np.mean(acc_vec)

            # calculate the loss and acc of test dataset
            cost_vec = []
            acc_vec = []
            for index in range(test_batches):
                testX = testSet[0][index * batchSize:(index + 1) * batchSize]
                testY = testSet[1][index * batchSize:(index + 1) * batchSize]
                test_x, test_y, mask, lengths = padMatrix(
                    testX, testY, options)
                test_x = torch.from_numpy(test_x).float().to(device)
                mask = torch.from_numpy(mask).float().to(device)
                test_y_hat = gram(test_x, mask, leavesList, ancestorsList)
                test_y = torch.from_numpy(test_y).float().to(device)
                lengths = torch.from_numpy(lengths).float().to(device)
                test_cost, test_acc = loss_fn(test_y_hat, test_y, lengths)
                cost_vec.append(test_cost.item())
                acc_vec.append(test_acc)
            test_cost = np.mean(cost_vec)
            test_acc = np.mean(acc_vec)
        # record the loss and acc
        loss_writer.add_scalar('Train Loss', cost, epoch)
        loss_writer.add_scalar('Test Loss', test_cost, epoch)
        loss_writer.add_scalar('Valid Loss', valid_cost, epoch)
        acc_writer.add_scalar('Train Acc', acc, epoch)
        acc_writer.add_scalar('Test Acc', test_acc, epoch)
        acc_writer.add_scalar('Valid Acc', valid_acc, epoch)

        # print the loss
        duration_metric = time.time() - startTime
        epochDuration += duration_optimize + duration_metric
        buf = 'Epoch:%d, Train_Cost:%f, Valid_Cost:%f, Test_Cost:%f' % (
            epoch, cost, valid_cost, test_cost)
        print(buf)
        print2file(buf, logFile)
        buf = 'Train_Acc:%f, Valid_Acc:%f, Test_Acc:%f' % (acc, valid_acc,
                                                           test_acc)
        print(buf)
        print2file(buf, logFile)
        buf = 'Optimize_Duration:%f, Metric_Duration:%f' % (duration_optimize,
                                                            duration_metric)
        print(buf)
        print2file(buf, logFile)

        # record the best epoch's metrics (a checkpoint is saved every epoch below)
        if valid_cost < bestValidCost:
            bestValidCost = valid_cost
            bestTestCost = test_cost
            bestTrainCost = cost
            bestEpoch = epoch
            bestTrainAcc = acc
            bestValidAcc = valid_acc
            bestTestAcc = test_acc

        torch.save(gram.state_dict(), outFile + f'.{epoch}')

    buf = 'Best Epoch:%d, Avg_Duration:%f, Train_Cost:%f, Valid_Cost:%f, Test_Cost:%f' % (
        bestEpoch, epochDuration / max_epochs, bestTrainCost, bestValidCost,
        bestTestCost)
    print(buf)
    print2file(buf, logFile)
    buf = 'Train_Acc:%f, Valid_Acc:%f, Test_Acc:%f' % (
        bestTrainAcc, bestValidAcc, bestTestAcc)
    print(buf)
    print2file(buf, logFile)
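The logging in train_GRAM goes through a print2file helper that is not shown here. A plausible minimal implementation, assuming it simply appends one line per message to the log file:

def print2file(buf, logFile):
    # Append the message to the log file (assumed behaviour, not part of the snippet above).
    with open(logFile, 'a') as f:
        f.write(buf + '\n')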
Example #10
from dataLoader import load_data
from tensorflow import keras as ks  # needed for ks.datasets and ks.Sequential below

import time

time_start = time.time()

# choose data set
useMNIST = False

if useMNIST:
    fashion_mnist = ks.datasets.fashion_mnist
    (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
    classNum = 10
    loss_func = "sparse_categorical_crossentropy"
else:
    x_train, y_train, x_test, y_test = load_data()
    classNum = 1
    loss_func = "binary_crossentropy"

# normalize to [0, 1]
x_train = x_train / 255.0
x_test = x_test / 255.0

# construct model
# sequential way
useCNN = True

if not useCNN:
    input_shape = (x_train[0].shape[0], x_train[0].shape[1])

    model = ks.Sequential([