Example 1
import tensorflow as tf  # written against the TF1 graph/session API

import cnn  # project-local model modules
import dbn


def predict(model_name, model_file_name, layers, input_features, target_size):
    img_width = input_features.shape[1]
    img_height = input_features.shape[2]
    # Flatten each sample into a single feature vector
    input_features = input_features.reshape(len(input_features), -1)
    # Restore the well-trained model
    if model_name == 'dbn':
        layers  = list(map(int, layers.strip().split(',')))
        layers  = [input_features.shape[1]] + layers + [target_size]
        network = dbn.DBN(layers=layers, batch_size=100)
    elif model_name == 'cnn':
        conv_layers, hid_layers = layers.strip().split('#')
        conv_layers = list(map(int, conv_layers.strip().split(',')))
        hid_layers  = list(map(int, hid_layers.strip().split(',')))
        network = cnn.CNN(img_width=img_width, img_height=img_height, conv_layers=conv_layers, hidden_layers=hid_layers, batch_size=128)
    else:
        return -1, 'Invalid model'
    
    # model_file_path is assumed to be defined at module level
    with tf.Session() as sess:
        tf_saver = tf.train.Saver()
        tf_saver.restore(sess, model_file_path + model_file_name)
        outputs = network.get_output(sess, input_features)
    return 0, outputs
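
# Hedged usage sketch; 'features' below is a hypothetical (N, height, width)
# NumPy array, and the checkpoint / layer-spec strings are assumptions.
import numpy as np

features = np.random.rand(10, 28, 28)
status, outputs = predict('cnn', 'cnn_model.ckpt', '32,64#1024', features, 10)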
Example 2
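# The names referenced in kwargs below are defined elsewhere in the original
# script; this is a minimal sketch with hypothetical placeholder values so
# the snippet runs stand-alone. The cnn import mirrors Example 4.
from model import cnn

validation_steps = 50
weights_file = 'model.h5'
json_file = 'model.json'
text_file = 'report.txt'
image_file = 'model.png'
test_path = 'data/test'
train_path = 'data/train'
input_shape = (64, 64, 3)
target_size = (64, 64)
pool_size = (2, 2)
kernel_size = (3, 3)
epochs = 10
batch_size = 32
steps_per_epoch = 100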
is_binary = True
classes = 2
optimizer = 'adam'
dropout_rate = 0.25
kwargs = {
    'validation_steps': validation_steps,
    'weights_file': weights_file,
    'json_file': json_file,
    'text_file': text_file,
    'image_file': image_file,
    'test_path': test_path,
    'train_path': train_path,
    'input_shape': input_shape,
    'target_size': target_size,
    'is_binary': is_binary,
    'classes': classes,
    'pool_size': pool_size,
    'kernel_size': kernel_size,
    'epochs': epochs,
    'batch_size': batch_size,
    'steps_per_epoch': steps_per_epoch,
    'optimizer': optimizer,
    'dropout_rate': dropout_rate
}


classifier = cnn.CNN(**kwargs)
classifier.create_model()
classifier.train_model()
classifier.export_model()
Example 3
print('Create an instance of the neural network.')
if model == 'dbn':
    layers = list(map(int, layers.strip().split(',')))
    layers = [features.shape[1]] + layers + [labels.shape[1]]
    network = dbn.DBN(layers=layers,
                      iters=1000,
                      batch_size=100,
                      mu=LEARNING_RATE)  # e.g. 0.0001
elif model == 'cnn':
    conv_layers, hid_layers = layers.strip().split('#')
    conv_layers = list(map(int, conv_layers.strip().split(',')))
    hid_layers = list(map(int, hid_layers.strip().split(',')))
    network = cnn.CNN(img_width=img_width,
                      img_height=img_height,
                      conv_layers=conv_layers,
                      hidden_layers=hid_layers,
                      learning_rate=LEARNING_RATE,
                      training_iters=20000,
                      batch_size=128,
                      display_step=10)

with tf.Session() as sess:
    if pretrained != '-1':
        tf_saver = tf.train.Saver()
        tf_saver.restore(sess, model_path + pretrained)

        print('Start training...')
        network.train(sess,
                      training_features,
                      training_labels,
                      testing_features,
                      testing_labels)
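        # Hedged follow-up (not in the original snippet): persist the trained
        # weights with the same saver; the checkpoint file name is an assumption.
        tf_saver.save(sess, model_path + 'trained_model.ckpt')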
Example 4
import os
import torch
import torch.nn as nn
from sklearn.metrics import roc_auc_score
from utils import evaluate
from plots import plot_learning_curves
from model import cnn
from loader import custom_data_loader


model = cnn.CNN()
# Alternative ways to restore weights into a fresh model:
# model.load_state_dict(torch.load("../output/cnn.pth"))
# model.load_state_dict(checkpoint['model_state_dict'])

# torch.save(model, os.path.join(PATH_OUTPUT, 'cnn.pth'))
PATH_TEST_FILE = "../data/test/"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
test_loader = custom_data_loader.XrayLoader(PATH_TEST_FILE)
criterion = nn.CrossEntropyLoss()

criterion.to(device)

best_model = torch.load(os.path.join("../output/", 'cnn.pth'))
best_model.to(device)

# Evaluate the restored best model rather than the freshly constructed one
_, _, test_results = evaluate(best_model, device, test_loader, criterion)

roc = 0.0
y1, y2 = zip(*test_results)
try:
    roc = roc_auc_score(y1, y2)
except ValueError as e:  # e.g. only one class present in y1
    print("roc_auc_score failed: " + str(e))
print("test ROC-AUC: " + str(roc))
Example 5
    logging.basicConfig(
        format='%(asctime)s - %(message)s',
        level=logging.INFO,
    )
    logging.info('Data downloading')

    transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize((0.1307, ), (0.3081, ))])
    train_dataset = datasets.MNIST(args.data_dir,
                                   train=True,
                                   download=True,
                                   transform=transform)
    test_dataset = datasets.MNIST(args.data_dir,
                                  train=False,
                                  transform=transform)
    train_loader = DataLoader(train_dataset,
                              batch_size=args.batch_size,
                              shuffle=True)
    test_loader = DataLoader(test_dataset,
                             batch_size=args.test_batch_size,
                             shuffle=True)

    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    model = cnn.CNN().to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)

    for epoch in range(args.epochs):
        train(model, device, train_loader, criterion, optimizer, epoch)
        test(model, device, test_loader, criterion)
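
# A hypothetical sketch of the train() helper assumed by the loop above (it
# would live at module level in the real script); test() would follow the
# same pattern with model.eval() and torch.no_grad().
def train(model, device, train_loader, criterion, optimizer, epoch):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        loss = criterion(model(data), target)
        loss.backward()
        optimizer.step()
        if batch_idx % 100 == 0:
            logging.info('epoch %d, batch %d, loss %.4f', epoch, batch_idx, loss.item())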
Example 6
import os

import sklearn.metrics
import torch
import torch.nn as nn
import torch.optim as optim

from model import cnn
from loader import custom_data_loader
from utils import train, evaluate  # train is assumed to live in utils alongside evaluate


def train_model(aug_type, dataset, path):  # renamed from 'type' to avoid shadowing the builtin
    torch.manual_seed(0)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(0)

    # Set a correct path to the seizure data file you downloaded
    PATH_TRAIN_FILE = path + "/data/train/"
    PATH_VALID_FILE = path + "/data/validation/"
    # PATH_TEST_FILE = "../data/test/"

    # Path for saving model
    # PATH_OUTPUT = "../output/"
    # os.makedirs(PATH_OUTPUT, exist_ok=True)

    # Some parameters
    NUM_EPOCHS = 1
    BATCH_SIZE = 8
    USE_CUDA = True  # Set 'True' if you want to use GPU
    NUM_WORKERS = 0  # Number of threads used by DataLoader. You can adjust this according to your machine spec.

    NEGATIVE_BATCH_SIZE = 50
    POSITIVE_BATCH_SIZE = 4
    TOTAL = NEGATIVE_BATCH_SIZE + POSITIVE_BATCH_SIZE


    if "original" != dataset and type == "additive_augmentation":
        POSITIVE_BATCH_SIZE = POSITIVE_BATCH_SIZE * 2
    weights = [(float(POSITIVE_BATCH_SIZE) / TOTAL),(float(NEGATIVE_BATCH_SIZE) / TOTAL)]

    model = cnn.CNN()

    # print("saving " + path + " " + dataset)
    # torch.save(model, path + "/output/" + model_type + "_" + dataset + ".pth")

    train_loader = custom_data_loader.XrayLoader(PATH_TRAIN_FILE,
                                                 dataset=dataset,
                                                 augmentation=aug_type,
                                                 negative_batch_size=NEGATIVE_BATCH_SIZE,
                                                 positive_batch_size=POSITIVE_BATCH_SIZE)
    valid_loader = custom_data_loader.XrayLoader(PATH_VALID_FILE,
                                                 negative_batch_size=NEGATIVE_BATCH_SIZE,
                                                 positive_batch_size=POSITIVE_BATCH_SIZE)
    # test_loader = custom_data_loader.XrayLoader(PATH_TEST_FILE)



    class_weights = torch.FloatTensor(weights)
    criterion = nn.CrossEntropyLoss(weight=class_weights)
    # criterion = nn.BCEWithLogitsLoss(weight=class_weights)
    # BCELoss
    optimizer = optim.Adam(model.parameters())

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # device = "cpu"
    model.to(device)
    criterion.to(device)

    best_val_acc = 0.0
    train_losses, train_accuracies = [], []
    valid_losses, valid_accuracies = [], []

    best_roc = 0.0
    roc_list = []
    total_train = 0.0
    total_load = 0.0
    avg_batch_train_time = 0.0
    avg_batch_load_time = 0.0
    index = 0
    for epoch in range(NUM_EPOCHS):
        index += 1
        train_loader.reset()
        valid_loader.reset()

        _, _, tt, tl, avg_tt, avg_lt = train(model, device, train_loader, criterion, optimizer, epoch)
        valid_loss, valid_accuracy, valid_results = evaluate(model, device, valid_loader, criterion)

        total_train += tt
        total_load += tl
        avg_batch_train_time += avg_tt
        avg_batch_load_time += avg_lt

        roc = 0.0
        y1, y2 = zip(*valid_results)
        try:
            roc = sklearn.metrics.roc_auc_score(y1, y2)
        except ValueError as e:  # e.g. only one class present in y1
            print("roc_auc_score failed: " + str(e))
        roc_list.append(roc)
        print("roc: " + str(roc))

        if roc > best_roc:
            best_roc = roc
            # torch.save(model, os.path.join(PATH_OUTPUT, 'cnn.pth'))
            print("saving " + type + " " + dataset)
            path_to_save = path + "/output/" + type + "/cnn" + "_" + dataset
            os.makedirs(path_to_save, exist_ok=True)
            torch.save(model, path_to_save + ".pth")

    avg_batch_train_time = avg_batch_train_time / index
    avg_batch_load_time = avg_batch_load_time / index
    return best_roc, roc_list, total_train, total_load, avg_batch_train_time, avg_batch_load_time
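

# Hypothetical usage sketch; the augmentation/dataset strings mirror the
# values checked inside train_model() above.
if __name__ == "__main__":
    best_roc, roc_list, total_train, total_load, avg_tt, avg_lt = train_model(
        "additive_augmentation", "original", ".")
    print("best validation ROC-AUC: " + str(best_roc))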