Code Example #1
File: main.py Project: walyson-maxwel/Deep-Learning
def train(model_ft, model_name, model_input_size, grafico, classes, batch,
          epochs_without_learning, kfold, pretrain):

    ## Training parameters

    # Select the parameters to be updated. Since we are using transfer learning, we want to optimize only the classifier.
    params_to_update = []
    if pretrain:
        print("Apenas última camada marcada para otimização")
        for name, param in model_ft.named_parameters():
            if param.requires_grad:
                params_to_update.append(param)
    else:
        print("Todos os named parameters marcados para otimização")
        for name, param in model_ft.named_parameters():
            params_to_update.append(param)

    #optimizer_ft = optim.SGD(params_to_update, lr=0.001,momentum=0.9)
    optimizer_ft = optim.Adam(params_to_update, lr=0.0001)

    #nSamples = [0.5, 1]
    #class_weights = torch.FloatTensor(nSamples)
    #print(class_weights)
    #criterion = nn.CrossEntropyLoss(weight=class_weights)
    criterion = nn.CrossEntropyLoss()
    # Training: decide whether to train with k-fold cross-validation or a plain train/val split
    if kfold >= 2:
        data_dir = "./data/kfold"
        #Datasets/Dataloaders
        print('Iniciando Treinamento por cross validation KFOLD = {}'.format(
            kfold))
        print('Carregando dataset em', data_dir)
        image_datasets = dataset(data_dir, model_input_size)
        dataloaders_dict = dataloader(data_dir, image_datasets, 1)
        model_ft = train_model_kfold(model_ft, dataloaders_dict, criterion,
                                     optimizer_ft, grafico, kfold)
    else:
        data_dir = "./data/normal_trainning"
        #Datasets/Dataloaders
        print('Iniciando Treinamento pelo método normal train/val')
        print('Carregando dataset em', data_dir)
        image_datasets = dataset(data_dir, model_input_size)
        dataloaders_dict = dataloader(data_dir, image_datasets, batch)
        model_ft = train_model(model_ft, model_name, dataloaders_dict,
                               criterion, optimizer_ft, grafico,
                               epochs_without_learning)
    return model_ft
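
For context, a hypothetical call site for this function (my sketch, not part of the source file), assuming a torchvision ResNet-18 frozen for feature extraction; the plotly figure `grafico` is assumed to match the one used by eval() in Code Example #3:

import torch.nn as nn
from torchvision import models
from plotly.subplots import make_subplots

grafico = make_subplots(rows=3, cols=1)  # assumed: the shared progress/metrics figure

model_ft = models.resnet18(pretrained=True)
for param in model_ft.parameters():
    param.requires_grad = False                      # freeze the backbone
model_ft.fc = nn.Linear(model_ft.fc.in_features, 2)  # fresh head, requires_grad=True by default

model_ft = train(model_ft, "resnet18", 224, grafico, ["benigna", "maligna"],
                 batch=32, epochs_without_learning=10, kfold=0, pretrain=True)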
Code Example #2
# Hyper params
code_size = 120
num_epochs = 50
batch_size = 64
lr = 0.002
board_num = 1
optimizer_cls = optim.Adam
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load Data
# train_data = datasets.MNIST('~/data/mnist/', train=True , transform=transforms.ToTensor(), download=True)
# test_data  = datasets.MNIST('~/data/mnist/', train=False, transform=transforms.ToTensor(), download=True)
# train_loader = torch.utils.data.DataLoader(train_data, shuffle=True, batch_size=batch_size, num_workers=4, drop_last=True, pin_memory=True)

print("Gathering training data...")
train_data = dataset(root_dir="/home/joshua/Desktop/ConvAutoencoder/data/trainingData", transform=None, prefix="trainingDataBoards1.npy")
print("Gathering testing data...")
test_data = dataset(root_dir="/home/joshua/Desktop/ConvAutoencoder/data/testData", transform=None, prefix="testDataBoards1.npy")
train_loader = torch.utils.data.DataLoader(train_data, shuffle=True, num_workers=7, batch_size=batch_size, drop_last=True, pin_memory=True)  # num_workers raised from 4 (see commented-out loader above) to 7

# Visualising the trainingData (Only run this with small dataset)
# vis_1, vis_2, vis_3 = random.choice(train_data), random.choice(train_data), random.choice(train_data)
# _, (ax1, ax2, ax3) = plt.subplots(3, 2)
# ax1[0].imshow(vis_1[0])
# ax1[1].imshow(vis_1[1])
# ax2[0].imshow(vis_2[0])
# ax2[1].imshow(vis_2[1])
# ax3[0].imshow(vis_3[0])
# ax3[1].imshow(vis_3[1])
# plt.savefig("visualisation.png")
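
A minimal training-loop sketch for the hyper-parameters above (my addition, not the original file): MSE reconstruction loss with the optimizer class chosen in optimizer_cls; `autoencoder` stands in for the project's UNet instance, and the dataset is assumed to yield (input_board, target_board) pairs, as the visualisation code suggests:

import torch.nn as nn

criterion = nn.MSELoss()
optimizer = optimizer_cls(autoencoder.parameters(), lr=lr)  # autoencoder: assumed model instance
autoencoder.to(device)
for epoch in range(num_epochs):
    for inputs, targets in train_loader:
        inputs = inputs.unsqueeze(1).float().to(device)   # (B, H, W) -> (B, 1, H, W)
        targets = targets.unsqueeze(1).float().to(device)
        optimizer.zero_grad()
        loss = criterion(autoencoder(inputs), targets)
        loss.backward()
        optimizer.step()
    print("epoch %d  loss %.4f" % (epoch, loss.item()))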
Code Example #3
File: main.py Project: walyson-maxwel/Deep-Learning
def eval(grafico, model_input_size, model_name, model_ft, inside_trainning):

    # Directory of the test dataset
    data_dir = "./data/test"
    # Directory from which the trained model is loaded
    save_dir = "./modelo_treinado/" + model_name + ".pth"
    save_dir_confusao = "./modelo_treinado/" + model_name + "-confusao" + ".png"
    save_dir_roc = "./modelo_treinado/" + model_name + "-roc" + ".png"

    batch = 1
    # Datasets/Dataloaders
    print('Modo de evaluation')
    print('Carregando dataset em', data_dir)
    image_datasets = dataset(data_dir, model_input_size)
    dataloaders_dict = dataloader(data_dir, image_datasets, batch)

    # Load the model saved on disk only when running standalone evaluation, not inside the training loop
    if not inside_trainning:
        model_ft.load_state_dict(
            torch.load(save_dir,
                       map_location=torch.device(
                           "cuda:0" if torch.cuda.is_available() else "cpu")))
    # Put the model in evaluation mode
    model_ft.eval()

    print('Calculando a taxa de acerto para ', data_dir)
    x = []
    y = []
    true_positive = 0
    false_positive = 0
    true_negative = 0
    false_negative = 0
    correct = 0
    total = 0
    images_processed = 0
    true_labels = []
    roc_probabilities = []

    # Iterate over the validation set with a progress bar, accumulating
    # confusion counts and a running accuracy (the dataloader batch size is 1)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    with tqdm(total=len(dataloaders_dict['val'].dataset)) as pbar:
        with torch.no_grad():
            since = time.time()
            for data in dataloaders_dict['val']:
                images_processed += 1
                pbar.update(1)
                images, labels = data
                images_print = images
                images = images.to(device)
                labels = labels.to(device)
                outputs = model_ft(images)
                _, predicted = torch.max(outputs.data, 1)
                total += labels.size(0)
                x.append(total)

                sm = torch.nn.Softmax(dim=1)
                probabilities = sm(outputs)

                # Benigna (benign) = 0
                # Maligna (malignant) = 1
                # .cpu() is a no-op on CPU tensors, so one path covers both devices
                label_val = labels.cpu().numpy()[0]
                pred_val = predicted.cpu().numpy()[0]
                if label_val == 0 and pred_val == 0:
                    true_negative += 1
                elif label_val == 1 and pred_val == 1:
                    true_positive += 1
                elif label_val == 0 and pred_val == 1:
                    false_positive += 1
                elif label_val == 1 and pred_val == 0:
                    false_negative += 1

                #if not((predicted == labels).sum().item()):
                #   print('Imagem classificada incorretamente:')
                #   print("Probabilidade BENIGNA : %.2f%% Probabilidade MALIGNA : %.2f%%" % (probabilities.data[0][0].item()*100,probabilities.data[0][1].item()*100) ) #Converted to probabilities
                #imshow(torchvision.utils.make_grid(images_print))
                roc_probabilities.append(probabilities.data[0][1].item())
                true_labels.append(label_val)  # store Python scalars so np.array() also works on CUDA runs
                correct += (predicted == labels).sum().item()
                y.append(correct / total)
                #print('Imagens Processadas : {}'.format(images_processed))
    time_elapsed = time.time() - since
    roc_probabilities = np.array(roc_probabilities)
    true_labels = np.array(true_labels)

    x1, y1 = [0, 1], [0, 1]
    pyplot.plot(x1, y1, linestyle='--')
    fpr, tpr, thresholds = metrics.roc_curve(true_labels, roc_probabilities)
    auc = metrics.roc_auc_score(true_labels, roc_probabilities)
    pyplot.plot(fpr, tpr, marker='.', label='ResNet AUC = ' + str(auc))
    pyplot.xlabel('1 - especificidade')
    pyplot.ylabel('Sensibilidade')
    pyplot.legend(loc=4)
    pyplot.savefig(save_dir_roc)

    print(
        "True Positive (Imagem Maligna classificada como maligna) = %d\nTrue Negative (Imagem Benigna classificada como benigna) = %d\nFalse Positive (Imagem Benigna classificada como maligna) = %d\nFalse Negative (Imagem Maligna classificada como benigna) = %d\n"
        % (true_positive, true_negative, false_positive, false_negative))
    array = [[true_positive, false_negative], [false_positive, true_negative]]
    df_cm = pd.DataFrame(array,
                         index=["Maligna", "Benigna"],
                         columns=["Maligna", "Benigna"])

    plt.figure(figsize=(10, 7))
    plt.title("Matriz de Confusão", fontsize=25)
    sn.set(font_scale=1.4)
    sn.heatmap(df_cm, annot=True, cmap="Blues", fmt='d')
    plt.xlabel('Classe Predita', fontsize=20)
    plt.ylabel('Classe Real', fontsize=20)
    plt.tick_params(axis='both',
                    which='major',
                    labelsize=18,
                    labelbottom=True,
                    bottom=False,
                    top=False,
                    labeltop=False)
    plt.savefig(save_dir_confusao)

    save_dir_metricas = "./modelo_treinado/" + model_name + "-metricas" + ".txt"
    with open(save_dir_metricas, 'w') as filetowrite:
        quantidade_de_imagens = true_positive + true_negative + false_positive + false_negative
        acertos = true_positive + true_negative
        erros = false_positive + false_negative
        filetowrite.write(
            "True Positive (Imagem Maligna classificada como maligna) = %d\nTrue Negative (Imagem Benigna classificada como benigna) = %d\nFalse Positive (Imagem Benigna classificada como maligna) = %d\nFalse Negative (Imagem Maligna classificada como benigna) = %d\n\n"
            % (true_positive, true_negative, false_positive, false_negative))
        filetowrite.write("Quantidade de Imagens Analisadas = %d\n" %
                          (quantidade_de_imagens))
        filetowrite.write(
            "Quantidade de Imagens Classificadas Corretamente = %d\n" %
            (acertos))
        filetowrite.write(
            "Quantidade de Imagens Classificadas Incorretamente = %d\n\n" %
            (erros))

        acuracia = acertos / quantidade_de_imagens
        if (true_negative + false_positive == 0):
            especificidade = 0
        else:
            especificidade = true_negative / (true_negative + false_positive)

        if (true_positive + false_negative == 0):
            sensibilidade = 0
        else:
            sensibilidade = true_positive / (true_positive + false_negative)

        if (true_positive + false_positive == 0):
            precisao = 0
        else:
            precisao = true_positive / (true_positive + false_positive)
        if (precisao + sensibilidade == 0):
            f1measure = 0
        else:
            f1measure = 2 * ((precisao * sensibilidade) /
                             (precisao + sensibilidade))

        filetowrite.write(
            "Acurácia (Taxa de acertos das amostras totais)=  %.3f\n" %
            (acuracia))
        filetowrite.write(
            "Especificidade (Representa a probabilidade do classificador identificar corretamente a classe benigna) = %.3f\n"
            % (especificidade))
        filetowrite.write(
            "Sensibilidade (Representa a probabilidade do classificador identificar corretamente a classe maligna)= %.3f\n"
            % (sensibilidade))
        filetowrite.write(
            "Precisão (De todas as amostras classificadas como maligna, quantas são realmente malignas)= %.3f\n"
            % (precisao))
        filetowrite.write("F1-Measure = %.3f\n" % (f1measure))
        filetowrite.write('Evaluation completo em %f' % time_elapsed)

    grafico.add_trace(go.Scatter(x=x,
                                 y=y,
                                 name='Conjunto de Teste',
                                 line=dict(color='green', width=2)),
                      row=3,
                      col=1)
    print('Taxa de acerto: %d %%' % (100 * correct / total))
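
As a hypothetical cross-check (not in the original file), the four counters accumulated above can be recovered in a single call with scikit-learn, which this module already uses for the ROC curve:

import numpy as np
from sklearn import metrics

y_true = np.array([0, 0, 1, 1, 1])   # 0 = benigna, 1 = maligna
y_pred = np.array([0, 1, 1, 1, 0])
tn, fp, fn, tp = metrics.confusion_matrix(y_true, y_pred).ravel()
print(tn, fp, fn, tp)                # -> 1 1 1 2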
Code Example #4
File: train.py Project: dcvl18/DeepDualNet-Pytorch
    f.close()

if __name__ == '__main__':
    # setup dataset
    dirname = '../../2_1.DCVL_Face_DB/'  # dir of dataset
    cuda = torch.cuda.is_available()
    net_dir = 'model'  # dir where model will be saved
    DeepDual = DeepDual()  # note: this rebinds the class name DeepDual to the instance
    if not os.path.exists(net_dir):
        os.makedirs(net_dir)
    epoch_num = 100
    mask = list(range(1, 51))
    for trial in range(50):
        if mask[0] > 0:
            net_path = os.path.join(net_dir, 'model_%02d.pth' % (trial))
            train_dataset = loader.dataset(root_dir=dirname, mask=mask[1:])
            train_loader = torch.utils.data.DataLoader(train_dataset,
                                                       batch_size=1000,
                                                       shuffle=True,
                                                       pin_memory=cuda,
                                                       drop_last=True,
                                                       num_workers=4)
            t = time.perf_counter()

            for epoch in range(epoch_num):
                mean_loss = 0
                cnt = 0
                for step, sample in enumerate(train_loader):
                    # bring the label, left, and right images from the loader
                    label = sample['label']
                    label = label.view(-1, 1, 1, 1)  # view() is not in-place; the result must be reassigned
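
The excerpt is cut off inside the training step. A hypothetical continuation (my sketch, not the project's code; the sample keys 'left' and 'right' are inferred from the comment above, and criterion/optimizer are assumed to be defined earlier in the file) would follow the standard supervised pattern:

                    # Hypothetical continuation -- assumed names are marked below
                    left, right = sample['left'], sample['right']  # assumed keys
                    if cuda:
                        label, left, right = label.cuda(), left.cuda(), right.cuda()
                    output = DeepDual(left, right)   # assumed two-stream forward
                    loss = criterion(output, label)  # criterion assumed defined above
                    optimizer.zero_grad()
                    loss.backward()
                    optimizer.step()
                    mean_loss += loss.item()
                    cnt += 1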
Code Example #5
File: main.py Project: petrjanda/bert-playground
import architecture
import bert_prep
import loader

# Initialize session
sess = tf.Session()

# Params for bert model and tokenization
bert_path = "https://tfhub.dev/google/bert_uncased_L-12_H-768_A-12/1"
max_seq_length = 256

# Reduce logging output.
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)

train_X, train_y, test_X, test_y = loader.dataset(max_seq_length)

# Convert to features
tokenizer = bert_prep.load_tokenizer(sess, bert_path)
processor = bert_prep.InputExampleProcessor(tokenizer, max_seq_length)

train_X, train_y = bert_prep.InputExamples(train_X, train_y).to_features(processor)
test_X, test_y = bert_prep.InputExamples(test_X, test_y).to_features(processor)

def initialize_vars(sess):
    sess.run(tf.local_variables_initializer())
    sess.run(tf.global_variables_initializer())
    sess.run(tf.tables_initializer())
    K.set_session(sess)
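
A hypothetical usage of initialize_vars (not shown in the source file): with TF1-style hub BERT layers, the session has to be initialized after the Keras model is built and before training; `architecture.build_model` is an assumed helper name standing in for whatever architecture.py actually exposes:

model = architecture.build_model(max_seq_length)   # assumed builder
initialize_vars(sess)
model.fit(train_X, train_y,
          validation_data=(test_X, test_y),
          epochs=1,
          batch_size=32)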

Code Example #6
from torch.autograd import Variable
from torchvision import datasets, transforms

import matplotlib.pyplot as plt

code_size = 120

autoencoder = UNet(1, in_channels=1, depth=5, start_filts=8, merge_mode='add')
autoencoder.load_state_dict(
    torch.load("/home/joshua/Desktop/ConvAutoencoder/unet_model.pth"))
autoencoder.eval()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
autoencoder.to(device)

test_data = dataset(
    root_dir="/home/joshua/Desktop/ConvAutoencoder/data/testData",
    transform=None,
    prefix="testDataBoards1.npy")

vis_1 = random.choice(test_data)
vis_2 = random.choice(test_data)
vis_3 = random.choice(test_data)
check_1 = torch.from_numpy(vis_1[0]).unsqueeze(0).unsqueeze(1).float()
check_2 = torch.from_numpy(vis_2[0]).unsqueeze(0).unsqueeze(1).float()
check_3 = torch.from_numpy(vis_3[0]).unsqueeze(0).unsqueeze(1).float()
# Variable() is a no-op wrapper in modern PyTorch, kept from the original code
res_1 = autoencoder(Variable(check_1.to(device)))
res_2 = autoencoder(Variable(check_2.to(device)))
res_3 = autoencoder(Variable(check_3.to(device)))
_, (ax1, ax2, ax3) = plt.subplots(3, 3)
ax1[0].imshow(vis_1[0])
ax1[1].imshow(res_1[0][0].to("cpu").detach().numpy())
ax1[2].imshow(vis_1[1])
ax2[0].imshow(vis_2[0])
ax2[1].imshow(res_2[0][0].to("cpu").detach().numpy())