Example #1
def testFunc(test_dataset_path, model_path, out_folder):
	#Load the test data
	test_dataset = common.CorrelationDataset(test_dataset_path)
	#Open the output file
	out_file = open(os.path.join(out_folder,"results.txt"),'w')
	#Explore the classes
	values, counts = np.unique(test_dataset.y, return_counts=True)
	num_classes = len(values)
	percentage = counts / len(test_dataset.X)
	out_file.write("The classes: " + str(values) + " The frequency: " + str(counts) + " The percentage: " + str(percentage) + "\n")
	#Initialize the network
	input_size = len(test_dataset.X[0])
	net = common.corr_nn(input_size, num_classes)
	#load the model
	net = load_checkpoint(net, model_path)
	net = common.to_cuda(net)
	# Test the Model
	net.eval() # switch the network to evaluation mode; affects dropout and batch-norm layers if present
	with torch.no_grad(): # no gradients are needed at test time
		correlations = torch.FloatTensor(test_dataset.X)
		correlations = common.to_cuda(correlations)
		outputs = net(correlations)
	_, predicted = torch.max(outputs, 1)
	predicted = predicted.cpu() # move predictions to the CPU for the sklearn metrics below
	#Output the results metrics
	confusion_matrix = skm.confusion_matrix(test_dataset.y, predicted)
	plot_confusion_matrix(confusion_matrix,values,os.path.join(out_folder,'confusion matrix.png'))
	out_file.write("Confusion matrix: \n" + str(confusion_matrix) + "\n\n")
	accuracy = skm.accuracy_score(test_dataset.y, predicted)
	out_file.write(skm.classification_report(test_dataset.y, predicted))
	f1_score_macro = skm.f1_score(test_dataset.y, predicted, average = 'macro')
	f1_score_weighted = skm.f1_score(test_dataset.y, predicted, average = 'weighted')
	# print ("accuracy: %.3f " % accuracy)
	# print ("f1_score_macro: %.3f " % f1_score_macro)
	# print ("f1_score_weighted: %.3f  " % f1_score_weighted)
	out_file.close()
	return accuracy, f1_score_macro, f1_score_weighted
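The load_checkpoint helper used here is defined outside the snippet. A minimal sketch of what it might look like, assuming the checkpoint was written with torch.save as either a bare state_dict or a dict wrapping one:

import torch

def load_checkpoint(net, model_path):
    # load onto the CPU first; common.to_cuda moves the model afterwards
    checkpoint = torch.load(model_path, map_location='cpu')
    # accept either a raw state_dict or a {'state_dict': ...} wrapper (assumption)
    state_dict = checkpoint.get('state_dict', checkpoint)
    net.load_state_dict(state_dict)
    return net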
Example #2
def trainFunc(net, train_loader, criterion, optimizer):
    """Train the network
	param net: nn.Module. The network to train
	param train_loader: data.Dataset. The train dataset
	param criterion: Loss function
	param optimizer: optimization algorhitm
	
	return: average over batches loss, average over batches error rate.
    """
    lossSum = 0  # running sum of batch losses
    errSum = 0  # number of misclassified samples
    total = 0  # total number of samples
    net.train()  # switch the network to training mode; affects dropout and batch-norm layers if present
    for i, (time_series, scores) in enumerate(train_loader):
        time_series = time_series.unsqueeze(1)
        time_series = common.to_cuda(time_series)
        scores = common.to_cuda(scores)

        # Forward + Backward + Optimize
        optimizer.zero_grad()
        outputs = net(time_series)
        loss = criterion(outputs, scores)
        loss.backward()
        optimizer.step()
        lossSum += loss.item()
        total += scores.size(0)
        _, predicted = torch.max(outputs, 1)
        errSum += (predicted.cpu() != scores.cpu()).sum().item()

        # epoch, num_epochs, train_dataset and batch_size are read from the enclosing scope
        if (i + 1) % 20 == 0:
            print('Epoch: [%d/%d], Step: [%d/%d], Loss: %.4f' %
                  (epoch + 1, num_epochs, i + 1,
                   len(train_dataset) // batch_size, loss.item()))
    # i is the last batch index, so i + 1 is the number of batches
    return (lossSum / (i + 1), 100 * float(errSum) / total)
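trainFunc reads epoch, num_epochs, train_dataset and batch_size from the enclosing module scope. A minimal driver sketch that supplies them, with names taken from the surrounding examples and illustrative hyperparameter values (not from the original):

import torch
import torch.nn as nn
import common  # the project's helper module used throughout these examples

batch_size = 32      # illustrative values
num_epochs = 50
learning_rate = 1e-3

train_dataset = common.TimeseriesDataset("train_set_class.pkl")
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)

net = common.to_cuda(common.fMRI_CNN())
criterion = nn.NLLLoss()
optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate)

for epoch in range(num_epochs):  # epoch is the global read inside trainFunc
    trainLoss, trainErr = trainFunc(net, train_loader, criterion, optimizer)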
Example #3
def testFunc(net, test_loader):
    """Test the network
    param net: nn.Module. The network to test
    param test_loader: DataLoader. The test data loader (assumed batch_size=1)

    return: accuracy rate
    """
    total = 0  # total number of samples
    correct = 0
    net.eval()  # switch the network to evaluation mode; affects dropout and batch-norm layers if present
    num_of_below_avg = 0
    num_of_avg = 0
    num_of_above_avg = 0
    for i, (time_series, scores) in enumerate(test_loader):
        time_series = common.to_cuda(time_series)
        outputs = net(time_series)
        _, predicted = torch.max(outputs, 1)
        total += scores.size(0)
        correct += (predicted.cpu() == scores).sum()
        # count predictions per class; assumes the loader yields one sample per batch
        if predicted.item() == 0:
            num_of_below_avg += 1
        elif predicted.item() == 1:
            num_of_avg += 1
        else:
            num_of_above_avg += 1
    accuracy = (100 * float(correct) / total)
    print("number of below:", num_of_below_avg, " number of average:",
          num_of_avg, " number of above:", num_of_above_avg)
    return accuracy
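testFunc counts one prediction per loop iteration, so it relies on a loader with batch_size=1. If larger batches are wanted, the per-class counting can be done batch-safely with torch.bincount; a sketch under that assumption, not the original code:

counts = torch.zeros(3, dtype=torch.long)  # below / average / above
with torch.no_grad():
    for time_series, scores in test_loader:
        outputs = net(common.to_cuda(time_series))
        _, predicted = torch.max(outputs, 1)
        counts += torch.bincount(predicted.cpu(), minlength=3)
num_of_below_avg, num_of_avg, num_of_above_avg = counts.tolist()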
Example #4
def evaluateFunc(net, validate_loader, criterion):
    """Evaluate the network
    param net: nn.Module. The network to evaluate
    param validate_loader: DataLoader. The validation data loader
    param criterion: Loss function

    return: average over batches loss, average over batches error rate.
    """
    lossSum = 0  # running sum of batch losses
    errSum = 0  # number of misclassified samples
    total = 0  # total number of samples
    net.eval()  # switch the network to evaluation mode; affects dropout and batch-norm layers if present
    for i, (time_series, scores) in enumerate(validate_loader):
        time_series = common.to_cuda(time_series)
        outputs = net(time_series)
        loss = criterion(outputs.cpu(), scores)
        lossSum += loss.item()
        _, predicted = torch.max(outputs, 1)
        total += scores.size(0)
        errSum += (predicted.cpu() != scores).sum()
    # return the average loss per batch and the error rate in percent
    return (lossSum / (i + 1), 100 * float(errSum) / total)
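evaluateFunc runs the forward pass with autograd enabled even though no backward pass follows. Wrapping the call in torch.no_grad() avoids building the graph and saves memory; a small sketch using the names from the examples below:

with torch.no_grad():  # gradients are not needed for evaluation
    evaluateLoss, evaluateErr = evaluateFunc(net, validate_loader, criterion)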
Example #5
        os.path.join(args.data_folder, "train_set_class.pkl"))

    validate_dataset = common.TimeseriesDataset(
        os.path.join(args.data_folder, "validate_set_class.pkl"))

    train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                               batch_size=batch_size,
                                               shuffle=True)

    validate_loader = torch.utils.data.DataLoader(dataset=validate_dataset,
                                                  batch_size=batch_size,
                                                  shuffle=False)

    #create object of the fMRI_CNN class
    fMRI_CNN = common.fMRI_CNN()
    fMRI_CNN = common.to_cuda(fMRI_CNN)
    # Loss and Optimizer
    criterion = nn.NLLLoss()
    optimizer = torch.optim.Adam(fMRI_CNN.parameters(), lr=learning_rate)
    trainLossArr = []
    trainErrArr = []
    evaluateLossArr = []
    evaluateErrArr = []
    # Run num_epochs epochs; each epoch trains on len(train_dataset) // batch_size batches
    for epoch in range(num_epochs):
        trainLoss, trainErr = trainFunc(fMRI_CNN, train_loader, criterion,
                                        optimizer)
        evaluateLoss, evaluateErr = evaluateFunc(fMRI_CNN, validate_loader,
                                                 criterion)

        trainLossArr.append(trainLoss)
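The snippet breaks off after the first append; the four arrays collected per epoch are typically plotted once training ends. A sketch of that step, assuming matplotlib (the original plotting code is not shown):

import matplotlib.pyplot as plt

plt.plot(trainLossArr, label='train loss')
plt.plot(evaluateLossArr, label='validation loss')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend()
plt.savefig('loss_curves.png')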
Example #6
    train_dataset = common.TimeseriesDataset(
        os.path.join(args.data_folder, "train_set_class.pkl"))

    validate_dataset = common.TimeseriesDataset(
        os.path.join(args.data_folder, "validate_set_class.pkl"))

    train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                               batch_size=batch_size,
                                               shuffle=True)

    validate_loader = torch.utils.data.DataLoader(dataset=validate_dataset,
                                                  batch_size=batch_size,
                                                  shuffle=False)

    #create object of the DeepFCNet class
    DeepFCNet = common.DeepFCNet()
    DeepFCNet = common.to_cuda(DeepFCNet)
    # Loss and Optimizer
    criterion = nn.NLLLoss()
    optimizer = torch.optim.Adam(DeepFCNet.parameters(), lr=learning_rate)
    trainLossArr = []
    trainErrArr = []
    evaluateLossArr = []
    evaluateErrArr = []
    # Run num_epochs epochs; each epoch trains on len(train_dataset) // batch_size batches
    for epoch in range(num_epochs):
        trainLoss, trainErr = trainFunc(DeepFCNet, train_loader, criterion,
                                        optimizer)
        evaluateLoss, evaluateErr = evaluateFunc(DeepFCNet, validate_loader,
                                                 criterion)
        trainLossArr.append(trainLoss)
        trainErrArr.append(trainErr)
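Example #7 below loads a trained model via load_checkpoint, but none of the training loops show the saving side. A minimal counterpart sketch, assuming the checkpoint is a plain state_dict:

def save_checkpoint(net, model_path):
    # store only the parameters; load_checkpoint restores them with load_state_dict
    torch.save(net.state_dict(), model_path)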
Example #7
    accuracy = (100 * float(correct) / total)
    print("number of below:", num_of_below_avg, " number of average:",
          num_of_avg, " number of above:", num_of_above_avg)
    return accuracy


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--data_folder',
        required=True,
        help='path to folder containing all the data - train, validate and test'
    )
    parser.add_argument('--model',
                        required=True,
                        help='path to the model after training')
    args = parser.parse_args()

    test_dataset = common.TimeseriesDataset(
        os.path.join(args.data_folder, "test_set_class.pkl"))

    # no batch_size given, so the DataLoader defaults to one sample per batch,
    # which is what testFunc's per-class counting expects
    test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                              shuffle=False)
    net = common.DeepFCNet()
    #load the model
    net = load_checkpoint(net, args.model)
    net = common.to_cuda(net)
    # Test the Model
    accuracy = testFunc(net, test_loader)
    print("Test accuracy: %.2f%%" % accuracy)
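common.to_cuda is called on models and tensors throughout these examples but is never shown. A plausible one-liner, assuming it simply falls back to the CPU when no GPU is available:

def to_cuda(x):
    # move a tensor or module to the GPU when one is present; otherwise a no-op
    return x.cuda() if torch.cuda.is_available() else x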
Example #8
    values, counts = np.unique(train_dataset.y, return_counts=True)
    num_classes = len(values)
    print("The classes: " + str(values))
    percentage = counts / len(train_dataset.X)
    print("Train: The frequency: " + str(counts) + " The percentage: " +
          str(percentage) + "\n")

    values, counts = np.unique(validate_dataset.y, return_counts=True)
    percentage = counts / len(validate_dataset.X)
    print("Validate: The frequency: " + str(counts) + " The percentage: " +
          str(percentage) + "\n")

    #Initialize the network
    input_size = len(train_dataset.X[0])
    #create object of the cogtests_nn class
    cogtests_nn = common.cogtests_nn(input_size, num_classes)
    cogtests_nn = common.to_cuda(cogtests_nn)
    # Loss and Optimizer
    #criterion = nn.CrossEntropyLoss(weight=torch.tensor([0.34, 0, 0.66]))
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(cogtests_nn.parameters(), lr=learning_rate)
    trainLossArr = []
    trainErrArr = []
    evaluateLossArr = []
    evaluateErrArr = []
    # Run num_epochs epochs; each epoch trains on len(train_dataset) // batch_size batches
    for epoch in range(num_epochs):
        trainLoss, trainErr = trainFunc(cogtests_nn, train_loader, criterion,
                                        optimizer)
        evaluateLoss, evaluateErr = evaluateFunc(cogtests_nn, validate_loader,
                                                 criterion)
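The commented-out criterion above hints at weighting the loss against class imbalance. One way to derive weights from the class frequencies already computed in this snippet is inverse-frequency weighting; a sketch, not the original author's scheme (the hard-coded weights in the comment are theirs):

import torch

weights = torch.tensor(1.0 / percentage, dtype=torch.float)  # rarer classes weigh more
weights = common.to_cuda(weights / weights.sum())            # normalize to sum to 1
criterion = nn.CrossEntropyLoss(weight=weights)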
Example #9
    values, counts = np.unique(train_dataset.y, return_counts=True)
    num_classes = len(values)
    print("The classes: " + str(values))
    percentage = counts / len(train_dataset.X)
    print("Train: The frequency: " + str(counts) + " The percentage: " +
          str(percentage) + "\n")

    values, counts = np.unique(validate_dataset.y, return_counts=True)
    percentage = counts / len(validate_dataset.X)
    print("Validate: The frequency: " + str(counts) + " The percentage: " +
          str(percentage) + "\n")

    #Initialize the network
    input_size = len(train_dataset.X[0])
    #create object of the corr_nn class
    corr_nn = common.corr_nn(input_size, num_classes)
    corr_nn = common.to_cuda(corr_nn)
    # Loss and Optimizer
    #criterion = nn.CrossEntropyLoss(weight=torch.tensor([0.3, 0, 0.7]))
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(corr_nn.parameters(), lr=learning_rate)
    trainLossArr = []
    trainErrArr = []
    evaluateLossArr = []
    evaluateErrArr = []
    # Run num_epochs epochs; each epoch trains on len(train_dataset) // batch_size batches
    for epoch in range(num_epochs):
        trainLoss, trainErr = trainFunc(corr_nn, train_loader, criterion,
                                        optimizer)
        evaluateLoss, evaluateErr = evaluateFunc(corr_nn, validate_loader,
                                                 criterion)