def train(epoch, img_path, target_path, transforms, net, criterion):
    train_dataset = selfData(img_path, target_path, transforms)
    train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True, num_workers=0, drop_last=False, collate_fn=collate_fn)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    for ep in range(epoch):
        # Step the learning rate down as training progresses.
        if ep >= 12:
            learning_rate = 0.0025
        elif ep >= 6:
            learning_rate = 0.005
        else:
            learning_rate = 0.01
        # Rebuild the optimizer once per epoch so the scheduled learning rate takes effect;
        # building it inside the batch loop would discard the SGD momentum buffers on every step.
        optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9, weight_decay=0.0005)
        running_loss = 0.0
        print("Epoch {}.".format(ep + 1))
        for i, data in enumerate(train_loader, 1):
            inputs, labels = data
            labels = torch.Tensor(list(map(int, labels)))
            inputs = inputs.to(device)
            labels = labels.to(device)
            optimizer.zero_grad()
            outputs = net(inputs)
            loss = criterion(outputs, labels.long())
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            print("Epoch {}.\tBatch {}.\tLoss = {:.3f}.".format(ep + 1, i, loss.item()))
            if i % 2000 == 0:    # report the average loss every 2000 mini-batches
                print('[%d, %5d] loss: %.3f' % (ep + 1, i, running_loss / 2000))
                running_loss = 0.0
    print('Finished Training.')
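
For context, a minimal sketch of how this train() might be invoked, mirroring the main block in Example #5; mAlexNet, selfData, the args fields, and the composed transforms are assumed to come from the surrounding project:

    # Hypothetical driver code, modeled on Example #5 below.
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    net = mAlexNet().to(device)
    criterion = nn.CrossEntropyLoss()
    train(args.epochs, args.train_img, args.train_lab, transforms, net, criterion)
    torch.save(net.state_dict(), './model.pth')  # checkpoint for later evaluation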
Example #2
def eval(img_path, target_path, net, subset="rainy"):
    # `subset` names the weather-split subfolder under args.split_path that the
    # classified images are written into.
    print("\nTesting starts now...")

    net.eval()
    test_dataset = selfData(img_path, target_path, transforms)
    test_size = len(test_dataset)
    correct = 0
    total = 0
    TP = 0
    FP = 0
    FN = 0
    TN = 0
    with torch.no_grad():
        for i in tqdm(range(test_size)):
            split_path = osp.join(args.split_path, subset)
            image = test_dataset.pull_img(i)
            if image is None:
                continue
            tmp_image = test_dataset.pull_img(i)
            label = int(test_dataset.pull_label(i))

            x = transforms(image).unsqueeze(0)  # add a batch dimension; the Variable wrapper is no longer needed
            if torch.cuda.is_available():
                device = torch.device(args.cuda_device)
                x = x.to(device)
            y = net(x)
            _, predicted = torch.max(y, 1)
            total += 1
            correct += (predicted == label).sum().item()
            # Sort each image into a TP/FP/FN/TN subfolder according to the prediction.
            if predicted == 1 and label == 1:
                TP += 1
                split_path = osp.join(split_path, "TP")
            elif predicted == 1 and label == 0:
                FP += 1
                split_path = osp.join(split_path, "FP")
            elif predicted == 0 and label == 1:
                FN += 1
                split_path = osp.join(split_path, "FN")
            elif predicted == 0 and label == 0:
                TN += 1
                split_path = osp.join(split_path, "TN")
            if not os.path.exists(split_path):
                os.makedirs(split_path)
            split_path = osp.join(split_path, repr(i) + ".jpg")
            cv2.imwrite(split_path, tmp_image)
    print("Acc:{}\tTP:{}\tFP:{}\tFN:{}\tTN:{}".format(correct / total, TP,
                                                      FP, FN, TN))
    return correct / total
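
The TP/FP/FN/TN counters that eval() prints can also be reduced to precision, recall, and F1. A small helper sketch; derive_metrics is our name, not part of the original code:

    def derive_metrics(TP, FP, FN, TN):
        # Standard binary-classification metrics from the 2x2 confusion matrix.
        accuracy = (TP + TN) / max(TP + FP + FN + TN, 1)
        precision = TP / max(TP + FP, 1)  # fraction of predicted class-1 samples that are correct
        recall = TP / max(TP + FN, 1)     # fraction of true class-1 samples that are recovered
        f1 = 2 * precision * recall / max(precision + recall, 1e-12)
        return accuracy, precision, recall, f1

    # e.g. acc, p, r, f1 = derive_metrics(TP, FP, FN, TN)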
Example #3
def test(img_path,
         target_path,
         net,
         transforms=minetransforms,
         device=int(args.cuda_device)):
    print("\nTesting starts now...", type(device), device)

    net.eval()
    test_dataset = selfData(img_path, target_path, transforms)
    test_loader = DataLoader(test_dataset,
                             batch_size=64,
                             shuffle=False,
                             num_workers=16,
                             drop_last=False,
                             pin_memory=True,
                             collate_fn=collate_fn)
    correct = 0
    total = 0
    with torch.no_grad():
        # Iterating the loader directly already yields ceil(len(test_dataset) / 64) batches,
        # so no manual batch count or iter()/next() bookkeeping is needed.
        for images, labels in test_loader:
            labels = torch.Tensor(list(map(int, labels)))
            if torch.cuda.is_available():
                images = images.cuda(device)  # use the device index passed as an argument
                labels = labels.cuda(device)
            outputs = net(images)
            _, predicted = torch.max(outputs, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    return correct / total
Example #4
def test(img_path, target_path, transforms, net):
    print("\nTesting starts now...")
    test_dataset = selfData(img_path, target_path, transforms)
    test_loader = DataLoader(test_dataset, batch_size=100, shuffle=True, num_workers=0, collate_fn=collate_fn)
    correct = 0
    total = 0
    item = 1
    with torch.no_grad():
        for data in test_loader:
            images, labels = data
            print("Testing on batch {}".format(item))
            labels = list(map(int, labels))
            labels = torch.Tensor(labels)
            if torch.cuda.is_available():
                device = torch.device("cuda:0")
                images = images.to(device)
                labels = labels.to(device)
            outputs = net(images)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
            item += 1
    return (correct/total)
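
For reference, a trained checkpoint written by train() (the main block in Example #5 saves it as ./model.pth) could be restored and scored with this test() roughly as follows; the test_img.txt / test_lab.txt paths are placeholders rather than names taken from the original code:

    # Minimal sketch: restore the saved weights and evaluate on a held-out split.
    net = mAlexNet().to(device)
    net.load_state_dict(torch.load('./model.pth', map_location=device))
    net.eval()
    acc = test('test_img.txt', 'test_lab.txt', transforms, net)  # placeholder paths
    print("Accuracy: {:.4f}".format(acc))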
Example #5
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision import transforms
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
# selfData, mAlexNet, AlexNet, args_parser, imshow, train and test come from the project's own modules.

if __name__ == "__main__":
    args = args_parser()
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # Note: this rebinds the name `transforms` from the torchvision module to the composed pipeline.
    transforms = transforms.Compose([
        transforms.Resize(256),
        transforms.RandomResizedCrop(224),
        transforms.ToTensor(),  # scales pixel values to [0, 1]
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    if args.imshow:
        train_dataset = selfData(args.train_img, args.train_lab, transforms)
        train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True, num_workers=0, drop_last=False)
        imgs, labels = next(iter(train_loader))
        imshow(train_loader)

    if args.model == 'mAlexNet':
        net = mAlexNet().to(device)
    elif args.model == 'AlexNet':
        net = AlexNet().to(device)

    criterion = nn.CrossEntropyLoss()
    if args.path == '':
        train(args.epochs, args.train_img, args.train_lab, transforms, net, criterion)
        PATH = './model.pth'
        torch.save(net.state_dict(), PATH)
        if args.model == 'mAlexNet':
Example #6
def eval(img_path, target_path, net, subset="rainy"):
    # `subset` names the weather-split subfolder under args.split_path.
    print("\nTesting starts now...")

    net.eval()
    test_dataset = selfData(img_path, target_path, transforms)
    test_size = len(test_dataset)
    correct = 0
    total = 0
    TP = 0
    FP = 0
    FN = 0
    TN = 0
    # Per-camera error tally; only used by the commented-out diagnostics below.
    count = [0] * 200
    with torch.no_grad():
        for i in range(test_size):
            split_path = osp.join(args.split_path, subset)
            image = test_dataset.pull_img(i)
            # Optionally skip images from particular cameras by filename suffix
            # (the original experimented with _25, _26, _27, _30, _105, _107, _111, _119, _122, _150):
            # if test_dataset.img_list[i].find("_25") >= 0:
            #     continue
            if image is None:
                continue
            tmp_image = test_dataset.pull_img(i)
            label = int(test_dataset.pull_label(i))

            x = minetransforms(image).unsqueeze(0)  # add a batch dimension; the Variable wrapper is no longer needed
            if torch.cuda.is_available():
                x = x.cuda(device)
            y = net(x)
            _, predicted = torch.max(y, 1)
            total += 1
            correct += (predicted == label).sum().item()

            # Uncomment to tally misclassifications per camera in `count`:
            # if predicted != label:
            #     index = int(test_dataset.img_list[i].split('_')[-1].split('.')[0])
            #     count[index] += 1
            if predicted == 1 and label == 1:
                TP += 1
                split_path = osp.join(split_path, "TP")
            elif predicted == 1 and label == 0:
                FP += 1
                split_path = osp.join(split_path, "FP")
            elif predicted == 0 and label == 1:
                FN += 1
                split_path = osp.join(split_path, "FN")
            elif predicted == 0 and label == 0:
                TN += 1
                split_path = osp.join(split_path, "TN")
            # Uncomment to also dump each image into its TP/FP/FN/TN subfolder:
            # if not os.path.exists(split_path):
            #     os.makedirs(split_path)
            # split_path = osp.join(split_path, repr(i) + ".jpg")
            # cv2.imwrite(split_path, tmp_image)
    print("Acc:{}\tTP:{}\tFP:{}\tFN:{}\tTN:{}".format((correct / total), TP,
                                                      FP, FN, TN))
    # for ind, val in enumerate(count):
    #     print(ind, val)
    return (correct / total)