Example #1
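(The snippets below are excerpts from larger training/testing scripts: they rely on module-level imports and globals such as IMAGE_Dataset, DATASET_ROOT*, BATCH_SIZE, CUDA_DEVICES and various logging lists that are defined elsewhere. A minimal sketch of the preamble they presuppose; every name and value here is an assumption, not the original configuration:)

# Hypothetical preamble -- the real scripts define their own versions of these.
import copy
import math
import time
from pathlib import Path

import numpy as np
import torch
import torch.nn as nn
from torch import optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
from torchvision import models, transforms

from dataset import IMAGE_Dataset  # custom Dataset class; module path is assumed

CUDA_DEVICES = 0               # GPU index (assumed)
DATASET_ROOT = './data/train'  # assumed paths
DATASET_ROOT2 = './data/test'
BATCH_SIZE = 32                # assumed value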
def myDataloader():
    global DATA_LEN

    data_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])
    #print(DATASET_ROOT)
    all_data_set = IMAGE_Dataset(Path(DATASET_ROOT2), data_transform)

    #print('set:',len(train_set))
    indices = list(range(len(all_data_set)))
    #print('old',indices)
    np.random.seed(1)
    np.random.shuffle(indices)
    #print('new',indices)
    split = math.ceil(len(all_data_set) * 1)  # use the entire dataset as the test set
    valid_idx = indices[:split]
    test_sampler = SubsetRandomSampler(valid_idx)
    #print('test')
    #print(test_sampler)
    #train_set, test_set = torch.utils.data.random_split(train_set, [400, 115])
    print('test_set: ', len(test_sampler))
    DATA_LEN = len(test_sampler)

    test_data_loader = DataLoader(dataset=all_data_set,
                                  batch_size=BATCH_SIZE,
                                  shuffle=False,
                                  num_workers=0,
                                  sampler=test_sampler)

    return test_data_loader
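Multiplying by 1 makes the split cover the entire dataset. For an actual 10% hold-out (as Example #13 below does), a minimal sketch of the variant inside myDataloader:

    split = math.ceil(len(all_data_set) * 0.1)  # 10% of the data
    valid_idx, train_idx = indices[:split], indices[split:]
    test_sampler = SubsetRandomSampler(valid_idx)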
Example #2
def train():
	
	data_transform=transforms.Compose([
		transforms.Resize((224,224)),
		transforms.ToTensor(),
		transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
	])
	train_set = IMAGE_Dataset(label_location, TRAINSET_ROOT, 8144, data_transform)
	data_loader = DataLoader(dataset=train_set, batch_size=32, shuffle=True, num_workers=1)
	
	resnet101=models.resnet101(pretrained=True)
	fc_features=resnet101.fc.in_features
	resnet101.fc=nn.Linear(fc_features,196)
	resnet101=resnet101.cuda(CUDA_DEVICES)
	resnet101.train()

	best_model_params=copy.deepcopy(resnet101.state_dict())
	best_acc=0.0
	num_epochs=30
	criterion = nn.CrossEntropyLoss()
	optimizer=torch.optim.SGD(params=resnet101.parameters(), lr=0.01, momentum=0.9)

		
	for epoch in range(num_epochs):
		print(f'Epoch: {epoch+1}/{num_epochs}')
		print('-' * len(f'Epoch: {epoch+1}/{num_epochs}'))
		
		training_loss=0.0
		training_corrects=0.0
		
		for i, (inputs, labels) in enumerate(data_loader):
			inputs=Variable(inputs.cuda(CUDA_DEVICES))
			labels=Variable(labels.cuda(CUDA_DEVICES))
			
			optimizer.zero_grad()
			outputs=resnet101(inputs)
			_, preds=torch.max(outputs.data, 1)
			loss = criterion(outputs, labels)
			
			loss.backward()
			optimizer.step()
			training_loss+=loss.item()*inputs.size(0)
			training_corrects+=torch.sum(preds==labels.data)
			
		training_loss=training_loss/len(train_set)
		training_acc=training_corrects.double() / len(train_set)

		result_train_loss.append(str(training_loss))     #train_data
		result_train_acc.append(str(training_acc))
		
		if (epoch%2)==0:
			torch.save(resnet101, f'model-{epoch}.pth')
		
		print(f'Training loss: {training_loss:.4f}\taccuracy: {training_acc:.4f}\n')
		if training_acc>best_acc:
			best_acc = training_acc
			best_model_params=copy.deepcopy(resnet101.state_dict())
	resnet101.load_state_dict(best_model_params)
	torch.save(resnet101, 'best_model.pth')
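torch.save(resnet101, ...) pickles the entire module, which ties the checkpoint to the exact class definition and file layout it was created with. A common, more portable alternative (a sketch, not what this snippet does) is to save only the weights:

torch.save(resnet101.state_dict(), 'best_model_weights.pth')

# to restore: rebuild the architecture, then load the weights
model = models.resnet101(pretrained=False)
model.fc = nn.Linear(model.fc.in_features, 196)
model.load_state_dict(torch.load('best_model_weights.pth'))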
Example #3
def test():
    data_transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])
    test_set = IMAGE_Dataset(label_location, TESTSET_ROOT, 8041,
                             data_transform)
    data_loader = DataLoader(dataset=test_set,
                             batch_size=32,
                             shuffle=True,
                             num_workers=1)
    classes = list(range(196))

    model = torch.load(PATH_TO_MODEL)
    model = model.cuda(CUDA_DEVICES)
    model.eval()

    total_correct = 0
    total = 0
    class_correct = [0.0] * len(classes)
    class_total = [0.0] * len(classes)

    criterion = nn.CrossEntropyLoss()
    test_loss = 0

    with torch.no_grad():
        for inputs, labels in data_loader:
            inputs = Variable(inputs.cuda(CUDA_DEVICES))
            labels = Variable(labels.cuda(CUDA_DEVICES))
            outputs = model(inputs)
            _, predicted = torch.max(outputs.data, 1)

            loss = criterion(outputs, labels)
            test_loss += loss.item() * labels.size(0)

            total += labels.size(0)
            #total_correct+=(predicted==labels).sum().item()
            total_correct += torch.sum(predicted == labels.data)
            c = (predicted == labels).squeeze()

            for i in range(labels.size(0)):
                label = labels[i]
                class_correct[label] += c[i].item()
                class_total[label] += 1
    total_acc = total_correct.double() / total
    test_loss = test_loss / total
    print('total: %d' % total)
    print('Accuracy on all the test images : %d %%' %
          (100 * total_correct / total))
    for i, c in enumerate(classes):
        print('Accuracy of class %3d : %2d %%' %
              (c, 100 * class_correct[i] / class_total[i]))
        classes1.append(100 * class_correct[i] / class_total[i])
    print(f'test_loss: {test_loss:.4f}\n')
    print(f'test_acc: {total_acc:.4f}\n')
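Note that class_total[i] is zero for any class that never appears in the loader, which would make the per-class report divide by zero. A hedged guard for the reporting loop above:

for i, c in enumerate(classes):
    if class_total[i] == 0:
        print('Accuracy of class %3d : n/a (no samples)' % c)
        continue
    print('Accuracy of class %3d : %2d %%' %
          (c, 100 * class_correct[i] / class_total[i]))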
Example #4
def train():
	data_transform = transforms.Compose([
		transforms.Resize((224,224)),
		transforms.ToTensor(),
		transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
	])
	#print(DATASET_ROOT)
	train_set = IMAGE_Dataset(Path(DATASET_ROOT), data_transform)
	data_loader = DataLoader(dataset=train_set, batch_size=1, shuffle=True, num_workers=0)
	#print(train_set.num_classes)
	model = VGG16(num_classes=train_set.num_classes)
	model = model.cuda(CUDA_DEVICES)
	model.train()

	best_model_params = copy.deepcopy(model.state_dict())
	best_acc = 0.0
	num_epochs = 50
	criterion = nn.CrossEntropyLoss()
	optimizer = torch.optim.SGD(params=model.parameters(), lr=0.01, momentum=0.9)

	for epoch in range(num_epochs):
		print(f'Epoch: {epoch + 1}/{num_epochs}')
		print('-' * len(f'Epoch: {epoch + 1}/{num_epochs}'))

		training_loss = 0.0
		training_corrects = 0

		for i, (inputs, labels) in enumerate(data_loader):
			inputs = Variable(inputs.cuda(CUDA_DEVICES))
			labels = Variable(labels.cuda(CUDA_DEVICES))			

			optimizer.zero_grad()

			outputs = model(inputs)
			_, preds = torch.max(outputs.data, 1)
			loss = criterion(outputs, labels)

			loss.backward()
			optimizer.step()

			training_loss += loss.item() * inputs.size(0)
			#revise loss.data[0]-->loss.item()
			training_corrects += torch.sum(preds == labels.data)
			#print(f'training_corrects: {training_corrects}')

		training_loss = training_loss / len(train_set)
		training_acc = training_corrects.double() / len(train_set)
		#print(training_acc.type())
		#print(f'training_corrects: {training_corrects}\tlen(train_set):{len(train_set)}\n')
		print(f'Training loss: {training_loss:.4f}\taccuracy: {training_acc:.4f}\n')

		if training_acc > best_acc:
			best_acc = training_acc
			best_model_params = copy.deepcopy(model.state_dict())

	model.load_state_dict(best_model_params)
	torch.save(model, f'model-{best_acc:.02f}-best_train_acc.pth')
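Variable has been a no-op wrapper since PyTorch 0.4: tensors carry autograd state themselves. On current PyTorch the device-transfer lines can be written without it (a sketch):

device = torch.device(f'cuda:{CUDA_DEVICES}')
inputs = inputs.to(device)
labels = labels.to(device)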
Example #5
def test():
    data_transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])
    test_set = IMAGE_Dataset(Path(DATASET_ROOT1), data_transform)
    data_loader = DataLoader(dataset=test_set,
                             batch_size=32,
                             shuffle=True,
                             num_workers=1)
    classes = [_dir.name for _dir in Path(DATASET_ROOT1).glob('*')]

    model = torch.load(PATH_TO_WEIGHTS)
    model = model.cuda(CUDA_DEVICES)
    model.eval()

    total_correct = 0
    total = 0
    class_correct = [0.0] * len(classes)
    class_total = [0.0] * len(classes)
    criterion = nn.CrossEntropyLoss()
    #optimizer=torch.optim.SGD(params=model.parameters(), lr=0.01, momentum=0.9);
    test_loss = 0

    with torch.no_grad():
        for inputs, labels in data_loader:
            inputs = Variable(inputs.cuda(CUDA_DEVICES))
            labels = Variable(labels.cuda(CUDA_DEVICES))
            outputs = model(inputs)
            _, predicted = torch.max(outputs.data, 1)

            loss = criterion(outputs, labels)
            #loss.backward()
            #optimizer.step()
            test_loss += loss.item() * inputs.size(0)

            # total
            total += labels.size(0)
            total_correct += (predicted == labels).sum().item()
            c = (predicted == labels).squeeze()
            # batch size
            for i in range(labels.size(0)):
                label = labels[i]
                class_correct[label] += c[i].item()
                class_total[label] += 1

    print('Accuracy on all the test images: %d %%' %
          (100 * total_correct / total))

    for i, c in enumerate(classes):
        print('Accuracy of %5s : %2d %%' %
              (c, 100 * class_correct[i] / class_total[i]))
    print('test_loss: ' + str(test_loss / total) + '\n')
    print('test_acc: ' + str(total_correct / total) + '\n')
Example #6
def train():
    data_transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])
    train_set = IMAGE_Dataset(Path(DATASET_ROOT), data_transform)
    data_loader = DataLoader(dataset=train_set,
                             batch_size=32,
                             shuffle=True,
                             num_workers=1)
    log_lrs, losses = find_lr(data_loader, train_set)
    print("log_lrs = ", log_lrs)
    print("losses = ", losses)
Example #7
def test(model=None):
    data_transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[
                             0.229, 0.224, 0.225])
    ])
    test_set = IMAGE_Dataset(Path(DATASET_ROOT), data_transform)
    data_loader = DataLoader(
        dataset=test_set, batch_size=32, shuffle=True, num_workers=2)
    classes = [_dir.name for _dir in Path(DATASET_ROOT).glob('*')]

    if model is None:
        model = torch.load(PATH_TO_WEIGHTS)
        model = model.cuda(CUDA_DEVICES)
    model.eval()

    total_correct = 0
    total = 0
    class_correct = [0.0] * len(classes)
    class_total = [0.0] * len(classes)
    with torch.no_grad():
        for inputs, labels in data_loader:
            inputs = Variable(inputs.cuda(CUDA_DEVICES))
            labels = Variable(labels.cuda(CUDA_DEVICES))
            outputs = model(inputs)
            _, predicted = torch.max(outputs.data, 1)
            # total
            total += labels.size(0)
            total_correct += (predicted == labels).sum().item()
            c = (predicted == labels).squeeze()
            # batch size
            for i in range(labels.size(0)):
                label = labels[i]
                class_correct[label] += c[i].item()
                class_total[label] += 1

    print("\n\n=============== Test =================\n\n")
    print('Accuracy on all the test images: %d %%'
          % (100 * total_correct / total))

    for i, c in enumerate(classes):
        print('Accuracy of %5s : %2d %%' % (
            c, 100 * class_correct[i] / class_total[i]))

    print("\n\n==========================================\n\n")

    return total_correct / total
Example #8
def train():
    data_transform = transforms.Compose([
        transforms.Resize((224, 224)),
        #transforms.RandomHorizontalFlip(p=0.2),
        #transforms.ColorJitter(contrast=1),
        transforms.ToTensor(),
        #transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ])
    print("dataset root", DATASET_ROOT)
    train_set = IMAGE_Dataset(Path(DATASET_ROOT), data_transform)
    print("train_set.numclasses: {}".format(train_set.num_classes))
    print("len of train_set", len(train_set))
    # If out of memory, reduce the batch size
    data_loader = DataLoader(dataset=train_set, batch_size=batch_size, shuffle=True, num_workers=1)
    
    #print(train_set.num_classes)
    #model = VGG16(num_classes=train_set.num_classes)
    #model = resnet50(pretrained=False, num_classes=train_set.num_classes)
    model = resnet50(pretrained=True)

    # transfer learning
    num_ftrs = model.fc.in_features
    model.fc = nn.Linear(num_ftrs, train_set.num_classes)

    model = model.cuda(CUDA_DEVICES)
    model.train()

    best_model_params = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    
    # Training epochs
    criterion = nn.CrossEntropyLoss()
    # Optimizer setting
    optimizer = torch.optim.SGD(params=model.parameters(), lr=init_lr, momentum=0.9)
    #optimizer = torch.optim.Adam(params=model.parameters(), lr=init_lr)


    # Log 
    with open('TrainingAccuracy.txt','w') as fAcc:
        print('Accuracy\n', file = fAcc)
    with open('TrainingLoss.txt','w') as fLoss:
        print('Loss\n', file = fLoss)

    #record loss & accuracy
    loss_record = list()
    acc_record = list()

    for epoch in range(num_epochs):
        localtime = time.asctime(time.localtime(time.time()))
        print('Epoch: {}/{} --- < Starting Time : {} >'.format(epoch + 1,num_epochs,localtime))
        print('-' * len('Epoch: {}/{} --- < Starting Time : {} >'.format(epoch + 1,num_epochs,localtime)))

        training_loss = 0.0
        training_corrects = 0
        adjust_lr(optimizer, epoch)

        for i, (inputs, labels) in enumerate(data_loader):
            inputs = Variable(inputs.cuda(CUDA_DEVICES))
            labels = Variable(labels.cuda(CUDA_DEVICES))

            optimizer.zero_grad()

            outputs = model(inputs)
            #print("outputs: {}, label: {}".format(outputs.size(), labels))


            _, preds = torch.max(outputs.data, 1)
            loss = criterion(outputs, labels)

            loss.backward()
            optimizer.step()

            training_loss += float(loss.item() * inputs.size(0))
            training_corrects += torch.sum(preds == labels.data)
            print("preds : ", preds)
            print("labels : ", labels)
        
        training_loss = training_loss / len(train_set)
        training_acc = training_corrects.double() / len(train_set)
        print('Training loss: {:.4f}\taccuracy: {:.4f}\n'.format(training_loss,training_acc))
        loss_record.append(training_loss)
        acc_record.append(training_acc)

        # save a model snapshot every 10 epochs
        if epoch % 10 == 0:
            model_name = './weights/epoch_models/model-{:d}epoch.pth'.format(epoch)
            torch.save(model, model_name)

        # Check best accuracy model ( but not the best on test )
        if training_acc > best_acc:
            best_acc = training_acc
            best_model_params = copy.deepcopy(model.state_dict())


        with open('TrainingAccuracy.txt','a') as fAcc:
            print('{:.4f} '.format(training_acc), file = fAcc)
        with open('TrainingLoss.txt','a') as fLoss:
            print('{:.4f} '.format(training_loss), file = fLoss)

        # Checkpoint
        if (epoch + 1) % checkpoint_interval == 0:
            torch.save(model, 'Checkpoint/model-epoch-{:d}-train.pth'.format(epoch + 1))

    #draw the loss & acc curve
    draw_plot(loss_record, acc_record)

    # Save best training/valid accuracy model ( not the best on test )
    model.load_state_dict(best_model_params)
    best_model_name = './weights/model-{:.2f}-best_train_acc.pth'.format(best_acc)
    torch.save(model, best_model_name)
    print("Best model name : " + best_model_name)
Example #9
def train():
	data_transform = transforms.Compose([
		transforms.Resize((224,224)),
		transforms.ToTensor(),
		transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
	])
	#print(DATASET_ROOT)
	train_set = IMAGE_Dataset(Path(DATASET_ROOT), data_transform)
	data_loader = DataLoader(dataset=train_set, batch_size=4, shuffle=True, num_workers=1)
	#print(train_set.num_classes)
	model = VGG16(num_classes=train_set.num_classes)
	model = model.cuda(CUDA_DEVICES)
	model.train()

	best_model_params = copy.deepcopy(model.state_dict())
	best_acc = 0.0
	num_epochs = 10
	criterion = nn.CrossEntropyLoss()
	optimizer = torch.optim.SGD(params=model.parameters(), lr=init_lr, momentum=0.85)
    

	with open('TrainingAccuracy.txt','w') as fAcc:
		print('Accuracy\n', file = fAcc)
	with open('TrainingLoss.txt','w') as fLoss:
		print('Loss\n', file = fLoss)

	for epoch in range(num_epochs):
		localtime = time.asctime(time.localtime(time.time()))
		
		print('Epoch: {}/{} --- < Starting Time : {} >'.format(epoch + 1,num_epochs,localtime))
		print('-' * len('Epoch: {}/{} --- < Starting Time : {} >'.format(epoch + 1,num_epochs,localtime)))

		training_loss = 0.0
		training_corrects = 0
		adjust_lr(optimizer, epoch)

		for i, (inputs, labels) in enumerate(data_loader):
			inputs = Variable(inputs.cuda(CUDA_DEVICES))
			labels = Variable(labels.cuda(CUDA_DEVICES))			

			optimizer.zero_grad()

			outputs = model(inputs)
			_, preds = torch.max(outputs.data, 1)
			loss = criterion(outputs, labels)

			loss.backward()
			optimizer.step()

			training_loss += float(loss.item() * inputs.size(0))
			#revise loss.data[0]-->loss.item()
			training_corrects += torch.sum(preds == labels.data)
			#print(f'training_corrects: {training_corrects}')

		training_loss = training_loss / len(train_set)
		training_acc = training_corrects.double() / len(train_set)
		#print(training_acc.type())
		#print(f'training_corrects: {training_corrects}\tlen(train_set):{len(train_set)}\n')
		print('Training loss: {:.4f}\taccuracy: {:.4f}\n'.format(training_loss,training_acc))

		
		if training_acc > best_acc:
			best_acc = training_acc
			best_model_params = copy.deepcopy(model.state_dict())
		
		with open('TrainingAccuracy.txt','a') as fAcc:
			print('{:.4f} '.format(training_acc), file = fAcc)
		with open('TrainingLoss.txt','a') as fLoss:
			print('{:.4f} '.format(training_loss), file = fLoss)
        
		if (epoch + 1) % 10 == 0:
			torch.save(model, 'model-epoch-{:02d}-train.pth'.format(epoch + 1))


	model.load_state_dict(best_model_params)
	torch.save(model, 'model-{:.2f}-best_train_acc.pth'.format(best_acc))
Example #10
def test():
    data_transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
    ])
    test_set = IMAGE_Dataset(Path(DATASET_ROOT), data_transform)
    data_loader = DataLoader(dataset=test_set,
                             batch_size=32,
                             shuffle=True,
                             num_workers=1)
    classes = [_dir.name for _dir in Path(DATASET_ROOT).glob('*')]
    print("classes : ", classes)
    classes.sort()
    classes.sort(key=len)

    # Load model: torch.load restores the complete model object, so the
    # architecture does not need to be rebuilt first
    model = torch.load(PATH_TO_WEIGHTS)
    model = model.cuda(CUDA_DEVICES)
    model.eval()

    total_correct = 0
    total = 0
    class_correct = [0.0] * len(classes)
    class_total = [0.0] * len(classes)

    with torch.no_grad():
        for inputs, labels in data_loader:
            inputs = Variable(inputs.cuda(CUDA_DEVICES))
            labels = Variable(labels.cuda(CUDA_DEVICES))
            print("labels_1 = ", labels)
            # print(len(inputs))
            outputs = model(inputs)
            _, predicted = torch.max(outputs.data, 1)

            # total
            total += labels.size(0)
            total_correct += (predicted == labels).sum().item()
            print("predicted = ", predicted)
            # print("labels_2 = ", labels)
            print("-------------------------------------------------")
            c = (predicted == labels).squeeze()

            # batch size
            for i in range(labels.size(0)):
                label = labels[i]
                class_correct[label] += c[i].item()
                class_total[label] += 1

    print("class_correct : ", class_correct)
    print("class_total : ", class_total)
    for i, c in enumerate(classes):
        print('Accuracy of %5s : %8.4f %%' %
              (c, 100 * class_correct[i] / class_total[i]))

    # Accuracy
    print('\nAccuracy on all the test images: %.4f %%' %
          (100 * total_correct / total))
Example #11
def train():
	data_transform = transforms.Compose([

		transforms.Resize((224,224)),
		transforms.RandomHorizontalFlip(),
		transforms.RandomVerticalFlip(),
		transforms.ColorJitter(brightness=0.5, contrast=0.5, saturation=0.3), 
		transforms.RandomRotation(30),
		transforms.RandomAffine(degrees=0, translate=(0.2, 0.2)),
		transforms.ToTensor(),
		transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

	])
	train_set = IMAGE_Dataset(Path(DATASET_ROOT_train), data_transform)
	data_loader = DataLoader(dataset=train_set, batch_size=16, shuffle=True, num_workers=1)
	model = nn.Linear(224 * 224, 3).cuda(CUDA_DEVICES)
	model.train()
	
	best_model_params = copy.deepcopy(model.state_dict())
	best_acc = 0.0
	num_epochs = 200
	criterion = hinge_loss
	
	optimizer = torch.optim.SGD(params=model.parameters(), lr=0.001, momentum=0.9)
	lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)

	for epoch in range(num_epochs):

		print(f'Epoch: {epoch + 1}/{num_epochs}')
		print('-' * len(f'Epoch: {epoch + 1}/{num_epochs}'))

		training_loss = 0.0
		training_corrects = 0


		for i, (inputs, labels) in enumerate(data_loader):
			inputs=inputs[:,0,:,:]
		#	print("shape:  ",inputs.shape)
			inputs = inputs.reshape(-1, 224 * 224)
			inputs = Variable(inputs.cuda(CUDA_DEVICES))
			labels = Variable(labels.cuda(CUDA_DEVICES))
		#	print(inputs.size())
			optimizer.zero_grad()
			
			outputs = model(inputs)
		#	print(outputs.size())
			_, preds = torch.max(outputs.data, 1)
			loss = criterion(outputs, labels)
			loss.backward()
			optimizer.step()
			training_loss += loss.item() * inputs.size(0)
		#	print(preds.size(),labels.size())
		#	exit(0)
			training_corrects += torch.sum(preds == labels.data)
			#print(f'training_corrects: {training_corrects}')

		lr_scheduler.step()  # decay the learning rate once per epoch, not per batch
		training_loss = training_loss / len(train_set)
		training_acc = training_corrects.double() / len(train_set)
		print(f'Training loss: {training_loss:.4f}\taccuracy: {training_acc:.4f}\n')

		test_acc=test(model)

		if test_acc > best_acc:
			best_acc = test_acc
			best_model_params = copy.deepcopy(model.state_dict())
		if epoch == 100:
			model.load_state_dict(best_model_params)
			torch.save(model, f'model-{100 * best_acc:.02f}-best_train_acc.pth')  # best accuracy as a percentage

	model.load_state_dict(best_model_params)
	torch.save(model, f'model-{best_acc:.02f}-best_train_acc.pth')
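hinge_loss is defined elsewhere; the model above is a plain linear layer over raw pixels, i.e. a linear classifier trained with a margin loss. A minimal multi-class hinge sketch on top of PyTorch's built-in multi_margin_loss:

import torch.nn.functional as F

def hinge_loss(outputs, labels):
    # mean over the batch of max(0, 1 - score[y] + score[j]) for j != y
    return F.multi_margin_loss(outputs, labels)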
Example #12
def train():
    data_transform = transforms.Compose([
        transforms.Resize((224, 224)),
        # transforms.CenterCrop((224,224)),
        transforms.RandomHorizontalFlip(),
        transforms.RandomVerticalFlip(),
        transforms.ColorJitter(brightness=0.5, contrast=0.5, saturation=0.3),
        transforms.RandomRotation(30),
        transforms.RandomAffine(degrees=0, translate=(0.2, 0.2)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])
    train_set = IMAGE_Dataset(Path(DATASET_ROOT_train), data_transform)
    data_loader = DataLoader(dataset=train_set,
                             batch_size=16,
                             shuffle=True,
                             num_workers=1)
    #	model = SENet18()
    model = torch.hub.load('moskomule/senet.pytorch',
                           'se_resnet20',
                           num_classes=3)
    #	model = models.vgg19(pretrained=True)
    #	final_in_features = model.classifier[6].in_features
    #	model.classifier[6].out_features=train_set.num_classes
    model = model.cuda(CUDA_DEVICES)
    model.train()

    best_model_params = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    num_epochs = 200
    criterion = nn.CrossEntropyLoss()

    stepsize = 20
    base_lr = 0.001
    max_lr = 0.01
    base_mm = 0.8
    max_mm = 0.99

    for epoch in range(num_epochs):
        #newlr = get_triangular_lr(epoch,stepsize,base_lr,max_lr)
        #mm=get_dynamic_momentum(epoch,stepsize,base_mm,max_mm)
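        # NOTE: the optimizer is rebuilt every epoch, which only matters for the
        # commented-out cyclical lr/momentum schedule above; with a fixed lr it
        # could be created once, before the epoch loop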
        optimizer = torch.optim.SGD(params=model.parameters(),
                                    lr=0.001,
                                    momentum=0.9)
        print(f'Epoch: {epoch + 1}/{num_epochs}')
        print('-' * len(f'Epoch: {epoch + 1}/{num_epochs}'))

        training_loss = 0.0
        training_corrects = 0

        for i, (inputs, labels) in enumerate(data_loader):
            inputs = Variable(inputs.cuda(CUDA_DEVICES))
            labels = Variable(labels.cuda(CUDA_DEVICES))
            optimizer.zero_grad()
            outputs = model(inputs)
            _, preds = torch.max(outputs.data, 1)
            loss = criterion(outputs, labels)
            #		loss = Variable(loss, requires_grad = True)
            loss.backward()
            optimizer.step()

            training_loss += loss.item() * inputs.size(0)
            #	print(training_loss)
            #revise loss.data[0]-->loss.item()
            training_corrects += torch.sum(preds == labels.data)
            #print(f'training_corrects: {training_corrects}')

        training_loss = training_loss / len(train_set)
        training_acc = training_corrects.double() / len(train_set)
        print(
            f'Training loss: {training_loss:.4f}\taccuracy: {training_acc:.4f}\n'
        )

        test_acc = test(model)

        if test_acc > best_acc:
            best_acc = test_acc
            best_model_params = copy.deepcopy(model.state_dict())

    model.load_state_dict(best_model_params)
    torch.save(model, f'model-{best_acc:.02f}-best_train_acc.pth')
Example #13
def train(i, train_acc, train_loss):
    data_transform = transforms.Compose([
        #transforms.Resize((224,224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])
    #print(DATASET_ROOT)
    all_data_set = IMAGE_Dataset(Path(DATASET_ROOT1), data_transform)

    #print('set:',len(train_set))
    indices = list(range(len(all_data_set)))
    #print('old',indices)
    np.random.seed(1)
    np.random.shuffle(indices)
    #print('new',indices)
    split = math.ceil(len(all_data_set) *
                      0.1)  # extract 10% dataset as test-set
    train_idx, valid_idx = indices[split:], indices[:split]
    train_sampler = SubsetRandomSampler(train_idx)
    test_sampler = SubsetRandomSampler(valid_idx)
    #print('test')
    #print(test_sampler)
    #train_set, test_set = torch.utils.data.random_split(train_set, [400, 115])
    print('train_set:', len(train_sampler), 'test_set:', len(test_sampler))

    train_data_loader = DataLoader(dataset=all_data_set,
                                   batch_size=BATCH_SIZE,
                                   shuffle=False,
                                   num_workers=0,
                                   sampler=train_sampler)

    test_data_loader = DataLoader(dataset=all_data_set,
                                  batch_size=BATCH_SIZE,
                                  shuffle=False,
                                  num_workers=0,
                                  sampler=test_sampler)

    #print(train_set.num_classes)

    if i == 1:
        model = models.resnet101(pretrained=True)
        #fc_features=model.fc.in_features
        #model.fc=nn.Linear(fc_features,5)
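        # f(x): presumably the feature-map side length after the stride-32
        # backbone followed by a 7x7 window with stride 1 and no padding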
        f = lambda x: math.ceil(x / 32 - 7 + 1)
        my_output_module = nn.Sequential(
            nn.Linear(f(256) * f(256) * 2048, REG_OUTPUT), nn.Softmax(dim=1))

        model.fc = my_output_module
        # model.fc=nn.Linear(f(256)*f(256)*2048, REG_OUTPUT)
        model = model.cuda()
        model = nn.DataParallel(model, device_ids=DEVICE_IDS)
    if i != 1:
        model = torch.load(PATH_TO_WEIGHTS)
    '''if i==1:
        model=VGG16(num_classes=all_data_set.num_classes)
    elif i!=1:
        model=torch.load(PATH_TO_WEIGHTS)'''
    # model = model.cuda(CUDA_DEVICES)
    model.train()  #train

    best_model_params = copy.deepcopy(model.state_dict())  # copy the parameters
    best_acc = 0.0
    num_epochs = EPOCH_SIZE
    criterion = nn.MSELoss()
    criterion2 = nn.MSELoss()
    optimizer = torch.optim.SGD(params=model.parameters(),
                                lr=0.001,
                                momentum=0.9)

    train_loss = []
    train_loss2 = []
    best_loss = math.inf

    for epoch in range(num_epochs):
        print(f'Epoch: {epoch + 1}/{num_epochs}')
        print('-' * len(f'Epoch: {epoch + 1}/{num_epochs}'))

        training_loss = 0.0
        training_loss2 = 0.0
        # training_corrects = 0

        for i, (inputs, labels) in enumerate(train_data_loader):
            inputs = inputs.cuda(CUDA_DEVICES)
            labels = labels.cuda(CUDA_DEVICES)

            optimizer.zero_grad()

            outputs = model(inputs)
            # _ , preds = torch.max(outputs.data, 1)
            outputs = outputs.squeeze(1)
            predictions = outputs.data.cpu().numpy()

            predictions = np.array([[round(x, 2) for x in row]
                                    for row in predictions])
            loss = criterion(outputs, labels)
            loss2 = criterion2(outputs * 100, labels * 100)
            loss.backward()
            optimizer.step()
            # sm = F.softmax(outputs,dim=1)
            # print("======== Softmax ========")
            # print(sm.data)
            # print("=========================")
            #print("preds:"+str(preds))

            if i * BATCH_SIZE % 1000 == 0:

                print(
                    "\n\n||||||||||||||||||||| BATCH-%d |||||||||||||||||||||\n"
                    % i)
                print("\n=================== Labels =====================\n")
                print(labels)
                print("\n================= Predictions ==================\n")
                print(predictions)
                print("\n================= Batch Loss ===================\n")
                print(f"Training: {loss.data:.2f}")
                print(f"MSELoss : {loss2.data:.2f}\n")
                print("\n================= Epoch Loss ===================\n")
                print('Training:', train_loss)
                print('MSEloss :', train_loss2)

            progress = 100 * i * BATCH_SIZE / len(train_sampler)  # percent of the epoch
            print(
                f"[Training Progress]: {progress:.4f}% [Batch Loss]: {loss2.data:.2f}",
                end='\r')

            training_loss += loss.item() * inputs.size(0)
            training_loss2 += loss2.item() * inputs.size(0)

        # Calculate the loss and MSELoss for the current epoch
        training_loss = training_loss / len(train_sampler)
        training_loss2 = training_loss2 / len(train_sampler)

        # train_acc.append(training_acc)        #save each 10 epochs accuracy
        train_loss.append(training_loss)
        train_loss2.append(training_loss2)

        print(
            "########################\nFinish Epoch\n#########################\n"
        )

        if training_loss < best_loss:

            best_loss = training_loss
            best_model_params = copy.deepcopy(model.state_dict())

        print("Best Loss: %.2f" % best_loss)

    model.load_state_dict(best_model_params)  # reload the best parameters
    torch.save(model, PATH_TO_WEIGHTS)  # save the whole model

    return ([], train_loss, train_loss2, test_data_loader)
Example #14
def test(args):
    data_transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])
    test_set = IMAGE_Dataset(Path(args.img_dir), data_transform)
    data_loader = DataLoader(dataset=test_set,
                             batch_size=32,
                             shuffle=False,
                             num_workers=1)
    classes = [_dir.name for _dir in Path(args.img_dir).glob('*')]

    # load model
    model = torch.load(args.weights)
    model = model.cuda(CUDA_DEVICES)
    model.eval()

    total_correct = 0
    total = 0
    f1_score = 0.0
    class_correct = [0.0] * len(classes)
    class_total = [0.0] * len(classes)

    label_num = torch.zeros((1, args.class_num))
    predict_num = torch.zeros((1, args.class_num))
    acc_num = torch.zeros((1, args.class_num))

    # csv
    import csv
    with open('agriculture.csv', 'w', newline='') as csvFile:
        writer = csv.writer(csvFile)
        writer.writerow(['ID', 'LABEL'])

    with torch.no_grad():
        for inputs, labels, paths in data_loader:
            inputs = Variable(inputs.cuda(CUDA_DEVICES))
            labels = Variable(labels.cuda(CUDA_DEVICES))
            outputs = model(inputs)
            _, predicted = torch.max(outputs.data, 1)

            # print(paths)
            # print(predicted)
            # print(labels)

            # total
            total += labels.size(0)
            total_correct += (predicted == labels).sum().item()
            c = (predicted == labels).squeeze()

            # print(predict_class_id)
            # batch size
            for i in range(labels.size(0)):
                label = labels[i]
                class_correct[label] += c[i].item()
                class_total[label] += 1

                p_num = int(predicted[i])
                l_num = int(labels[i])

                # print gt
                # print('{}\t{}\t{}'.format(paths[i], p_num, l_num))
                with open('agriculture.csv', 'a', newline='') as csvFile:
                    writer = csv.writer(csvFile)
                    writer.writerow([paths[i], p_num])

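            # one-hot masks for predictions and targets: their elementwise
            # product counts per-class true positives, from which recall,
            # precision and F1 are computed after the loop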
            pre_mask = torch.zeros(outputs.size()).scatter_(
                1,
                predicted.cpu().view(-1, 1), 1.)
            predict_num += pre_mask.sum(0)
            tar_mask = torch.zeros(outputs.size()).scatter_(
                1,
                labels.data.cpu().view(-1, 1), 1.)
            label_num += tar_mask.sum(0)
            acc_mask = pre_mask * tar_mask
            acc_num += acc_mask.sum(0)
        # print('------------------')
        # print(predict_num)
        # print(label_num)
        # print(acc_num)

        recall = acc_num / label_num
        precision = acc_num / predict_num
        F1 = 2 * recall * precision / (recall + precision)
        accuracy = acc_num.sum(1) / label_num.sum(1)

    F1_num = F1.numpy()
    for i, c in enumerate(classes):
        print('Accuracy of %5s : %8.4f %%' %
              (c, 100 * class_correct[i] / class_total[i]),
              end='')

        if np.isnan(F1_num[0][i]):
            F1_num[0][i] = 0
        print(' , f1-score of %5s : %8.4f %%' % (c, 100 * F1_num[0][i]))

    # Accuracy
    print('\nAccuracy on all the test images: %.4f %%' %
          (100 * total_correct / total))

    # f1-score
    f1_score = 100 * (F1.sum()) / args.class_num
    print('Total f1-score : %.4f %%' % f1_score)
Example #15
def train(i, train_acc, train_loss):
    data_transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])
    #print(DATASET_ROOT)
    train_set = IMAGE_Dataset(Path(DATASET_ROOT1), data_transform)
    data_loader = DataLoader(dataset=train_set,
                             batch_size=32,
                             shuffle=True,
                             num_workers=1)
    #print(train_set.num_classes)

    vgg16 = models.vgg16(pretrained=True)  # load the pretrained VGG16
    pretrained_dict = vgg16.state_dict()  # pretrained parameters

    if i == 1:
        model = VGG16(num_classes=train_set.num_classes)
        model_dict = model.state_dict()  # parameters of the custom VGG16
        # drop the keys in pretrained_dict that do not exist in model_dict
        pretrained_dict = {
            k: v
            for k, v in pretrained_dict.items() if k in model_dict
        }
        model_dict.update(pretrained_dict)  # overwrite them with the pretrained weights
        model.load_state_dict(model_dict)  # load the state_dict we actually need
    else:
        model = torch.load(PATH_TO_WEIGHTS)
    '''if i==1:
	    model = models.resnet101(pretrained=True)
	    fc_features=model.fc.in_features
	    model.fc=nn.Linear(fc_features,196)
	if i!=1:
	    model=torch.load(PATH_TO_WEIGHTS)'''
    model = model.cuda(CUDA_DEVICES)
    model.train()  #train

    best_model_params = copy.deepcopy(model.state_dict())  # copy the parameters
    best_acc = 0.0
    num_epochs = 10
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(params=model.parameters(),
                                lr=0.001,
                                momentum=0.9)

    for epoch in range(num_epochs):
        print(f'Epoch: {epoch + 1}/{num_epochs}')
        print('-' * len(f'Epoch: {epoch + 1}/{num_epochs}'))

        training_loss = 0.0
        training_corrects = 0

        for i, (inputs, labels) in enumerate(data_loader):
            inputs = Variable(inputs.cuda(CUDA_DEVICES))
            labels = Variable(labels.cuda(CUDA_DEVICES))

            optimizer.zero_grad()

            outputs = model(inputs)
            _, preds = torch.max(outputs.data, 1)
            loss = criterion(outputs, labels)

            loss.backward()
            optimizer.step()

            training_loss += loss.item() * inputs.size(0)
            #revise loss.data[0]-->loss.item()
            training_corrects += torch.sum(preds == labels.data)
            #print(f'training_corrects: {training_corrects}')
            #if(not(i%10)):
            #	print(f'iteration done :{i}')

        training_loss = training_loss / len(train_set)  # train loss
        training_acc = training_corrects.double() / len(train_set)  # train acc
        #print(training_acc.type())
        #print(f'training_corrects: {training_corrects}\tlen(train_set):{len(train_set)}\n')
        print(
            f'Training loss: {training_loss:.4f}\taccuracy: {training_acc:.4f}\n'
        )
        train_acc.append(training_acc)  # record this epoch's accuracy
        train_loss.append(training_loss)

        if training_acc > best_acc:
            best_acc = training_acc
            best_model_params = copy.deepcopy(model.state_dict())

    model.load_state_dict(best_model_params)  # reload the best parameters
    torch.save(model, 'model-best_train_acc.pth')  # save the whole model
    return (train_acc, train_loss)
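The key-filtering above is the standard recipe for loading a partially matching checkpoint; on recent PyTorch versions a similar effect can be had with strict=False, which skips missing and unexpected keys (a sketch):

model = VGG16(num_classes=train_set.num_classes)
model.load_state_dict(models.vgg16(pretrained=True).state_dict(), strict=False)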
Example #16
def test():
    data_transform = transforms.Compose([
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])
    test_set = IMAGE_Dataset(Path(DATASET_ROOT), data_transform)
    data_loader = DataLoader(dataset=test_set,
                             batch_size=32,
                             shuffle=True,
                             num_workers=1)
    classes = [_dir.name for _dir in Path(DATASET_ROOT).glob('*')]
    classes.sort()
    classes.sort(key=len)

    model = torch.load(PATH_TO_WEIGHTS)
    model = model.cuda(CUDA_DEVICES)
    model.eval()

    total_correct = 0
    total = 0
    f1_score = 0.0
    class_correct = [0.0] * len(classes)
    class_total = [0.0] * len(classes)

    # log TP TN FP FN
    class_F1_status = [0.0] * len(classes)
    class_F1_score = [0.0] * len(classes)
    class_TP = [0.0] * len(classes)
    class_TN = [0.0] * len(classes)
    class_FP = [0.0] * len(classes)
    class_FN = [0.0] * len(classes)

    classes_num = 38
    label_num = torch.zeros((1, classes_num))
    predict_num = torch.zeros((1, classes_num))
    acc_num = torch.zeros((1, classes_num))

    with torch.no_grad():
        for inputs, labels in data_loader:
            inputs = Variable(inputs.cuda(CUDA_DEVICES))
            labels = Variable(labels.cuda(CUDA_DEVICES))
            outputs = model(inputs)
            _, predicted = torch.max(outputs.data, 1)

            # total
            total += labels.size(0)
            total_correct += (predicted == labels).sum().item()
            c = (predicted == labels).squeeze()

            # batch size
            for i in range(labels.size(0)):
                label = labels[i]
                class_correct[label] += c[i].item()
                class_total[label] += 1

            pre_mask = torch.zeros(outputs.size()).scatter_(
                1,
                predicted.cpu().view(-1, 1), 1.)
            predict_num += pre_mask.sum(0)
            tar_mask = torch.zeros(outputs.size()).scatter_(
                1,
                labels.data.cpu().view(-1, 1), 1.)
            label_num += tar_mask.sum(0)
            acc_mask = pre_mask * tar_mask
            acc_num += acc_mask.sum(0)


        # print('------------------')
        # print(predict_num)
        # print(label_num)
        # print(acc_num)

        recall = acc_num / label_num
        precision = acc_num / predict_num
        F1 = 2 * recall * precision / (recall + precision)
        accuracy = acc_num.sum(1) / label_num.sum(1)

        print(F1)
        print(accuracy)

    F1_num = F1.numpy()
    for i, c in enumerate(classes):
        print('Accuracy of %5s : %8.4f %%' %
              (c, 100 * class_correct[i] / class_total[i]),
              end='')

        if np.isnan(F1_num[0][i]):
            F1_num[0][i] = 0
        print(' , f1-score of %5s : %8.4f %%' % (c, 100 * F1_num[0][i]))

    # Accuracy
    print('\nAccuracy on all the test images: %.4f %%' %
          (100 * total_correct / total))

    # f1-score
    f1_score = 100 * (F1.sum()) / classes_num
    print('Total f1-score : %.4f %%' % f1_score)
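For reference, the mask-based F1 above (which can produce NaNs for classes with no predictions) could be cross-checked against scikit-learn, assuming the loop also accumulated flat prediction/label lists (y_pred and y_true are hypothetical here):

from sklearn.metrics import f1_score as sk_f1

macro_f1 = sk_f1(y_true, y_pred, average='macro', zero_division=0)
print('sklearn macro f1: %.4f %%' % (100 * macro_f1))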