Exemplo n.º 1
0
def fit(args, model, device, optimizer, loss_fn, dataset, labels_list, task_id):
    """Train `model` on one task with validation-loss early stopping.

    Args:
        args: namespace with at least `epochs` and `early_stop_after`.
        model: model exposing `set_task_id` and `state_dict`.
        device, optimizer, loss_fn: passed through to `trainer`.
        dataset: task dataset; split via `mnist.getTrain` / `mnist.getVal`.
        labels_list, task_id: select the label set for the current task.

    Returns:
        The model state dict captured at the lowest validation loss.
    """
    # Dataloaders for this task's train/validation splits.
    train_loader = trainer.get_loader(mnist.getTrain(dataset), args, device, 'train')
    val_loader = trainer.get_loader(mnist.getVal(dataset), args, device, 'val')
    # Track the LOWEST validation loss seen so far.
    # BUG FIX: the original initialized best_val_loss to 0 and kept the
    # *highest* loss (`val_loss > best_val_loss`), which inverted both the
    # checkpoint selection and the early-stopping counter for a loss metric.
    best_val_loss = float('inf')
    # Fallback so the return value is defined even if no epoch runs
    # (args.epochs == 0) — the original could raise NameError here.
    best_state = model.state_dict()
    # Early stopping: epochs elapsed since the last improvement.
    early_stop = 0

    # Training loop
    for epoch in range(1, args.epochs + 1):
        # Restrict the model to the labels of the current task.
        model.set_task_id(labels_list[task_id])
        trainer.train(args, model, device, train_loader, optimizer, epoch, loss_fn)
        val_loss, _ = trainer.test(args, model, device, val_loader, loss_fn, val=True)
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            best_state = model.state_dict()
            early_stop = 0
        else:
            early_stop += 1
            if early_stop >= args.early_stop_after:
                break

    return best_state
Exemplo n.º 2
0
def get_dataset(args, task_id, split):
    """Return the requested split of the task dataset named by `args.dataset`.

    Args:
        args: namespace whose `dataset` field selects the dataset class.
        task_id: task index forwarded to the dataset constructor.
        split: one of 'train', 'val', 'trainval'; anything else yields
            the held-out test split (train=False).

    Returns:
        The dataset object for the requested split, with standard MNIST
        normalization applied.

    Raises:
        ValueError: if `args.dataset` names an unknown dataset.
    """
    trans = transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,))
                       ])
    if args.dataset == 'splitMNIST10':
        Dataset = mnist.SplitMNIST10
    else:
        # BUG FIX: previously an unrecognized dataset name left `Dataset`
        # unbound, crashing below with a confusing NameError.
        raise ValueError('Unknown dataset: %r' % (args.dataset,))

    if split == 'train':
        trainval = Dataset('data', task_id, train=True, download=True, transform=trans)
        dataset = mnist.getTrain(trainval)
    elif split == 'val':
        trainval = Dataset('data', task_id, train=True, download=True, transform=trans)
        dataset = mnist.getVal(trainval)
    elif split == 'trainval':
        dataset = Dataset('data', task_id, train=True, download=True, transform=trans)
    else:
        # Held-out test split.
        dataset = Dataset('data', task_id, train=False, download=True, transform=trans)
    return dataset
Exemplo n.º 3
0
def fit(args, model, device, optimizer, loss_fn, coresets, dataset,
        labels_list, task_id):
    """Train on a single task, early-stopping on validation accuracy.

    After each completed epoch the model is additionally evaluated on the
    test split of every task (via `test_all_tasks`) and the per-epoch
    accuracies are collected.

    Returns:
        (best_state, all_test_accs): the state dict captured at the highest
        validation accuracy, and the list of per-epoch test accuracies.
    """
    # Loaders for this task's train/validation splits.
    train_loader = trainer.get_loader(mnist.getTrain(dataset), args, device,
                                      'train')
    val_loader = trainer.get_loader(mnist.getVal(dataset), args, device, 'val')

    top_acc = 0        # best validation accuracy observed so far
    history = []       # per-epoch test accuracies across all tasks
    stale = 0          # epochs since the last improvement (early stopping)

    for epoch in range(1, args.epochs + 1):
        # Restrict outputs to the label range of the current task.
        model.set_range(labels_list[task_id])
        trainer.train(args, model, device, train_loader, optimizer, epoch,
                      loss_fn)
        _, acc = trainer.test(args, model, device, val_loader, loss_fn,
                              val=True)
        if acc > top_acc:
            top_acc = acc
            best_state = model.state_dict()
            stale = 0
        else:
            stale += 1
            if stale >= args.early_stop_after:
                break

        # Evaluate every task on its test split after this epoch.
        history.append(test_all_tasks(coresets, args, model, device, loss_fn,
                                      labels_list, 'test'))

    return best_state, history
Exemplo n.º 4
0
    skew = m['mu11']/m['mu02']
    M = np.float32([[1, skew, -0.5*20*skew], [0, 1, 0]])
    img = cv2.warpAffine(img,M,(20, 20),flags=affine_flags)
    return img

# Parameter setup and construction of the HOGDescriptor ---(2)
winSize = (20,20)
blockSize = (10,10)
blockStride = (5,5)
cellSize = (5,5)
nbins = 9
hogDesc = cv2.HOGDescriptor(winSize,blockSize,blockStride,cellSize,nbins)

if __name__ =='__main__':
    # Load the training and test images from the MNIST data ---(3)
    train_data, train_label  = mnist.getTrain(reshape=False)
    test_data, test_label = mnist.getTest(reshape=False)
    # Deskew (straighten) each training digit image ---(4)
    deskewed = [list(map(deskew,row)) for row in train_data]
    # Compute HOG features for the training images ---(5)
    hogdata = [list(map(hogDesc.compute,row)) for row in deskewed]
    train_data = np.float32(hogdata)
    print('SVM training started...train data:', train_data.shape)
    # Rearrange the training HOG data to (samples, features) ---(6)
    train_data = train_data.reshape(-1,train_data.shape[2])
    # Create the SVM object and train it with auto-tuned parameters ---(7)
    svm = cv2.ml.SVM_create()
    startT = time.time()
    svm.trainAuto(train_data, cv2.ml.ROW_SAMPLE, train_label)
    endT = time.time() - startT
    print('SVM training complete. %.2f Min'%(endT/60))  
Exemplo n.º 5
0
import numpy as np, cv2
import mnist

# Load the training data and test data ---(1)
train, train_labels = mnist.getTrain()
test, test_labels = mnist.getTest()
# Create the kNN object and train it ---(2)
knn = cv2.ml.KNearest_create()
knn.train(train, cv2.ml.ROW_SAMPLE, train_labels)
# Predict while varying k from 1 to 10 ---(3)
for k in range(1, 11):
    # Predict the result ---(4)
    ret, result, neighbors, distance = knn.findNearest(test, k=k)
    # Compute and print the accuracy ---(5)
    correct = np.sum(result == test_labels)
    accuracy = correct / result.size * 100.0
    print("K:%d, Accuracy :%.2f%%(%d/%d)" %
          (k, accuracy, correct, result.size))